diff --git a/Containerfile.helpers-ci b/Containerfile.helpers-ci new file mode 100644 index 0000000000..4a244dca96 --- /dev/null +++ b/Containerfile.helpers-ci @@ -0,0 +1,11 @@ +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-builder-multi-openshift-4.18 AS builder +ENV GOCACHE="/go/.cache" \ + GOMODCACHE="/go/pkg/mod" +WORKDIR /go/src/github.com/openshift/machine-config-operator +COPY . . +RUN --mount=type=cache,target=/go/.cache,z \ + --mount=type=cache,target=/go/pkg/mod,z \ + make install-helpers DESTDIR=/helpers + +FROM registry.ci.openshift.org/ocp/builder:rhel-9-enterprise-base-multi-openshift-4.18 AS final +COPY --from=builder /helpers/usr/bin /usr/bin diff --git a/Makefile b/Makefile index b1db147e03..44a5945126 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,10 @@ +E2E_ROOT_DIR = ./test +E2E_SUITES = $(notdir $(wildcard $(E2E_ROOT_DIR)/e2e*)) + MCO_COMPONENTS = daemon controller server operator EXTRA_COMPONENTS = apiserver-watcher machine-os-builder ALL_COMPONENTS = $(patsubst %,machine-config-%,$(MCO_COMPONENTS)) $(EXTRA_COMPONENTS) +ALL_COMPONENTS_PATHS = $(patsubst %,cmd/%,$(ALL_COMPONENTS)) PREFIX ?= /usr GO111MODULE?=on PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) @@ -36,9 +40,17 @@ clean: # Build machine configs. Intended to be called via another target. # Example: -# make _build-machine-config-operator -_build-%: - WHAT=$* hack/build-go.sh +# make _build-component-machine-config-operator +_build-component-%: + WHAT_PATH=cmd/$* WHAT=$(basename $*) hack/build-go.sh + +# Build the helpers under devex/cmd. +_build-helper-%: + WHAT_PATH=devex/cmd/$* WHAT=$(basename $*) hack/build-go.sh + +# Verify that an e2e test is valid Golang by doing a trial compilation. +_verify-e2e-%: + go test -c -tags=$(GOTAGS) -o _output/$* ./test/$*/... # Use podman to build the image. 
image: @@ -98,28 +110,67 @@ endif install-tools: install-golangci-lint install-go-junit-report install-setup-envtest -# Run verification steps -# Example: -# make verify -verify: install-tools +# Runs golangci-lint +lint: install-tools ./hack/golangci-lint.sh $(GOTAGS) + +# Verifies templates. +verify-templates: hack/verify-templates.sh +# Verifies devex helpers +verify-helpers: + # Conditionally tries to build the helper binaries in CI. + hack/verify-helpers.sh + +# Runs all verification steps +# Example: +# make verify +verify: install-tools verify-e2e lint verify-templates verify-helpers + +HELPERS_DIR := devex/cmd +HELPER_BINARIES := $(notdir $(wildcard $(HELPERS_DIR)/*)) + +.PHONY: helpers +helpers: $(patsubst %,_build-helper-%,$(HELPER_BINARIES)) + # Template for defining build targets for binaries. define target_template = .PHONY: $(1) - $(1): _build-$(1) + $(1): _build-component-$(1) endef # Create a target for each component $(foreach C, $(EXTRA_COMPONENTS), $(eval $(call target_template,$(C)))) $(foreach C, $(MCO_COMPONENTS), $(eval $(call target_template,$(patsubst %,machine-config-%,$(C))))) -.PHONY: binaries install +# Template for defining build targets for helper binaries. +define helper_target_template = + .PHONY: $(1) + $(1): _build-helper-$(1) +endef +# Create a target for each component +$(foreach C, $(HELPER_BINARIES), $(eval $(call helper_target_template,$(C)))) + +define verify_e2e_target_template = + .PHONY: $(1) + $(1): _verify-e2e-$(1) +endef +# Create a target for each e2e suite +$(foreach C, $(E2E_SUITES), $(eval $(call verify_e2e_target_template,$(C)))) + + +.PHONY: binaries helpers install # Build all binaries: # Example: # make binaries -binaries: $(patsubst %,_build-%,$(ALL_COMPONENTS)) +binaries: $(patsubst %,_build-component-%,$(ALL_COMPONENTS)) + +# Installs the helper binaries from devex/cmd. 
+install-helpers: helpers + for helper in $(HELPER_BINARIES); do \ + install -D -m 0755 _output/linux/$(GOARCH)/$${helper} $(DESTDIR)$(PREFIX)/bin/$${helper}; \ + done install: binaries for component in $(ALL_COMPONENTS); do \ @@ -130,6 +181,9 @@ Dockerfile.rhel7: Dockerfile Makefile (echo '# THIS FILE IS GENERATED FROM '$<' DO NOT EDIT' && \ sed -e s,org/openshift/release,org/ocp/builder, -e s,/openshift/origin-v4.0:base,/ocp/4.0:base, < $<) > $@.tmp && mv $@.tmp $@ +# Validates that all of the e2e test suites are valid Golang by performing a test compilation. +verify-e2e: $(patsubst %,_verify-e2e-%,$(E2E_SUITES)) + # This was copied from https://github.com/openshift/cluster-image-registry-operator test-e2e: install-go-junit-report set -o pipefail; go test -tags=$(GOTAGS) -failfast -timeout 170m -v$${WHAT:+ -run="$$WHAT"} ./test/e2e/ ./test/e2e-techpreview-shared/ | ./hack/test-with-junit.sh $(@) @@ -140,6 +194,9 @@ test-e2e-techpreview: install-go-junit-report test-e2e-single-node: install-go-junit-report set -o pipefail; go test -tags=$(GOTAGS) -failfast -timeout 120m -v$${WHAT:+ -run="$$WHAT"} ./test/e2e-single-node/ | ./hack/test-with-junit.sh $(@) +test-e2e-ocl: install-go-junit-report + set -o pipefail; go test -tags=$(GOTAGS) -failfast -timeout 120m -v$${WHAT:+ -run="$$WHAT"} ./test/e2e-ocl/ | ./hack/test-with-junit.sh $(@) + bootstrap-e2e: install-go-junit-report install-setup-envtest @echo "Setting up KUBEBUILDER_ASSETS" @KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --index https://raw.githubusercontent.com/openshift/api/master/envtest-releases.yaml --bin-dir $(PROJECT_DIR)/bin -p path)" && \ diff --git a/cmd/common/helpers.go b/cmd/common/helpers.go index a7ad807128..9c4d723028 100644 --- a/cmd/common/helpers.go +++ b/cmd/common/helpers.go @@ -4,6 +4,7 @@ import ( "os" "os/signal" "syscall" + "time" "github.com/openshift/machine-config-operator/internal/clients" corev1 "k8s.io/api/core/v1" @@ -98,3 +99,23 @@ func 
SignalHandler(runCancel context.CancelFunc) { klog.Fatalf("Received shutdown signal twice, exiting: %s", sig) } + +func SignalHandlerWithDelay(runCancel context.CancelFunc, delay time.Duration) { + // make a signal handling channel for os signals + ch := make(chan os.Signal, 1) + // stop listening for signals when we leave this function + defer func() { signal.Stop(ch) }() + // catch SIGINT and SIGTERM + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + sig := <-ch + klog.Infof("Received shutdown signal: %s. Delaying shutdown...", sig) + // Wait for the delay + time.Sleep(delay) + // if we're shutting down, cancel the context so everything else will stop + klog.Infof("Shutting down after delay %s", delay) + runCancel() + klog.Infof("Context cancelled") + sig = <-ch + klog.Fatalf("Received shutdown signal twice, exiting: %s", sig) + +} diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 068ae0df43..abff4f3a84 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -236,7 +236,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), ctx.KubeInformerFactory.Core().V1().Nodes(), ctx.KubeInformerFactory.Core().V1().Pods(), - ctx.TechPreviewInformerFactory.Machineconfiguration().V1alpha1().MachineOSConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().MachineOSConfigs(), ctx.ConfigInformerFactory.Config().V1().Schedulers(), ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), diff --git a/cmd/machine-os-builder/start.go b/cmd/machine-os-builder/start.go index e3bc1eee09..d2ee4e1178 100644 --- a/cmd/machine-os-builder/start.go +++ b/cmd/machine-os-builder/start.go @@ -4,6 +4,7 @@ import ( "context" "flag" "os" + "time" "github.com/openshift/machine-config-operator/cmd/common" 
"github.com/openshift/machine-config-operator/internal/clients" @@ -53,7 +54,9 @@ func runStartCmd(_ *cobra.Command, _ []string) { } run := func(ctx context.Context) { - go common.SignalHandler(cancel) + // When shutting down, wait for 30 seconds before actually shutting down + // This will ensure that all the cleanup is done + go common.SignalHandlerWithDelay(cancel, 30*time.Second) ctrlCtx := ctrlcommon.CreateControllerContext(ctx, cb) diff --git a/devex/README.md b/devex/README.md new file mode 100644 index 0000000000..134eef882c --- /dev/null +++ b/devex/README.md @@ -0,0 +1,18 @@ +# devex + +## Background + +This directory contains Golang programs which are most likely to be of use to +fellow OpenShift developers, especially members of the +[machine-config-operator](https://github.com/openshift/machine-config-operator) +team. The helpers found here may be of use to you. They may not. They may +completely break entirely. + +It is worth mentioning that these helpers may get your cluster into a +difficult-to-recover-from state. So do not use these on a production OpenShift +cluster. + +## Installation + +From the repo root, run: `make install-helpers`. Note: You'll periodically have +to update the helpers based upon the current state of the MCO repository. 
diff --git a/devex/cmd/mcdiff/diff.go b/devex/cmd/mcdiff/diff.go new file mode 100644 index 0000000000..b3ffa2b0f0 --- /dev/null +++ b/devex/cmd/mcdiff/diff.go @@ -0,0 +1,170 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + + ign3types "github.com/coreos/ignition/v2/config/v3_4/types" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "sigs.k8s.io/yaml" +) + +var ( + diffCmd = &cobra.Command{ + Use: "diff", + Short: "Diffs MachineConfigs", + Long: "", + RunE: func(_ *cobra.Command, args []string) error { + return diffMCs(args) + }, + } + + convertToYAML bool + keepFiles bool +) + +func init() { + rootCmd.AddCommand(diffCmd) + diffCmd.PersistentFlags().BoolVar(&convertToYAML, "convert-to-yaml", false, "Converts any JSON payloads that are found into YAML before diffing") + diffCmd.PersistentFlags().BoolVar(&keepFiles, "keep-files", false, "Keeps the files used for diffing") +} + +func diffMCs(args []string) error { + if len(args) == 0 { + return fmt.Errorf("no MachineConfigs given") + } + + if len(args) == 1 { + return fmt.Errorf("only one MachineConfig given") + } + + cs := framework.NewClientSet("") + + eg := errgroup.Group{} + + mc1 := args[0] + mc2 := args[1] + + dirname := "" + if keepFiles { + cwd, err := os.Getwd() + if err != nil { + return err + } + + dirname = cwd + } else { + + tempdir, err := os.MkdirTemp("", "") + if err != nil { + return err + } + + defer os.RemoveAll(tempdir) + + dirname = tempdir + } + + eg.Go(func() error { + return getMCAndWriteToFile(cs, dirname, mc1) + }) + + eg.Go(func() error { + return getMCAndWriteToFile(cs, dirname, mc2) + }) + + if err := eg.Wait(); err != nil { + return err + } + + klog.Infof("Running dyff 
command") + out := exec.Command("dyff", "between", getMCFilename(dirname, mc1), getMCFilename(dirname, mc2)) + out.Stdout = os.Stdout + out.Stderr = os.Stderr + + return out.Run() +} + +func getMCAndWriteToFile(cs *framework.ClientSet, dirname, name string) error { + mc, err := cs.MachineConfigs().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + outBytes, err := yaml.Marshal(mc) + if err != nil { + return err + } + + genericized := map[string]interface{}{} + + if err := yaml.Unmarshal(outBytes, &genericized); err != nil { + return err + } + + parsedIgnConfig, err := getParsedIgnConfig(mc) + if err != nil { + return err + } + + genericized["spec"].(map[string]interface{})["config"] = parsedIgnConfig + + filename := getMCFilename(dirname, name) + + outBytes, err = yaml.Marshal(genericized) + if err != nil { + return err + } + + if err := os.WriteFile(filename, outBytes, 0o755); err != nil { + return err + } + + klog.Infof("Wrote %s", filename) + + return nil +} + +func getMCFilename(dirname, mcName string) string { + return filepath.Join(dirname, fmt.Sprintf("%s.yaml", mcName)) +} + +func getParsedIgnConfig(mc *mcfgv1.MachineConfig) (*ign3types.Config, error) { + // Convert the raw Ignition bytes into an Ignition struct. 
+ ignConfig, err := ctrlcommon.ParseAndConvertConfig(mc.Spec.Config.Raw) + if err != nil { + return nil, err + } + + for i, file := range ignConfig.Storage.Files { + // Decode each files contents + decoded, err := ctrlcommon.DecodeIgnitionFileContents(file.Contents.Source, file.Contents.Compression) + if err != nil { + return nil, err + } + + if file.Contents.Source != nil { + if convertToYAML { + decoded, err = yaml.JSONToYAML(decoded) + if err != nil { + return nil, err + } + } + + out := string(decoded) + ignConfig.Storage.Files[i].Contents.Source = &out + } + } + + return &ignConfig, nil +} diff --git a/devex/cmd/mcdiff/main.go b/devex/cmd/mcdiff/main.go new file mode 100644 index 0000000000..03119b25bc --- /dev/null +++ b/devex/cmd/mcdiff/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "flag" + "os" + + "github.com/spf13/cobra" + "k8s.io/component-base/cli" +) + +var ( + rootCmd = &cobra.Command{ + Use: "mcdiff", + Short: "Diffs MachineConfigs", + Long: "", + } +) + +func init() { + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) +} + +func main() { + os.Exit(cli.Run(rootCmd)) +} diff --git a/devex/cmd/mco-builder/README.md b/devex/cmd/mco-builder/README.md new file mode 100644 index 0000000000..6f59a08659 --- /dev/null +++ b/devex/cmd/mco-builder/README.md @@ -0,0 +1,123 @@ +# mco-builder + +The `mco-builder` binary exists to make MCO developers lives easier. It's +purpose is to build an MCO image using ones local changes and automatically +push that image into the developers' sandbox cluster for quick and easy +iterative development. + +## Quick Start + +The quickest and easiest way to get started once you've downloaded this tool is to use the local build mode coupled with direct cluster pushes: + +```console +$ mco-builder local --direct --repo-root /path/to/your/mco/git/repo +``` + +This mode will perform a local build on your machine and push the resulting +image directly into your clusters' internal image registry. 
Then it will roll +the image out. + +## Repo Build Modes + +There are three build modes available within the `mco-builder` binary. They are: + +1. `normal` - Uses the Dockerfile contained within the MCO repository root to build. Using this mode will allow you to reflect the exact state that is in your current MCO repository. However, this mode is the slowest since it performs a complete build within the build context. +2. `fast` - This mode uses a hacked Dockerfile and Makefile in order to build the binaries outside of the container build context and then copy them into the container. Additionally, this mode uses a different final base image which may not always be up-to-date. However, this mode allows one to have the fastest possible local builds by leveraging Golang's incremental build capability as well as container image caching. +3. `cluster` - This mode uses also uses a hacked Dockerfile. However, this is optimized for the case where one wishes to leverage their OpenShift cluster to perform their builds instead of their local machine (see: Local vs. Cluster). + +## Supported Local Image Builders + +Right now, this tool supports building images locally with Podman and Docker. +However, it has a pluggable architecture that will allow other local image +builders (e.g., Buildah, Kaniko, et. al.) to be used in place of Podman or +Docker. + +This has mostly only been tested using an Intel Mac host running Docker targeting an AMD64 OpenShift cluster. Other configurations may not work yet. + +It is worth mentioning that using direct mode requires Skopeo since it offers +more control over how the image is pushed. + +The default builder depends upon what platform this binary is being run on. If on a Linux machine, it will default to `podman`. On Mac, it will default to `docker`. Either of these options may be easily overridden by using the `--builder` flag. + +## Local vs. 
Cluster + +There are two main modes of operation that one can select from: + +### Local Mode + +```console +$ mco-builder local --help + +Builds the MCO image locally using the specified builder and options. Can either push to a remote image registry (such as Quay.io) or can expose a route to enable pushing directly into ones sandbox cluster. + +Usage: + mco-builder local [flags] + +Flags: + --build-mode string What build mode to use: [fast normal] (default "fast") + --builder string What image builder to use: [docker podman] (default "docker") + --direct Exposes a route and pushes the image directly into ones cluster + --final-image-pullspec string Where to push the final image (not needed in direct mode) + -h, --help help for local + --push-secret string Path to the push secret path needed to push to the provided pullspec (not needed in direct mode) + --repo-root string Path to the local MCO Git repo + +Global Flags: + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + -v, --v Level number for the log level verbosity + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format) +``` + +This mode uses your local image builder (either Podman or Docker) to build the +image locally on your developer machine. When used with the `--direct` option, +one can push their built image directly into their cluster without the need of +an external container registry such as Quay.io. Using direct mode will +implicitly create an ImageStream within the MCO namespace called +`machine-config-operator` to push the image to. + +If the `--direct` option is not used, one must also provide the path to a push +secret as well as the final image pullspec indicating where the image should be +pushed to. 
+ +### Cluster Mode + +```console +$ mco-builder cluster --help +Performs the build operation within the sandbox cluster using an OpenShift Image Build + +Usage: + mco-builder cluster [flags] + +Flags: + --follow Stream build logs (default true) + -h, --help help for cluster + --repo-root string Path to the local MCO Git repo + +Global Flags: + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + -v, --v Level number for the log level verbosity + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format) +``` + +This mode requires that you push your changes to a personal Git fork of the MCO +repository. This will leverage the OpenShift Image Builder capability to clone +your repo, build the image, and apply it all from within your sandbox cluster. +This mode is ideally suited for situations where one either does not have fast +local compute or fast local network. + +This mode creates an ImageStream within the MCO namespace called `machine-config-operator` to push the image to. + +## Reverting + +Should you wish to revert your sandbox cluster back to its original state, one can use the `revert` subcommand thusly: + +```console +$ mco-builder revert +``` + +This command performs the following actions: +1. Rolls the MCO back to its original image for that OpenShift release. +2. Unexposes the internal image stream route. +3. Deletes any ImageStreams that were created. 
diff --git a/devex/cmd/mco-builder/helpers.go b/devex/cmd/mco-builder/helpers.go new file mode 100644 index 0000000000..5cb994c50d --- /dev/null +++ b/devex/cmd/mco-builder/helpers.go @@ -0,0 +1,99 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + corev1 "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/klog/v2" +) + +func writeBuilderSecretToTempDir(cs *framework.ClientSet, hostname string) (string, error) { + secrets, err := cs.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + + tmpDir, err := os.MkdirTemp("", "") + if err != nil { + return "", err + } + + var foundDockerCfg *corev1.Secret + names := []string{} + for _, secret := range secrets.Items { + secret := secret + names = append(names, secret.Name) + if strings.HasPrefix(secret.Name, "builder-dockercfg") { + foundDockerCfg = &secret + break + } + } + + if foundDockerCfg == nil { + return "", fmt.Errorf("did not find a matching secret, foundDockerCfg: %v", names) + } + + converted, _, err := canonicalizePullSecretBytes(foundDockerCfg.Data[corev1.DockerConfigKey], hostname) + if err != nil { + return "", err + } + + secretPath := filepath.Join(tmpDir, "config.json") + if err := os.WriteFile(secretPath, converted, 0o755); err != nil { + return "", err + } + + klog.Infof("Secret %q has been written to %s", foundDockerCfg.Name, secretPath) + + return secretPath, nil +} + +// Converts a legacy Docker pull secret into a more modern representation. +// Essentially, it converts {"registry.hostname.com": {"username": "user"...}} +// into {"auths": {"registry.hostname.com": {"username": "user"...}}}. If it +// encounters a pull secret already in this configuration, it will return the +// input secret as-is. 
Returns either the supplied data or the newly-configured +// representation of said data, a boolean to indicate whether it was converted, +// and any errors resulting from the conversion process. Additionally, this +// function will add an additional entry for the external cluster image +// registry hostname. +func canonicalizePullSecretBytes(secretBytes []byte, extHostname string) ([]byte, bool, error) { + type newStyleAuth struct { + Auths map[string]interface{} `json:"auths,omitempty"` + } + + // Try marshaling the new-style secret first: + newStyleDecoded := &newStyleAuth{} + if err := json.Unmarshal(secretBytes, newStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode new-style pull secret: %w", err) + } + + // We have an new-style secret, so we can just return here. + if len(newStyleDecoded.Auths) != 0 { + return secretBytes, false, nil + } + + // We need to convert the legacy-style secret to the new-style. + oldStyleDecoded := map[string]interface{}{} + if err := json.Unmarshal(secretBytes, &oldStyleDecoded); err != nil { + return nil, false, fmt.Errorf("could not decode legacy-style pull secret: %w", err) + } + + oldStyleDecoded[extHostname] = oldStyleDecoded[internalRegistryHostname] + + out, err := json.Marshal(&newStyleAuth{ + Auths: oldStyleDecoded, + }) + + return out, err == nil, err +} diff --git a/devex/cmd/mco-builder/imagestream.go b/devex/cmd/mco-builder/imagestream.go new file mode 100644 index 0000000000..10667e28e7 --- /dev/null +++ b/devex/cmd/mco-builder/imagestream.go @@ -0,0 +1,34 @@ +package main + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +func createImagestream(cs *framework.ClientSet, name string) error { + is := 
&imagev1.ImageStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ctrlcommon.MCONamespace, + }, + } + + _, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), is, metav1.CreateOptions{}) + if err == nil { + klog.Infof("Imagestream %q created", name) + return nil + } + + if apierrs.IsAlreadyExists(err) { + klog.Infof("Imagestream %q already exists, will re-use", name) + return nil + } + + return err +} diff --git a/devex/cmd/mco-builder/internal/builders/common.go b/devex/cmd/mco-builder/internal/builders/common.go new file mode 100644 index 0000000000..0f5d4ba857 --- /dev/null +++ b/devex/cmd/mco-builder/internal/builders/common.go @@ -0,0 +1,102 @@ +package builders + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" +) + +type BuilderType string + +const ( + BuilderTypePodman BuilderType = "podman" + BuilderTypeDocker BuilderType = "docker" + BuilderTypeOpenshift BuilderType = "openshift" + BuilderTypeUnknown BuilderType = "unknown-builder-type" +) + +const ( + localPullspec string = "localhost/machine-config-operator:latest" +) + +type Builder interface { + Build() error + Push() error +} + +type Opts struct { + RepoRoot string + PullSecretPath string + PushSecretPath string + FinalPullspec string + DockerfileName string + BuildMode string +} + +func (o *Opts) isDirectClusterPush() bool { + return strings.Contains(o.FinalPullspec, "image-registry-openshift-image-registry") +} + +func NewLocalBuilder(opts Opts) Builder { + return newPodmanBuilder(opts) +} + +func GetBuilderTypes() sets.Set[BuilderType] { + return GetLocalBuilderTypes().Insert(BuilderTypeOpenshift) +} + +func GetLocalBuilderTypes() sets.Set[BuilderType] { + return sets.New[BuilderType](BuilderTypePodman, BuilderTypeDocker) +} + +func GetDefaultBuilderTypeForPlatform() BuilderType { + if _, err := 
exec.LookPath("podman"); err == nil { + return BuilderTypePodman + } + + if _, err := exec.LookPath("docker"); err == nil { + return BuilderTypeDocker + } + + return BuilderTypeUnknown +} + +func pushWithSkopeo(opts Opts, builder BuilderType) error { + imgStorageMap := map[BuilderType]string{ + BuilderTypePodman: "containers-storage", + BuilderTypeDocker: "docker-daemon", + } + + imgStorage, ok := imgStorageMap[builder] + if !ok { + return fmt.Errorf("unknown builder type %s", imgStorage) + } + + skopeoOpts := []string{ + "--dest-authfile", + opts.PushSecretPath, + fmt.Sprintf("%s:%s", imgStorage, localPullspec), + fmt.Sprintf("docker://%s", opts.FinalPullspec), + } + + if opts.isDirectClusterPush() { + skopeoOpts = append([]string{"copy", "--dest-tls-verify=false"}, skopeoOpts...) + } else { + skopeoOpts = append([]string{"copy"}, skopeoOpts...) + } + + cmd := exec.Command("skopeo", skopeoOpts...) + klog.Infof("Running $ %s", cmd) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return errors.NewExecErrorNoOutput(cmd, err) + } + + return nil +} diff --git a/devex/cmd/mco-builder/internal/builders/podman.go b/devex/cmd/mco-builder/internal/builders/podman.go new file mode 100644 index 0000000000..a18d051136 --- /dev/null +++ b/devex/cmd/mco-builder/internal/builders/podman.go @@ -0,0 +1,81 @@ +package builders + +import ( + "fmt" + "os" + "os/exec" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/errors" + "k8s.io/klog/v2" +) + +type podmanBuilder struct { + opts Opts +} + +func newPodmanBuilder(opts Opts) Builder { + return &podmanBuilder{opts: opts} +} + +func (p *podmanBuilder) Build() error { + + if err := p.buildContainer(); err != nil { + return fmt.Errorf("unable to build container: %w", err) + } + + return nil +} + +func (p *podmanBuilder) Push() error { + if err := p.tagContainerForPush(); err != nil { + return fmt.Errorf("could not tag container: %w", err) + } + + if err := p.pushContainer(); err 
!= nil { + klog.Info("Push failed, falling back to Skopeo") + return pushWithSkopeo(p.opts, BuilderTypePodman) + } + + return nil +} + +func (p *podmanBuilder) tagContainerForPush() error { + cmd := exec.Command("podman", "tag", localPullspec, p.opts.FinalPullspec) + if out, err := cmd.CombinedOutput(); err != nil { + return errors.NewExecError(cmd, out, err) + } + + return nil +} + +func (p *podmanBuilder) buildContainer() error { + podmanOpts := []string{"build", "-t", localPullspec, "--network", "slirp4netns", "--jobs", "3", "--file", p.opts.DockerfileName, p.opts.RepoRoot} + if p.opts.PullSecretPath != "" { + podmanOpts = append([]string{"--authfile", p.opts.PullSecretPath}, podmanOpts...) + } + + cmd := exec.Command("podman", podmanOpts...) + cmd.Dir = p.opts.RepoRoot + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + klog.Infof("Running %s", cmd) + return cmd.Run() +} + +func (p *podmanBuilder) pushContainer() error { + podmanPushOpts := []string{"--authfile", p.opts.PushSecretPath, p.opts.FinalPullspec} + if p.opts.isDirectClusterPush() { + podmanPushOpts = append([]string{"--tls-verify=false"}, podmanPushOpts...) + } + + podmanPushOpts = append([]string{"push"}, podmanPushOpts...) + + cmd := exec.Command("podman", podmanPushOpts...) 
+ cmd.Dir = p.opts.RepoRoot + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + klog.Infof("Running %s", cmd) + return cmd.Run() +} diff --git a/devex/cmd/mco-builder/local.go b/devex/cmd/mco-builder/local.go new file mode 100644 index 0000000000..905b2481b6 --- /dev/null +++ b/devex/cmd/mco-builder/local.go @@ -0,0 +1,269 @@ +package main + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/openshift/machine-config-operator/devex/cmd/mco-builder/internal/builders" + "github.com/openshift/machine-config-operator/devex/internal/pkg/containers" + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + aggerrs "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" +) + +type localBuildOpts struct { + builderKind string // builders.BuilderType + finalImagePushSecretPath string + finalImagePullspec string + directPush bool + repoRoot string + skipRollout bool +} + +func (l *localBuildOpts) getBuilderType() builders.BuilderType { + return builders.BuilderType(l.builderKind) +} + +func (l *localBuildOpts) validate() error { + if l.repoRoot == "" { + return fmt.Errorf("--repo-root must be provided") + } + + if err := utils.CheckForBinaries([]string{"oc"}); err != nil { + return err + } + + if _, err := os.Stat(l.repoRoot); err != nil { + return err + } + + localBuilderTypes := builders.GetLocalBuilderTypes() + if !localBuilderTypes.Has(l.getBuilderType()) { + return fmt.Errorf("invalid builder type %s, valid builder types: %v", l.getBuilderType(), sets.List(localBuilderTypes)) + } + + if _, err := exec.LookPath(l.builderKind); err != nil { + return err + } + + if 
l.directPush { + if l.finalImagePushSecretPath != "" { + return fmt.Errorf("--push-secret may not be used in direct mode") + } + + if l.finalImagePullspec != "" { + return fmt.Errorf("--final-image-pullspec may not be used in direct mode") + } + + return utils.CheckForBinaries([]string{"skopeo"}) + } + + if l.finalImagePushSecretPath == "" { + return fmt.Errorf("--push-secret must be provided when not using direct mode") + } + + if _, err := os.Stat(l.finalImagePushSecretPath); err != nil { + return err + } + + if l.finalImagePullspec == "" { + return fmt.Errorf("--final-image-pullspec must be provided when not using direct mode") + } + + parsedPullspec, err := containers.AddLatestTagIfMissing(l.finalImagePullspec) + if err != nil { + return fmt.Errorf("could not parse final image pullspec %q: %w", l.finalImagePullspec, err) + } + + l.finalImagePullspec = parsedPullspec + + return nil +} + +func init() { + + opts := localBuildOpts{} + + localCmd := &cobra.Command{ + Use: "local", + Short: "Builds an MCO image locally and deploys it to your sandbox cluster.", + Long: "Builds the MCO image locally using the specified builder and options. 
Can either push to a remote image registry (such as Quay.io) or can expose a route to enable pushing directly into ones sandbox cluster.", + RunE: func(_ *cobra.Command, _ []string) error { + if err := opts.validate(); err != nil { + return err + } + return runLocalCmd(opts) + }, + } + + localCmd.PersistentFlags().BoolVar(&opts.directPush, "direct", false, "Exposes a route and pushes the image directly into ones cluster") + localCmd.PersistentFlags().StringVar(&opts.repoRoot, "repo-root", "", "Path to the local MCO Git repo") + localCmd.PersistentFlags().StringVar(&opts.finalImagePushSecretPath, "push-secret", "", "Path to the push secret path needed to push to the provided pullspec (not needed in direct mode)") + localCmd.PersistentFlags().StringVar(&opts.finalImagePullspec, "final-image-pullspec", "", "Where to push the final image (not needed in direct mode)") + localCmd.PersistentFlags().StringVar(&opts.builderKind, "builder", string(builders.GetDefaultBuilderTypeForPlatform()), fmt.Sprintf("What image builder to use: %v", sets.List(builders.GetLocalBuilderTypes()))) + localCmd.PersistentFlags().BoolVar(&opts.skipRollout, "skip-rollout", false, "Builds and pushes the image, but does not update the MCO deployment / daemonset objects") + + rootCmd.AddCommand(localCmd) +} + +func runLocalCmd(opts localBuildOpts) error { + + cs := framework.NewClientSet("") + + if err := validateLocalAndClusterArches(cs); err != nil { + if !errors.Is(err, errInvalidArch) { + return err + } + + // TODO: Return the error here instead. Need to validate GOARCH against Linux ARM64 and Darwin ARM64. + klog.Warning(err) + } + + if opts.directPush { + return buildLocallyAndPushIntoCluster(cs, opts) + } + + return buildLocallyAndDeploy(cs, opts) +} + +func buildLocallyAndDeploy(cs *framework.ClientSet, buildOpts localBuildOpts) error { + // TODO: Return these out of this function. 
+ deferredErrs := []error{} + defer func() { + if err := aggerrs.NewAggregate(deferredErrs); err != nil { + klog.Fatalf("teardown encountered error(s): %s", err) + } + }() + + opts := builders.Opts{ + RepoRoot: buildOpts.repoRoot, + FinalPullspec: buildOpts.finalImagePullspec, + PushSecretPath: buildOpts.finalImagePushSecretPath, + DockerfileName: filepath.Join(buildOpts.repoRoot, "Dockerfile"), + } + + builder := builders.NewLocalBuilder(opts) + + if err := builder.Build(); err != nil { + return err + } + + if err := builder.Push(); err != nil { + return err + } + + digestedPullspec, err := containers.ResolveToDigestedPullspec(buildOpts.finalImagePullspec, buildOpts.finalImagePushSecretPath) + if err != nil { + return fmt.Errorf("could not resolve %s to digested image pullspec: %w", buildOpts.finalImagePullspec, err) + } + + klog.Infof("Pushed image has digested pullspec %s", digestedPullspec) + + if buildOpts.skipRollout { + klog.Infof("Skipping rollout since --skip-rollout was used") + return nil + } + + if err := rollout.ReplaceMCOImage(cs, buildOpts.finalImagePullspec, false); err != nil { + return err + } + + klog.Infof("New MCO rollout complete!") + return nil +} + +func buildLocallyAndPushIntoCluster(cs *framework.ClientSet, buildOpts localBuildOpts) error { + // TODO: Return these out of this function. 
+ deferredErrs := []error{} + defer func() { + if err := aggerrs.NewAggregate(deferredErrs); err != nil { + klog.Fatalf("encountered error(s) during teardown: %s", err) + } + }() + + extHostname, err := rollout.ExposeClusterImageRegistry(cs) + if err != nil { + return err + } + + klog.Infof("Cluster is set up for direct pushes") + + secretPath, err := writeBuilderSecretToTempDir(cs, extHostname) + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(filepath.Dir(secretPath)); err != nil { + deferredErrs = append(deferredErrs, err) + } + }() + + extPullspec := fmt.Sprintf("%s/%s/machine-config-operator:latest", extHostname, ctrlcommon.MCONamespace) + + opts := builders.Opts{ + RepoRoot: buildOpts.repoRoot, + FinalPullspec: extPullspec, + PushSecretPath: secretPath, + DockerfileName: filepath.Join(buildOpts.repoRoot, "Dockerfile"), + } + + builder := builders.NewLocalBuilder(opts) + + if err := builder.Build(); err != nil { + return err + } + + if err := builder.Push(); err != nil { + return err + } + + digestedPullspec, err := containers.ResolveToDigestedPullspec(extPullspec, secretPath) + if err != nil { + return fmt.Errorf("could not resolve %s to digested image pullspec: %w", extPullspec, err) + } + + klog.Infof("Pushed image has digested pullspec %s", digestedPullspec) + + if buildOpts.skipRollout { + klog.Infof("Skipping rollout since --skip-rollout was used") + return nil + } + + if err := rollout.ReplaceMCOImage(cs, imagestreamPullspec, false); err != nil { + return err + } + + klog.Infof("New MCO rollout complete!") + return nil +} + +var errInvalidArch = fmt.Errorf("local and cluster arch differ") + +func validateLocalAndClusterArches(cs *framework.ClientSet) error { + nodes, err := cs.CoreV1Interface.Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + // TODO: Handle multiarch cases. 
+ clusterArch := nodes.Items[0].Status.NodeInfo.Architecture + + if clusterArch != runtime.GOARCH { + return fmt.Errorf("local (%s) / cluster (%s): %w", runtime.GOARCH, clusterArch, errInvalidArch) + } + + klog.Infof("Local (%s) arch matches cluster (%s)", runtime.GOARCH, clusterArch) + + return nil +} diff --git a/devex/cmd/mco-builder/main.go b/devex/cmd/mco-builder/main.go new file mode 100644 index 0000000000..b7afb8ae06 --- /dev/null +++ b/devex/cmd/mco-builder/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "flag" + "os" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/spf13/cobra" + "k8s.io/component-base/cli" +) + +const ( + internalRegistryHostname string = "image-registry.openshift-image-registry.svc:5000" + imagestreamName string = "machine-config-operator" + imagestreamPullspec string = internalRegistryHostname + "/" + ctrlcommon.MCONamespace + "/" + imagestreamName + ":latest" +) + +var ( + rootCmd = &cobra.Command{ + Use: "mco-builder", + Short: "Automates the build and replacement of the machine-config-operator (MCO) image in an OpenShift cluster for testing purposes.", + Long: "", + } +) + +func init() { + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) +} + +func main() { + os.Exit(cli.Run(rootCmd)) +} diff --git a/devex/cmd/mco-builder/revert.go b/devex/cmd/mco-builder/revert.go new file mode 100644 index 0000000000..0e43bcbc11 --- /dev/null +++ b/devex/cmd/mco-builder/revert.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "fmt" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + revertCmd := &cobra.Command{ + Use: "revert", + Short: "Reverts the changes to the 
sandbox cluster and rolls back to the stock MCO image.", + Long: "", + RunE: doRevert, + } + + rootCmd.AddCommand(revertCmd) +} + +func doRevert(_ *cobra.Command, _ []string) error { + cs := framework.NewClientSet("") + + if err := cs.ImageStreams(ctrlcommon.MCONamespace).Delete(context.TODO(), imagestreamName, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { + return fmt.Errorf("could not remove imagestream %s: %w", imagestreamName, err) + } + + if err := rollout.RevertToOriginalMCOImage(cs, false); err != nil { + return fmt.Errorf("could not revert to original MCO image: %w", err) + } + + if err := rollout.UnexposeClusterImageRegistry(cs); err != nil { + return fmt.Errorf("could not unexpose cluster image registry: %w", err) + } + + return nil +} diff --git a/devex/cmd/mco-push/README.md b/devex/cmd/mco-push/README.md new file mode 100644 index 0000000000..cfb81941a9 --- /dev/null +++ b/devex/cmd/mco-push/README.md @@ -0,0 +1,108 @@ +# mco-push + +This utility can be used to replace the MCO image in a running cluster with the provided image pullspec. + +```console +$ mco-push --help +Automates the replacement of the machine-config-operator (MCO) image in an OpenShift cluster for testing purposes. + +Usage: + mco-push [command] + +Available Commands: + completion Generate the autocompletion script for the specified shell + help Help about any command + replace Replaces the MCO image with the provided container image pullspec + restart Restarts all of the MCO pods + revert Reverts the MCO image to the one in the OpenShift release + version Print the current version + +Flags: + -h, --help help for mco-push + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. 
+ --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + -v, --v Level number for the log level verbosity + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format) + +Use "mco-push [command] --help" for more information about a command. +``` + +## Usage + +### Replace + +To replace the MCO container image, one can do the following: + +```console +$ mco replace quay.io/org/repo:latest +I0425 15:24:25.401465 1153643 rollout.go:360] Setting replicas for openshift-cluster-version/cluster-version-operator to 0 +I0425 15:24:25.490284 1153643 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 0 +W0425 15:24:25.539731 1153643 rollout.go:249] ConfigMap machine-config-operator-images has pullspec quay.io/org/repo:original-image, which will change to quay.io/org/repo:latest. A MachineConfig update will occur as a result. +I0425 15:24:25.551096 1153643 rollout.go:300] Updating deployment/machine-config-operator +I0425 15:24:25.551120 1153643 rollout.go:300] Updating deployment/machine-config-controller +I0425 15:24:25.552020 1153643 rollout.go:321] Updating daemonset/machine-config-server +I0425 15:24:25.552411 1153643 rollout.go:321] Updating daemonset/machine-config-daemon +I0425 15:24:25.577878 1153643 rollout.go:281] Set machineConfigOperator in images.json in ConfigMap machine-config-operator-images to quay.io/org/repo:latest +I0425 15:24:26.216581 1153643 rollout.go:300] Updating deployment/machine-config-operator +I0425 15:24:26.424259 1153643 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 1 +I0425 15:24:26.821919 1153643 replace.go:61] Successfully replaced the stock MCO image with quay.io/org/repo:latest. +``` + +This will perform the following operations: + +1. Scale down the `cluster-version-operator`. +2. 
Temporarily scale down the `machine-config-operator`. +3. Update the MCO deployments and daemonsets. +4. Replace pullspec under the `machineConfigOperator` key in the `machine-config-operator-images` ConfigMap. +5. Scale the `machine-config-operator` back up. + +One should use a tagged image pullspec instead of a digested pullspec for this +because it will incur a MachineConfig update which will roll out to all of the +nodes on the cluster. When a tagged pullspec is used, this MachineConfig update +will only occur the first time. Future replacements with the same tagged +pullspec will not incur a MachineConfig update. + +### Revert + +To replace the overridden MCO image with the stock image, one can do the following: + +```console +$ mco revert +I0425 15:32:50.985415 1183777 rollout.go:58] Found original MCO image quay.io/org/repo:original-tag for the currently running cluster release (quay.io/org/repo@sha256:78864229b34c78744f15eb6af0824c5f7a88ae70a90bfd1bd77aff7e8f3c3965) +I0425 15:32:50.985455 1183777 rollout.go:360] Setting replicas for openshift-cluster-version/cluster-version-operator to 0 +I0425 15:32:51.014501 1183777 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 0 +I0425 15:32:51.070697 1183777 rollout.go:300] Updating deployment/machine-os-builder +W0425 15:32:51.071006 1183777 rollout.go:249] ConfigMap machine-config-operator-images has pullspec quay.io/org/repo:latest, which will change to quay.io/org/repo:original-tag. A MachineConfig update will occur as a result. 
+I0425 15:32:51.083054 1183777 rollout.go:300] Updating deployment/machine-config-operator +I0425 15:32:51.083075 1183777 rollout.go:300] Updating deployment/machine-config-controller +I0425 15:32:51.083386 1183777 rollout.go:321] Updating daemonset/machine-config-server +I0425 15:32:51.083426 1183777 rollout.go:321] Updating daemonset/machine-config-daemon +I0425 15:32:51.117268 1183777 rollout.go:281] Set machineConfigOperator in images.json in ConfigMap machine-config-operator-images to quay.io/org/repo:original-tag +I0425 15:32:52.004867 1183777 rollout.go:300] Updating deployment/machine-config-operator +I0425 15:32:52.214458 1183777 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 1 +I0425 15:32:52.614863 1183777 rollout.go:360] Setting replicas for openshift-cluster-version/cluster-version-operator to 1 +I0425 15:32:53.010042 1183777 revert.go:36] Successfully rolled back to the original MCO image +``` + +This will look up what the stock image should be, perform the above steps, and restore the `cluster-version-operator`'s replicas. + +### Restart + +```console +$ mco restart +mco-push restart +I0425 15:35:07.693697 1191976 rollout.go:360] Setting replicas for openshift-cluster-version/cluster-version-operator to 0 +I0425 15:35:07.734319 1191976 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 0 +I0425 15:35:07.809768 1191976 rollout.go:304] Restarting deployment/machine-config-operator +I0425 15:35:07.820955 1191976 rollout.go:254] ConfigMap machine-config-operator-images already has pullspec quay.io/org/repo:latest. Will restart MCO components to cause an update. 
+I0425 15:35:07.821322 1191976 rollout.go:304] Restarting deployment/machine-os-builder +I0425 15:35:07.821446 1191976 rollout.go:325] Restarting daemonset/machine-config-server +I0425 15:35:07.821717 1191976 rollout.go:325] Restarting daemonset/machine-config-daemon +I0425 15:35:07.821741 1191976 rollout.go:304] Restarting deployment/machine-config-controller +I0425 15:35:08.711907 1191976 rollout.go:304] Restarting deployment/machine-config-operator +I0425 15:35:08.922522 1191976 rollout.go:360] Setting replicas for openshift-machine-config-operator/machine-config-operator to 1 +``` + +This will restart the MCO's deployments and daemonsets. There is an optional +`--force` flag that one can use that will delete the pods corresponding to +those deployments and daemonsets to force a reboot faster. diff --git a/devex/cmd/mco-push/main.go b/devex/cmd/mco-push/main.go new file mode 100644 index 0000000000..7acbfb057e --- /dev/null +++ b/devex/cmd/mco-push/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "flag" + "os" + + "github.com/spf13/cobra" + "k8s.io/component-base/cli" +) + +var ( + rootCmd = &cobra.Command{ + Use: "mco-push", + Short: "Automates the replacement of the machine-config-operator (MCO) image in an OpenShift cluster for testing purposes.", + Long: "", + } +) + +func init() { + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) +} + +func main() { + os.Exit(cli.Run(rootCmd)) +} diff --git a/devex/cmd/mco-push/replace.go b/devex/cmd/mco-push/replace.go new file mode 100644 index 0000000000..8c06ab024a --- /dev/null +++ b/devex/cmd/mco-push/replace.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/containers" + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/klog/v2" +) + +type replaceOpts struct { + validatePullspec bool + forceRestart bool + 
pullspec string +} + +func init() { + replaceOpts := replaceOpts{} + + replaceCmd := &cobra.Command{ + Use: "replace", + Short: "Replaces the MCO image with the provided container image pullspec", + Long: "", + RunE: func(_ *cobra.Command, args []string) error { + if len(args) == 0 { + return fmt.Errorf("no pullspec provided") + } + + if len(args) > 1 { + return fmt.Errorf("only one pullspec may be provided") + } + + replaceOpts.pullspec = args[0] + + return replace(replaceOpts) + }, + } + + replaceCmd.PersistentFlags().BoolVar(&replaceOpts.validatePullspec, "validate-pullspec", false, "Ensures that the supplied pullspec exists.") + replaceCmd.PersistentFlags().BoolVar(&replaceOpts.forceRestart, "force", false, "Deletes the pods to forcefully restart the MCO.") + + rootCmd.AddCommand(replaceCmd) +} + +func replace(opts replaceOpts) error { + if opts.validatePullspec { + digestedPullspec, err := containers.ResolveToDigestedPullspec(opts.pullspec, "") + if err != nil { + return fmt.Errorf("could not validate pullspec %s: %w", opts.pullspec, err) + } + + klog.Infof("Resolved to %s to validate that the pullspec exists", digestedPullspec) + } + + cs := framework.NewClientSet("") + if err := rollout.ReplaceMCOImage(cs, opts.pullspec, opts.forceRestart); err != nil { + return err + } + + klog.Infof("Successfully replaced the stock MCO image with %s.", opts.pullspec) + return nil +} diff --git a/devex/cmd/mco-push/restart.go b/devex/cmd/mco-push/restart.go new file mode 100644 index 0000000000..3f06c2f4ca --- /dev/null +++ b/devex/cmd/mco-push/restart.go @@ -0,0 +1,39 @@ +package main + +import ( + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/klog/v2" +) + +func init() { + var forceRestart bool + + restartCmd := &cobra.Command{ + Use: "restart", + Short: "Restarts all of the MCO pods", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error 
{ + return restart(forceRestart) + }, + } + + restartCmd.PersistentFlags().BoolVar(&forceRestart, "force", false, "Deletes the pods to forcefully restart the MCO.") + rootCmd.AddCommand(restartCmd) +} + +func restart(forceRestart bool) error { + cs := framework.NewClientSet("") + + if forceRestart { + klog.Infof("Will delete pods to force restart") + } + + if err := rollout.RestartMCO(cs, forceRestart); err != nil { + return err + } + + klog.Infof("Successfully restarted the MCO pods") + return nil +} diff --git a/devex/cmd/mco-push/revert.go b/devex/cmd/mco-push/revert.go new file mode 100644 index 0000000000..5962a3de6a --- /dev/null +++ b/devex/cmd/mco-push/revert.go @@ -0,0 +1,35 @@ +package main + +import ( + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/klog/v2" +) + +func init() { + var forceRestart bool + + revertCmd := &cobra.Command{ + Use: "revert", + Short: "Reverts the MCO image to the one in the OpenShift release", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return revert(forceRestart) + }, + } + + revertCmd.PersistentFlags().BoolVar(&forceRestart, "force", false, "Deletes the pods to forcefully restart the MCO.") + + rootCmd.AddCommand(revertCmd) +} + +func revert(forceRestart bool) error { + cs := framework.NewClientSet("") + if err := rollout.RevertToOriginalMCOImage(cs, forceRestart); err != nil { + return err + } + + klog.Infof("Successfully rolled back to the original MCO image") + return nil +} diff --git a/devex/cmd/onclustertesting/README.md b/devex/cmd/onclustertesting/README.md new file mode 100644 index 0000000000..0e4ad64d78 --- /dev/null +++ b/devex/cmd/onclustertesting/README.md @@ -0,0 +1,131 @@ +# onclustertesting + +## Overview + +Provides a very simple binary for setting up / tearing down on-cluster layering +to make testing / development go faster. 
+ +## Prerequisites +- An OpenShift cluster running 4.16+ +- Kubeconfig for the aforementioned cluster +- OpenShift CLI (`oc`) +- _(optional, but recommended)_ [K9s](https://k9scli.io/) + +## Usage + +### Setup + +Once you've installed the binary, you can set up a simple on-cluster layering +testing situation which makes use of some handy defaults such as using the +global pull secret and an in-cluster OpenShift ImageStream for pushing the +built image to. + +To do this, run: +`$ onclustertesting setup in-cluster-registry --enable-feature-gate --pool=layered` + +Under the hood, this will perform the following actions: +1. Verify that the appropriate feature gate is enabled, and enable it if desired. +2. Create an ImageStream within the MCO namespace called `os-image`. +3. Create a MachineConfigPool to target (defaults to `layered`) and waits for it to get an initial MachineConfig (this wait does not block the other operations, with the exception of creating a MachineOSConfig). +4. Clones the global pull secret into the MCO namespace. +5. Creates additional secrets / ConfigMaps as needed (see Other Features section below for more details). If a ConfigMap or Secret was previously created by the `onclustertesting` tool, it will be deleted and recreated. +6. Creates a MachineOSConfig for the newly-created pool once the new MachineConfigPool has an initial config, optionally adding a custom Containerfile to it. +7. The MCO should start the `machine-os-builder` pod and the build will begin. Note: `onclustertesting` will not wait for the build to complete before it exits. So you may want to use something like [K9s](https://k9scli.io/) to watch its progress. + +This setup will use the in-cluster registry which requires no external +credentials to be used. The pool will start building as soon as it can. 
Note: +If the `TechPreviewNoUpgrade` feature gate was not previously enabled, this +will create a new MachineConfig in all MachineConfigPools, incurring a full +MachineConfig rollout before the build will start. + +All objects created by the `onclustertesting` tool include the label +`machineconfiguration.openshift.io/createdByOnClusterBuildsHelper`. This +allows the tool to completely remove any and all objects that it created during +the teardown phase. + +### Teardown + +Assuming you have not applied any built images to your cluster nodes, one can +easily teardown everything set up by `onclustertesting` just by running the `$ +onclustertesting teardown` command. + +This will do the following: +1. Delete any ConfigMaps or Secrets created by the `onclustertesting` tool. +2. Delete all build objects created by the `machine-os-builder` process, including any running build pods, ConfigMaps, Secrets, etc. +3. Delete all MachineConfigPools created by the `onclustertesting` tool. +4. Delete all MachineConfigs applied to the MachineConfigPool(s) created by the `onclustertesting` tool. +5. Delete all MachineOSBuild objects. +6. Delete all MachineOSConfig objects. +7. Delete all ImageStreams created by the `onclustertesting` tool. + +### Rollouts + +By itself, `onclustertesting` will only test the build and push phases of the +on-cluster layering process. + +To test the rollout process (applying the newly-built image to a node), you'll +need to move a given node into the MachineConfigPool created by the +`onclustertesting` program. 
One can do that by running the following command: + +`$ onclustertesting optin --pool= --node=` + +There is also an optout helper that performs the inverse operation: + +`$ onclustertesting optout --pool= --node=` + +Alternatively, you can use `oc` or `kubectl` to perform the same action: + +`$ oc label node/ -l 'node-role.kubernetes.io/='` + +It is worth mentioning that once a node is opted in, using `onclustertesting`'s +teardown process may leave your cluster in a difficult-to-recover-from state. +This is mostly because the revert feature does not work yet. Once that +feature is implemented in OCL, this should no longer be a problem. + +## Other features + +### RHEL entitlements + +If your cluster has the `etc-pki-entitlement` secret in the +`openshift-config-managed` namespace, the operator will automatically +copy it into the MCO namespace when a build is required. + +### /etc/yum.repos.d and /etc/pki/rpm-gpg + +If you want to test the `/etc/yum.repos.d` and `/etc/pki/rpm-gpg` injection +capabilities of on-cluster layering, you can provide the `--inject-yum-repos` +flag with the `setup` command. Doing this will cause the following to occur: + +1. `onclustertesting` will use the `oc` +command to extract the `/etc/yum.repos.d` and `/etc/pki/rpm-gpg` directories
from a container image found at `quay.io/zzlotnik/devex:epel`. For speed, this
image only contains those directories and is built using [this
Containerfile](https://github.com/cheesesashimi/containerfiles/commit/33ddc71dc3f480055cfb41f3c6a568c70d4d81da#diff-79251285e7d83f5a8acf208fdb543c6f7ac5c2af3d533cb0458d6c4010a5d4d3). +2. These files will be placed in a temporary directory before being converted into a ConfigMap and Secret. +3. The ConfigMap `etc-yum-repos-d` and the Secret `etc-pki-rpm-gpg` will be created within the MCO namespace. +4. The temporary directory will be removed. + +These objects will be removed during the teardown process. 
+ +### Custom Containerfiles + +During the setup process, you can optionally inject a custom Containerfile by +supplying the `--containerfile-path` flag and a path to a Containerfile present on +your machine. + +The Containerfile will be read and injected into the MachineOSConfig for the +MachineConfigPool that the tool creates. + +**Note:** The custom Containerfile must include a stage beginning with `FROM +configs AS final` in order for your customizations to be built into the final image. + +### CI mode + +CI mode is a new subcommand under the `setup` command, which is intended to opt +a given cluster into on-cluster layering so that a test suite (such as +`openshift-e2e`) may be run against it. Using this command does the following: + +1. Creates two ImageStreams; one for the control-plane and one for the worker pool. +2. Clones the global pull secret into the MCO namespace. +3. Creates a MachineOSConfig for both the control-plane and worker pool. Then waits for the builds to complete. +4. Waits for the newly-built image to roll out to each node in both the control-plane and worker pools. 
diff --git a/devex/cmd/onclustertesting/build.go b/devex/cmd/onclustertesting/build.go new file mode 100644 index 0000000000..548e579522 --- /dev/null +++ b/devex/cmd/onclustertesting/build.go @@ -0,0 +1,114 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +const ( + yumReposContainerImagePullspec string = "quay.io/zzlotnik/devex:epel" +) + +func extractAndInjectYumEpelRepos(cs *framework.ClientSet) error { + eg := errgroup.Group{} + + eg.Go(func() error { + yumReposContents, err := convertFilesFromContainerImageToBytesMap(yumReposContainerImagePullspec, "/etc/yum.repos.d/") + if err != nil { + return err + } + + return createConfigMap(cs, &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-yum-repos-d", + Namespace: ctrlcommon.MCONamespace, + }, + // Note: Even though the BuildController retrieves this ConfigMap, it only + // does so to determine whether or not it is present. It does not look at + // its contents. For that reason, we can use the BinaryData field here + // because the Build Pod will use its contents the same regardless of + // whether its string data or binary data. 
+ BinaryData: yumReposContents, + }) + }) + + eg.Go(func() error { + rpmGpgContents, err := convertFilesFromContainerImageToBytesMap(yumReposContainerImagePullspec, "/etc/pki/rpm-gpg/") + if err != nil { + return err + } + + return utils.CreateOrRecreateSecret(cs, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etc-pki-rpm-gpg", + Namespace: ctrlcommon.MCONamespace, + Labels: map[string]string{ + createdByOnClusterBuildsHelper: "", + }, + }, + Data: rpmGpgContents, + }) + }) + + return eg.Wait() +} + +// Extracts the contents of a directory within a given container to a temporary +// directory. Next, it loads them into a bytes map keyed by filename. It does +// not handle nested directories, so use with caution. +func convertFilesFromContainerImageToBytesMap(pullspec, containerFilepath string) (map[string][]byte, error) { + tempDir, err := os.MkdirTemp("", "prefix") + if err != nil { + return nil, err + } + + path := fmt.Sprintf("%s:%s", containerFilepath, tempDir) + cmd := exec.Command("oc", "image", "extract", pullspec, "--path", path) + klog.Infof("Extracting files under %q from %q to %q; running %s", containerFilepath, pullspec, tempDir, cmd.String()) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + out := map[string][]byte{} + + err = filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + contents, err := os.ReadFile(path) + if err != nil { + return err + } + + out[filepath.Base(path)] = contents + return nil + }) + + if err != nil { + return nil, err + } + + err = os.RemoveAll(tempDir) + if err == nil { + klog.Infof("Tempdir %q from fetching files from %q removed", tempDir, pullspec) + } + + return out, err +} diff --git a/devex/cmd/onclustertesting/ci.go b/devex/cmd/onclustertesting/ci.go new file mode 100644 index 0000000000..d59bba16c4 --- /dev/null +++ 
b/devex/cmd/onclustertesting/ci.go @@ -0,0 +1,158 @@ +package main + +import ( + "context" + "fmt" + "time" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/test/framework" + "golang.org/x/sync/errgroup" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" +) + +const ( + controlPlanePoolName string = "master" + workerPoolName string = "worker" +) + +func runCiSetupCmd(setupOpts opts) error { + utils.ParseFlags() + + if err := utils.CheckForBinaries([]string{"oc"}); err != nil { + return err + } + + cs := framework.NewClientSet("") + + if err := checkForRequiredFeatureGates(cs, setupOpts); err != nil { + return err + } + + if err := setupForCI(cs, setupOpts); err != nil { + return err + } + + klog.Infof("Setup for CI complete!") + + return nil +} + +func setupForCI(cs *framework.ClientSet, setupOpts opts) error { + start := time.Now() + klog.Infof("Beginning setup of on-cluster layering (OCL) for CI testing") + + // If the containerfile is provided using the <() shell redirect, it will + // only be read and applied to one MachineOSConfig. Instead, we should read + // it once and apply it to both MachineOSConfigs. 
+ if setupOpts.containerfilePath != "" { + contents, err := setupOpts.getContainerfileContent() + if err != nil { + return fmt.Errorf("could not get containerfile content from %s: %w", setupOpts.containerfilePath, err) + } + + setupOpts.containerfileContents = contents + } + + eg := errgroup.Group{} + + eg.Go(func() error { + return createSecrets(cs, setupOpts) + }) + + pools := []string{ + workerPoolName, + controlPlanePoolName, + } + + for _, pool := range pools { + pool := pool + eg.Go(func() error { + return setupMoscForCI(cs, setupOpts.deepCopy(), pool) + }) + } + + if err := eg.Wait(); err != nil { + return fmt.Errorf("could not setup MachineOSConfig for CI test: %w", err) + } + + klog.Infof("All builds completed after %s", time.Since(start)) + + for _, pool := range pools { + if err := utils.UnpauseMachineConfigPool(context.TODO(), cs, pool); err != nil { + return fmt.Errorf("could not unpause MachineConfigPool %s: %w", pool, err) + } + } + + if err := waitForPoolsToComplete(cs, pools); err != nil { + return fmt.Errorf("pools did not complete: %w", err) + } + + klog.Infof("Completed on-cluster layering (OCL) setup for CI testing after %s", time.Since(start)) + + return nil +} + +func setupMoscForCI(cs *framework.ClientSet, opts opts, poolName string) error { + waitTime := time.Minute * 20 + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + defer cancel() + + opts.poolName = poolName + + if poolName != controlPlanePoolName && poolName != workerPoolName { + if _, err := createPool(cs, poolName); err != nil { + return fmt.Errorf("could not create MachineConfigPool %s: %w", poolName, err) + } + } + + pullspec, err := createImagestreamAndGetPullspec(cs, poolName) + if err != nil && !apierrs.IsAlreadyExists(err) { + return fmt.Errorf("could not create imagestream or get pullspec: %w", err) + } + + pushSecretName, err := createLongLivedImagePushSecretForPool(context.TODO(), cs, poolName) + if err != nil { + return fmt.Errorf("could not create 
long-lived secret: %w", err) + } + + opts.finalImagePullSecretName = pushSecretName + opts.pushSecretName = pushSecretName + opts.finalImagePullspec = pullspec + + mosc, err := opts.toMachineOSConfig() + if err != nil { + return fmt.Errorf("could not generate MachineOSConfig: %w", err) + } + + if err := createMachineOSConfig(cs, mosc); err != nil { + return fmt.Errorf("could not insert new MachineOSConfig %s: %w", mosc.Name, err) + } + + // Only pause pools that are not the control plane. + if poolName != controlPlanePoolName { + if err := utils.PauseMachineConfigPool(ctx, cs, poolName); err != nil { + return fmt.Errorf("could not pause MachineConfigPool %s: %w", poolName, err) + } + } + + return waitForBuildToComplete(ctx, cs, poolName) +} + +func waitForPoolsToComplete(cs *framework.ClientSet, pools []string) error { + eg := errgroup.Group{} + + for _, pool := range pools { + pool := pool + + eg.Go(func() error { + // TODO: Figure out why WaitForMachineConfigPoolToComplete() is not + // showing control-plane node status when nodes are considered. + return rollout.WaitForOnlyMachineConfigPoolToComplete(cs, pool, time.Minute*30) + }) + } + + return eg.Wait() +} diff --git a/devex/cmd/onclustertesting/configmaps.go b/devex/cmd/onclustertesting/configmaps.go new file mode 100644 index 0000000000..c6d6dc6fd5 --- /dev/null +++ b/devex/cmd/onclustertesting/configmaps.go @@ -0,0 +1,52 @@ +package main + +import ( + "context" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +func createConfigMap(cs *framework.ClientSet, cm *corev1.ConfigMap) error { //nolint:dupl // These are ConfigMaps. 
+	if !hasOurLabel(cm.Labels) {
+		if cm.Labels == nil {
+			cm.Labels = map[string]string{}
+		}
+
+		cm.Labels[createdByOnClusterBuildsHelper] = ""
+	}
+
+	_, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Create(context.TODO(), cm, metav1.CreateOptions{})
+	if err == nil {
+		klog.Infof("Created ConfigMap %q in namespace %q", cm.Name, ctrlcommon.MCONamespace)
+		return nil
+	}
+
+	if err != nil && !apierrs.IsAlreadyExists(err) {
+		return err
+	}
+
+	configMap, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if !hasOurLabel(configMap.Labels) {
+		klog.Infof("Found preexisting user-supplied ConfigMap %q, using as-is.", cm.Name)
+		return nil
+	}
+
+	// Delete and recreate.
+	klog.Infof("ConfigMap %q was created by us, but could be out of date. Recreating...", cm.Name)
+	err = cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Delete(context.TODO(), cm.Name, metav1.DeleteOptions{})
+	if err != nil {
+		return err
+	}
+
+	return createConfigMap(cs, cm)
+}
diff --git a/devex/cmd/onclustertesting/featuregate.go b/devex/cmd/onclustertesting/featuregate.go
new file mode 100644
index 0000000000..b9293888af
--- /dev/null
+++ b/devex/cmd/onclustertesting/featuregate.go
@@ -0,0 +1,101 @@
+package main
+
+import (
+	"context"
+	"fmt"
+
+	configv1 "github.com/openshift/api/config/v1"
+
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+)
+
+func init() {
+	featureGateCmd := &cobra.Command{
+		Use:   "enable-featuregate",
+		Short: "Enables the appropriate feature gates for on-cluster layering to work",
+		Long:  "",
+		RunE: func(_ *cobra.Command, _ []string) error {
+			return enableFeatureGate(framework.NewClientSet(""))
+		},
+	}
+
+	rootCmd.AddCommand(featureGateCmd)
+}
+
+func checkForRequiredFeatureGates(cs 
*framework.ClientSet, setupOpts opts) error { + if err := validateFeatureGatesEnabled(cs, "OnClusterBuild"); err != nil { + if setupOpts.enableFeatureGate { + return enableFeatureGate(cs) + } + + prompt := `You may need to enable TechPreview feature gates on your cluster. Try the following: $ oc patch featuregate/cluster --type=merge --patch='{"spec":{"featureSet":"TechPreviewNoUpgrade"}}'` + klog.Info(prompt) + klog.Info("Alternatively, rerun this command with the --enable-feature-gate flag") + return err + } + + return nil +} + +func enableFeatureGate(cs *framework.ClientSet) error { + fg, err := cs.ConfigV1Interface.FeatureGates().Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not enable feature gate(s): %w", err) + } + + fg.Spec.FeatureSet = "TechPreviewNoUpgrade" + + _, err = cs.ConfigV1Interface.FeatureGates().Update(context.TODO(), fg, metav1.UpdateOptions{}) + if err == nil { + klog.Infof("Enabled FeatureGate %s", fg.Spec.FeatureSet) + } + + return err +} + +// Cribbed from: https://github.com/openshift/machine-config-operator/blob/master/test/helpers/utils.go +func validateFeatureGatesEnabled(cs *framework.ClientSet, requiredFeatureGates ...configv1.FeatureGateName) error { + currentFeatureGates, err := cs.ConfigV1Interface.FeatureGates().Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to fetch feature gates: %w", err) + } + + // This uses the new Go generics to construct a typed set of + // FeatureGateNames. Under the hood, sets are map[T]struct{}{} where + // only the keys matter and one cannot have duplicate keys. Perfect for our use-case! + enabledFeatures := sets.New[configv1.FeatureGateName]() + disabledFeatures := sets.New[configv1.FeatureGateName]() + + // Load all of the feature gate names into our set. Duplicates will be + // automatically be ignored. 
+ for _, currentFeatureGateDetails := range currentFeatureGates.Status.FeatureGates { + for _, enabled := range currentFeatureGateDetails.Enabled { + enabledFeatures.Insert(enabled.Name) + } + + for _, disabled := range currentFeatureGateDetails.Disabled { + disabledFeatures.Insert(disabled.Name) + } + } + + // If we have all of the required feature gates, we're done! + if enabledFeatures.HasAll(requiredFeatureGates...) && !disabledFeatures.HasAny(requiredFeatureGates...) { + klog.Infof("All required feature gates %v are enabled", requiredFeatureGates) + return nil + } + + // Now, lets validate that our FeatureGates are just disabled and not unknown. + requiredFeatures := sets.New[configv1.FeatureGateName](requiredFeatureGates...) + allFeatures := enabledFeatures.Union(disabledFeatures) + if !allFeatures.HasAll(requiredFeatureGates...) { + return fmt.Errorf("unknown FeatureGate(s): %v, available FeatureGate(s): %v", sets.List(requiredFeatures.Difference(allFeatures)), sets.List(allFeatures)) + } + + // If we don't, lets diff against what we have vs. what we want and return that information. 
+ disabledRequiredFeatures := requiredFeatures.Difference(enabledFeatures) + return fmt.Errorf("required FeatureGate(s) %v not enabled; have: %v", sets.List(disabledRequiredFeatures), sets.List(enabledFeatures)) +} diff --git a/devex/cmd/onclustertesting/helpers.go b/devex/cmd/onclustertesting/helpers.go new file mode 100644 index 0000000000..7f5e643a25 --- /dev/null +++ b/devex/cmd/onclustertesting/helpers.go @@ -0,0 +1,490 @@ +package main + +import ( + "context" + _ "embed" + "fmt" + "strings" + + "github.com/openshift/machine-config-operator/devex/cmd/onclustertesting/internal/legacycmds" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/openshift/machine-config-operator/test/helpers" + "golang.org/x/sync/errgroup" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + buildconstants "github.com/openshift/machine-config-operator/pkg/controller/build/constants" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/daemon/constants" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +const ( + defaultLayeredPoolName string = legacycmds.DefaultLayeredPoolName + createdByOnClusterBuildsHelper string = legacycmds.CreatedByOnClusterBuildsHelper + globalPullSecretCloneName string = "global-pull-secret-copy" +) + +func hasOurLabel(labels map[string]string) bool { + if labels == nil { + return false + } + + _, ok := labels[createdByOnClusterBuildsHelper] + return ok +} + +func createPool(cs *framework.ClientSet, poolName string) (*mcfgv1.MachineConfigPool, error) { //nolint:unparam // This may eventually be used. 
+ pool := &mcfgv1.MachineConfigPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: poolName, + Labels: map[string]string{ + createdByOnClusterBuildsHelper: "", + }, + }, + Spec: mcfgv1.MachineConfigPoolSpec{ + MachineConfigSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: mcfgv1.MachineConfigRoleLabelKey, + Operator: metav1.LabelSelectorOpIn, + Values: []string{"worker", poolName}, + }, + }, + }, + NodeSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "node-role.kubernetes.io/" + poolName: "", + }, + }, + }, + } + + klog.Infof("Creating MachineConfigPool %q", pool.Name) + + _, err := cs.MachineConfigPools().Create(context.TODO(), pool, metav1.CreateOptions{}) + switch { + case apierrs.IsAlreadyExists(err): + klog.Infof("MachineConfigPool %q already exists, will reuse", poolName) + case err != nil && !apierrs.IsAlreadyExists(err): + return nil, err + } + + klog.Infof("Waiting for MachineConfigPool %s to get a rendered MachineConfig", poolName) + + if _, err := legacycmds.WaitForRenderedConfigs(cs, poolName, "99-worker-ssh"); err != nil { + return nil, err + } + + return cs.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) +} + +func teardownPool(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) error { + err := cs.MachineConfigPools().Delete(context.TODO(), mcp.Name, metav1.DeleteOptions{}) + if apierrs.IsNotFound(err) { + klog.Infof("MachineConfigPool %s not found", mcp.Name) + return nil + } + + if err != nil && !apierrs.IsNotFound(err) { + return err + } + + klog.Infof("Deleted MachineConfigPool %s", mcp.Name) + return deleteAllMachineConfigsForPool(cs, mcp) +} + +func deleteAllPoolsWithOurLabel(cs *framework.ClientSet) error { + pools, err := cs.MachineConfigPools().List(context.TODO(), getListOptsForOurLabel()) + if err != nil { + return err + } + + eg := errgroup.Group{} + + for _, pool := range pools.Items { + pool := pool + eg.Go(func() error { + return 
teardownPool(cs, &pool) + }) + } + + return eg.Wait() +} + +func resetAllNodeAnnotations(cs *framework.ClientSet) error { + workerPool, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) + if err != nil { + return err + } + + nodes, err := cs.CoreV1Interface.Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, node := range nodes.Items { + if err := resetNodeAnnotationsAndLabels(cs, workerPool, &node); err != nil { + return err + } + } + + return nil +} + +func resetNodeAnnotationsAndLabels(cs *framework.ClientSet, originalPool *mcfgv1.MachineConfigPool, node *corev1.Node) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + expectedNodeRoles := map[string]struct{}{ + "node-role.kubernetes.io/worker": {}, + "node-role.kubernetes.io/master": {}, + "node-role.kubernetes.io/control-plane": {}, + } + + for label := range node.Labels { + _, isExpectedNodeRole := expectedNodeRoles[label] + if strings.HasPrefix(label, "node-role.kubernetes.io") && !isExpectedNodeRole { + delete(node.Labels, label) + } + } + + if _, ok := node.Labels[helpers.MCPNameToRole(originalPool.Name)]; ok { + node.Annotations[constants.CurrentMachineConfigAnnotationKey] = originalPool.Spec.Configuration.Name + node.Annotations[constants.DesiredMachineConfigAnnotationKey] = originalPool.Spec.Configuration.Name + delete(node.Annotations, constants.CurrentImageAnnotationKey) + delete(node.Annotations, constants.DesiredImageAnnotationKey) + } + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + return err + }) +} + +func deleteAllMachineConfigsForPool(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) error { + machineConfigs, err := cs.MachineConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } 
+ + eg := errgroup.Group{} + + for _, mc := range machineConfigs.Items { + mc := mc + eg.Go(func() error { + if _, ok := mc.Annotations[helpers.MCPNameToRole(mcp.Name)]; ok && !strings.HasPrefix(mc.Name, "rendered-") { + if err := cs.MachineConfigs().Delete(context.TODO(), mc.Name, metav1.DeleteOptions{}); err != nil { + return err + } + klog.Infof("Deleted MachineConfig %s, which belonged to MachineConfigPool %s", mc.Name, mcp.Name) + } + + return nil + }) + } + + return eg.Wait() +} + +func deleteBuildObjects(cs *framework.ClientSet) error { + deletionSelectors, err := getSelectorsForDeletion() + if err != nil { + return err + } + + eg := errgroup.Group{} + + for _, selector := range deletionSelectors { + selector := selector + eg.Go(func() error { + return deleteBuildObjectsForSelector(cs, selector) + }) + } + + return eg.Wait() +} + +func getSelectorsForDeletion() ([]labels.Selector, error) { + selectors := []labels.Selector{} + + requirementsLists := [][]string{ + { + buildconstants.OnClusterLayeringLabelKey, + buildconstants.EphemeralBuildObjectLabelKey, + }, + { + // TODO: Use constant for this. 
+ "machineconfiguration.openshift.io/used-by-e2e-test", + }, + { + createdByOnClusterBuildsHelper, + }, + } + + for _, requirementsList := range requirementsLists { + selector := labels.NewSelector() + + for _, requirement := range requirementsList { + req, err := labels.NewRequirement(requirement, selection.Exists, []string{}) + if err != nil { + return nil, fmt.Errorf("could not add requirement %q to selector: %w", requirement, err) + } + selector = selector.Add(*req) + } + + selectors = append(selectors, selector) + } + + return selectors, nil +} + +func deleteBuildObjectsForSelector(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + klog.Infof("Deleting build objects for selector %q:", selector.String()) + + eg.Go(func() error { + return cleanupConfigMaps(cs, selector) + }) + + eg.Go(func() error { + return cleanupPods(cs, selector) + }) + + eg.Go(func() error { + return cleanupSecrets(cs, selector) + }) + + eg.Go(func() error { + return cleanupImagestreams(cs, selector) + }) + + eg.Go(func() error { + return cleanupNamespaces(cs, selector) + }) + + return eg.Wait() +} + +func cleanupPods(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + pods, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + }) + + if err != nil { + return err + } + + for _, pod := range pods.Items { + pod := pod + eg.Go(func() error { + return deleteObjectAndIgnoreIfNotFound(&pod, cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace)) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if len(pods.Items) > 0 { + klog.Infof("Cleaned up all Pods for selector %s", selector.String()) + } + + return nil +} + +func cleanupConfigMaps(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + configMaps, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).List(context.TODO(), 
metav1.ListOptions{ + LabelSelector: selector.String(), + }) + + if err != nil { + return err + } + + for _, configMap := range configMaps.Items { + configMap := configMap + eg.Go(func() error { + return deleteObjectAndIgnoreIfNotFound(&configMap, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace)) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if len(configMaps.Items) > 0 { + klog.Infof("Cleaned up all ConfigMaps for selector %s", selector.String()) + } + + return nil +} + +func cleanupSecrets(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + }) + + if err != nil { + return err + } + + for _, secret := range secrets.Items { + secret := secret + eg.Go(func() error { + return deleteObjectAndIgnoreIfNotFound(&secret, cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace)) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if len(secrets.Items) > 0 { + klog.Infof("Cleaned up all Secrets for selector %s", selector.String()) + } + + return nil +} + +func cleanupImagestreams(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + isList, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + }) + + if err != nil { + return err + } + + for _, is := range isList.Items { + is := is + eg.Go(func() error { + return deleteObjectAndIgnoreIfNotFound(&is, cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace)) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if len(isList.Items) > 0 { + klog.Infof("Cleaned up all Imagestreams for selector %s", selector.String()) + } + + return nil +} + +func cleanupNamespaces(cs *framework.ClientSet, selector labels.Selector) error { + eg := errgroup.Group{} + + nsList, err := 
cs.CoreV1Interface.Namespaces().List(context.TODO(), metav1.ListOptions{ + LabelSelector: selector.String(), + }) + + if err != nil { + return err + } + + for _, ns := range nsList.Items { + ns := ns + eg.Go(func() error { + return deleteObjectAndIgnoreIfNotFound(&ns, cs.CoreV1Interface.Namespaces()) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + if len(nsList.Items) > 0 { + klog.Infof("Cleaned up all Namespaces for selector %s", selector.String()) + } + + return nil +} + +type deleter interface { + Delete(context.Context, string, metav1.DeleteOptions) error +} + +func deleteObjectAndIgnoreIfNotFound(obj metav1.Object, deleter deleter) error { + err := deleter.Delete(context.TODO(), obj.GetName(), metav1.DeleteOptions{}) + if err == nil { + klog.Infof("Deleted %T %s", obj, obj.GetName()) + return nil + } + + if apierrs.IsNotFound(err) { + klog.Infof("%T %s was not found, skipping deletion", obj, obj.GetName()) + return nil + } + + return fmt.Errorf("could not delete %T %s: %w", obj, obj.GetName(), err) +} + +func errIfNotSet(in, name string) error { + if isEmpty(in) { + if !strings.HasPrefix(name, "--") { + name = "--" + name + } + return fmt.Errorf("required flag %q not set", name) + } + + return nil +} + +func isNoneSet(in1, in2 string) bool { + return isEmpty(in1) && isEmpty(in2) +} + +func isOnlyOneSet(in1, in2 string) bool { + if !isEmpty(in1) && !isEmpty(in2) { + return false + } + + return true +} + +func isEmpty(in string) bool { + return in == "" +} + +func getListOptsForOurLabel() metav1.ListOptions { + req, err := labels.NewRequirement(createdByOnClusterBuildsHelper, selection.Exists, []string{}) + if err != nil { + klog.Fatalln(err) + } + + return metav1.ListOptions{ + LabelSelector: req.String(), + } +} + +func ignoreIsNotFound(err error) error { + if err == nil { + return nil + } + + if apierrs.IsNotFound(err) { + return nil + } + + return err +} diff --git a/devex/cmd/onclustertesting/imagestream.go 
b/devex/cmd/onclustertesting/imagestream.go new file mode 100644 index 0000000000..c70692c4a8 --- /dev/null +++ b/devex/cmd/onclustertesting/imagestream.go @@ -0,0 +1,76 @@ +package main + +import ( + "context" + "fmt" + + "github.com/containers/image/v5/docker/reference" + imagev1 "github.com/openshift/api/image/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +const ( + imagestreamName string = "os-image" + imagestreamPullspec string = "image-registry.openshift-image-registry.svc:5000/" + ctrlcommon.MCONamespace + "/" + imagestreamName + ":latest" +) + +func createImagestreamAndGetPullspec(cs *framework.ClientSet, name string) (string, error) { + if err := createImagestream(cs, name); err != nil { + return "", err + } + + return getImagestreamPullspec(cs, name) +} + +func getImagestreamPullspec(cs *framework.ClientSet, name string) (string, error) { + is, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + return appendTagToPullspec(is.Status.DockerImageRepository, "latest") +} + +// Not sure if this is strictly required, but we'll do it anyway. 
+func appendTagToPullspec(pullspec, tag string) (string, error) {
+	named, err := reference.ParseNamed(pullspec)
+	if err != nil {
+		return "", fmt.Errorf("could not parse %s: %w", pullspec, err)
+	}
+
+	tagged, err := reference.WithTag(named, tag)
+	if err != nil {
+		return "", fmt.Errorf("could not add tag %s to image pullspec %s: %w", tag, pullspec, err)
+	}
+
+	return tagged.String(), nil
+}
+
+func createImagestream(cs *framework.ClientSet, name string) error {
+	is := &imagev1.ImageStream{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: ctrlcommon.MCONamespace,
+			Labels: map[string]string{
+				createdByOnClusterBuildsHelper: "",
+			},
+		},
+	}
+
+	created, err := cs.ImageV1Interface.ImageStreams(ctrlcommon.MCONamespace).Create(context.TODO(), is, metav1.CreateOptions{})
+	if err == nil {
+		klog.Infof("Imagestream %q created", name)
+		return nil
+	}
+
+	if apierrs.IsAlreadyExists(err) && hasOurLabel(created.Labels) {
+		klog.Infof("Imagestream %q already exists and has our labels, will re-use", name)
+		return nil
+	}
+
+	return err
+}
diff --git a/devex/cmd/onclustertesting/internal/legacycmds/clearstatus.go b/devex/cmd/onclustertesting/internal/legacycmds/clearstatus.go
new file mode 100644
index 0000000000..d98572606a
--- /dev/null
+++ b/devex/cmd/onclustertesting/internal/legacycmds/clearstatus.go
@@ -0,0 +1,75 @@
+package legacycmds
+
+import (
+	"context"
+	"fmt"
+
+	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
+	"github.com/openshift/machine-config-operator/devex/internal/pkg/utils"
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/util/retry"
+	"k8s.io/klog/v2"
+)
+
+var (
+	clearStatusOpts struct {
+		poolName string
+	}
+)
+
+func ClearStatusCommand() *cobra.Command {
+	clearStatusCmd := &cobra.Command{
+		Use:   "clear-build-status",
+		Short: "Clears the build statuses on a MachineConfigPool used for on-cluster build testing",
+		Long:  "",
+		RunE: 
runClearStatusCmd, + } + + clearStatusCmd.PersistentFlags().StringVar(&clearStatusOpts.poolName, "pool", DefaultLayeredPoolName, "Pool name to clear build status on") + + return clearStatusCmd +} + +func runClearStatusCmd(_ *cobra.Command, _ []string) error { + utils.ParseFlags() + + if clearStatusOpts.poolName == "" { + return fmt.Errorf("no pool name provided") + } + + return clearBuildStatusesOnPool(framework.NewClientSet(""), clearStatusOpts.poolName) +} + +func clearBuildStatusesOnPool(cs *framework.ClientSet, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + buildConditions := map[mcfgv1.MachineConfigPoolConditionType]struct{}{ + mcfgv1.MachineConfigPoolBuildSuccess: {}, + mcfgv1.MachineConfigPoolBuildFailed: {}, + mcfgv1.MachineConfigPoolBuildPending: {}, + mcfgv1.MachineConfigPoolBuilding: {}, + } + + filtered := []mcfgv1.MachineConfigPoolCondition{} + for _, cond := range mcp.Status.Conditions { + if _, ok := buildConditions[cond.Type]; !ok { + filtered = append(filtered, cond) + } + } + + mcp.Status.Conditions = filtered + _, err = cs.MachineConfigPools().UpdateStatus(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Cleared build statuses on MachineConfigPool %s", targetPool) + return nil + }) +} diff --git a/devex/cmd/onclustertesting/internal/legacycmds/extract.go b/devex/cmd/onclustertesting/internal/legacycmds/extract.go new file mode 100644 index 0000000000..623747cb41 --- /dev/null +++ b/devex/cmd/onclustertesting/internal/legacycmds/extract.go @@ -0,0 +1,116 @@ +package legacycmds + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + 
"github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +var ( + extractOpts struct { + poolName string + machineConfig string + targetDir string + noConfigMaps bool + } +) + +func ExtractCommand() *cobra.Command { + extractCmd := &cobra.Command{ + Use: "extract", + Short: "Extracts the Dockerfile and MachineConfig from an on-cluster build", + Long: "", + RunE: runExtractCmd, + } + + extractCmd.PersistentFlags().StringVar(&extractOpts.poolName, "pool", DefaultLayeredPoolName, "Pool name to extract") + extractCmd.PersistentFlags().StringVar(&extractOpts.machineConfig, "machineconfig", "", "MachineConfig name to extract") + extractCmd.PersistentFlags().StringVar(&extractOpts.targetDir, "dir", "", "Dir to store extract build objects") + + return extractCmd +} + +func runExtractCmd(_ *cobra.Command, _ []string) error { + utils.ParseFlags() + + if extractOpts.poolName != "" && extractOpts.machineConfig != "" { + return fmt.Errorf("either pool name or MachineConfig must be provided; not both") + } + + targetDir, err := GetDir(extractOpts.targetDir) + if err != nil { + return err + } + + cs := framework.NewClientSet("") + + if extractOpts.machineConfig != "" { + return extractBuildObjectsForRenderedMC(cs, extractOpts.machineConfig, targetDir) + } + + if extractOpts.poolName != "" { + return extractBuildObjectsForTargetPool(cs, extractOpts.poolName, targetDir) + } + + return fmt.Errorf("no pool name or MachineConfig name provided") +} + +func extractBuildObjectsForTargetPool(cs *framework.ClientSet, targetPool, targetDir string) error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + return ExtractBuildObjects(cs, mcp, targetDir) +} + +func extractBuildObjectsForRenderedMC(cs *framework.ClientSet, mcName, targetDir string) error { + ctx := context.Background() + + dockerfileCM, err := 
cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, "dockerfile-"+mcName, metav1.GetOptions{}) + if err != nil { + return err + } + + mcCM, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, "mc-"+mcName, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Extracted Dockerfile from %q", dockerfileCM.Name) + klog.Infof("Extracted MachineConfig %s from %q", mcName, mcCM.Name) + + return storeBuildObjectsOnDisk(dockerfileCM.Data["Dockerfile"], mcCM.Data["machineconfig.json.gz"], filepath.Join(targetDir, "build-objects-"+mcName)) +} + +func storeBuildObjectsOnDisk(dockerfile, machineConfig, targetDir string) error { + mcDirName := filepath.Join(targetDir, "machineconfig") + dockerfileName := filepath.Join(targetDir, "Dockerfile") + mcFilename := filepath.Join(targetDir, "machineconfig.json.gz") + + if err := os.MkdirAll(mcDirName, 0o755); err != nil { + return err + } + + if err := os.WriteFile(dockerfileName, []byte(dockerfile), 0o755); err != nil { + return err + } + + klog.Infof("Wrote Dockerfile to %s", dockerfileName) + + if err := os.WriteFile(mcFilename, []byte(machineConfig), 0o755); err != nil { + return err + } + + klog.Infof("Wrote MachineConfig to %s", mcFilename) + + return nil +} diff --git a/devex/cmd/onclustertesting/internal/legacycmds/legacycmds.go b/devex/cmd/onclustertesting/internal/legacycmds/legacycmds.go new file mode 100644 index 0000000000..d4958d7d09 --- /dev/null +++ b/devex/cmd/onclustertesting/internal/legacycmds/legacycmds.go @@ -0,0 +1,82 @@ +package legacycmds + +import ( + "context" + "fmt" + "os" + "time" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/test/framework" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +const ( + DefaultLayeredPoolName string = "layered" + CreatedByOnClusterBuildsHelper string = 
"machineconfiguration.openshift.io/createdByOnClusterBuildsHelper" +) + +func ExtractBuildObjects(cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool, targetDir string) error { + return extractBuildObjectsForRenderedMC(cs, mcp.Spec.Configuration.Name, targetDir) +} + +func GetDir(target string) (string, error) { + if target != "" { + return target, nil + } + + return os.Getwd() +} + +func WaitForRenderedConfigs(cs *framework.ClientSet, pool string, mcNames ...string) (string, error) { + var renderedConfig string + startTime := time.Now() + found := make(map[string]bool) + + ctx := context.Background() + + if err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) { + // Set up the list + for _, name := range mcNames { + found[name] = false + } + + // Update found based on the MCP + mcp, err := cs.MachineConfigPools().Get(ctx, pool, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, mc := range mcp.Spec.Configuration.Source { + if _, ok := found[mc.Name]; ok { + found[mc.Name] = true + } + } + + // If any are still false, then they weren't included in the MCP + for _, nameFound := range found { + if !nameFound { + return false, nil + } + } + + // All the required names were found + renderedConfig = mcp.Spec.Configuration.Name + return true, nil + }); err != nil { + return "", fmt.Errorf("machine configs %v hasn't been picked by pool %s (waited %s): %w", notFoundNames(found), pool, time.Since(startTime), err) + } + klog.Infof("Pool %s has rendered configs %v with %s (waited %v)", pool, mcNames, renderedConfig, time.Since(startTime)) + return renderedConfig, nil +} + +func notFoundNames(foundNames map[string]bool) []string { + out := []string{} + for name, found := range foundNames { + if !found { + out = append(out, name) + } + } + return out +} diff --git a/devex/cmd/onclustertesting/internal/legacycmds/machineconfig.go 
b/devex/cmd/onclustertesting/internal/legacycmds/machineconfig.go
new file mode 100644
index 0000000000..d2c1d9781e
--- /dev/null
+++ b/devex/cmd/onclustertesting/internal/legacycmds/machineconfig.go
@@ -0,0 +1,140 @@
+package legacycmds
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/ghodss/yaml"
+	"github.com/openshift/machine-config-operator/devex/internal/pkg/utils"
+	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
+
+	ign3types "github.com/coreos/ignition/v2/config/v3_4/types"
+	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
+	"github.com/openshift/machine-config-operator/test/framework"
+	"github.com/openshift/machine-config-operator/test/helpers"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/klog/v2"
+)
+
+var (
+	machineConfigOpts struct {
+		poolName      string
+		machineConfig string
+		sshMC         bool
+		dryRun        bool
+	}
+)
+
+func MachineConfigCommand() *cobra.Command {
+	machineConfigCmd := &cobra.Command{
+		Use:   "machineconfig",
+		Short: "Creates a MachineConfig in a layered MachineConfigPool to cause a build",
+		Long:  "",
+		RunE:  runMachineConfigCmd,
+	}
+
+	machineConfigCmd.PersistentFlags().StringVar(&machineConfigOpts.poolName, "pool", DefaultLayeredPoolName, "Pool name to target")
+	machineConfigCmd.PersistentFlags().StringVar(&machineConfigOpts.machineConfig, "machineconfig", "", "MachineConfig name to create")
+	machineConfigCmd.PersistentFlags().BoolVar(&machineConfigOpts.sshMC, "ssh-config", false, "Creates a MachineConfig that adds an SSH key to avoid reboots")
+	machineConfigCmd.PersistentFlags().BoolVar(&machineConfigOpts.dryRun, "dry-run", false, "Dump the MachineConfig to stdout instead of applying it")
+
+	return machineConfigCmd
+
+}
+
+func runMachineConfigCmd(_ *cobra.Command, _ []string) error {
+	utils.ParseFlags()
+
+	if machineConfigOpts.poolName == "" {
+		return 
fmt.Errorf("no pool name provided") + } + + cs := framework.NewClientSet("") + + return createMachineConfig(cs, machineConfigOpts.poolName, machineConfigOpts.machineConfig) +} + +func createMachineConfig(cs *framework.ClientSet, targetPool, name string) error { + mc := getMachineConfig(machineConfigOpts.machineConfig, machineConfigOpts.poolName, machineConfigOpts.sshMC) + mc.Labels = map[string]string{ + CreatedByOnClusterBuildsHelper: "", + } + + if machineConfigOpts.dryRun { + return dumpYAMLToStdout(mc) + } + + _, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + _, err = cs.MachineConfigs().Create(context.TODO(), mc, metav1.CreateOptions{}) + if err != nil { + return err + } + + klog.Infof("Created MachineConfig %q targeting pool %q", name, targetPool) + + renderedConfig, err := WaitForRenderedConfigs(cs, targetPool, name) + if err != nil { + return err + } + + klog.Infof("MachineConfigPool %s got rendered config %q", targetPool, renderedConfig) + + return nil +} + +func getMachineConfig(name, targetPool string, sshMC bool) *mcfgv1.MachineConfig { + if name == "" { + name = fmt.Sprintf("%s-%s", targetPool, uuid.NewUUID()) + } + + if !sshMC { + return helpers.NewMachineConfig(name, helpers.MCLabelForRole(targetPool), "", []ign3types.File{ + helpers.CreateEncodedIgn3File(filepath.Join("/etc", name), name, 420), + }) + } + + return getSSHMachineConfig(name, targetPool, string(uuid.NewUUID())) +} + +func getSSHMachineConfig(mcName, mcpName, sshKeyContent string) *mcfgv1.MachineConfig { + // Adding authorized key for user core + testIgnConfig := ctrlcommon.NewIgnConfig() + + testIgnConfig.Passwd.Users = []ign3types.PasswdUser{ + { + Name: "core", + SSHAuthorizedKeys: []ign3types.SSHAuthorizedKey{ign3types.SSHAuthorizedKey(sshKeyContent)}, + }, + } + + return &mcfgv1.MachineConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcName, + Labels: helpers.MCLabelForRole(mcpName), + }, + Spec: 
mcfgv1.MachineConfigSpec{ + Config: runtime.RawExtension{ + Raw: helpers.MarshalOrDie(testIgnConfig), + }, + }, + } +} + +func dumpYAMLToStdout(in interface{}) error { + out, err := yaml.Marshal(in) + if err != nil { + return err + } + + _, err = os.Stdout.Write(out) + return err +} diff --git a/devex/cmd/onclustertesting/internal/legacycmds/setimage.go b/devex/cmd/onclustertesting/internal/legacycmds/setimage.go new file mode 100644 index 0000000000..2b2b207d8d --- /dev/null +++ b/devex/cmd/onclustertesting/internal/legacycmds/setimage.go @@ -0,0 +1,118 @@ +package legacycmds + +import ( + "context" + "fmt" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +var ( + setImageOpts struct { + poolName string + imageName string + } +) + +func SetImageCommand() *cobra.Command { + setImageCmd := &cobra.Command{ + Use: "set-image", + Short: "Sets an image pullspec on a MachineConfigPool", + Long: "", + RunE: runSetImageCmd, + } + + setImageCmd.PersistentFlags().StringVar(&setImageOpts.poolName, "pool", DefaultLayeredPoolName, "Pool name to set build status on") + setImageCmd.PersistentFlags().StringVar(&setImageOpts.imageName, "image", "", "The image pullspec to set") + + return setImageCmd +} + +func runSetImageCmd(_ *cobra.Command, _ []string) error { + utils.ParseFlags() + + if setImageOpts.poolName == "" { + return fmt.Errorf("no pool name provided") + } + + if setImageOpts.imageName == "" { + return fmt.Errorf("no image name provided") + } + + return setImageOnPool(framework.NewClientSet(""), setImageOpts.poolName, setImageOpts.imageName) +} + +func setImageOnPool(cs 
*framework.ClientSet, targetPool, pullspec string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := optInPool(cs, targetPool); err != nil { + return err + } + + return addImageToLayeredPool(cs, pullspec, targetPool) + }) +} + +func addImageToLayeredPool(cs *framework.ClientSet, pullspec, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + if mcp.Labels == nil { + if err := optInPool(cs, targetPool); err != nil { + return err + } + } + + if mcp.Annotations == nil { + mcp.Annotations = map[string]string{} + } + + mcp.Annotations[ctrlcommon.ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] = pullspec + mcp, err = cs.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Applied image %q to MachineConfigPool %s", pullspec, mcp.Name) + return clearThenSetStatusOnPool(cs, targetPool, mcfgv1.MachineConfigPoolBuildSuccess, corev1.ConditionTrue) + }) +} + +func clearThenSetStatusOnPool(cs *framework.ClientSet, targetPool string, condType mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := clearBuildStatusesOnPool(cs, targetPool); err != nil { + return err + } + + return setStatusOnPool(cs, targetPool, condType, status) + }) +} + +func optInPool(cs *framework.ClientSet, targetPool string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + if mcp.Labels == nil { + mcp.Labels = map[string]string{} + } + + mcp.Labels[ctrlcommon.LayeringEnabledPoolLabel] = "" + + klog.Infof("Opted MachineConfigPool %q into layering", mcp.Name) + _, err 
= cs.MachineConfigPools().Update(context.TODO(), mcp, metav1.UpdateOptions{}) + return err + }) +} diff --git a/devex/cmd/onclustertesting/internal/legacycmds/setstatus.go b/devex/cmd/onclustertesting/internal/legacycmds/setstatus.go new file mode 100644 index 0000000000..721a52a1e1 --- /dev/null +++ b/devex/cmd/onclustertesting/internal/legacycmds/setstatus.go @@ -0,0 +1,107 @@ +package legacycmds + +import ( + "context" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +var ( + setStatusCmd = &cobra.Command{ + Use: "set-build-status", + Short: "Sets the build status on a given MachineConfigPool", + Long: "", + Run: runSetStatusCmd, + } + + setStatusOpts struct { + poolName string + condType string + status bool + } +) + +func SetStatusCommand() *cobra.Command { + setStatusCmd := &cobra.Command{ + Use: "set-build-status", + Short: "Sets the build status on a given MachineConfigPool", + Long: "", + Run: runSetStatusCmd, + } + + setStatusCmd.PersistentFlags().StringVar(&setStatusOpts.poolName, "pool", DefaultLayeredPoolName, "Pool name to set build status on") + setStatusCmd.PersistentFlags().StringVar(&setStatusOpts.condType, "type", "", "The condition type to set") + setStatusCmd.PersistentFlags().BoolVar(&setStatusOpts.status, "status", false, "Condition true or false") + + return setStatusCmd +} + +func runSetStatusCmd(_ *cobra.Command, _ []string) { + utils.ParseFlags() + + if setStatusOpts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + validCondTypes := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolUpdated, + mcfgv1.MachineConfigPoolUpdating, + 
mcfgv1.MachineConfigPoolNodeDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolBuildPending, + mcfgv1.MachineConfigPoolBuilding, + mcfgv1.MachineConfigPoolBuildSuccess, + mcfgv1.MachineConfigPoolBuildFailed, + } + + var condTypeToSet mcfgv1.MachineConfigPoolConditionType + for _, condType := range validCondTypes { + if string(condType) == setStatusOpts.condType { + condTypeToSet = mcfgv1.MachineConfigPoolConditionType(setStatusOpts.condType) + break + } + } + + if condTypeToSet == "" { + klog.Fatalf("unknown condition type %q, valid options: %v", setStatusOpts.condType, validCondTypes) + } + + status := map[bool]corev1.ConditionStatus{ + true: corev1.ConditionTrue, + false: corev1.ConditionFalse, + } + + if err := setStatusOnPool(framework.NewClientSet(""), setStatusOpts.poolName, condTypeToSet, status[setStatusOpts.status]); err != nil { + klog.Fatal(err) + } +} + +func setStatusOnPool(cs *framework.ClientSet, targetPool string, condType mcfgv1.MachineConfigPoolConditionType, status corev1.ConditionStatus) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + newCond := apihelpers.NewMachineConfigPoolCondition(condType, status, "", "") + apihelpers.SetMachineConfigPoolCondition(&mcp.Status, *newCond) + + _, err = cs.MachineConfigPools().UpdateStatus(context.TODO(), mcp, metav1.UpdateOptions{}) + if err != nil { + return err + } + + klog.Infof("Set %s / %s on %s", condType, status, targetPool) + + return nil + }) +} diff --git a/devex/cmd/onclustertesting/legacycmds.go b/devex/cmd/onclustertesting/legacycmds.go new file mode 100644 index 0000000000..3ab43e8df7 --- /dev/null +++ b/devex/cmd/onclustertesting/legacycmds.go @@ -0,0 +1,24 @@ +package main + +import ( + "os" + + 
"github.com/openshift/machine-config-operator/devex/cmd/onclustertesting/internal/legacycmds" + "github.com/spf13/cobra" +) + +func init() { + cmds := map[string]func() *cobra.Command{ + "ENABLE_SET_IMAGE_COMMAND": legacycmds.SetImageCommand, + "ENABLE_SET_STATUS_COMMAND": legacycmds.SetStatusCommand, + "ENABLE_EXTRACT_COMMAND": legacycmds.ExtractCommand, + "ENABLE_CLEAR_STATUS_COMMAND": legacycmds.ClearStatusCommand, + "ENABLE_MACHINECONFIG_COMMAND": legacycmds.MachineConfigCommand, + } + + for envVarName, cmd := range cmds { + if _, ok := os.LookupEnv(envVarName); ok { + rootCmd.AddCommand(cmd()) + } + } +} diff --git a/devex/cmd/onclustertesting/machineosconfigs.go b/devex/cmd/onclustertesting/machineosconfigs.go new file mode 100644 index 0000000000..438ed0c67e --- /dev/null +++ b/devex/cmd/onclustertesting/machineosconfigs.go @@ -0,0 +1,207 @@ +package main + +import ( + "context" + "fmt" + "time" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + clientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type moscOpts struct { + poolName string + containerfileContents string + pullSecretName string + pushSecretName string + finalPullSecretName string + finalImagePullspec string +} + +func newMachineOSConfig(opts moscOpts) *mcfgv1.MachineOSConfig { + return &mcfgv1.MachineOSConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: opts.poolName, + Labels: map[string]string{ + createdByOnClusterBuildsHelper: "", + }, + }, + Spec: mcfgv1.MachineOSConfigSpec{ + MachineConfigPool: mcfgv1.MachineConfigPoolReference{ + Name: opts.poolName, + }, + BaseImagePullSecret: 
&mcfgv1.ImageSecretObjectReference{ + Name: opts.pullSecretName, + }, + RenderedImagePushSecret: mcfgv1.ImageSecretObjectReference{ + Name: opts.pushSecretName, + }, + RenderedImagePushSpec: mcfgv1.ImageTagFormat(opts.finalImagePullspec), + ImageBuilder: mcfgv1.MachineOSImageBuilder{ + ImageBuilderType: mcfgv1.JobBuilder, + }, + Containerfile: []mcfgv1.MachineOSContainerfile{ + { + ContainerfileArch: mcfgv1.NoArch, + Content: opts.containerfileContents, + }, + }, + }, + } + +} + +func getMachineOSConfigForPool(cs *framework.ClientSet, pool *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSConfig, error) { + client := clientmachineconfigv1.NewForConfigOrDie(cs.GetRestConfig()) + + moscList, err := client.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + found := filterMachineOSConfigsForPool(moscList, pool) + if len(found) == 1 { + return found[0], nil + } + + if len(found) == 0 { + return nil, fmt.Errorf("no MachineOSConfigs exist for MachineConfigPool %s", pool.Name) + } + + names := []string{} + for _, mosc := range found { + names = append(names, mosc.Name) + } + + return nil, fmt.Errorf("expected one MachineOSConfig for MachineConfigPool %s, found multiple: %v", pool.Name, names) +} + +func filterMachineOSConfigsForPool(moscList *mcfgv1.MachineOSConfigList, pool *mcfgv1.MachineConfigPool) []*mcfgv1.MachineOSConfig { + found := []*mcfgv1.MachineOSConfig{} + + for _, mosc := range moscList.Items { + if mosc.Spec.MachineConfigPool.Name == pool.Name { + mosc := mosc + found = append(found, &mosc) + } + } + + return found +} + +func createMachineOSConfig(cs *framework.ClientSet, mosc *mcfgv1.MachineOSConfig) error { + client := clientmachineconfigv1.NewForConfigOrDie(cs.GetRestConfig()) + + _, err := client.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not create MachineOSConfig %s: %w", mosc.Name, err) + } + + klog.Infof("Created MachineOSConfig 
%s", mosc.Name) + return nil +} + +func deleteMachineOSConfigs(cs *framework.ClientSet) error { + client := clientmachineconfigv1.NewForConfigOrDie(cs.GetRestConfig()) + + moscList, err := client.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, mosc := range moscList.Items { + err := client.MachineOSConfigs().Delete(context.TODO(), mosc.Name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("could not delete MachineOSConfig %s: %w", mosc.Name, err) + } + + klog.Infof("Deleted MachineOSConfig %s", mosc.Name) + } + + return err +} + +func deleteMachineOSBuilds(cs *framework.ClientSet) error { + client := clientmachineconfigv1.NewForConfigOrDie(cs.GetRestConfig()) + + mosbList, err := client.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + for _, mosb := range mosbList.Items { + err := client.MachineOSBuilds().Delete(context.TODO(), mosb.Name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("could not delete MachineOSBuild %s: %w", mosb.Name, err) + } + + klog.Infof("Deleted MachineOSBuild %s", mosb.Name) + } + + return err +} + +func waitForBuildToComplete(ctx context.Context, cs *framework.ClientSet, poolName string) error { + isExists := false + isPending := false + isBuilding := false + isSuccess := false + + start := time.Now() + + return waitForMachineOSBuildToReachState(ctx, cs, poolName, func(mosb *mcfgv1.MachineOSBuild, err error) (bool, error) { + // There is a lag between when the MachineOSConfig is created and the + // MachineOSBuild object gets created and is available. + if err != nil && !utils.IsNotFoundErr(err) { + return false, err + } + + // If the MachineOSBuild has not been created yet, try again later. + if utils.IsNotFoundErr(err) { + return false, nil + } + + // If the MachineOSBuild exists, we can interrogate its state. 
+ if !isExists && mosb != nil && err == nil { + isExists = true + klog.Infof("Build %s exists after %s", mosb.Name, time.Since(start)) + } + + state := ctrlcommon.NewMachineOSBuildState(mosb) + + if !isPending && state.IsBuildPending() { + isPending = true + klog.Infof("Build %s is now pending after %s", mosb.Name, time.Since(start)) + } + + if !isBuilding && state.IsBuilding() { + isBuilding = true + klog.Infof("Build %s is now running after %s", mosb.Name, time.Since(start)) + } + + if !isSuccess && state.IsBuildSuccess() { + isSuccess = true + klog.Infof("Build %s is complete after %s", mosb.Name, time.Since(start)) + return true, nil + } + + if state.IsBuildFailure() { + return false, fmt.Errorf("build %s failed after %s", mosb.Name, time.Since(start)) + } + + return false, nil + }) +} + +func waitForMachineOSBuildToReachState(ctx context.Context, cs *framework.ClientSet, poolName string, condFunc func(*mcfgv1.MachineOSBuild, error) (bool, error)) error { + return wait.PollUntilContextCancel(ctx, time.Second, true, func(funcCtx context.Context) (bool, error) { + mosb, err := utils.GetMachineOSBuildForPoolName(funcCtx, cs, poolName) + return condFunc(mosb, err) + }) +} diff --git a/devex/cmd/onclustertesting/main.go b/devex/cmd/onclustertesting/main.go new file mode 100644 index 0000000000..abd3f44edd --- /dev/null +++ b/devex/cmd/onclustertesting/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "flag" + "os" + + "github.com/spf13/cobra" + "k8s.io/component-base/cli" +) + +var ( + rootCmd = &cobra.Command{ + Use: "onclustertesting", + Short: "Help with testing on-cluster builds", + Long: "", + } +) + +func init() { + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) +} + +func main() { + os.Exit(cli.Run(rootCmd)) +} diff --git a/devex/cmd/onclustertesting/optin.go b/devex/cmd/onclustertesting/optin.go new file mode 100644 index 0000000000..27ff73e19e --- /dev/null +++ b/devex/cmd/onclustertesting/optin.go @@ -0,0 +1,100 @@ +package main + +import ( + 
"context" + "fmt" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/openshift/machine-config-operator/test/helpers" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +type optInAndOutOpts struct { + poolName string + nodeName string + force bool +} + +func init() { + optInOpts := optInAndOutOpts{} + + optInCmd := &cobra.Command{ + Use: "optin", + Short: "Opts a node into on-cluster builds", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runOptInCmd(optInOpts) + }, + } + + optInCmd.PersistentFlags().StringVar(&optInOpts.poolName, "pool", defaultLayeredPoolName, "Pool name") + optInCmd.PersistentFlags().StringVar(&optInOpts.nodeName, "node", "", "Node name") + + rootCmd.AddCommand(optInCmd) +} + +func runOptInCmd(opts optInAndOutOpts) error { + utils.ParseFlags() + + if isEmpty(opts.poolName) { + return fmt.Errorf("no pool name provided") + } + + if isEmpty(opts.nodeName) { + return fmt.Errorf("no node name provided") + } + + return optInNode(framework.NewClientSet(""), opts.nodeName, opts.poolName) +} + +func optInNode(cs *framework.ClientSet, nodeName, targetPool string) error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Found pool %q", targetPool) + + mosc, err := getMachineOSConfigForPool(cs, mcp) + if err != nil { + return err + } + + klog.Infof("Found MachineOSConfig %s for pool", mosc.Name) + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + klog.Infof("Found node %q", nodeName) + + invalidNodeRoles := []string{ + helpers.MCPNameToRole("master"), + helpers.MCPNameToRole("control-plane"), + } + + for _, 
invalidNodeRole := range invalidNodeRoles { + if _, ok := node.Labels[invalidNodeRole]; ok { + return fmt.Errorf("cannot opt node with role %q into layering", invalidNodeRole) + } + } + + if _, ok := node.Labels[helpers.MCPNameToRole(targetPool)]; ok { + return fmt.Errorf("node %q already has label %s", node.Name, helpers.MCPNameToRole(targetPool)) + } + + node.Labels[helpers.MCPNameToRole(targetPool)] = "" + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + if err == nil { + klog.Infof("Node %q opted into layering via pool %q", node.Name, mcp.Name) + } + return err + }) +} diff --git a/devex/cmd/onclustertesting/optout.go b/devex/cmd/onclustertesting/optout.go new file mode 100644 index 0000000000..3487e69888 --- /dev/null +++ b/devex/cmd/onclustertesting/optout.go @@ -0,0 +1,83 @@ +package main + +import ( + "context" + "fmt" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/openshift/machine-config-operator/test/helpers" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +func init() { + optOutOpts := optInAndOutOpts{} + + optOutCmd := &cobra.Command{ + Use: "optout", + Short: "Opts a node out of on-cluster builds", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runOptOutCmd(optOutOpts) + }, + } + + optOutCmd.PersistentFlags().StringVar(&optOutOpts.poolName, "pool", defaultLayeredPoolName, "Pool name") + optOutCmd.PersistentFlags().StringVar(&optOutOpts.nodeName, "node", "", "Node name") + optOutCmd.PersistentFlags().BoolVar(&optOutOpts.force, "force", false, "Forcefully opt node out") + + rootCmd.AddCommand(optOutCmd) +} + +func runOptOutCmd(optOutOpts optInAndOutOpts) error { + utils.ParseFlags() + + if !optOutOpts.force && isEmpty(optOutOpts.poolName) { + return fmt.Errorf("no pool name provided") + } + + if 
isEmpty(optOutOpts.nodeName) { + return fmt.Errorf("no node name provided") + } + + return optOutNode(framework.NewClientSet(""), optOutOpts.nodeName, optOutOpts.poolName, optOutOpts.force) +} + +func optOutNode(cs *framework.ClientSet, nodeName, poolName string, force bool) error { + klog.Warningf("WARNING! You will need to recover the node manually if you do this!") + + workerMCP, err := cs.MachineConfigPools().Get(context.TODO(), "worker", metav1.GetOptions{}) + if err != nil { + return err + } + + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + node, err := cs.CoreV1Interface.Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + + if force { + klog.Infof("Forcefully opting node %q out of layering", node.Name) + return resetNodeAnnotationsAndLabels(cs, workerMCP, node) + } + + role := helpers.MCPNameToRole(poolName) + + if _, ok := node.Labels[role]; !ok { + return fmt.Errorf("node %q does not have a label matching %q", node.Name, role) + } + + delete(node.Labels, role) + + _, err = cs.CoreV1Interface.Nodes().Update(context.TODO(), node, metav1.UpdateOptions{}) + if err == nil { + klog.Infof("Opted node %q out of on-cluster builds", node.Name) + } + + return err + }) +} diff --git a/devex/cmd/onclustertesting/opts.go b/devex/cmd/onclustertesting/opts.go new file mode 100644 index 0000000000..84551ef2b3 --- /dev/null +++ b/devex/cmd/onclustertesting/opts.go @@ -0,0 +1,164 @@ +package main + +import ( + "fmt" + "os" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "k8s.io/klog/v2" +) + +type opts struct { + pushSecretName string + pullSecretName string + finalImagePullSecretName string + pushSecretPath string + pullSecretPath string + finalImagePullspec string + containerfilePath string + containerfileContents string + poolName string + injectYumRepos bool + waitForBuildInfo bool + enableFeatureGate bool +} + +func (o *opts) deepCopy() opts { + return opts{ + pushSecretName: 
o.pushSecretName, + pullSecretName: o.pullSecretName, + pushSecretPath: o.pushSecretPath, + pullSecretPath: o.pullSecretPath, + finalImagePullspec: o.finalImagePullspec, + finalImagePullSecretName: o.finalImagePullSecretName, + containerfilePath: o.containerfilePath, + containerfileContents: o.containerfileContents, + poolName: o.poolName, + injectYumRepos: o.injectYumRepos, + waitForBuildInfo: o.waitForBuildInfo, + enableFeatureGate: o.enableFeatureGate, + } +} + +func (o *opts) getContainerfileContent() (string, error) { + if o.containerfileContents != "" { + return o.containerfileContents, nil + } + + if o.containerfilePath == "" { + return "", fmt.Errorf("no custom Containerfile path provided") + } + + containerfileBytes, err := os.ReadFile(o.containerfilePath) + if err != nil { + return "", fmt.Errorf("cannot read Containerfile from %s: %w", o.containerfilePath, err) + } + + klog.Infof("Using contents in Containerfile %q for %s custom Containerfile", o.containerfilePath, o.poolName) + return string(containerfileBytes), nil +} + +func (o *opts) maybeGetContainerfileContent() (string, error) { + if o.containerfileContents != "" { + return o.containerfileContents, nil + } + + if o.containerfilePath == "" { + return "", nil + } + + return o.getContainerfileContent() +} + +func (o *opts) shouldCloneGlobalPullSecret() bool { + if o.pullSecretName == globalPullSecretCloneName && o.pullSecretPath == "" { + return true + } + + return isNoneSet(o.pullSecretName, o.pullSecretPath) +} + +func (o *opts) toMachineOSConfig() (*mcfgv1.MachineOSConfig, error) { + pushSecretName, err := o.getPushSecretName() + if err != nil { + return nil, err + } + + pullSecretName, err := o.getPullSecretName() + if err != nil { + return nil, err + } + + containerfileContents, err := o.maybeGetContainerfileContent() + if err != nil { + return nil, err + } + + finalPullSecretName, err := o.getFinalPullSecretName() + if err != nil { + return nil, err + } + + moscOpts := moscOpts{ + poolName: 
o.poolName, + containerfileContents: containerfileContents, + pullSecretName: pullSecretName, + pushSecretName: pushSecretName, + finalImagePullspec: o.finalImagePullspec, + finalPullSecretName: finalPullSecretName, + } + + return newMachineOSConfig(moscOpts), nil +} + +func (o *opts) getFinalPullSecretName() (string, error) { + if o.finalImagePullSecretName == "" { + return "", fmt.Errorf("no final image pull secret name given") + } + + return o.finalImagePullSecretName, nil +} + +func (o *opts) getPullSecretName() (string, error) { + if o.shouldCloneGlobalPullSecret() { + return globalPullSecretCloneName, nil + } + + if o.pullSecretName != "" { + return o.pullSecretName, nil + } + + name, err := getSecretNameFromFile(o.pullSecretPath) + if err != nil { + return "", fmt.Errorf("could not get pull secret name from file: %w", err) + } + + return name, nil +} + +func (o *opts) getPushSecretName() (string, error) { + if o.pushSecretName != "" { + return o.pushSecretName, nil + } + + name, err := getSecretNameFromFile(o.pushSecretPath) + if err != nil { + return "", fmt.Errorf("could not get push secret name from file: %w", err) + } + + return name, nil +} + +func (o *opts) getSecretNameParams() []string { + secretNames := []string{} + + if o.pullSecretName != "" { + secretNames = append(secretNames, o.pullSecretName) + } + + if o.pushSecretName != "" { + secretNames = append(secretNames, o.pushSecretName) + } + + return secretNames +} diff --git a/devex/cmd/onclustertesting/secrets.go b/devex/cmd/onclustertesting/secrets.go new file mode 100644 index 0000000000..9c04994323 --- /dev/null +++ b/devex/cmd/onclustertesting/secrets.go @@ -0,0 +1,244 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/ghodss/yaml" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + 
"github.com/openshift/machine-config-operator/test/helpers" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/labels" +) + +// Copies the global pull secret from openshift-config/pull-secret into the MCO +// namespace so that it can be used by the custom build pod. +func copyGlobalPullSecret(cs *framework.ClientSet) error { + src := utils.SecretRef{ + Name: "pull-secret", + Namespace: "openshift-config", + } + + dst := utils.SecretRef{ + Name: globalPullSecretCloneName, + Namespace: ctrlcommon.MCONamespace, + } + + labels := map[string]string{ + createdByOnClusterBuildsHelper: "", + } + + return utils.CloneSecretWithLabels(cs, src, dst, labels) +} + +func getSecretNameFromFile(path string) (string, error) { + secret, err := loadSecretFromFile(path) + + if err != nil { + return "", fmt.Errorf("could not get secret name from %q: %w", path, err) + } + + return secret.Name, nil +} + +func loadSecretFromFile(pushSecretPath string) (*corev1.Secret, error) { + pushSecretBytes, err := os.ReadFile(pushSecretPath) + if err != nil { + return nil, err + } + + secret := &corev1.Secret{} + if err := yaml.Unmarshal(pushSecretBytes, &secret); err != nil { + return nil, err + } + + secret.Labels = map[string]string{ + createdByOnClusterBuildsHelper: "", + } + + secret.Namespace = ctrlcommon.MCONamespace + + return secret, nil +} + +func createSecretFromFile(cs *framework.ClientSet, path string) error { + secret, err := loadSecretFromFile(path) + if err != nil { + return err + } + + klog.Infof("Loaded secret %q from %s", secret.Name, path) + return utils.CreateOrRecreateSecret(cs, secret) +} + +func deleteSecret(cs *framework.ClientSet, name string) error { + err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + + if err != nil { + return fmt.Errorf("could not delete secret %s: %w", name, err) + } + + 
klog.Infof("Deleted secret %q from namespace %q", name, ctrlcommon.MCONamespace) + return nil +} + +func getBuilderPushSecretName(cs *framework.ClientSet) (string, error) { + secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + + for _, secret := range secrets.Items { + if strings.HasPrefix(secret.Name, "builder-dockercfg") { + klog.Infof("Will use builder secret %q in namespace %q", secret.Name, ctrlcommon.MCONamespace) + return secret.Name, nil + } + } + + return "", fmt.Errorf("could not find matching secret name in namespace %s", ctrlcommon.MCONamespace) +} + +func getDefaultPullSecretName(cs *framework.ClientSet) (string, error) { + secrets, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + + for _, secret := range secrets.Items { + if strings.HasPrefix(secret.Name, "default-dockercfg") && !strings.Contains(secret.Name, "canonical") { + klog.Infof("Will use default secret %q in namespace %q", secret.Name, ctrlcommon.MCONamespace) + return secret.Name, nil + } + } + + return "", fmt.Errorf("could not find matching secret name in namespace %s", ctrlcommon.MCONamespace) +} + +// TODO: Dedupe these funcs from BuildController helpers. +func validateSecret(cs *framework.ClientSet, secretName string) error { + // Here we just validate the presence of the secret, and not its content + secret, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err != nil && apierrs.IsNotFound(err) { + return fmt.Errorf("secret %q not found in namespace %q. 
Did you use the right secret name?", secretName, ctrlcommon.MCONamespace) + } + + if err != nil { + return fmt.Errorf("could not get secret %s: %w", secretName, err) + } + + if _, err := getPullSecretKey(secret); err != nil { + return err + } + + return nil +} + +// Looks up a given secret key for a given secret type and validates that the +// key is present and the secret is a non-zero length. Returns an error if it +// is the incorrect secret type, missing the appropriate key, or the secret is +// a zero-length. +func getPullSecretKey(secret *corev1.Secret) (string, error) { + if secret.Type != corev1.SecretTypeDockerConfigJson && secret.Type != corev1.SecretTypeDockercfg { + return "", fmt.Errorf("unknown secret type %s", secret.Type) + } + + secretTypes := map[corev1.SecretType]string{ + corev1.SecretTypeDockercfg: corev1.DockerConfigKey, + corev1.SecretTypeDockerConfigJson: corev1.DockerConfigJsonKey, + } + + key := secretTypes[secret.Type] + + val, ok := secret.Data[key] + if !ok { + return "", fmt.Errorf("missing %q in %s", key, secret.Name) + } + + if len(val) == 0 { + return "", fmt.Errorf("empty value %q in %s", key, secret.Name) + } + + return key, nil +} + +func validateSecretsExist(cs *framework.ClientSet, names []string) error { + for _, name := range names { + if err := validateSecret(cs, name); err != nil { + return err + } + klog.Infof("Secret %q exists in namespace %q", name, ctrlcommon.MCONamespace) + } + + return nil +} + +func createLongLivedImagePushSecretForPool(ctx context.Context, cs *framework.ClientSet, poolName string) (string, error) { + opts := helpers.LongLivedSecretOpts{ + ServiceAccount: metav1.ObjectMeta{ + Name: "builder", + Namespace: ctrlcommon.MCONamespace, + }, + Secret: metav1.ObjectMeta{ + Name: fmt.Sprintf("ocl-%s-push-secret", poolName), + Namespace: ctrlcommon.MCONamespace, + }, + Lifetime: "24h", + } + + if err := createLongLivedPullSecret(ctx, cs, opts); err != nil { + return "", err + } + + return opts.Secret.Name, nil 
+} + +func createLongLivedPullSecret(ctx context.Context, cs *framework.ClientSet, opts helpers.LongLivedSecretOpts) error { + secretLabels := map[string]string{ + createdByOnClusterBuildsHelper: "", + "machineconfiguration.openshift.io/long-lived-image-pull-secret": "", + } + + secret, err := cs.CoreV1Interface.Secrets(opts.Secret.Namespace).Get(ctx, opts.Secret.Name, metav1.GetOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return fmt.Errorf("could not look up secret %q: %w", opts.Secret.Name, err) + } + + sel, err := labels.Set(secretLabels).AsValidatedSelector() + if err != nil { + return fmt.Errorf("could not validate selector labels: %w", err) + } + + if secret != nil && sel.Matches(labels.Set(secret.Labels)) { + klog.Infof("Found preexisting long-lived secret %q, reusing", opts.Secret.Name) + return nil + } + + if err := helpers.CreateLongLivedPullSecret(ctx, cs, opts); err != nil { + return fmt.Errorf("could not create long-lived pull secret %s: %w", opts.Secret.Name, err) + } + + secret, err = cs.CoreV1Interface.Secrets(opts.Secret.Namespace).Get(ctx, opts.Secret.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not fetch long-lived pull secret %s for labelling: %w", opts.Secret.Name, err) + } + + for k, v := range secretLabels { + metav1.SetMetaDataLabel(&secret.ObjectMeta, k, v) + } + + _, err = cs.CoreV1Interface.Secrets(opts.Secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + + if err == nil { + klog.Infof("Created long-lived image pull secret %q", opts.Secret.Name) + } + + return err +} diff --git a/devex/cmd/onclustertesting/setup.go b/devex/cmd/onclustertesting/setup.go new file mode 100644 index 0000000000..0cf288e256 --- /dev/null +++ b/devex/cmd/onclustertesting/setup.go @@ -0,0 +1,231 @@ +package main + +import ( + "context" + "fmt" + "time" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + 
"github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + "k8s.io/klog/v2" +) + +func init() { + setupOpts := opts{} + + setupCmd := &cobra.Command{ + Use: "setup", + Short: "Sets up pool for on-cluster build testing", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runSetupCmd(setupOpts) + }, + } + + inClusterRegistryCmd := &cobra.Command{ + Use: "in-cluster-registry", + Short: "Sets up pool for on-cluster build testing using an ImageStream", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runInClusterRegistrySetupCmd(setupOpts) + }, + } + + ciSetupCmd := &cobra.Command{ + Use: "ci", + Short: "Sets up a cluster for on-cluster builds in a CI context.", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runCiSetupCmd(setupOpts) + }, + } + + setupCmd.AddCommand(inClusterRegistryCmd) + setupCmd.AddCommand(ciSetupCmd) + setupCmd.PersistentFlags().StringVar(&setupOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to setup") + setupCmd.PersistentFlags().BoolVar(&setupOpts.waitForBuildInfo, "wait-for-build", false, "Wait for build info") + setupCmd.PersistentFlags().StringVar(&setupOpts.pullSecretName, "pull-secret-name", "", "The name of a preexisting secret to use as the pull secret. If absent, will clone global pull secret.") + setupCmd.PersistentFlags().StringVar(&setupOpts.pushSecretName, "push-secret-name", "", "The name of a preexisting secret to use as the push secret.") + setupCmd.PersistentFlags().StringVar(&setupOpts.pullSecretPath, "pull-secret-path", "", "Path to a pull secret K8s YAML to use. 
If absent, will clone global pull secret.") + setupCmd.PersistentFlags().StringVar(&setupOpts.pushSecretPath, "push-secret-path", "", "Path to a push secret K8s YAML to use.") + setupCmd.PersistentFlags().StringVar(&setupOpts.finalImagePullspec, "final-pullspec", "", "The final image pullspec to use for testing") + setupCmd.PersistentFlags().StringVar(&setupOpts.containerfilePath, "containerfile-path", "", "Optional Containerfile to inject for the build.") + setupCmd.PersistentFlags().BoolVar(&setupOpts.enableFeatureGate, "enable-feature-gate", false, "Enables the required featuregates if not already enabled.") + setupCmd.PersistentFlags().BoolVar(&setupOpts.injectYumRepos, "inject-yum-repos", false, fmt.Sprintf("Injects contents from the /etc/yum.repos.d and /etc/pki/rpm-gpg directories found in %s into the %s namespace.", yumReposContainerImagePullspec, ctrlcommon.MCONamespace)) + + rootCmd.AddCommand(setupCmd) +} + +func runSetupCmd(setupOpts opts) error { + utils.ParseFlags() + + // TODO: Figure out how to use cobra flags for validation directly. 
+ if err := errIfNotSet(setupOpts.poolName, "pool"); err != nil { + return err + } + + if err := errIfNotSet(setupOpts.finalImagePullspec, "final-pullspec"); err != nil { + return err + } + + if isNoneSet(setupOpts.pushSecretPath, setupOpts.pushSecretName) { + return fmt.Errorf("either --push-secret-name or --push-secret-path must be provided") + } + + if !isOnlyOneSet(setupOpts.pushSecretPath, setupOpts.pushSecretName) { + return fmt.Errorf("flags --push-secret-name and --push-secret-path cannot be combined") + } + + if !isOnlyOneSet(setupOpts.pullSecretPath, setupOpts.pullSecretName) { + return fmt.Errorf("flags --pull-secret-name and --pull-secret-path cannot be combined") + } + + if err := utils.CheckForBinaries([]string{"oc"}); err != nil { + return err + } + + cs := framework.NewClientSet("") + + if err := checkForRequiredFeatureGates(cs, setupOpts); err != nil { + return err + } + + return mobSetup(cs, opts{ + pushSecretName: setupOpts.pushSecretName, + pullSecretName: setupOpts.pullSecretName, + pushSecretPath: setupOpts.pushSecretPath, + pullSecretPath: setupOpts.pullSecretPath, + finalImagePullspec: setupOpts.finalImagePullspec, + containerfilePath: setupOpts.containerfilePath, + poolName: setupOpts.poolName, + injectYumRepos: setupOpts.injectYumRepos, + }) +} + +func runInClusterRegistrySetupCmd(setupOpts opts) error { + utils.ParseFlags() + + if err := errIfNotSet(setupOpts.poolName, "pool"); err != nil { + return err + } + + cs := framework.NewClientSet("") + + if err := checkForRequiredFeatureGates(cs, setupOpts); err != nil { + return err + } + + // TODO: Validate that pulls work with the pull image secret. 
+ pushSecretName, err := createLongLivedImagePushSecretForPool(context.TODO(), cs, setupOpts.poolName) + if err != nil { + return fmt.Errorf("could not create long-lived push and pull secrets: %w", err) + } + + imagestreamName := "os-image" + if err := createImagestream(cs, imagestreamName); err != nil { + return err + } + + pullspec, err := getImagestreamPullspec(cs, imagestreamName) + if err != nil { + return err + } + + setupOpts.pullSecretName = globalPullSecretCloneName + setupOpts.finalImagePullSecretName = pushSecretName + setupOpts.pushSecretName = pushSecretName + setupOpts.finalImagePullspec = pullspec + + return mobSetup(cs, setupOpts) +} + +func mobSetup(cs *framework.ClientSet, setupOpts opts) error { + eg := errgroup.Group{} + + eg.Go(func() error { + _, err := createPool(cs, setupOpts.poolName) + return err + }) + + eg.Go(func() error { + return createSecrets(cs, setupOpts) + }) + + if err := eg.Wait(); err != nil { + return err + } + + mosc, err := setupOpts.toMachineOSConfig() + if err != nil { + return err + } + + if err := createMachineOSConfig(cs, mosc); err != nil { + return err + } + + if !setupOpts.waitForBuildInfo { + return nil + } + + return waitForBuildInfo(cs, setupOpts.poolName) +} + +func createSecrets(cs *framework.ClientSet, opts opts) error { + start := time.Now() + + eg := errgroup.Group{} + + eg.Go(func() error { + if opts.shouldCloneGlobalPullSecret() { + if err := copyGlobalPullSecret(cs); err != nil { + // Not sure why this snarfs any errors from this process. 
+ return nil + } + } + + return nil + }) + + if opts.pushSecretPath != "" { + pushSecretPath := opts.pushSecretPath + eg.Go(func() error { + return createSecretFromFile(cs, pushSecretPath) + }) + } + + if opts.pullSecretPath != "" { + pullSecretPath := opts.pullSecretPath + eg.Go(func() error { + return createSecretFromFile(cs, pullSecretPath) + }) + + } + + if opts.injectYumRepos { + eg.Go(func() error { + return extractAndInjectYumEpelRepos(cs) + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + secretNames := opts.getSecretNameParams() + if err := validateSecretsExist(cs, secretNames); err != nil { + return err + } + + klog.Infof("All secrets set up after %s", time.Since(start)) + + return nil +} + +func waitForBuildInfo(_ *framework.ClientSet, _ string) error { + klog.Infof("no-op for now") + return nil +} diff --git a/devex/cmd/onclustertesting/teardown.go b/devex/cmd/onclustertesting/teardown.go new file mode 100644 index 0000000000..cbcae7ce01 --- /dev/null +++ b/devex/cmd/onclustertesting/teardown.go @@ -0,0 +1,89 @@ +package main + +import ( + "context" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +type teardownOpts struct { + poolName string + dir string +} + +func init() { + teardownOpts := teardownOpts{} + + teardownCmd := &cobra.Command{ + Use: "teardown", + Short: "Tears down the pool for on-cluster build testing", + Long: "", + RunE: func(_ *cobra.Command, _ []string) error { + return runTeardownCmd(teardownOpts) + }, + } + + teardownCmd.PersistentFlags().StringVar(&teardownOpts.poolName, "pool", defaultLayeredPoolName, "Pool name to teardown") + teardownCmd.PersistentFlags().StringVar(&teardownOpts.dir, "dir", "", "Dir to store extract build objects") + + rootCmd.AddCommand(teardownCmd) +} + +func 
runTeardownCmd(opts teardownOpts) error { + utils.ParseFlags() + + if opts.poolName == "" { + klog.Fatalln("No pool name provided!") + } + + return mobTeardown(framework.NewClientSet(""), opts.poolName) +} + +func mobTeardown(cs *framework.ClientSet, targetPool string) error { + mcp, err := cs.MachineConfigPools().Get(context.TODO(), targetPool, metav1.GetOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return err + } + + if err == nil && !hasOurLabel(mcp.Labels) { + klog.Warningf("Provided MachineConfigPool %q was not created by this program, will ignore", mcp.Name) + klog.Infof("Will do a label query searching for %q", createdByOnClusterBuildsHelper) + + mcp = nil + } + + if apierrs.IsNotFound(err) { + if targetPool == defaultLayeredPoolName { + klog.Infof("Default MachineConfigPool %q not found, maybe you forgot to provide the pool name?", defaultLayeredPoolName) + } else { + klog.Infof("Provided MachineConfigPool %q not found, maybe you provided the wrong pool name?", targetPool) + } + + mcp = nil + } + + if err := deleteBuildObjects(cs); err != nil { + return err + } + + if mcp != nil { + if err := teardownPool(cs, mcp); err != nil { + return err + } + } + + if err := deleteAllPoolsWithOurLabel(cs); err != nil { + return err + } + + if err := deleteMachineOSBuilds(cs); err != nil { + return err + } + + return deleteMachineOSConfigs(cs) +} diff --git a/devex/cmd/run-on-all-nodes/README.md b/devex/cmd/run-on-all-nodes/README.md new file mode 100644 index 0000000000..56a5fbef53 --- /dev/null +++ b/devex/cmd/run-on-all-nodes/README.md @@ -0,0 +1,243 @@ +# run-on-all-nodes + +```console +Automates running a command on all nodes in a given OpenShift cluster + +Usage: + run-on-all-nodes [flags] [command] + run-on-all-nodes [command] + +Available Commands: + completion Generate the autocompletion script for the specified shell + help Help about any command + version Print the current version + +Flags: + --exit-zero Return zero even if a command fails + -h, 
--help help for run-on-all-nodes + --json Write output in JSON format + --keep-going Do not stop on first command error + --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster. + --label-selector string Label selector for nodes. + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + -v, --v Level number for the log level verbosity + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging (only works for the default text log format) + --write-logs Write command logs to disk under $PWD/.log + +Use "run-on-all-nodes [command] --help" for more information about a command. +``` + +This command allows one to run a command across all (or a subset) of their cluster nodes. + +## How to use + +Let's say that you want to run `$ rpm-ostree status` on all of your cluster nodes: + +```console +$ run-on-all-nodes 'rpm-ostree status' +Running on nodes: [ip-10-0-11-145.ec2.internal ip-10-0-16-30.ec2.internal ip-10-0-34-4.ec2.internal ip-10-0-59-143.ec2.internal ip-10-0-59-75.ec2.internal ip-10-0-6-62.ec2.internal] + +[ip-10-0-59-143.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:20:23Z) + + +[ip-10-0-59-75.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:20:13Z) + + +[ip-10-0-11-145.ec2.internal - 
[node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:12:20Z) + + +[ip-10-0-16-30.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:12:34Z) + + +[ip-10-0-6-62.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T15:46:41Z) + + +[ip-10-0-34-4.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:12:21Z) +``` + +Now, let's say that you only want to run it on your worker nodes. 
You can add the node-role label selector thusly: + +```console +$ run-on-all-nodes --label-selector 'node-role.kubernetes.io/worker=' 'rpm-ostree status' + +Using label selector: node-role.kubernetes.io/worker= +Running on nodes: [ip-10-0-59-143.ec2.internal ip-10-0-59-75.ec2.internal ip-10-0-6-62.ec2.internal] + +[ip-10-0-59-143.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:20:23Z) + + +[ip-10-0-59-75.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T13:20:13Z) + + +[ip-10-0-6-62.ec2.internal - [node-role.kubernetes.io/worker]]: +$ rpm-ostree status +State: idle +Deployments: +* ostree-unverified-registry:registry.ci.openshift.org/ocp/4.16-2024-02-13-072746@sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Digest: sha256:6fb6e4d6d6e813ac88ad56f2b822cf28bfa4cf97ec8744df8301fdb817485636 + Version: 416.94.202402060821-0 (2024-02-13T15:46:41Z) +``` + +The program will halt on the first error it encounters while running commands. 
For example, if you attempt to run an unknown command: + +```console +$ run-on-all-nodes 'unknown-command' +E0213 11:41:15.559442 8081 run.go:74] "command failed" err="could not run command /Users/zzlotnik/bin/oc debug node/ip-10-0-16-30.ec2.internal -- chroot /host /bin/bash -c unknown-command: exit status 1" +``` + +To keep executing, use the `--keep-going` flag: + +```console +$ run-on-all-nodes --keep-going 'unknown-command' +Running on nodes: [ip-10-0-11-145.ec2.internal ip-10-0-16-30.ec2.internal ip-10-0-34-4.ec2.internal ip-10-0-59-143.ec2.internal ip-10-0-59-75.ec2.internal ip-10-0-6-62.ec2.internal] + +[ip-10-0-11-145.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found + + +[ip-10-0-6-62.ec2.internal - [node-role.kubernetes.io/worker]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found + + +[ip-10-0-16-30.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found + + +[ip-10-0-59-143.ec2.internal - [node-role.kubernetes.io/worker]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found + + +[ip-10-0-34-4.ec2.internal - [node-role.kubernetes.io/master node-role.kubernetes.io/control-plane]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found + + +[ip-10-0-59-75.ec2.internal - [node-role.kubernetes.io/worker]]: +$ unknown-command +/bin/bash: line 1: unknown-command: command not found +``` + +To capture output from each node, use the `--write-logs` flag: + +```console +$ run-on-all-nodes --write-logs 'uptime' +Running on nodes: [ip-10-0-11-145.ec2.internal ip-10-0-16-30.ec2.internal ip-10-0-34-4.ec2.internal ip-10-0-59-143.ec2.internal ip-10-0-59-75.ec2.internal ip-10-0-6-62.ec2.internal] + +[ip-10-0-16-30.ec2.internal - [node-role.kubernetes.io/control-plane 
node-role.kubernetes.io/master]]: +$ uptime + 17:14:37 up 1:18, 0 users, load average: 6.99, 6.75, 6.71 + +Writing log to ip-10-0-16-30.ec2.internal.log + +[ip-10-0-11-145.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ uptime + 17:14:37 up 1:23, 0 users, load average: 7.02, 6.92, 7.07 + +Writing log to ip-10-0-11-145.ec2.internal.log + +[ip-10-0-59-75.ec2.internal - [node-role.kubernetes.io/worker]]: +$ uptime + 17:14:37 up 1:36, 0 users, load average: 6.21, 5.46, 5.26 + +Writing log to ip-10-0-59-75.ec2.internal.log + +[ip-10-0-34-4.ec2.internal - [node-role.kubernetes.io/control-plane node-role.kubernetes.io/master]]: +$ uptime + 17:14:37 up 1:29, 0 users, load average: 9.38, 7.76, 7.76 + +Writing log to ip-10-0-34-4.ec2.internal.log + +[ip-10-0-6-62.ec2.internal - [node-role.kubernetes.io/worker]]: +$ uptime + 17:14:37 up 1:27, 0 users, load average: 4.56, 4.28, 4.25 + +Writing log to ip-10-0-6-62.ec2.internal.log + +[ip-10-0-59-143.ec2.internal - [node-role.kubernetes.io/worker]]: +$ uptime + 17:14:37 up 1:32, 0 users, load average: 4.87, 5.05, 5.00 + +Writing log to ip-10-0-59-143.ec2.internal.log +``` + +The logs will be written to the current directory: + +```console +$ ls -la *.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-11-145.ec2.internal.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-16-30.ec2.internal.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-34-4.ec2.internal.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-59-143.ec2.internal.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-59-75.ec2.internal.log +-rwxr-xr-x@ 1 zzlotnik staff 62 Feb 13 12:14 ip-10-0-6-62.ec2.internal.log + +$ cat ip-10-0-11-145.ec2.internal.log + 17:14:37 up 1:23, 0 users, load average: 7.02, 6.92, 7.07 +``` + +## How does it work? + +This program shells out to the `oc` binary and uses the `oc debug` command. 
In +order to set up a suitable environment to run the command, we pass `chroot +/host /bin/bash -c ""` which ensures that we can use all of the +binaries available on the host. + +For speed, we spawn multiple concurrent instances of `oc debug` and wait for +them to complete. Care is taken to ensure that output from each command is kept +separate so there will be no output interleaving. + +## Notes +- If a single command encounters an error, the rest of the commands may not execute. This behavior can be overridden using the `--keep-going` flag. +- The `--exit-zero` flag will cause `run-on-all-nodes` to exit with the exit code `0`, even when an error is encountered. This can be useful for running investigative commands that you know will fail. diff --git a/devex/cmd/run-on-all-nodes/main.go b/devex/cmd/run-on-all-nodes/main.go new file mode 100644 index 0000000000..c84135871d --- /dev/null +++ b/devex/cmd/run-on-all-nodes/main.go @@ -0,0 +1,253 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "os" + "os/exec" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/component-base/cli" + "k8s.io/klog/v2" + + "golang.org/x/sync/errgroup" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + aggerrs "k8s.io/apimachinery/pkg/util/errors" +) + +type runOpts struct { + command string + kubeconfig string + labelSelector string + keepGoing bool + writeLogs bool + writeJSONLogs bool + json bool + exitZero bool +} + +func main() { + opts := runOpts{} + + rootCmd := &cobra.Command{ + Use: "run-on-all-nodes [flags] [command]", + Short: "Automates running a command on all nodes in a given OpenShift cluster", + Long: "", + RunE: func(_ *cobra.Command, args []string) error { + if args[0] == "" { + return fmt.Errorf("no command provided") + } + + opts.command = args[0] + + return 
runOnAllNodes(opts) + }, + Args: cobra.ExactArgs(1), + } + + rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) + rootCmd.PersistentFlags().StringVar(&opts.labelSelector, "label-selector", "", "Label selector for nodes.") + rootCmd.PersistentFlags().BoolVar(&opts.keepGoing, "keep-going", false, "Do not stop on first command error") + rootCmd.PersistentFlags().BoolVar(&opts.writeLogs, "write-logs", false, "Write command logs to disk under $PWD/-{stdout,stderr}.log") + rootCmd.PersistentFlags().BoolVar(&opts.writeJSONLogs, "write-json-logs", false, "Write logs in JSON format to disk under $PWD/-results.json") + rootCmd.PersistentFlags().BoolVar(&opts.json, "json", false, "Write output in JSON format") + rootCmd.PersistentFlags().BoolVar(&opts.exitZero, "exit-zero", false, "Return zero even if a command fails") + + os.Exit(cli.Run(rootCmd)) +} + +func getNodeRoles(node *corev1.Node) []string { + roles := []string{} + + for label := range node.Labels { + if strings.Contains(label, "node-role.kubernetes.io") { + roles = append(roles, label) + } + } + + return roles +} + +func getNodeNames(nodes *corev1.NodeList) []string { + names := []string{} + + for _, node := range nodes.Items { + names = append(names, node.Name) + } + + return names +} + +func runCommand(outChan chan output, node *corev1.Node, opts runOpts) error { + cmd := exec.Command("oc", "debug", fmt.Sprintf("node/%s", node.Name), "--", "chroot", "/host", "/bin/bash", "-c", opts.command) + + stdout := bytes.NewBuffer([]byte{}) + stderr := bytes.NewBuffer([]byte{}) + cmd.Stdout = stdout + cmd.Stderr = stderr + cmd.Env = utils.ToEnvVars(map[string]string{ + "KUBECONFIG": opts.kubeconfig, + }) + + runErr := cmd.Run() + + out := output{ + RemoteCommand: opts.command, + LocalCommand: cmd.String(), + node: node, + stdout: stdout, + stderr: stderr, + err: runErr, + } + + outChan <- out + + // If we're not supposed to keep going and we encounter an error, stop here. 
+ if !opts.keepGoing && runErr != nil { + return fmt.Errorf("could not run command %s, stdout %q, stderr %q: %w", cmd, stdout.String(), stderr.String(), runErr) + } + + if opts.writeLogs { + return aggerrs.NewAggregate([]error{out.ToFile(), runErr}) + } + + if opts.writeJSONLogs { + return aggerrs.NewAggregate([]error{out.ToJSONFile(), runErr}) + } + + return runErr +} + +func writeToLogs(out output) error { + writeLog := func(node *corev1.Node, streamName string, buf *bytes.Buffer) error { + logFileName := fmt.Sprintf("%s-%s.log", node.Name, streamName) + klog.Infof("Writing output to %s", logFileName) + return os.WriteFile(logFileName, buf.Bytes(), 0o644) + } + + eg := errgroup.Group{} + + eg.Go(func() error { + return writeLog(out.node, "stdout", out.stdout) + }) + + eg.Go(func() error { + return writeLog(out.node, "stderr", out.stderr) + }) + + return eg.Wait() +} + +func runCommandOnAllNodes(nodes *corev1.NodeList, opts runOpts) error { + eg := new(errgroup.Group) + + // Spawn a separate error-collection Goroutine so that we can collect all errors + // received to determine the exit code. + errChan := make(chan error) + + errs := []error{} + go func() { + for err := range errChan { + errs = append(errs, err) + } + }() + + outChan := make(chan output) + + // Spawn a separate logging Goroutine so that outputs are not interleaved. + go func() { + for msg := range outChan { + if !opts.json { + klog.Info(msg) + continue + } + + out, err := json.Marshal(msg) + if err != nil { + // Send the error to the error channel for later handling / processing. + errChan <- err + klog.Errorf("could not write output from node %s: %s", msg.node.Name, err) + } + + klog.Info(string(out)) + } + }() + + for _, node := range nodes.Items { + node := node + // For each node, spawn an oc command and run the provided command on the node. 
+ eg.Go(func() error { + err := runCommand(outChan, &node, opts) + // If we should keep going, collect the error via the error channel for + // future processing. + if opts.keepGoing { + errChan <- err + return nil + } + + // If we should not keep going, return the error value directly. This + // will stop all of the other goroutines in this errorgroup. + return err + }) + } + + if err := eg.Wait(); err != nil { + return err + } + + close(outChan) + close(errChan) + + return aggerrs.NewAggregate(errs) +} + +func runOnAllNodes(opts runOpts) error { + if err := utils.CheckForBinaries([]string{"oc"}); err != nil { + return err + } + + if opts.writeLogs && opts.writeJSONLogs { + return fmt.Errorf("--write-logs and --write-json-logs cannot be combined") + } + + cs := framework.NewClientSet("") + + kubeconfig, err := cs.GetKubeconfig() + if err != nil { + return err + } + + opts.kubeconfig = kubeconfig + + listOpts := metav1.ListOptions{} + + if opts.labelSelector != "" { + listOpts.LabelSelector = opts.labelSelector + klog.Info("Using label selector:", opts.labelSelector) + } + + nodes, err := cs.CoreV1Interface.Nodes().List(context.TODO(), listOpts) + if err != nil { + return err + } + + klog.Info("Running on nodes:", getNodeNames(nodes)) + klog.Info("") + + err = runCommandOnAllNodes(nodes, opts) + if opts.exitZero { + klog.Info("--exit-zero set, will return zero even though the following error(s) occurred") + klog.Error(err) + return nil + } + + return err +} diff --git a/devex/cmd/run-on-all-nodes/output.go b/devex/cmd/run-on-all-nodes/output.go new file mode 100644 index 0000000000..9ac8de7303 --- /dev/null +++ b/devex/cmd/run-on-all-nodes/output.go @@ -0,0 +1,95 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "strings" + + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type output struct { + RemoteCommand string `json:"remoteCommand"` + LocalCommand string `json:"localCommand"` + node *corev1.Node + 
stdout *bytes.Buffer + stderr *bytes.Buffer + err error +} + +func (o output) String() string { + out := &strings.Builder{} + fmt.Fprintf(out, "[%s - %v]:\n", o.node.Name, getNodeRoles(o.node)) + fmt.Fprintf(out, "$ %s\n", o.RemoteCommand) + fmt.Fprintln(out, o.stdout.String()) + fmt.Fprintln(out, o.stderr.String()) + + if o.err != nil { + fmt.Fprintln(out, "Full invocation:", o.LocalCommand) + fmt.Fprintln(out, "Error:", o.err) + } + + return out.String() +} + +func (o output) MarshalJSON() ([]byte, error) { + type out struct { + LocalCommand string `json:"localCommand"` + RemoteCommand string `json:"remoteCommand"` + NodeRoles []string `json:"nodeRoles"` + Node string `json:"node"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` + Error string `json:"error,omitempty"` + } + + jsonOut := out{ + LocalCommand: o.LocalCommand, + RemoteCommand: o.RemoteCommand, + Node: o.node.Name, + Stdout: o.stdout.String(), + Stderr: o.stderr.String(), + NodeRoles: getNodeRoles(o.node), + } + + if o.err != nil { + jsonOut.Error = o.err.Error() + } + + return json.Marshal(jsonOut) +} + +func (o output) ToFile() error { + writeLog := func(node *corev1.Node, streamName string, buf *bytes.Buffer) error { + logFileName := fmt.Sprintf("%s-%s.log", node.Name, streamName) + klog.Infof("Writing output to %s", logFileName) + return os.WriteFile(logFileName, buf.Bytes(), 0o644) + } + + eg := errgroup.Group{} + + eg.Go(func() error { + return writeLog(o.node, "stdout", o.stdout) + }) + + eg.Go(func() error { + return writeLog(o.node, "stderr", o.stderr) + }) + + return eg.Wait() +} + +func (o output) ToJSONFile() error { + outBytes, err := json.Marshal(o) + if err != nil { + return err + } + + filename := fmt.Sprintf("%s-results.json", o.node.Name) + klog.Infof("Writing output in JSON format to %s", filename) + return os.WriteFile(filename, outBytes, 0o644) +} diff --git a/devex/cmd/wait-for-mcp/README.md b/devex/cmd/wait-for-mcp/README.md new file mode 100644 index 
0000000000..f7a399bec0 --- /dev/null +++ b/devex/cmd/wait-for-mcp/README.md @@ -0,0 +1,41 @@ +# wait-for-mcp + +This is a small utility that will wait for a given MachineConfigPool to complete its rollout. + +## Usage: + +One can specify which MachineConfigPool(s) to watch in a given cluster: + +```console +$ wait-for-mcp worker +I0921 10:58:59.331211 43576 main.go:36] Timeout set to 15m0s +I0921 10:58:59.421173 43576 machineconfigpool.go:103] Current nodes for pool "worker" are: [ip-10-0-4-22.ec2.internal ip-10-0-54-17.ec2.internal] +I0921 10:58:59.421207 43576 machineconfigpool.go:104] Waiting for nodes in pool to reach MachineConfig rendered-worker-1033b215f4eb45fc49be53483af65cb2 +I0921 10:58:59.452833 43576 machineconfigpool.go:136] Node ip-10-0-4-22.ec2.internal in pool worker has completed its update after 31.671925ms. 1 node(s) remaining: [ip-10-0-54-17.ec2.internal] +I0921 10:58:59.452872 43576 machineconfigpool.go:136] Node ip-10-0-54-17.ec2.internal in pool worker has completed its update after 31.720111ms. 
0 node(s) remaining: [] +I0921 10:58:59.452884 43576 machineconfigpool.go:142] 2 nodes in pool worker have completed their update after 31.732079ms +``` + +One can watch multiple MachineConfigPools by adding them as space-separated arguments: + +```console +$ wait-for-mcp worker master +I0921 10:59:41.715889 45476 main.go:36] Timeout set to 15m0s +I0921 10:59:45.001080 45476 machineconfigpool.go:103] Current nodes for pool "master" are: [ip-10-0-1-118.ec2.internal ip-10-0-11-70.ec2.internal ip-10-0-6-93.ec2.internal] +I0921 10:59:45.001144 45476 machineconfigpool.go:104] Waiting for nodes in pool to reach MachineConfig rendered-master-0a15368521cd8c2b6eb4688e52019ad5 +I0921 10:59:45.007048 45476 machineconfigpool.go:103] Current nodes for pool "worker" are: [ip-10-0-4-22.ec2.internal ip-10-0-54-17.ec2.internal] +I0921 10:59:45.007069 45476 machineconfigpool.go:104] Waiting for nodes in pool to reach MachineConfig rendered-worker-1033b215f4eb45fc49be53483af65cb2 +I0921 10:59:45.044355 45476 machineconfigpool.go:136] Node ip-10-0-1-118.ec2.internal in pool master has completed its update after 43.299948ms. 2 node(s) remaining: [ip-10-0-11-70.ec2.internal ip-10-0-6-93.ec2.internal] +I0921 10:59:45.044386 45476 machineconfigpool.go:136] Node ip-10-0-11-70.ec2.internal in pool master has completed its update after 43.336307ms. 1 node(s) remaining: [ip-10-0-6-93.ec2.internal] +I0921 10:59:45.044396 45476 machineconfigpool.go:136] Node ip-10-0-6-93.ec2.internal in pool master has completed its update after 43.346557ms. 0 node(s) remaining: [] +I0921 10:59:45.044403 45476 machineconfigpool.go:142] 3 nodes in pool master have completed their update after 43.353696ms +I0921 10:59:45.045320 45476 machineconfigpool.go:136] Node ip-10-0-4-22.ec2.internal in pool worker has completed its update after 38.271006ms. 
1 node(s) remaining: [ip-10-0-54-17.ec2.internal] +I0921 10:59:45.045342 45476 machineconfigpool.go:136] Node ip-10-0-54-17.ec2.internal in pool worker has completed its update after 38.296644ms. 0 node(s) remaining: [] +I0921 10:59:45.045351 45476 machineconfigpool.go:142] 2 nodes in pool worker have completed their update after 38.305848ms + +``` + +Alternatively, if no MachineConfigPools are provided, `wait-for-mcp` will watch +all the MachineConfigPools in a given cluster. Additionally, there is a +`--timeout` flag which can be used to set a custom timeout. The default timeout +is 15 minutes. diff --git a/devex/cmd/wait-for-mcp/main.go b/devex/cmd/wait-for-mcp/main.go new file mode 100644 index 0000000000..097dff982a --- /dev/null +++ b/devex/cmd/wait-for-mcp/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "os" + "time" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/rollout" + "github.com/openshift/machine-config-operator/test/framework" + "github.com/spf13/cobra" + "k8s.io/component-base/cli" + "k8s.io/klog/v2" +) + +func main() { + var timeout string + + rootCmd := &cobra.Command{ + Use: "wait-for-mcp", + Short: "Waits for a given MachineConfigPool to complete its updates.", + Long: "", + RunE: func(_ *cobra.Command, args []string) error { + return waitForMCPRollout(args, timeout) + }, + } + + rootCmd.PersistentFlags().StringVar(&timeout, "timeout", "15m", "Timeout expressed in 0h0m0s format.") + + os.Exit(cli.Run(rootCmd)) +} + +func waitForMCPRollout(args []string, timeout string) error { + parsedTimeout, err := time.ParseDuration(timeout) + if err != nil { + return err + } + + klog.Infof("Timeout set to %s", parsedTimeout) + + if len(args) == 0 { + klog.Infof("No MachineConfigPool(s) provided, will watch all pools") + return rollout.WaitForAllMachineConfigPoolsToComplete(framework.NewClientSet(""), parsedTimeout) + } + + return rollout.WaitForMachineConfigPoolsToComplete(framework.NewClientSet(""), args, parsedTimeout) +} diff 
--git a/devex/internal/pkg/containers/containers.go b/devex/internal/pkg/containers/containers.go new file mode 100644 index 0000000000..e8073d967b --- /dev/null +++ b/devex/internal/pkg/containers/containers.go @@ -0,0 +1,47 @@ +package containers + +import ( + "context" + "strings" + + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" +) + +func ResolveToDigestedPullspec(pullspec, pullSecretPath string) (string, error) { + sysCtx := &types.SystemContext{ + AuthFilePath: pullSecretPath, + } + + if strings.Contains(pullspec, "image-registry-openshift-image-registry") { + sysCtx.OCIInsecureSkipTLSVerify = true + sysCtx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(true) + } + + tagged, err := docker.ParseReference("//" + pullspec) + if err != nil { + return "", err + } + + digest, err := docker.GetDigest(context.TODO(), sysCtx, tagged) + if err != nil { + return "", err + } + + canonical, err := reference.WithDigest(reference.TrimNamed(tagged.DockerReference()), digest) + if err != nil { + return "", err + } + + return canonical.String(), nil +} + +func AddLatestTagIfMissing(pullspec string) (string, error) { + parsed, err := docker.ParseReference("//" + pullspec) + if err != nil { + return "", err + } + + return parsed.DockerReference().String(), nil +} diff --git a/devex/internal/pkg/containers/containers_test.go b/devex/internal/pkg/containers/containers_test.go new file mode 100644 index 0000000000..9a29385e29 --- /dev/null +++ b/devex/internal/pkg/containers/containers_test.go @@ -0,0 +1,47 @@ +package containers + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAddLatestTagIfMissing(t *testing.T) { + testCases := []struct { + name string + pullspec string + expectedTag string + errExpected bool + }{ + { + name: "no tag provided", + pullspec: "quay.io/example/image", + expectedTag: ":latest", + }, + { + name: "tag provided", + 
pullspec: "quay.io/example/image:tag", + expectedTag: ":tag", + }, + { + name: "fully qualified pullspec", + pullspec: "quay.io/example/image@sha256:544d9fd59f8c711929d53e50ac22b19b329d95c2fcf1093cb590ac255267b2d8", + expectedTag: "sha256:544d9fd59f8c711929d53e50ac22b19b329d95c2fcf1093cb590ac255267b2d8", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + result, err := AddLatestTagIfMissing(testCase.pullspec) + + if testCase.errExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.True(t, strings.HasSuffix(result, testCase.expectedTag)) + } + }) + } +} diff --git a/devex/internal/pkg/errors/exec.go b/devex/internal/pkg/errors/exec.go new file mode 100644 index 0000000000..2da7bd365c --- /dev/null +++ b/devex/internal/pkg/errors/exec.go @@ -0,0 +1,40 @@ +package errors + +import ( + "fmt" + "os/exec" +) + +type ExecError struct { + command string + output []byte + err error +} + +func (e *ExecError) Unwrap() error { + return e.err +} + +func (e *ExecError) Error() string { + if e.output != nil { + return fmt.Sprintf("unable to run %s, output %s, error: %s", e.command, string(e.output), e.err) + } + + return fmt.Sprintf("unable to run %s, error: %s", e.command, e.err) +} + +func NewExecErrorWithOutput(cmd *exec.Cmd, output []byte, err error) error { + return NewExecError(cmd, output, err) +} + +func NewExecErrorNoOutput(cmd *exec.Cmd, err error) error { + return NewExecError(cmd, nil, err) +} + +func NewExecError(cmd *exec.Cmd, output []byte, err error) error { + return &ExecError{ + command: cmd.String(), + output: output, + err: err, + } +} diff --git a/devex/internal/pkg/errors/retryable.go b/devex/internal/pkg/errors/retryable.go new file mode 100644 index 0000000000..326d0c8d78 --- /dev/null +++ b/devex/internal/pkg/errors/retryable.go @@ -0,0 +1,76 @@ +package errors + +import "time" + +type Retryer interface { + Clear() + IsReached() bool + Current() interface{} + 
IsEmpty() bool +} + +type timeRetryer struct { + threshold time.Duration + start *time.Time +} + +func NewTimeRetryer(threshold time.Duration) Retryer { + return &timeRetryer{ + threshold: threshold, + } +} + +func (t *timeRetryer) IsEmpty() bool { + return t.start == nil +} + +func (t *timeRetryer) Current() interface{} { + return t.start +} + +func (t *timeRetryer) IsReached() bool { + if t.start == nil { + now := time.Now() + t.start = &now + } + + return time.Since(*t.start) >= t.threshold +} + +func (t *timeRetryer) Clear() { + t.start = nil +} + +type maxAttemptRetryer struct { + threshold int + start *int +} + +func NewMaxAttemptRetryer(threshold int) Retryer { + return &maxAttemptRetryer{ + threshold: threshold, + } +} + +func (m *maxAttemptRetryer) IsEmpty() bool { + return m.start == nil +} + +func (m *maxAttemptRetryer) Current() interface{} { + return m.start +} + +func (m *maxAttemptRetryer) IsReached() bool { + if m.start == nil { + start := 0 + m.start = &start + } + + *m.start++ + + return *m.start > m.threshold +} + +func (m *maxAttemptRetryer) Clear() { + m.start = nil +} diff --git a/devex/internal/pkg/errors/retryable_test.go b/devex/internal/pkg/errors/retryable_test.go new file mode 100644 index 0000000000..042837d5c6 --- /dev/null +++ b/devex/internal/pkg/errors/retryable_test.go @@ -0,0 +1,63 @@ +package errors + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestMaxCountRetryable(t *testing.T) { + t.Parallel() + + mar := NewMaxAttemptRetryer(10) + + assert.Nil(t, mar.Current()) + + innerTestLoop := func() { + for i := 0; i <= 10; i++ { + if i < 10 { + assert.False(t, mar.IsReached()) + } else { + assert.True(t, mar.IsReached()) + } + + current := mar.Current().(*int) + assert.Equal(t, i+1, *current) + } + } + + for i := 0; i <= 10; i++ { + innerTestLoop() + assert.NotNil(t, mar.Current()) + assert.False(t, mar.IsEmpty()) + mar.Clear() + assert.Nil(t, mar.Current()) + assert.True(t, mar.IsEmpty()) + } +} + 
+func TestTimeRetryable(t *testing.T) { + t.Parallel() + + tr := NewTimeRetryer(time.Millisecond) + + innerTestLoop := func() { + for i := 0; i <= 10; i++ { + assert.False(t, tr.IsReached()) + assert.NotNil(t, tr.Current()) + assert.False(t, tr.IsEmpty()) + } + } + + for i := 0; i <= 10; i++ { + innerTestLoop() + time.Sleep(time.Millisecond) + assert.True(t, tr.IsReached()) + assert.NotNil(t, tr.Current()) + assert.False(t, tr.IsEmpty()) + tr.Clear() + assert.Nil(t, tr.Current()) + assert.True(t, tr.IsEmpty()) + } +} diff --git a/devex/internal/pkg/releasecontroller/releasecontroller.go b/devex/internal/pkg/releasecontroller/releasecontroller.go new file mode 100644 index 0000000000..d4bcb98b58 --- /dev/null +++ b/devex/internal/pkg/releasecontroller/releasecontroller.go @@ -0,0 +1,149 @@ +package releasecontroller + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" +) + +type ReleaseController string + +func (r *ReleaseController) GetAllReleaseStreams() ([]string, error) { + tmp := map[string][]string{} + err := r.doHTTPRequestIntoStruct("/api/v1/releasestreams/all", &tmp) + out := []string{} + for key := range tmp { + out = append(out, key) + } + return out, err +} + +func (r *ReleaseController) GetLatestReleaseForStream(stream string) (*Release, error) { + out := &Release{} + err := r.doHTTPRequestIntoStruct(filepath.Join("/api/v1/releasestream", stream, "latest"), out) + return out, err +} + +func (r *ReleaseController) GetAllReleasesForStream(stream string) (*ReleaseTags, error) { + out := &ReleaseTags{} + err := r.doHTTPRequestIntoStruct(filepath.Join("/api/v1/releasestream", stream, "tags"), out) + return out, err +} + +func (r *ReleaseController) GetReleaseStatus(stream, tag string) (*APIReleaseInfo, error) { + out := &APIReleaseInfo{} + err := r.doHTTPRequestIntoStruct(filepath.Join("/api/v1/releasestream", stream, "release", tag), out) + return out, err +} + +// 
https://amd64.ocp.releases.ci.openshift.org/releasetag/4.15.0-0.nightly-2023-11-28-101923/json +// +// This returns raw bytes for now so we can use a dynamic JSON pathing library +// to parse it to avoid fighting with go mod. +// +// The raw bytes returned are very similar to the ones returned by $ oc adm +// release info. The sole difference seems to be that $ oc adm release info +// returns the fully qualified pullspec for the release instead of the tagged +// pullspec. +func (r *ReleaseController) GetReleaseInfo(tag string) ([]byte, error) { + return r.doHTTPRequestIntoBytes(filepath.Join("releasetag", tag, "json")) +} + +func (r *ReleaseController) doHTTPRequest(path string) (*http.Response, error) { + u := url.URL{ + Scheme: "https", + Host: string(*r), + Path: path, + } + + resp, err := http.Get(u.String()) + + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("got HTTP 404 from %s", u.String()) + } + + return resp, nil +} + +func (r *ReleaseController) doHTTPRequestIntoStruct(path string, out interface{}) error { + resp, err := r.doHTTPRequest(path) + if err != nil { + return err + } + + defer resp.Body.Close() + + return json.NewDecoder(resp.Body).Decode(out) +} + +func (r *ReleaseController) doHTTPRequestIntoBytes(path string) ([]byte, error) { + resp, err := r.doHTTPRequest(path) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + out := bytes.NewBuffer([]byte{}) + + if _, err := io.Copy(out, resp.Body); err != nil { + return nil, err + } + + return out.Bytes(), nil +} + +const ( + Amd64OcpReleaseController ReleaseController = "amd64.ocp.releases.ci.openshift.org" + Arm64OcpReleaseController ReleaseController = "arm64.ocp.releases.ci.openshift.org" + Ppc64leOcpReleaseController ReleaseController = "ppc64le.ocp.releases.ci.openshift.org" + S390xOcpReleaseController ReleaseController = "s390x.ocp.releases.ci.openshift.org" + MultiOcpReleaseController ReleaseController = 
"multi.ocp.releases.ci.openshift.org" + Amd64OkdReleaseController ReleaseController = "amd64.origin.releases.ci.openshift.org" +) + +type ReleaseTags struct { + Name string `json:"name"` + Tags []Release `json:"tags"` +} + +type Release struct { + Name string `json:"name"` + Phase string `json:"phase"` + Pullspec string `json:"pullSpec"` + DownloadURL string `json:"downloadURL"` +} + +func GetReleaseController(kind, arch string) (ReleaseController, error) { + rcs := map[string]map[string]ReleaseController{ + "ocp": { + "amd64": Amd64OcpReleaseController, + "arm64": Arm64OcpReleaseController, + "multi": MultiOcpReleaseController, + }, + "okd": { + "amd64": Amd64OkdReleaseController, + }, + "okd-scos": { + "amd64": Amd64OkdReleaseController, + }, + } + + if _, ok := rcs[kind]; !ok { + return "", fmt.Errorf("invalid kind %q", kind) + } + + if _, ok := rcs[kind][arch]; !ok { + return "", fmt.Errorf("invalid arch %q for kind %q", arch, kind) + } + + return rcs[kind][arch], nil +} diff --git a/devex/internal/pkg/releasecontroller/releasecontrollertypes.go b/devex/internal/pkg/releasecontroller/releasecontrollertypes.go new file mode 100644 index 0000000000..1ebc97cbf4 --- /dev/null +++ b/devex/internal/pkg/releasecontroller/releasecontrollertypes.go @@ -0,0 +1,57 @@ +package releasecontroller + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// All types in this file were copy/pasted from the ReleaseController repository so I wouldn't have to fight with go mod. + +// APIReleaseInfo encapsulates the release verification results and upgrade history for a release tag. +type APIReleaseInfo struct { + // Name is the name of the release tag. + Name string `json:"name"` + // Phase is the phase of the release tag. 
+ Phase string `json:"phase"` + // Results is the status of the release verification jobs for this release tag + Results *VerificationJobsSummary `json:"results,omitempty"` + // UpgradesTo is the list of UpgradeHistory "to" this release tag + UpgradesTo []UpgradeHistory `json:"upgradesTo,omitempty"` + // UpgradesFrom is the list of UpgradeHistory "from" this release tag + UpgradesFrom []UpgradeHistory `json:"upgradesFrom,omitempty"` + // //ChangeLog is the html representation of the changes included in this release tag + // ChangeLog []byte `json:"changeLog,omitempty"` + // //ChangeLogJson is the json representation of the changes included in this release tag + // ChangeLogJson ChangeLog `json:"changeLogJson,omitempty"` +} + +// VerificationJobsSummary an organized, by job type, collection of VerificationStatusMap objects +type VerificationJobsSummary struct { + BlockingJobs VerificationStatusMap `json:"blockingJobs,omitempty"` + InformingJobs VerificationStatusMap `json:"informingJobs,omitempty"` + PendingJobs VerificationStatusMap `json:"pendingJobs,omitempty"` +} + +type VerificationStatusMap map[string]*VerificationStatus + +type VerificationStatus struct { + State string `json:"state"` + URL string `json:"url"` + Retries int `json:"retries,omitempty"` + TransitionTime *metav1.Time `json:"transitionTime,omitempty"` +} + +type UpgradeHistory struct { + From string + To string + + Success int + Failure int + Total int + + History map[string]UpgradeResult +} + +type UpgradeResult struct { + State string `json:"state"` + URL string `json:"url"` +} diff --git a/devex/internal/pkg/releasecontroller/releaseinfo.go b/devex/internal/pkg/releasecontroller/releaseinfo.go new file mode 100644 index 0000000000..6f61157aab --- /dev/null +++ b/devex/internal/pkg/releasecontroller/releaseinfo.go @@ -0,0 +1,41 @@ +package releasecontroller + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" +) + +// There is a way to do this in pure Go, but I'm lazy :P. 
+func GetComponentPullspecForRelease(componentName, releasePullspec string) (string, error) { + template := fmt.Sprintf("{{range .references.spec.tags}}{{if eq .name %q}}{{.from.name}}{{end}}{{end}}", componentName) + + outBuf := bytes.NewBuffer([]byte{}) + + cmd := exec.Command("oc", "adm", "release", "info", "-o=template="+template, releasePullspec) + cmd.Stdout = outBuf + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("could not get pullspec for component %q from release pullspec %q: %w", componentName, releasePullspec, err) + } + + return strings.TrimSpace(outBuf.String()), nil +} + +func GetReleaseInfo(releasePullspec string) ([]byte, error) { + outBuf := bytes.NewBuffer([]byte{}) + stderrBuf := bytes.NewBuffer([]byte{}) + + cmd := exec.Command("oc", "adm", "release", "info", "-o=json", releasePullspec) + cmd.Stdout = outBuf + cmd.Stderr = stderrBuf + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("could not run %s, got output: %s %s", cmd, outBuf.String(), stderrBuf.String()) + } + + return outBuf.Bytes(), nil +} diff --git a/devex/internal/pkg/rollout/componentstatus.go b/devex/internal/pkg/rollout/componentstatus.go new file mode 100644 index 0000000000..9160d91f1f --- /dev/null +++ b/devex/internal/pkg/rollout/componentstatus.go @@ -0,0 +1,131 @@ +package rollout + +import ( + "context" + "fmt" + "time" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" +) + +func WaitForRolloutToComplete(cs *framework.ClientSet, digestedPullspec string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + allComponents := sets.New[string](mcoDaemonsets...).Insert(mcoDeployments...) 
+	updatedComponents := sets.New[string]()
+
+	return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) {
+		for _, daemonset := range mcoDaemonsets {
+			if updatedComponents.Has(daemonset) {
+				continue
+			}
+
+			isUpdated, err := isDaemonsetUpToDate(ctx, cs, daemonset, digestedPullspec)
+			if err != nil {
+				return false, err
+			}
+
+			if isUpdated {
+				updatedComponents.Insert(daemonset)
+			}
+		}
+
+		for _, deployment := range mcoDeployments {
+			if updatedComponents.Has(deployment) {
+				continue
+			}
+
+			isUpdated, err := isDeploymenttUpToDate(ctx, cs, deployment, digestedPullspec)
+			if err != nil {
+				return false, err
+			}
+
+			if isUpdated {
+				updatedComponents.Insert(deployment)
+			}
+		}
+
+		return updatedComponents.Equal(allComponents), nil
+	})
+}
+
+// isAllPodsForComponentUpdated returns true only when every pod labeled
+// k8s-app=<componentName> in the MCO namespace is running the given digested
+// image pullspec (see isPodOnLatestPullspec).
+func isAllPodsForComponentUpdated(ctx context.Context, cs *framework.ClientSet, componentName, digestedPullspec string) (bool, error) {
+	pods, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("k8s-app=%s", componentName),
+	})
+
+	if err != nil {
+		return false, err
+	}
+
+	for _, pod := range pods.Items {
+		pod := pod
+		if !isPodOnLatestPullspec(&pod, componentName, digestedPullspec) {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// isPodOnLatestPullspec returns true when the container named componentName
+// within the given pod is ready, started, and running the given digested
+// image pullspec. Returns false if no matching container is found.
+func isPodOnLatestPullspec(pod *corev1.Pod, componentName, digestedPullspec string) bool {
+	for _, status := range pod.Status.ContainerStatuses {
+		if status.Name != componentName {
+			continue
+		}
+
+		if status.ImageID != digestedPullspec {
+			return false
+		}
+
+		if !status.Ready {
+			return false
+		}
+
+		// status.Started is a *bool, so it must be nil-checked and
+		// dereferenced. The previous check compared it against the address of
+		// a local bool, which is never equal, so this function could never
+		// return true.
+		if status.Started == nil || !*status.Started {
+			return false
+		}
+
+		return true
+	}
+
+	return false
+}
+
+// NOTE(review): function name carries a typo ("Deploymentt"); left unchanged
+// in case other files in this package reference it.
+func isDeploymenttUpToDate(ctx context.Context, cs *framework.ClientSet, componentName, digestedPullspec string) (bool, error) {
+	dp, err := cs.AppsV1Interface.Deployments(ctrlcommon.MCONamespace).Get(ctx,
componentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + if dp.Status.Replicas != dp.Status.UpdatedReplicas { + return false, nil + } + + return isAllPodsForComponentUpdated(ctx, cs, componentName, digestedPullspec) +} + +func isDaemonsetUpToDate(ctx context.Context, cs *framework.ClientSet, componentName, digestedPullspec string) (bool, error) { + ds, err := cs.AppsV1Interface.DaemonSets(ctrlcommon.MCONamespace).Get(ctx, componentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + if ds.Status.DesiredNumberScheduled != ds.Status.UpdatedNumberScheduled { + return false, nil + } + + return isAllPodsForComponentUpdated(ctx, cs, componentName, digestedPullspec) +} diff --git a/devex/internal/pkg/rollout/external.go b/devex/internal/pkg/rollout/external.go new file mode 100644 index 0000000000..86302d5cbf --- /dev/null +++ b/devex/internal/pkg/rollout/external.go @@ -0,0 +1,125 @@ +package rollout + +import ( + "context" + "fmt" + "os/exec" + + routeClient "github.com/openshift/client-go/route/clientset/versioned" + "github.com/openshift/machine-config-operator/devex/internal/pkg/errors" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/framework" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" +) + +const ( + imageRegistryNamespace string = "openshift-image-registry" + imageRegistryObject string = "image-registry" +) + +func ExposeClusterImageRegistry(cs *framework.ClientSet) (string, error) { + kubeconfig, err := cs.GetKubeconfig() + if err != nil { + return "", err + } + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return "", err + } + + rc := routeClient.NewForConfigOrDie(config) + 
+ _, err = rc.RouteV1().Routes(imageRegistryNamespace).Get(context.TODO(), imageRegistryObject, metav1.GetOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return "", err + } + + if apierrs.IsNotFound(err) { + cmd := exec.Command("oc", "expose", "-n", imageRegistryNamespace, fmt.Sprintf("svc/%s", imageRegistryObject)) + cmd.Env = utils.ToEnvVars(map[string]string{ + "KUBECONFIG": kubeconfig, + }) + klog.Infof("Running %s", cmd) + if out, err := cmd.CombinedOutput(); err != nil { + return "", errors.NewExecError(cmd, out, err) + } + } + + // Ensure that the route was created. + _, err = rc.RouteV1().Routes(imageRegistryNamespace).Get(context.TODO(), imageRegistryObject, metav1.GetOptions{}) + if err != nil { + return "", err + } + + registryPatchSpec := []byte(`{"spec": {"tls": {"insecureEdgeTerminationPolicy": "Redirect", "termination": "reencrypt"}}}`) + + _, err = rc.RouteV1().Routes(imageRegistryNamespace).Patch(context.TODO(), imageRegistryObject, k8stypes.MergePatchType, registryPatchSpec, metav1.PatchOptions{}) + if err != nil { + return "", fmt.Errorf("could not patch image-registry: %w", err) + } + klog.Infof("Patched %s", imageRegistryObject) + + cmd := exec.Command("oc", "-n", ctrlcommon.MCONamespace, "policy", "add-role-to-group", "registry-viewer", "system:anonymous") + cmd.Env = utils.ToEnvVars(map[string]string{ + "KUBECONFIG": kubeconfig, + }) + klog.Infof("Running %s", cmd) + if out, err := cmd.CombinedOutput(); err != nil { + return "", errors.NewExecError(cmd, out, err) + } + klog.Infof("Policies added") + + imgRegistryRoute, err := rc.RouteV1().Routes(imageRegistryNamespace).Get(context.TODO(), imageRegistryObject, metav1.GetOptions{}) + if err != nil { + return "", err + } + + extHostname := imgRegistryRoute.Spec.Host + klog.Infof("Cluster image registry exposed using external hostname %s", extHostname) + return extHostname, err +} + +func UnexposeClusterImageRegistry(cs *framework.ClientSet) error { + kubeconfig, err := 
cs.GetKubeconfig() + if err != nil { + return err + } + + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return err + } + + rc := routeClient.NewForConfigOrDie(config) + + if err := rc.RouteV1().Routes(imageRegistryNamespace).Delete(context.TODO(), imageRegistryObject, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { + return err + } + klog.Infof("Route for %s deleted", imageRegistryObject) + + if err := cs.Services(imageRegistryNamespace).Delete(context.TODO(), imageRegistryObject, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) { + return err + } + klog.Infof("Service for %s deleted", imageRegistryObject) + + cmd := exec.Command("oc", "-n", ctrlcommon.MCONamespace, "policy", "remove-role-from-group", "registry-viewer", "system:anonymous") + cmd.Env = utils.ToEnvVars(map[string]string{ + "KUBECONFIG": kubeconfig, + }) + + klog.Infof("Running %s", cmd) + if out, err := cmd.CombinedOutput(); err != nil { + return errors.NewExecError(cmd, out, err) + } + klog.Infof("Policies removed") + + klog.Infof("Cluster image registry is no longer exposed") + + return nil +} diff --git a/devex/internal/pkg/rollout/machineconfigpool.go b/devex/internal/pkg/rollout/machineconfigpool.go new file mode 100644 index 0000000000..b7c9923047 --- /dev/null +++ b/devex/internal/pkg/rollout/machineconfigpool.go @@ -0,0 +1,436 @@ +package rollout + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + errhelpers "github.com/openshift/machine-config-operator/devex/internal/pkg/errors" + "github.com/openshift/machine-config-operator/devex/internal/pkg/utils" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/test/framework" + "golang.org/x/sync/errgroup" + corev1 "k8s.io/api/core/v1" + apierrs 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +var pollInterval = time.Second +var retryableErrThreshold = time.Minute + +func WaitForMachineConfigPoolsToComplete(cs *framework.ClientSet, poolNames []string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + klog.Infof("Waiting up to %s for MachineConfigPools %v to reach updated state", timeout, poolNames) + + return WaitForMachineConfigPoolsToCompleteWithContext(ctx, cs, poolNames) +} + +func WaitForMachineConfigPoolsToCompleteWithContext(ctx context.Context, cs *framework.ClientSet, poolNames []string) error { + return waitForMachineConfigPoolsToCompleteWithContext(ctx, cs, poolNames) +} + +func WaitForMachineConfigPoolUpdateToCompleteWithContext(ctx context.Context, cs *framework.ClientSet, poolName string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Wait for the pool to begin updating. 
+ if err := waitForMachineConfigPoolToStart(ctx, cs, poolName); err != nil { + return fmt.Errorf("pool %s did not start updating: %w", poolName, err) + } + + return waitForMachineConfigPoolAndNodesToComplete(ctx, cs, poolName) +} + +func WaitForMachineConfigPoolUpdateToComplete(cs *framework.ClientSet, timeout time.Duration, poolName string) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + klog.Infof("Waiting up to %s for MachineConfigPool %s to reach updated state", timeout, poolName) + + return WaitForMachineConfigPoolUpdateToCompleteWithContext(ctx, cs, poolName) +} + +func validatePoolIsNotDegraded(mcp *mcfgv1.MachineConfigPool) error { + degraded := []mcfgv1.MachineConfigPoolConditionType{ + mcfgv1.MachineConfigPoolNodeDegraded, + mcfgv1.MachineConfigPoolRenderDegraded, + mcfgv1.MachineConfigPoolDegraded, + mcfgv1.MachineConfigPoolPinnedImageSetsDegraded, + mcfgv1.MachineConfigPoolSynchronizerDegraded, + mcfgv1.MachineConfigPoolBuildFailed, + } + + for _, item := range degraded { + if apihelpers.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, item) { + return fmt.Errorf("pool %s degraded %s", mcp.Name, item) + } + } + + return nil +} + +func WaitForAllMachineConfigPoolsToComplete(cs *framework.ClientSet, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + poolNames, err := getMachineConfigPoolNames(ctx, cs) + if err != nil { + return err + } + + klog.Infof("Watching MachineConfigPool(s): %v", poolNames) + + start := time.Now() + + err = waitForMachineConfigPoolsToCompleteWithContext(ctx, cs, poolNames) + if err == nil { + klog.Infof("All pools updated in %s", time.Since(start)) + return nil + } + + return err +} + +// Waits for the MachineConfigPool to complete. This does not consider +// individual node state and does not provide progress output. 
+func WaitForOnlyMachineConfigPoolToComplete(cs *framework.ClientSet, poolName string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Wait for the pool to begin updating. + if err := waitForMachineConfigPoolToStart(ctx, cs, poolName); err != nil { + return fmt.Errorf("pool %s did not start updating: %w", poolName, err) + } + + return waitForMachineConfigPoolToComplete(ctx, cs, poolName) +} + +func WaitForMachineConfigPoolToComplete(cs *framework.ClientSet, poolName string, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + return waitForNodesToComplete(ctx, cs, poolName) +} + +func waitForMachineConfigPoolsToCompleteWithContext(ctx context.Context, cs *framework.ClientSet, poolNames []string) error { + eg := errgroup.Group{} + + for _, poolName := range poolNames { + poolName := poolName + + eg.Go(func() error { + return waitForMachineConfigPoolAndNodesToComplete(ctx, cs, poolName) + }) + } + + return eg.Wait() +} + +func waitForMachineConfigPoolToStart(ctx context.Context, cs *framework.ClientSet, poolName string) error { + start := time.Now() + + retryer := errhelpers.NewTimeRetryer(retryableErrThreshold) + + return wait.PollUntilContextCancel(ctx, pollInterval, true, func(ctx context.Context) (bool, error) { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + + shouldContinue, err := handleQueryErr(err, retryer) + if err != nil { + return false, err + } + + if !shouldContinue { + return false, nil + } + + if apihelpers.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdating) { + klog.Infof("MachineConfigPool %s began updating after %s", mcp.Name, time.Since(start)) + return true, nil + } + + return false, validatePoolIsNotDegraded(mcp) + }) +} + +func waitForMachineConfigPoolAndNodesToComplete(ctx context.Context, cs *framework.ClientSet, poolName string) 
error { + eg := errgroup.Group{} + + eg.Go(func() error { + return waitForMachineConfigPoolToComplete(ctx, cs, poolName) + }) + + eg.Go(func() error { + return waitForNodesToComplete(ctx, cs, poolName) + }) + + return eg.Wait() +} + +func waitForMachineConfigPoolToComplete(ctx context.Context, cs *framework.ClientSet, poolName string) error { + start := time.Now() + + initial, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + if err != nil { + return err + } + + retryer := errhelpers.NewTimeRetryer(retryableErrThreshold) + + return wait.PollUntilContextCancel(ctx, pollInterval, true, func(ctx context.Context) (bool, error) { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + + shouldContinue, err := handleQueryErr(err, retryer) + if err != nil { + return false, err + } + + if !shouldContinue { + return false, nil + } + + if hasMachineConfigPoolStatusChanged(initial, mcp) { + logMCPChange(initial, mcp, start) + initial = mcp + } + + if apihelpers.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcfgv1.MachineConfigPoolUpdated) { + klog.Infof("MachineConfigPool %s finished updating after %s", mcp.Name, time.Since(start)) + return true, nil + } + + return false, validatePoolIsNotDegraded(mcp) + }) +} + +// Determines if the API error is retryable and ensures we can only retry for a +// finite amount of time. If the error keeps occurring after the given +// threshold, it will return nil. Returns a boolean indicating whether to +// continue from the current point and an error in the event that it is either +// retryable or the threshold has been met. +func handleQueryErr(err error, retryer errhelpers.Retryer) (bool, error) { + if err == nil { + if !retryer.IsEmpty() { + // If error is nil when it was previously not nil, clear the retryer and + // continue. + klog.Infof("The transient error is no longer occurring") + retryer.Clear() + } + + // If no error was encountered, continue as usual. 
+ return true, nil + } + + // If the error is not retryable, stop here. + if !isRetryableErr(err) { + return false, err + } + + // If the retryer is empty and we've encountered an error, log the error. + if retryer.IsEmpty() { + klog.Infof("An error has occurred, will retry for %s or until the error is no longer encountered", retryableErrThreshold) + klog.Warning(err) + } + + // This checks whether the retryer has reached the limit. If the retryer is + // empty, it will populate itself before retrying. + if retryer.IsReached() { + // If no change after our threshold, we stop here. + klog.Infof("Threshold %s reached", retryableErrThreshold) + klog.Warning(err) + return false, err + } + + // At this point, we have a retryable error and know that we can try again. + return false, nil +} + +// Logs the detected MachineConfigPool change. +func logMCPChange(initial, current *mcfgv1.MachineConfigPool, start time.Time) { + changes := []string{ + fmt.Sprintf("MachineConfigPool %s has changed:", initial.Name), + fmt.Sprintf("Degraded: %d -> %d,", initial.Status.DegradedMachineCount, current.Status.DegradedMachineCount), + fmt.Sprintf("Machines: %d -> %d,", initial.Status.MachineCount, current.Status.MachineCount), + fmt.Sprintf("Ready: %d -> %d,", initial.Status.ReadyMachineCount, current.Status.ReadyMachineCount), + fmt.Sprintf("Unavailable: %d -> %d,", initial.Status.UnavailableMachineCount, current.Status.UnavailableMachineCount), + fmt.Sprintf("Updated: %d -> %d", initial.Status.UpdatedMachineCount, current.Status.UpdatedMachineCount), + fmt.Sprintf("after %s", time.Since(start)), + } + + klog.Info(strings.Join(changes, " ")) +} + +// Determines if the MachineConfigPool status has changed by examining the various machine counts. 
+func hasMachineConfigPoolStatusChanged(initial, current *mcfgv1.MachineConfigPool) bool { + if initial.Status.DegradedMachineCount != current.Status.DegradedMachineCount { + return true + } + + if initial.Status.MachineCount != current.Status.MachineCount { + return true + } + + if initial.Status.ReadyMachineCount != current.Status.ReadyMachineCount { + return true + } + + if initial.Status.UnavailableMachineCount != current.Status.UnavailableMachineCount { + return true + } + + if initial.Status.UpdatedMachineCount != current.Status.UpdatedMachineCount { + return true + } + + return false +} + +func waitForNodesToComplete(ctx context.Context, cs *framework.ClientSet, poolName string) error { + doneNodes := sets.New[string]() + nodesForPool := sets.New[string]() + + nodes, err := cs.CoreV1Interface.Nodes().List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("node-role.kubernetes.io/%s", poolName), + }) + + if err != nil { + return err + } + + for _, node := range nodes.Items { + nodesForPool.Insert(node.Name) + } + + start := time.Now() + + klog.Infof("Current nodes for pool %q are: %v", poolName, sets.List(nodesForPool)) + + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + if err != nil { + return err + } + + mosc, err := utils.GetMachineOSConfigForPool(ctx, cs, mcp) + if err != nil && !apierrs.IsNotFound(err) { + return fmt.Errorf("could not get MachineOSConfig: %w", err) + } + + if mosc == nil && apierrs.IsNotFound(err) { + klog.Infof("No MachineOSConfig found, will only consider MachineConfigs") + } + + retryer := errhelpers.NewTimeRetryer(retryableErrThreshold) + + return wait.PollUntilContextCancel(ctx, pollInterval, true, func(ctx context.Context) (bool, error) { + nodes, err := cs.CoreV1Interface.Nodes().List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("node-role.kubernetes.io/%s", poolName), + }) + + shouldContinue, err := handleQueryErr(err, retryer) + if err != nil { + return false, err + } + + if 
!shouldContinue { + return false, nil + } + + for _, node := range nodes.Items { + node := node + + if !nodesForPool.Has(node.Name) { + klog.Infof("Pool %s has gained a new node %s", poolName, node.Name) + nodesForPool.Insert(node.Name) + } + + if doneNodes.Has(node.Name) { + continue + } + + isDone := false + if mosc == nil { + isDone = isNodeDoneAtPool(mcp, &node) + } else { + isDone = isNodeDoneAtPool(mcp, &node) && isNodeDoneAtMosc(mosc, &node) + } + + if isDone { + doneNodes.Insert(node.Name) + diff := sets.List(nodesForPool.Difference(doneNodes)) + klog.Infof("Node %s in pool %s updated after %s. %d node(s) remaining: %v", node.Name, poolName, time.Since(start), len(diff), diff) + } + } + + isDone := doneNodes.Equal(nodesForPool) + if isDone { + klog.Infof("%d node(s) in pool %s updated after %s", nodesForPool.Len(), poolName, time.Since(start)) + } + + return isDone, nil + }) +} + +// Determines if an error is retryable. Currently, that means whether the +// context has been canceled or the deadline has been exceeded. In either of +// those scenarios, we cannot retry. 
+func isRetryableErr(err error) bool { + if errors.Is(err, context.Canceled) { + return false + } + + if errors.Is(err, context.DeadlineExceeded) { + return false + } + + return true +} + +func getMachineConfigPoolNames(ctx context.Context, cs *framework.ClientSet) ([]string, error) { + pools, err := cs.MachineconfigurationV1Interface.MachineConfigPools().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + out := []string{} + + for _, pool := range pools.Items { + out = append(out, pool.Name) + } + + return out, nil +} + +func isNodeConfigDone(node *corev1.Node) bool { + current := node.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey] + desired := node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] + return isNodeDone(node) && current == desired +} + +func isNodeDoneAtPool(mcp *mcfgv1.MachineConfigPool, node *corev1.Node) bool { + current := node.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey] + return isNodeDone(node) && isNodeConfigDone(node) && isNodeImageDone(node) && current == mcp.Spec.Configuration.Name +} + +func isNodeImageDone(node *corev1.Node) bool { + current := node.Annotations[daemonconsts.CurrentImageAnnotationKey] + desired := node.Annotations[daemonconsts.DesiredImageAnnotationKey] + return isNodeDone(node) && current == desired +} + +func isNodeDoneAtMosc(mosc *mcfgv1.MachineOSConfig, node *corev1.Node) bool { + current := node.Annotations[daemonconsts.CurrentImageAnnotationKey] + desired := node.Annotations[daemonconsts.DesiredImageAnnotationKey] + return isNodeDone(node) && isNodeImageDone(node) && desired == string(mosc.Status.CurrentImagePullSpec) && desired != "" && current != "" +} + +func isNodeDone(node *corev1.Node) bool { + return node.Annotations[daemonconsts.MachineConfigDaemonStateAnnotationKey] == daemonconsts.MachineConfigDaemonStateDone +} diff --git a/devex/internal/pkg/rollout/machineconfigpool_test.go b/devex/internal/pkg/rollout/machineconfigpool_test.go new file mode 
100644 index 0000000000..46dbe8a091 --- /dev/null +++ b/devex/internal/pkg/rollout/machineconfigpool_test.go @@ -0,0 +1,74 @@ +package rollout + +import ( + "context" + "fmt" + "testing" + + errhelpers "github.com/openshift/machine-config-operator/devex/internal/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestHandleQueryErr(t *testing.T) { + testCases := []struct { + name string + err error + errExpected bool + expected bool + thresholdReached bool + }{ + { + name: "no error", + errExpected: false, + expected: true, + }, + { + name: "retryable error - threshold not reached", + errExpected: false, + err: fmt.Errorf("retryable"), + expected: false, + }, + { + name: "retryable error - threshold reached", + errExpected: true, + err: fmt.Errorf("retryable"), + expected: false, + thresholdReached: true, + }, + { + name: "non-retryable error", + errExpected: true, + err: context.Canceled, + expected: false, + }, + { + name: "nil error clears threshold", + errExpected: false, + err: nil, + expected: true, + thresholdReached: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + retryer := errhelpers.NewMaxAttemptRetryer(1) + + if testCase.thresholdReached { + assert.True(t, retryer.IsEmpty()) + assert.False(t, retryer.IsReached()) + assert.False(t, retryer.IsEmpty()) + assert.True(t, retryer.IsReached()) + } + + shouldContinue, err := handleQueryErr(testCase.err, retryer) + if testCase.errExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, testCase.expected, shouldContinue) + }) + } +} diff --git a/devex/internal/pkg/rollout/nodeimage.go b/devex/internal/pkg/rollout/nodeimage.go new file mode 100644 index 0000000000..6430461bc4 --- /dev/null +++ b/devex/internal/pkg/rollout/nodeimage.go @@ -0,0 +1,15 @@ +package rollout + +import ( + mcfgv1 "github.com/openshift/api/machineconfiguration/v1alpha1" + daemonconsts 
"github.com/openshift/machine-config-operator/pkg/daemon/constants" + corev1 "k8s.io/api/core/v1" +) + +func isNodeImageEqualToMachineOSConfig(node corev1.Node, mosc *mcfgv1.MachineOSConfig) bool { + desired := node.Annotations[daemonconsts.DesiredImageAnnotationKey] + current := node.Annotations[daemonconsts.CurrentImageAnnotationKey] + mcdState := node.Annotations[daemonconsts.MachineConfigDaemonStateAnnotationKey] + + return desired == current && mcdState == daemonconsts.MachineConfigDaemonStateDone && current == mosc.Status.CurrentImagePullspec +} diff --git a/devex/internal/pkg/rollout/rollout.go b/devex/internal/pkg/rollout/rollout.go new file mode 100644 index 0000000000..382b4de080 --- /dev/null +++ b/devex/internal/pkg/rollout/rollout.go @@ -0,0 +1,370 @@ +package rollout + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/openshift/machine-config-operator/devex/internal/pkg/releasecontroller" + "github.com/openshift/machine-config-operator/test/framework" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" +) + +var ( + mcoDaemonsets = []string{ + "machine-config-daemon", + "machine-config-server", + } + + mcoDeployments = []string{ + "machine-config-operator", + "machine-config-controller", + "machine-os-builder", + } +) + +const ( + cvoName string = "cluster-version-operator" + cvoNamespace string = "openshift-cluster-version" + mcoName string = "machine-config-operator" + + mcoImagesConfigMap string = "machine-config-operator-images" + mcoImageKey string = "machineConfigOperator" + mcoImagesJSON string = "images.json" +) + +func RevertToOriginalMCOImage(cs *framework.ClientSet, forceRestart bool) error { + clusterVersion, err := cs.ConfigV1Interface.ClusterVersions().Get(context.TODO(), 
"version", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get cluster version: %w", err) + } + + currentRelease := clusterVersion.Status.Desired.Image + originalMCOImage, err := releasecontroller.GetComponentPullspecForRelease(mcoName, currentRelease) + if err != nil { + return fmt.Errorf("could not get MCO pullspec for cluster version %s: %w", currentRelease, err) + } + + klog.Infof("Found original MCO image %s for the currently running cluster release (%s)", originalMCOImage, currentRelease) + + if err := ReplaceMCOImage(cs, originalMCOImage, forceRestart); err != nil { + return fmt.Errorf("could not roll MCO back to image %s: %w", originalMCOImage, err) + } + + if err := setDeploymentReplicas(cs, cvoName, cvoNamespace, 1); err != nil { + return fmt.Errorf("could not restore cluster version operator to default replica count of 1") + } + + return nil +} + +func ReplaceMCOImage(cs *framework.ClientSet, pullspec string, forceRestart bool) error { + if err := setDeploymentReplicas(cs, cvoName, cvoNamespace, 0); err != nil { + return fmt.Errorf("could not scale cluster version operator down to zero: %w", err) + } + + if err := setDeploymentReplicas(cs, mcoName, ctrlcommon.MCONamespace, 0); err != nil { + return fmt.Errorf("could not scale machine config operator down to zero: %w", err) + } + + if err := setPullspecOnObjects(cs, pullspec, forceRestart); err != nil { + return err + } + + if err := setDeploymentReplicas(cs, mcoName, ctrlcommon.MCONamespace, 1); err != nil { + return fmt.Errorf("could not scale machine config operator back up: %w", err) + } + + return nil +} + +func RestartMCO(cs *framework.ClientSet, forceRestart bool) error { + if forceRestart { + return forceRestartMCO(cs) + } + + _, images, err := loadMCOImagesConfigMap(cs) + if err != nil { + return fmt.Errorf("could not load or parse ConfigMap %s: %w", mcoImagesConfigMap, err) + } + + return ReplaceMCOImage(cs, images[mcoImageKey], forceRestart) +} + +func forceRestartMCO(cs 
*framework.ClientSet) error { + eg := errgroup.Group{} + + for _, name := range append(mcoDeployments, mcoDaemonsets...) { + name := name + eg.Go(func() error { + return forceRestartPodsForDeploymentOrDaemonset(cs, name) + }) + } + + return eg.Wait() +} + +func forceRestartPodsForDeploymentOrDaemonset(cs *framework.ClientSet, name string) error { + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("k8s-app==%s", name), + }) + + klog.Infof("Found (%d) pods for %s", len(podList.Items), name) + + if err != nil { + return err + } + + eg := errgroup.Group{} + + for _, pod := range podList.Items { + pod := pod + eg.Go(func() error { + if err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("could not delete pod %s: %w", pod.Name, err) + } + + klog.Infof("Deleted pod %s", pod.Name) + + return nil + }) + } + + return eg.Wait() +} + +func setPullspecOnObjects(cs *framework.ClientSet, pullspec string, forceRestart bool) error { + eg := errgroup.Group{} + + eg.Go(func() error { + if err := maybeUpdateMCOConfigMap(cs, pullspec); err != nil { + return fmt.Errorf("could not update MCO images ConfigMap: %w", err) + } + + return nil + }) + + eg.Go(func() error { + if err := updateDaemonsets(cs, pullspec, forceRestart); err != nil { + return fmt.Errorf("could not update daemonsets: %w", err) + } + + return nil + }) + + eg.Go(func() error { + if err := updateDeployments(cs, pullspec, forceRestart); err != nil { + return fmt.Errorf("could not update deployments: %w", err) + } + + return nil + }) + + return eg.Wait() +} + +func updateDeployments(cs *framework.ClientSet, pullspec string, forceRestart bool) error { + eg := errgroup.Group{} + + for _, name := range mcoDeployments { + name := name + eg.Go(func() error { + if err := updateDeployment(cs, name, pullspec); err != nil { + return fmt.Errorf("could not 
update deployment/%s: %w", name, err) + } + + if forceRestart { + return forceRestartPodsForDeploymentOrDaemonset(cs, name) + } + + return nil + }) + } + + return eg.Wait() +} + +func updateDaemonsets(cs *framework.ClientSet, pullspec string, forceRestart bool) error { + eg := errgroup.Group{} + + for _, name := range mcoDaemonsets { + name := name + eg.Go(func() error { + if err := updateDaemonset(cs, name, pullspec); err != nil { + return fmt.Errorf("could not update daemonset/%s: %w", name, err) + } + + if forceRestart { + return forceRestartPodsForDeploymentOrDaemonset(cs, name) + } + + return nil + }) + } + + return eg.Wait() +} + +func loadMCOImagesConfigMap(cs *framework.ClientSet) (*corev1.ConfigMap, map[string]string, error) { + cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), mcoImagesConfigMap, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + + _, ok := cm.Data[mcoImagesJSON] + if !ok { + return nil, nil, fmt.Errorf("expected Configmap %s to have key %s, but was missing", mcoImagesConfigMap, mcoImagesJSON) + } + + images := map[string]string{} + + if err := json.Unmarshal([]byte(cm.Data[mcoImagesJSON]), &images); err != nil { + return nil, nil, fmt.Errorf("could not unpack %s in Configmap %s: %w", mcoImagesJSON, mcoImagesConfigMap, err) + } + + if _, ok := images[mcoImageKey]; !ok { + return nil, nil, fmt.Errorf("expected %s in Configmap %s to have key %s, but was missing", mcoImagesJSON, mcoImagesConfigMap, mcoImageKey) + } + + return cm, images, nil +} + +func maybeUpdateMCOConfigMap(cs *framework.ClientSet, pullspec string) error { + _, images, err := loadMCOImagesConfigMap(cs) + if err != nil { + return fmt.Errorf("could not load or parse ConfigMap %s: %w", mcoImagesConfigMap, err) + } + + if images[mcoImageKey] != pullspec { + klog.Warningf("ConfigMap %s has pullspec %s, which will change to %s. 
A MachineConfig update will occur as a result.", mcoImagesConfigMap, images[mcoImageKey], pullspec) + if err := updateMCOConfigMap(cs, pullspec); err != nil { + return err + } + } else { + klog.Infof("ConfigMap %s already has pullspec %s. Will restart MCO components to cause an update.", mcoImagesConfigMap, pullspec) + } + + return nil +} + +func updateMCOConfigMap(cs *framework.ClientSet, pullspec string) error { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + cm, images, err := loadMCOImagesConfigMap(cs) + if err != nil { + return err + } + + images[mcoImageKey] = pullspec + + imagesBytes, err := json.Marshal(images) + if err != nil { + return err + } + + cm.Data[mcoImagesJSON] = string(imagesBytes) + + _, err = cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) + return err + }) + + if err == nil { + klog.Infof("Set %s in %s in ConfigMap %s to %s", mcoImageKey, mcoImagesJSON, mcoImagesConfigMap, pullspec) + return nil + } + + return fmt.Errorf("could not update ConfigMap %s: %w", mcoImagesConfigMap, err) +} + +func updateDeployment(cs *framework.ClientSet, name, pullspec string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + deploy, err := cs.AppsV1Interface.Deployments(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) + if name == "machine-os-builder" && apierrs.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + if containersNeedUpdated(name, pullspec, deploy.Spec.Template.Spec.Containers) { + klog.Infof("Updating deployment/%s", name) + deploy.Spec.Template.Spec.Containers = updateContainers(name, pullspec, deploy.Spec.Template.Spec.Containers) + } else { + // Cribbed from: https://github.com/kubernetes/kubectl/blob/master/pkg/polymorphichelpers/objectrestarter.go#L32-L119 and https://github.com/derailed/k9s/blob/master/internal/dao/dp.go#L68-L114 + klog.Infof("Restarting deployment/%s", name) + 
deploy.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + } + + _, err = cs.AppsV1Interface.Deployments(ctrlcommon.MCONamespace).Update(context.TODO(), deploy, metav1.UpdateOptions{}) + return err + }) +} + +func updateDaemonset(cs *framework.ClientSet, name, pullspec string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + ds, err := cs.AppsV1Interface.DaemonSets(ctrlcommon.MCONamespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if containersNeedUpdated(name, pullspec, ds.Spec.Template.Spec.Containers) { + klog.Infof("Updating daemonset/%s", name) + ds.Spec.Template.Spec.Containers = updateContainers(name, pullspec, ds.Spec.Template.Spec.Containers) + } else { + // Cribbed from: https://github.com/kubernetes/kubectl/blob/master/pkg/polymorphichelpers/objectrestarter.go#L32-L119 and https://github.com/derailed/k9s/blob/master/internal/dao/dp.go#L68-L114 + klog.Infof("Restarting daemonset/%s", name) + ds.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + } + + _, err = cs.AppsV1Interface.DaemonSets(ctrlcommon.MCONamespace).Update(context.TODO(), ds, metav1.UpdateOptions{}) + return err + }) +} + +func containersNeedUpdated(name, pullspec string, containers []corev1.Container) bool { + for _, container := range containers { + if container.Name == name { + return container.Image != pullspec + } + } + + return false +} + +func updateContainers(name, pullspec string, containers []corev1.Container) []corev1.Container { + out := []corev1.Container{} + + for _, container := range containers { + if container.Name == name { + container.Image = pullspec + container.ImagePullPolicy = corev1.PullAlways + } + + out = append(out, container) + } + + return out +} + +func setDeploymentReplicas(cs *framework.ClientSet, deploymentName, namespace string, replicas int32) error { + 
klog.Infof("Setting replicas for %s/%s to %d", namespace, deploymentName, replicas) + scale, err := cs.AppsV1Interface.Deployments(namespace).GetScale(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + return err + } + + scale.Spec.Replicas = replicas + + _, err = cs.AppsV1Interface.Deployments(namespace).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{}) + return err +} diff --git a/devex/internal/pkg/utils/apiutils.go b/devex/internal/pkg/utils/apiutils.go new file mode 100644 index 0000000000..7f68b17dea --- /dev/null +++ b/devex/internal/pkg/utils/apiutils.go @@ -0,0 +1,123 @@ +package utils + +import ( + "context" + "errors" + "fmt" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/test/framework" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +type notFoundErr struct { + poolName string + err error +} + +func newNotFoundErr(resource, poolName string) error { + return ¬FoundErr{ + poolName: poolName, + err: apierrs.NewNotFound(mcfgv1.GroupVersion.WithResource(resource).GroupResource(), ""), + } +} + +func (n *notFoundErr) Error() string { + return fmt.Sprintf("resource not found for MachineConfigPool %s: %s", n.poolName, n.err) +} + +func (n *notFoundErr) Unwrap() error { + return n.err +} + +func IsNotFoundErr(err error) bool { + notFoundErr := ¬FoundErr{} + return errors.As(err, ¬FoundErr) +} + +func IsMachineConfigPoolLayered(ctx context.Context, cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) (bool, error) { + mosc, err := GetMachineOSConfigForPool(ctx, cs, mcp) + if err != nil && !IsNotFoundErr(err) { + return false, err + } + + return mosc != nil && !IsNotFoundErr(err), nil +} + +func GetMachineOSBuildForPoolName(ctx context.Context, cs *framework.ClientSet, poolName string) (*mcfgv1.MachineOSBuild, error) { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, 
metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return GetMachineOSBuildForPool(ctx, cs, mcp) +} + +func GetMachineOSConfigForPoolName(ctx context.Context, cs *framework.ClientSet, poolName string) (*mcfgv1.MachineOSConfig, error) { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return GetMachineOSConfigForPool(ctx, cs, mcp) +} + +func GetMachineOSBuildForPool(ctx context.Context, cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSBuild, error) { + mosbList, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, mosb := range mosbList.Items { + mosb := mosb + if mosb.Spec.MachineConfig.Name == mcp.Spec.Configuration.Name { + return &mosb, nil + } + } + + return nil, newNotFoundErr("machineosbuilds", mcp.Name) +} + +func GetMachineOSConfigForPool(ctx context.Context, cs *framework.ClientSet, mcp *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSConfig, error) { + moscList, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, mosc := range moscList.Items { + mosc := mosc + if mosc.Spec.MachineConfigPool.Name == mcp.Name { + return &mosc, nil + } + } + + return nil, newNotFoundErr("machineosconfigs", mcp.Name) +} + +func PauseMachineConfigPool(ctx context.Context, cs *framework.ClientSet, poolName string) error { + return setMachineConfigPoolPauseState(ctx, cs, poolName, true) +} + +func UnpauseMachineConfigPool(ctx context.Context, cs *framework.ClientSet, poolName string) error { + return setMachineConfigPoolPauseState(ctx, cs, poolName, false) +} + +func setMachineConfigPoolPauseState(ctx context.Context, cs *framework.ClientSet, poolName string, pauseStatus bool) error { + mcp, err := cs.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{}) + if err != nil { + return 
fmt.Errorf("could not get MachineConfigPool %s for pausing: %w", poolName, err) + } + + if pauseStatus { + klog.Infof("Pausing MachineConfigPool %s", poolName) + } else { + klog.Infof("Unpausing MachineConfigPool %s", poolName) + } + + mcp.Spec.Paused = pauseStatus + + _, err = cs.MachineConfigPools().Update(ctx, mcp, metav1.UpdateOptions{}) + return err +} diff --git a/devex/internal/pkg/utils/clone.go b/devex/internal/pkg/utils/clone.go new file mode 100644 index 0000000000..454ac7af9f --- /dev/null +++ b/devex/internal/pkg/utils/clone.go @@ -0,0 +1,112 @@ +package utils + +import ( + "context" + "fmt" + + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + apierrs "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/machine-config-operator/test/framework" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +const ( + ClonedObjectLabelKey string = "machineconfiguration.openshift.io/cloned-by-mco-helpers" + RecreatableSecretLabelKey string = "machineconfiguration.openshift.io/recreatable-secret" +) + +type SecretRef struct { + Name string + Namespace string +} + +func (s *SecretRef) String() string { + return fmt.Sprintf("%s/%s", s.Namespace, s.Name) +} + +func CloneSecret(cs *framework.ClientSet, src, dst SecretRef) error { + originalSecret, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get secret %s: %w", src, err) + } + + return createSecret(cs, prepareSecret(originalSecret, dst, nil)) +} + +func CloneSecretWithLabels(cs *framework.ClientSet, src, dst SecretRef, addlLabels map[string]string) error { + originalSecret, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("could not get secret %s: %w", src, err) + } + + return createSecret(cs, prepareSecret(originalSecret, dst, addlLabels)) 
+} + +func prepareSecret(originalSecret *corev1.Secret, dstRef SecretRef, addlLabels map[string]string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: dstRef.Name, + Namespace: dstRef.Namespace, + Labels: getLabelsForClonedObject(addlLabels), + }, + Data: originalSecret.Data, + Type: originalSecret.Type, + } +} + +func getLabelsForClonedObject(addlLabels map[string]string) map[string]string { + out := map[string]string{ + ClonedObjectLabelKey: "", + RecreatableSecretLabelKey: "", + } + + if addlLabels == nil { + return out + } + + for k, v := range addlLabels { + out[k] = v + } + + return out +} + +func createSecret(cs *framework.ClientSet, s *corev1.Secret) error { + _, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Create(context.TODO(), s, metav1.CreateOptions{}) + if err == nil { + klog.Infof("Created secret %q in namespace %q", s.Name, ctrlcommon.MCONamespace) + return nil + } + + if err != nil && !apierrs.IsAlreadyExists(err) { + return err + } + + secret, err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), s.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if _, ok := secret.Labels[RecreatableSecretLabelKey]; ok { + if err := cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace).Delete(context.TODO(), s.Name, metav1.DeleteOptions{}); err != nil { + return err + } + + return createSecret(cs, s) + } + + return fmt.Errorf("unmanaged preexisting secret %s already exists, missing label %q", s.Name, RecreatableSecretLabelKey) +} + +func CreateOrRecreateSecret(cs *framework.ClientSet, s *corev1.Secret) error { + if s.Labels == nil { + s.Labels = map[string]string{} + } + + s.Labels[RecreatableSecretLabelKey] = "" + + return createSecret(cs, s) +} diff --git a/devex/internal/pkg/utils/utils.go b/devex/internal/pkg/utils/utils.go new file mode 100644 index 0000000000..c0a1c26dd7 --- /dev/null +++ b/devex/internal/pkg/utils/utils.go @@ -0,0 +1,46 @@ +package utils + +import ( + 
"flag" + "fmt" + "os" + "os/exec" + + aggerrs "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" +) + +func CheckForBinaries(bins []string) error { + errs := []error{} + + for _, bin := range bins { + if _, err := exec.LookPath(bin); err != nil { + errs = append(errs, fmt.Errorf("required binary %q not found: %w", bin, err)) + } + } + + return aggerrs.NewAggregate(errs) +} + +func ToEnvVars(in map[string]string) []string { + out := os.Environ() + + for key, val := range in { + envVar := fmt.Sprintf("%s=%s", key, val) + out = append(out, envVar) + } + + return out +} + +func ParseFlags() { + flag.Set("v", "4") + flag.Set("logtostderr", "true") + flag.Parse() +} + +func ParseFlagsAndPrintOpts(opts interface{}) { + ParseFlags() + + klog.Infof("Options parsed: %+v", opts) +} diff --git a/go.mod b/go.mod index 5296364068..e74c2fcd71 100644 --- a/go.mod +++ b/go.mod @@ -28,8 +28,8 @@ require ( github.com/google/renameio v0.1.0 github.com/imdario/mergo v0.3.16 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20241106222702-2429e35d6633 - github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f + github.com/openshift/api v0.0.0-20250102185430-d6d8306a24ec + github.com/openshift/client-go v0.0.0-20250106104058-89709a455e2a github.com/openshift/library-go v0.0.0-20241022210936-abb8c75b88dc github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b github.com/prometheus/client_golang v1.20.4 @@ -122,6 +122,7 @@ require ( github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/sigstore/fulcio v1.4.3 // indirect github.com/sigstore/rekor v1.2.2 // indirect github.com/sourcegraph/conc v0.3.0 // indirect @@ -288,7 +289,7 @@ require ( github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 // indirect 
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sigstore/sigstore v1.7.5 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sirupsen/logrus v1.9.3 github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.7.1 // indirect github.com/sonatard/noctx v0.0.2 // indirect diff --git a/go.sum b/go.sum index 6098e477b7..f56cdc9fb9 100644 --- a/go.sum +++ b/go.sum @@ -594,10 +594,10 @@ github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstu github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20241106222702-2429e35d6633 h1:JgeV16qlZA9YRrfPqElgfBlZaD0sgYBKI05rVWz45GY= -github.com/openshift/api v0.0.0-20241106222702-2429e35d6633/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f h1:FRc0bVNWprihWS0GqQWzb3dY4dkCwpOP3mDw5NwSoR4= -github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f/go.mod h1:KiZi2mJRH1TOJ3FtBDYS6YvUL30s/iIXaGSUrSa36mo= +github.com/openshift/api v0.0.0-20250102185430-d6d8306a24ec h1:VEDRGJmiYeN0V0xW1aI9wfzEMgaMZOVasy3FzEz27Lo= +github.com/openshift/api v0.0.0-20250102185430-d6d8306a24ec/go.mod h1:Shkl4HanLwDiiBzakv+con/aMGnVE2MAGvoKp5oyYUo= +github.com/openshift/client-go v0.0.0-20250106104058-89709a455e2a h1:8lwO4lGTwHuVXsIeFoW3t7AEBROW5quMj5YjH9jF+98= +github.com/openshift/client-go v0.0.0-20250106104058-89709a455e2a/go.mod h1:34qRf2MsrJKXKAL8qxIkxZ3O5G+YhOB7foCR04H26JE= github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA= github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= 
github.com/openshift/library-go v0.0.0-20241022210936-abb8c75b88dc h1:fwtWTW+QcTyzGVAYxMPz9amtAURWvSs8p+a37nG/43c= @@ -683,8 +683,8 @@ github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAj github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 h1:rnO6Zp1YMQwv8AyxzuwsVohljJgp4L0ZqiCgtACsPsc= github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9/go.mod h1:dg7lPlu/xK/Ut9SedURCoZbVCR4yC7fM65DtH9/CDHs= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= diff --git a/hack/build-go.sh b/hack/build-go.sh index 8cdd133f87..b72a68fde8 100755 --- a/hack/build-go.sh +++ b/hack/build-go.sh @@ -4,6 +4,7 @@ set -eu REPO=github.com/openshift/machine-config-operator WHAT=${WHAT:-machine-config-operator} +WHAT_PATH="${WHAT_PATH:-cmd/${WHAT}}" GOTAGS="${GOTAGS:-} ${TAGS:-}" GLDFLAGS=${GLDFLAGS:-} @@ -39,5 +40,5 @@ if [[ $WHAT == "machine-config-controller" ]]; then GOTAGS="containers_image_openpgp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_ostree_stub" fi -echo "Building ${REPO}/cmd/${WHAT} (${VERSION_OVERRIDE}, ${HASH}) for $GOOS/$GOARCH" -GOOS=${GOOS} GOARCH=${GOARCH} go build -mod=vendor -tags="${GOTAGS}" -ldflags 
"${GLDFLAGS} -s -w" -o ${BIN_PATH}/${WHAT} ${REPO}/cmd/${WHAT} +echo "Building ${REPO}/${WHAT_PATH} (${VERSION_OVERRIDE}, ${HASH}) for $GOOS/$GOARCH" +GOOS=${GOOS} GOARCH=${GOARCH} go build -mod=vendor -tags="${GOTAGS}" -ldflags "${GLDFLAGS} -s -w" -o ${BIN_PATH}/${WHAT} ${REPO}/${WHAT_PATH} diff --git a/hack/build-image b/hack/build-image index f3dc4fdb0b..0489999745 100755 --- a/hack/build-image +++ b/hack/build-image @@ -41,6 +41,11 @@ else: print(f"HEAD commit: {gitrev}") args = [podman, 'build', '-t', imgname, '--no-cache', '.'] +# Check if AUTHFILE environment variable is set. Setting this allows the user to specify which file to use +# for the authentication when building the image +authfile = os.environ.get('AUTHFILE') +if authfile: + args.extend(['--authfile', authfile]) for k in openshift_keys: args.append(f'--label={k}=') args.extend([f'--label=vcs-ref={gitrev}', '--label=vcs-type=git', '--label=vcs-url=']) diff --git a/hack/cluster-push-prep.sh b/hack/cluster-push-prep.sh deleted file mode 100755 index eba362cf23..0000000000 --- a/hack/cluster-push-prep.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# Scale the CVO down and set up podman with a secret ready to push -# to the machine-config-operator namespace. - -# Assumptions: You have set KUBECONFIG to point to your local cluster, -# and you have exposed the registry via e.g. -# https://github.com/openshift/installer/issues/411#issuecomment-445165262 - -set -xeuo pipefail - -oc -n openshift-cluster-version scale --replicas=0 deploy/cluster-version-operator -if ! oc get -n openshift-image-registry route/image-registry &>/dev/null; then - oc expose -n openshift-image-registry svc/image-registry -fi -oc patch -n openshift-image-registry route/image-registry -p '{"spec": {"tls": {"insecureEdgeTerminationPolicy": "Redirect", "termination": "reencrypt"}}}' -registry=$(oc get -n openshift-image-registry -o json route/image-registry | jq -r ".spec.host") -if ! 
curl -k --head https://"${registry}" >/dev/null; then - if ! grep -q "${registry}" /etc/hosts; then - set +x - echo "error: Failed to contact the registry" - echo "The problem may be DNS; you can e.g. add the registry to your /etc/hosts - as root run:" - echo " echo 127.0.0.1 ${registry} >> /etc/hosts" - exit 1 - fi -fi - -# And allow everything to pull from our namespace -oc -n openshift-machine-config-operator policy add-role-to-group registry-viewer system:anonymous diff --git a/hack/cluster-push.sh b/hack/cluster-push.sh deleted file mode 100755 index 437f07f925..0000000000 --- a/hack/cluster-push.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bash - -# Build the MCO image and push it to the cluster registry, -# then directly patch the deployments/daemonsets to use that image. -# This is generally faster than building a new release image and -# upgrading to it, but also consequently doesn't work the same way as production -# upgrades work. -# -# To use this, you must first run `cluster-push-prep.sh` (once) for your cluster. -# -# Assumptions: You have set KUBECONFIG to point to your cluster. 
- -set -xeuo pipefail - -do_build=1 -if [ "${1:-}" = "-n" ]; then - do_build=0 -fi - -registry=$(oc get -n openshift-image-registry -o json route/image-registry | jq -r ".spec.host") -curl -k --head https://"${registry}" >/dev/null - -imgname=machine-config-operator -LOCAL_IMGNAME=localhost/${imgname}:latest -REMOTE_IMGNAME=openshift-machine-config-operator/${imgname} -if [ "${do_build}" = 1 ]; then - ./hack/build-image -fi -builder_secretid=$(oc get -n openshift-machine-config-operator secret | egrep '^builder-token-'| head -1 | cut -f 1 -d ' ') -secret="$(oc get -n openshift-machine-config-operator -o json secret/${builder_secretid} | jq -r '.data.token' | base64 -d)" - -if [[ "${podman:-}" =~ "docker" ]]; then - imgstorage="docker-daemon:" -else - imgstorage="containers-storage:" -fi -skopeo copy --dest-tls-verify=false --dest-creds unused:${secret} "${imgstorage}${LOCAL_IMGNAME}" "docker://${registry}/${REMOTE_IMGNAME}" - -digest=$(skopeo inspect --creds unused:${secret} --tls-verify=false docker://${registry}/${REMOTE_IMGNAME} | jq -r .Digest) -imageid=${REMOTE_IMGNAME}@${digest} - -oc project openshift-machine-config-operator - -IN_CLUSTER_NAME=image-registry.openshift-image-registry.svc:5000/${imageid} - -# Scale down the operator now to avoid it racing with our update. 
-oc scale --replicas=0 deploy/machine-config-operator - -# Patch the images.json -tmpf=$(mktemp) -oc get -o json configmap/machine-config-operator-images > ${tmpf} -outf=$(mktemp) -python3 > ${outf} <${patch} < /dev/null && pwd ) - -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/lib" - -copy_mcd_to_disk() { - pod="$1" - - oc debug \ - -n "$MCO_NAMESPACE" \ - -c "$MCD_CONTAINER_NAME" \ - --one-container=true \ - "pod/$pod" -- cp -v /usr/bin/machine-config-daemon "$ROOTFS_MCD_PATH" -} - -main() { - # Disable the cluster version operator - # --replicas 1 out-of-the-box - oc scale --replicas 0 -n openshift-cluster-version deployments/cluster-version-operator - - # Disable the MCO - # --replicas 1 out-of-the-box - oc scale --replicas 0 -n "$MCO_NAMESPACE" deployments/machine-config-operator - - # Copy the MCD from the container to rootfs first to avoid startup issues - echo "Copy MCD from container to /rootfs" - mcd_pods="$(oc get pods -n "$MCO_NAMESPACE" -l 'k8s-app=machine-config-daemon' -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')" - for pod in $mcd_pods; do - copy_mcd_to_disk "$pod" & - done - - echo "Waiting for copies to finish..." - wait - echo "Done" - - # Now we modify the MCD daemonset to copy the MCD binary from rootfs and run it - # Note: This was originally done with a read, however read exits with code 1. 
- patch="$(/bin/cat <<- EOM -spec: - template: - spec: - containers: - - args: - - -c - - "sha256sum $ROOTFS_MCD_PATH && cp $ROOTFS_MCD_PATH /usr/local/bin/machine-config-daemon && /usr/local/bin/machine-config-daemon start -v 4" - command: ["/bin/bash"] - name: machine-config-daemon -EOM -)" - - echo "Modifying the MCD daemonset" - - # Apply the modifications - oc patch \ - daemonset/machine-config-daemon \ - -n "$MCO_NAMESPACE" \ - --patch="$patch" - - # Wait for the updated daemonset to roll out - oc rollout status -n "$MCO_NAMESPACE" -w "$MCD_DAEMONSET" -} - -can_run && main diff --git a/hack/push-image.sh b/hack/push-image.sh deleted file mode 100755 index 0188cd30a1..0000000000 --- a/hack/push-image.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -REPO=${REPO:-"openshift"} - -# Push mco image to REPO requested -exec podman push "localhost/machine-config-operator:latest" "${REPO}/origin-machine-config-operator:latest" diff --git a/hack/push-to-mcd-pods.sh b/hack/push-to-mcd-pods.sh deleted file mode 100755 index ebe703399d..0000000000 --- a/hack/push-to-mcd-pods.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env bash - -target_mcd_pod="$1" - -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - -# shellcheck disable=SC1091 -source "$SCRIPT_DIR/lib" - -copy_binary() { - local -r pod="$1" - local -r bin_path="$2" - - echo "Copying to $pod..." - oc cp -n "$MCO_NAMESPACE" -c "$MCD_CONTAINER_NAME" "$bin_path" "$pod:$ROOTFS_MCD_PATH" -} - -push_binary_to_pod() { - local -r pod="$1" - local -r bin_path="$2" - - local -r local_bin_sha256sum="$(sha256sum "$bin_path" | awk '{print $1;}')" - - # Check if we have the file on the pod in question - oc rsh -n "$MCO_NAMESPACE" -c "$MCD_CONTAINER_NAME" "pod/$pod" sha256sum "$ROOTFS_MCD_PATH" - local -r has_file_retval="$?" 
- - # We don't have the file, so lets copy it - if [ "$has_file_retval" -ne 0 ]; then - echo "Binary not found on $pod" - copy_binary "$pod" "$bin_path" - return - fi - - local -r remote_bin_sha256sum="$(oc rsh -n "$MCO_NAMESPACE" -c "$MCD_CONTAINER_NAME" "pod/$pod" sha256sum "$ROOTFS_MCD_PATH" | awk '{print $1;}')" - - if [[ "$local_bin_sha256sum" == "$remote_bin_sha256sum" ]]; then - echo "Skipping copy to $pod, binary $ROOTFS_MCD_PATH with equal checksum found: $local_bin_sha256sum" - return - fi - - echo "Local: $local_bin_sha256sum, Remote ($pod): $remote_bin_sha256sum" - copy_binary "$pod" "$bin_path" "$ROOTFS_MCD_PATH" -} - -push_binary_to_all_pods() { - local -r bin_path="$1" - - # Get our target MCD pods - echo "Getting MCD pods" - mcd_pods="$(oc get pods -n "$MCO_NAMESPACE" -l 'k8s-app=machine-config-daemon' -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')" - - # Concurrently copy the built binary to all MCD pods for speed. - for pod in $mcd_pods; do - push_binary_to_pod "$pod" "$bin_path" & - done - - # Wait for all the copy jobs to complete since we don't block for each one. - wait - - # Restart the MCD pods - oc rollout restart -n "$MCO_NAMESPACE" "$MCD_DAEMONSET" -} - -main() { - # Heterogeneous clusters are targeted for OCP 4.11, so this will need to change eventually. - # - # Ideas: - # - Get all individual arches and build for each of those concurrently. - # - Get a list of MCD pods on nodes and get the arch for each of the nodes just before copying. 
- - # Gets the architecture and operating system of the first node in the list, e.g., amd64\tlinux - local -r node_info="$(oc get nodes -o go-template='{{(index .items 0).status.nodeInfo.architecture}}{{"\t"}}{{(index .items 0).status.nodeInfo.operatingSystem}}')" - local -r cluster_arch="$(echo "$node_info" | cut -f1)" - - # Not really needed, but lets not make assumptions :) - local -r cluster_os="$(echo "$node_info" | cut -f2)" - - echo "Detected cluster arch / OS: $cluster_arch/$cluster_os" - - echo "Building MCD binary..." - # Need to set both GOOS / GOARCH on Mac otherwise the built binaries will be - # compiled for a Darwin target instead. - # - # Set WHAT to only build the MCD since that's the only component we're - # interested in at this time. - GOOS="$cluster_os" GOARCH="$cluster_arch" WHAT=machine-config-daemon "$SCRIPT_DIR/build-go.sh" - compile_retval="$?" - if [ $compile_retval -ne 0 ]; then - echo "Compilation failed!" - return $compile_retval - fi - - local -r bin_path="./_output/$cluster_os/$cluster_arch/machine-config-daemon" - - # Get the hash of our MCD binary; useful to compare to the startup value in the MCD pod logs - sha256sum "$bin_path" - - if [ -z "$target_mcd_pod" ]; then - # We're not targeting a specific pod, so push the binary to all MCD pods - echo "Will copy to all MCD pods..." - push_binary_to_all_pods "$bin_path" - else - # We're targeting a specific pod, so only push the binary to that pod - echo "Pod $target_mcd_pod specified, will only copy to this pod..." - push_binary_to_pod "$target_mcd_pod" "$bin_path" - - # Delete the pod to force pod re-creation so the new binary will be used. 
- oc delete -n "$MCO_NAMESPACE" "pod/$target_mcd_pod" - fi - - # Wait for the MCD to finish restarting (not strictly required, but doesn't hurt) - echo "Waiting for MCD daemonsets to restart" - oc rollout status -w -n "$MCO_NAMESPACE" "$MCD_DAEMONSET" - - echo "Current MCD Pods:" - "$SCRIPT_DIR/get-mcd-nodes.py" -} - -can_run && main diff --git a/hack/test-unit.sh b/hack/test-unit.sh index f51a1c588b..a15bd37109 100755 --- a/hack/test-unit.sh +++ b/hack/test-unit.sh @@ -17,7 +17,7 @@ COVERAGE_REPORT="mco-unit-test-coverage.out" function run_tests() { test_opts=("$@") - CGO_ENABLED=0 go test "${test_opts[@]}" -tags="$GOTAGS" './cmd/...' './pkg/...' './lib/...' './test/helpers/...' | ./hack/test-with-junit.sh "$MAKEFILE_TARGET" + CGO_ENABLED=0 go test "${test_opts[@]}" -tags="$GOTAGS" './devex/...' './cmd/...' './pkg/...' './lib/...' './test/helpers/...' | ./hack/test-with-junit.sh "$MAKEFILE_TARGET" } function run_tests_with_coverage() { diff --git a/hack/verify-helpers.sh b/hack/verify-helpers.sh new file mode 100755 index 0000000000..6df700b346 --- /dev/null +++ b/hack/verify-helpers.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# A small script that determines whether we are running in CI to determine +# whether to do a test compilation of the MCO helpers. + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+
+if [[ -v OPENSHIFT_CI ]]; then
+    cd "$REPO_ROOT";
+    make helpers;
+else
+    echo "OPENSHIFT_CI not set, skipping build check for helpers"
+fi
diff --git a/internal/clients/builder.go b/internal/clients/builder.go
index 27a87f0af0..9979df67b9 100644
--- a/internal/clients/builder.go
+++ b/internal/clients/builder.go
@@ -9,6 +9,7 @@ import (
 	mapiclientset "github.com/openshift/client-go/machine/clientset/versioned"
 	mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned"
 	operatorclientset "github.com/openshift/client-go/operator/clientset/versioned"
+	routeclientset "github.com/openshift/client-go/route/clientset/versioned"
 	apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -67,6 +68,11 @@ func (cb *Builder) ImageClientOrDie(name string) imageclientset.Interface {
 	return imageclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name))
 }
 
+// RouteClientOrDie returns the route client interface for route api objects.
+func (cb *Builder) RouteClientOrDie(name string) routeclientset.Interface {
+	return routeclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name))
+}
+
 // MachineClientOrDie returns the machine api client interface for machine api objects.
func (cb *Builder) MachineClientOrDie(name string) mapiclientset.Interface { return mapiclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) diff --git a/manifests/machineconfigdaemon/daemonset.yaml b/manifests/machineconfigdaemon/daemonset.yaml index 2b19623272..1d32e44348 100644 --- a/manifests/machineconfigdaemon/daemonset.yaml +++ b/manifests/machineconfigdaemon/daemonset.yaml @@ -42,10 +42,6 @@ spec: - mountPath: /rootfs name: rootfs mountPropagation: HostToContainer - {{- range .MachineOSConfigs }} - - mountPath: /run/secrets/os-image-pull-secrets/{{ .Spec.MachineConfigPool.Name }} - name: {{ .Spec.BuildOutputs.CurrentImagePullSecret.Name }} - {{- end }} livenessProbe: initialDelaySeconds: 120 periodSeconds: 30 @@ -116,11 +112,6 @@ spec: - configMap: name: kube-rbac-proxy name: mcd-auth-proxy-config - {{- range .MachineOSConfigs }} - - secret: - secretName: {{ .Spec.BuildOutputs.CurrentImagePullSecret.Name }} - name: {{ .Spec.BuildOutputs.CurrentImagePullSecret.Name }} - {{- end }} tolerations: # MCD needs to run everywhere. Tolerate all taints. 
- operator: Exists diff --git a/manifests/machineosbuilder/clusterrole.yaml b/manifests/machineosbuilder/clusterrole.yaml index c5591a9e61..10261fe25b 100644 --- a/manifests/machineosbuilder/clusterrole.yaml +++ b/manifests/machineosbuilder/clusterrole.yaml @@ -30,6 +30,15 @@ rules: - apiGroups: ["image.openshift.io"] resources: ["imagestreams"] verbs: ["get","list","watch","create","update","patch","delete"] +- apiGroups: ["image.openshift.io"] + resources: ["imagestreamtags"] + verbs: ["get","list","watch","create","update","patch","delete"] +- apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] +- apiGroups: ["route.openshift.io"] + resources: ["routes"] + verbs: ["get", "list", "watch"] - apiGroups: ["build.openshift.io"] resources: ["builds","buildconfigs","buildconfigs/instantiate"] verbs: ["get","list","watch","create","update","patch","delete"] diff --git a/pkg/apihelpers/machineosbuild.go b/pkg/apihelpers/machineosbuild.go index 5908bcadc6..3bb93b727a 100644 --- a/pkg/apihelpers/machineosbuild.go +++ b/pkg/apihelpers/machineosbuild.go @@ -1,7 +1,7 @@ package apihelpers import ( - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -17,13 +17,13 @@ func NewMachineOSBuildCondition(condType string, status metav1.ConditionStatus, } } -func GetMachineOSBuildCondition(status mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) *metav1.Condition { +func GetMachineOSBuildCondition(status mcfgv1.MachineOSBuildStatus, condType mcfgv1.BuildProgress) *metav1.Condition { // in case of sync errors, return the last condition that matches, not the first // this exists for redundancy and potential race conditions. 
var LatestState *metav1.Condition for i := range status.Conditions { c := status.Conditions[i] - if mcfgv1alpha1.BuildProgress(c.Type) == condType { + if mcfgv1.BuildProgress(c.Type) == condType { LatestState = &c } } @@ -32,8 +32,8 @@ func GetMachineOSBuildCondition(status mcfgv1alpha1.MachineOSBuildStatus, condTy // SetMachineOSBuildCondition updates the MachineOSBuild to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. -func SetMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condition metav1.Condition) { - currentCond := GetMachineOSBuildCondition(*status, mcfgv1alpha1.BuildProgress(condition.Type)) +func SetMachineOSBuildCondition(status *mcfgv1.MachineOSBuildStatus, condition metav1.Condition) { + currentCond := GetMachineOSBuildCondition(*status, mcfgv1.BuildProgress(condition.Type)) if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && currentCond.Message == condition.Message { return } @@ -43,21 +43,21 @@ func SetMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condi } // this may not be necessary - newConditions := filterOutMachineOSBuildCondition(status.Conditions, mcfgv1alpha1.BuildProgress(condition.Type)) + newConditions := filterOutMachineOSBuildCondition(status.Conditions, mcfgv1.BuildProgress(condition.Type)) newConditions = append(newConditions, condition) status.Conditions = newConditions } // RemoveMachineOSBuildCondition removes the MachineOSBuild condition with the provided type. 
-func RemoveMachineOSBuildCondition(status *mcfgv1alpha1.MachineOSBuildStatus, condType mcfgv1alpha1.BuildProgress) { +func RemoveMachineOSBuildCondition(status *mcfgv1.MachineOSBuildStatus, condType mcfgv1.BuildProgress) { status.Conditions = filterOutMachineOSBuildCondition(status.Conditions, condType) } // filterOutMachineOSBuildCondition returns a new slice of MachineOSBuild conditions without conditions with the provided type. -func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mcfgv1alpha1.BuildProgress) []metav1.Condition { +func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mcfgv1.BuildProgress) []metav1.Condition { var newConditions []metav1.Condition for _, c := range conditions { - if mcfgv1alpha1.BuildProgress(c.Type) == condType { + if mcfgv1.BuildProgress(c.Type) == condType { continue } newConditions = append(newConditions, c) @@ -65,16 +65,232 @@ func filterOutMachineOSBuildCondition(conditions []metav1.Condition, condType mc return newConditions } -func IsMachineOSBuildConditionTrue(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress) bool { +func IsMachineOSBuildConditionTrue(conditions []metav1.Condition, conditionType mcfgv1.BuildProgress) bool { return IsMachineOSBuildConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue) } // IsMachineOSBuildConditionPresentAndEqual returns true when conditionType is present and equal to status. 
-func IsMachineOSBuildConditionPresentAndEqual(conditions []metav1.Condition, conditionType mcfgv1alpha1.BuildProgress, status metav1.ConditionStatus) bool { +func IsMachineOSBuildConditionPresentAndEqual(conditions []metav1.Condition, conditionType mcfgv1.BuildProgress, status metav1.ConditionStatus) bool { for _, condition := range conditions { - if mcfgv1alpha1.BuildProgress(condition.Type) == conditionType { + if mcfgv1.BuildProgress(condition.Type) == conditionType { return condition.Status == status } } return false } + +// Represents the successful conditions for a MachineOSBuild. +func MachineOSBuildSucceededConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionTrue, + Reason: "Ready", + Message: "Build Ready", + }, + } +} + +// Represents the pending conditions for a MachineOSBuild. 
+func MachineOSBuildPendingConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionTrue, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + } +} + +// Represents the running conditions for a MachineOSBuild. +func MachineOSBuildRunningConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionTrue, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + } +} + +// Represents the failure conditions for a MachineOSBuild. 
+func MachineOSBuildFailedConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionTrue, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + } +} + +// Represents the interrupted conditions for a MachineOSBuild. +func MachineOSBuildInterruptedConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionTrue, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + } +} + +// Represents the initial MachineOSBuild state (all conditions false). 
+func MachineOSBuildInitialConditions() []metav1.Condition { + return []metav1.Condition{ + { + Type: string(mcfgv1.MachineOSBuildPrepared), + Status: metav1.ConditionFalse, + Reason: "Prepared", + Message: "Build Prepared and Pending", + }, + { + Type: string(mcfgv1.MachineOSBuilding), + Status: metav1.ConditionFalse, + Reason: "Building", + Message: "Image Build In Progress", + }, + { + Type: string(mcfgv1.MachineOSBuildFailed), + Status: metav1.ConditionFalse, + Reason: "Failed", + Message: "Build Failed", + }, + { + Type: string(mcfgv1.MachineOSBuildInterrupted), + Status: metav1.ConditionFalse, + Reason: "Interrupted", + Message: "Build Interrupted", + }, + { + Type: string(mcfgv1.MachineOSBuildSucceeded), + Status: metav1.ConditionFalse, + Reason: "Ready", + Message: "Build Ready", + }, + } +} diff --git a/pkg/apihelpers/machineosconfig.go b/pkg/apihelpers/machineosconfig.go index 10706c4499..3ab0460eeb 100644 --- a/pkg/apihelpers/machineosconfig.go +++ b/pkg/apihelpers/machineosconfig.go @@ -1,7 +1,7 @@ package apihelpers import ( - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -17,7 +17,7 @@ func NewMachineOSConfigCondition(condType string, status metav1.ConditionStatus, } } -func GetMachineOSConfigCondition(status mcfgv1alpha1.MachineOSConfigStatus, condType string) *metav1.Condition { +func GetMachineOSConfigCondition(status mcfgv1.MachineOSConfigStatus, condType string) *metav1.Condition { // in case of sync errors, return the last condition that matches, not the first // this exists for redundancy and potential race conditions. var LatestState *metav1.Condition @@ -32,7 +32,7 @@ func GetMachineOSConfigCondition(status mcfgv1alpha1.MachineOSConfigStatus, cond // SetMachineOSConfigCondition updates the MachineOSConfig to include the provided condition. 
If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. -func SetMachineOSConfigCondition(status *mcfgv1alpha1.MachineOSConfigStatus, condition metav1.Condition) { +func SetMachineOSConfigCondition(status *mcfgv1.MachineOSConfigStatus, condition metav1.Condition) { currentCond := GetMachineOSConfigCondition(*status, condition.Type) if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && currentCond.Message == condition.Message { return @@ -49,7 +49,7 @@ func SetMachineOSConfigCondition(status *mcfgv1alpha1.MachineOSConfigStatus, con } // RemoveMachineOSConfigCondition removes the MachineOSConfig condition with the provided type. -func RemoveMachineOSConfigCondition(status *mcfgv1alpha1.MachineOSConfigStatus, condType string) { +func RemoveMachineOSConfigCondition(status *mcfgv1.MachineOSConfigStatus, condType string) { status.Conditions = filterOutMachineOSConfigCondition(status.Conditions, condType) } diff --git a/pkg/controller/build/buildrequest/assets/Containerfile.on-cluster-build-template b/pkg/controller/build/buildrequest/assets/Containerfile.on-cluster-build-template index bbb48bd82e..adaf22796d 100644 --- a/pkg/controller/build/buildrequest/assets/Containerfile.on-cluster-build-template +++ b/pkg/controller/build/buildrequest/assets/Containerfile.on-cluster-build-template @@ -37,12 +37,17 @@ skip_if_unavailable=False" > /etc/yum.repos.d/coreos-extensions.repo && \ RUN ostree container commit {{end}} +# Hardcoded tmpfiles configuration for usbguard and ipsec. +# Eventually when https://github.com/USBGuard/usbguard/pull/652 is backported to RHEL, we will be able to remove the usbguard patch +# For now, libreswan (ipsec) patch will live here until we find a better alternative +RUN test ! 
-f /usr/lib/tmpfiles.d/usbguard.conf || rm /usr/lib/tmpfiles.d/usbguard.conf +RUN echo -e "d /var/log/usbguard 0755 root root -\nd /var/lib/ipsec 0700 root root -\nd /var/lib/ipsec/nss 0700 root root -" > /usr/lib/tmpfiles.d/usbguard_ipsec.conf + COPY ./openshift-config-user-ca-bundle.crt /etc/pki/ca-trust/source/anchors/openshift-config-user-ca-bundle.crt RUN update-ca-trust -LABEL machineconfig={{.MachineOSBuild.Spec.DesiredConfig.Name}} +LABEL machineconfig={{.MachineOSBuild.Spec.MachineConfig.Name}} LABEL machineconfigpool={{.MachineOSConfig.Spec.MachineConfigPool.Name}} -LABEL releaseversion={{.ReleaseVersion}} LABEL baseOSContainerImage={{.BaseOSImage}} {{if .UserContainerfile}} diff --git a/pkg/controller/build/buildrequest/builder.go b/pkg/controller/build/buildrequest/builder.go index 42c9f65abe..5e8940e1d7 100644 --- a/pkg/controller/build/buildrequest/builder.go +++ b/pkg/controller/build/buildrequest/builder.go @@ -83,3 +83,12 @@ func (b *builder) MachineConfigPool() (string, error) { func (b *builder) RenderedMachineConfig() (string, error) { return utils.GetRequiredLabelValueFromObject(b, constants.RenderedMachineConfigLabelKey) } + +// Gets the UID of the Builder object. 
+func (b *builder) BuilderUID() (string, error) { + uid := string(b.GetUID()) + if uid == "" { + return uid, fmt.Errorf("object %s has no UID", b.GetName()) + } + return uid, nil +} diff --git a/pkg/controller/build/buildrequest/buildrequest.go b/pkg/controller/build/buildrequest/buildrequest.go index 1cb9a2d0b5..aacfabea87 100644 --- a/pkg/controller/build/buildrequest/buildrequest.go +++ b/pkg/controller/build/buildrequest/buildrequest.go @@ -9,16 +9,17 @@ import ( "text/template" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" + chelpers "github.com/openshift/machine-config-operator/pkg/controller/common" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" ) //go:embed assets/Containerfile.on-cluster-build-template @@ -45,7 +46,7 @@ type buildRequestImpl struct { } // Constructs an imageBuildRequest from the Kube API server. 
-func NewBuildRequestFromAPI(ctx context.Context, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (BuildRequest, error) { +func NewBuildRequestFromAPI(ctx context.Context, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (BuildRequest, error) { opts, err := newBuildRequestOptsFromAPI(ctx, kubeclient, mcfgclient, mosb, mosc) if err != nil { return nil, err @@ -61,8 +62,8 @@ func newBuildRequest(opts BuildRequestOpts) BuildRequest { } // only support noArch for now - for _, file := range opts.MachineOSConfig.Spec.BuildInputs.Containerfile { - if file.ContainerfileArch == mcfgv1alpha1.NoArch { + for _, file := range opts.MachineOSConfig.Spec.Containerfile { + if file.ContainerfileArch == mcfgv1.NoArch { br.userContainerfile = file.Content break } @@ -118,7 +119,28 @@ func (br buildRequestImpl) ConfigMaps() ([]*corev1.ConfigMap, error) { additionaltrustbundle := br.additionaltrustbundleToConfigMap() - return []*corev1.ConfigMap{containerfile, machineconfig, additionaltrustbundle}, nil + etcPolicy, err := br.etcPolicyToConfigMap(br.opts.MachineConfig) + if err != nil { + return nil, fmt.Errorf("could not convert etc/containers registries files into ConfigMap %q: %w", br.getEtcPolicyConfigMapName(), err) + } + etcRegistries, err := br.etcRegistriesToConfigMap(br.opts.MachineConfig) + if err != nil { + return nil, fmt.Errorf("could not convert registries.conf files into ConfigMap %q: %w", br.getEtcRegistriesConfigMapName(), err) + } + + configMaps := []*corev1.ConfigMap{containerfile, machineconfig, additionaltrustbundle} + if etcPolicy != nil { + configMaps = append(configMaps, etcPolicy) + } else { + klog.Warningf("/etc/containers/policy.json file not found in MachineConfig %q, could not create ConfigMap %q", br.opts.MachineConfig.Name, br.getEtcPolicyConfigMapName()) + } + if etcRegistries != nil { + 
configMaps = append(configMaps, etcRegistries) + } else { + klog.Warningf("/etc/containers/registries.conf file not found in MachineConfig %q, could not create ConfigMap %q", br.opts.MachineConfig.Name, br.getEtcRegistriesConfigMapName()) + } + + return configMaps, nil } func (br buildRequestImpl) canonicalizeSecret(name string, secret *corev1.Secret) (*corev1.Secret, error) { @@ -199,6 +221,76 @@ func (br buildRequestImpl) additionaltrustbundleToConfigMap() *corev1.ConfigMap return configmap } +func (br buildRequestImpl) etcPolicyToConfigMap(mc *mcfgv1.MachineConfig) (*corev1.ConfigMap, error) { + // Build the ConfigMap data + configMapData, err := br.ignitionFileToConfigMapData(mc, "/etc/containers/policy.json", "/etc/containers/") + if err != nil { + return nil, err + } + if len(configMapData) == 0 { + return nil, nil + } + + // Create the ConfigMap + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: br.getObjectMeta(br.getEtcPolicyConfigMapName()), + Data: configMapData, + } + return configmap, nil +} + +func (br buildRequestImpl) etcRegistriesToConfigMap(mc *mcfgv1.MachineConfig) (*corev1.ConfigMap, error) { + // Build the ConfigMap data + configMapData, err := br.ignitionFileToConfigMapData(mc, "/etc/containers/registries.conf", "/etc/containers/") + if err != nil { + return nil, err + } + if len(configMapData) == 0 { + return nil, nil + } + + // Create the ConfigMap + configmap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: br.getObjectMeta(br.getEtcRegistriesConfigMapName()), + Data: configMapData, + } + return configmap, nil +} + +func (br buildRequestImpl) ignitionFileToConfigMapData(mc *mcfgv1.MachineConfig, filePath, prefixToTrim string) (map[string]string, error) { + if len(mc.Spec.Config.Raw) == 0 { + return nil, nil + } + // Build the ConfigMap data + ignCfg, err := ctrlcommon.ParseAndConvertConfig(mc.Spec.Config.Raw) + if err != nil { + return nil, fmt.Errorf("parsing rendered MC Ignition config failed 
with error: %w", err) + } + + for _, file := range ignCfg.Storage.Files { + if file.Path != filePath { + continue + } + if file.Contents.Source == nil { + return nil, fmt.Errorf("nil source for %s", file.Path) + } + + // Extract and decode the encoded data + decodedData, err := chelpers.DecodeIgnitionFileContents(file.Contents.Source, file.Contents.Compression) + if err != nil { + return nil, fmt.Errorf("error decoding %s: %v", file.Path, err) + } + + // Key in the configmap is the path without the prefix + fileKey := strings.TrimPrefix(file.Path, prefixToTrim) + return map[string]string{fileKey: string(decodedData)}, nil + } + klog.Infof("Could not find %s in MachineConfig %s, skipping configmap creation....", filePath, mc.Name) + return nil, nil +} + // Renders our Containerfile template. // // TODO: Figure out how to parse the Containerfile using @@ -226,10 +318,9 @@ func (br buildRequestImpl) renderContainerfile() (string, error) { // default to a value from a different location, it makes more sense for us // to implement that logic in Go as opposed to the Go template language. 
items := struct { - MachineOSBuild *mcfgv1alpha1.MachineOSBuild - MachineOSConfig *mcfgv1alpha1.MachineOSConfig + MachineOSBuild *mcfgv1.MachineOSBuild + MachineOSConfig *mcfgv1.MachineOSConfig UserContainerfile string - ReleaseVersion string BaseOSImage string ExtensionsImage string ExtensionsPackages []string @@ -237,9 +328,8 @@ func (br buildRequestImpl) renderContainerfile() (string, error) { MachineOSBuild: br.opts.MachineOSBuild, MachineOSConfig: br.opts.MachineOSConfig, UserContainerfile: br.userContainerfile, - ReleaseVersion: br.opts.getReleaseVersion(), - BaseOSImage: br.opts.getBaseOSImagePullspec(), - ExtensionsImage: br.opts.getExtensionsImagePullspec(), + BaseOSImage: br.opts.OSImageURLConfig.BaseOSContainerImage, + ExtensionsImage: br.opts.OSImageURLConfig.BaseOSExtensionsContainerImage, ExtensionsPackages: extPkgs, } @@ -253,12 +343,23 @@ func (br buildRequestImpl) renderContainerfile() (string, error) { // podToJob creates a Job with the spec of the given Pod func (br buildRequestImpl) podToJob(pod *corev1.Pod) *batchv1.Job { // Set the backoffLimit to 3 so the job will retry 4 times before reporting a failure - var backoffLimit int32 = 3 + var backoffLimit int32 = constants.JobMaxRetries + // Set completion to 1 so that as soon as the pod has completed successfully the job is // considered a success - var completions int32 = 1 + var completions int32 = constants.JobCompletions + + // Set the owner ref of the job to the MOSB + oref := metav1.NewControllerRef(br.opts.MachineOSBuild, mcfgv1.SchemeGroupVersion.WithKind("MachineOSBuild")) + return &batchv1.Job{ - ObjectMeta: pod.ObjectMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: pod.ObjectMeta.Name, + Namespace: pod.ObjectMeta.Namespace, + Labels: pod.ObjectMeta.Labels, + Annotations: pod.ObjectMeta.Annotations, + OwnerReferences: []metav1.OwnerReference{*oref}, + }, TypeMeta: metav1.TypeMeta{ APIVersion: "batch/v1", Kind: "Job", @@ -315,7 +416,7 @@ func (br buildRequestImpl) toBuildahPod() 
*corev1.Pod { }, { Name: "TAG", - Value: br.opts.MachineOSBuild.Spec.RenderedImagePushspec, + Value: string(br.opts.MachineOSBuild.Spec.RenderedImagePushSpec), }, { Name: "BASE_IMAGE_PULL_CREDS", @@ -356,6 +457,16 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { Name: "containerfile", MountPath: "/tmp/containerfile", }, + { + Name: "etc-policy", + MountPath: "/etc/containers/policy.json", + SubPath: "policy.json", + }, + { + Name: "etc-registries", + MountPath: "/etc/containers/registries.conf", + SubPath: "registries.conf", + }, { Name: "additional-trust-bundle", MountPath: "/etc/pki/ca-trust/source/anchors", @@ -374,6 +485,7 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { }, } + boolTrue := true volumes := []corev1.Volume{ { // Provides the rendered Containerfile. @@ -409,6 +521,30 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { }, }, }, + { + // Provides the /etc/containers/policy.json content from the node + Name: "etc-policy", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: br.getEtcPolicyConfigMapName(), + }, + Optional: &boolTrue, + }, + }, + }, + { + // Provides the /etc/containers/registries.conf content from the node + Name: "etc-registries", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: br.getEtcRegistriesConfigMapName(), + }, + Optional: &boolTrue, + }, + }, + }, { // Provides the credentials needed to pull the base OS image. Name: "base-image-pull-creds", @@ -489,11 +625,6 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { volumes = append(volumes, opts.volumeForSecret(constants.EtcPkiRpmGpgSecretName)) } - // TODO: We need pull creds with permissions to pull the base image. By - // default, none of the MCO pull secrets can directly pull it. 
We can use the - // pull-secret creds from openshift-config to do that, though we'll need to - // mirror those creds into the MCO namespace. The operator portion of the MCO - // has some logic to detect whenever that secret changes. return &corev1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -525,7 +656,7 @@ func (br buildRequestImpl) toBuildahPod() *corev1.Pod { // us to avoid parsing log files. Name: "wait-for-done", Command: append(command, waitScript), - Image: br.opts.getBaseOSImagePullspec(), + Image: br.opts.OSImageURLConfig.BaseOSContainerImage, Env: env, ImagePullPolicy: corev1.PullAlways, SecurityContext: securityContext, @@ -543,7 +674,7 @@ func (br buildRequestImpl) getLabelsForObjectMeta() map[string]string { return map[string]string{ constants.EphemeralBuildObjectLabelKey: "", constants.OnClusterLayeringLabelKey: "", - constants.RenderedMachineConfigLabelKey: br.opts.MachineOSBuild.Spec.DesiredConfig.Name, + constants.RenderedMachineConfigLabelKey: br.opts.MachineOSBuild.Spec.MachineConfig.Name, constants.TargetMachineConfigPoolLabelKey: br.opts.MachineOSConfig.Spec.MachineConfigPool.Name, constants.MachineOSConfigNameLabelKey: br.opts.MachineOSConfig.Name, constants.MachineOSBuildNameLabelKey: br.opts.MachineOSBuild.Name, @@ -599,6 +730,14 @@ func (br buildRequestImpl) getMCConfigMapName() string { return utils.GetMCConfigMapName(br.opts.MachineOSBuild) } +func (br buildRequestImpl) getEtcPolicyConfigMapName() string { + return utils.GetEtcPolicyConfigMapName(br.opts.MachineOSBuild) +} + +func (br buildRequestImpl) getEtcRegistriesConfigMapName() string { + return utils.GetEtcRegistriesConfigMapName(br.opts.MachineOSBuild) +} + // Computes the build name based upon the MachineConfigPool name. 
func (br buildRequestImpl) getBuildName() string { return utils.GetBuildJobName(br.opts.MachineOSBuild) diff --git a/pkg/controller/build/buildrequest/buildrequest_test.go b/pkg/controller/build/buildrequest/buildrequest_test.go index 1f7d0c4e88..4d0c58c2d6 100644 --- a/pkg/controller/build/buildrequest/buildrequest_test.go +++ b/pkg/controller/build/buildrequest/buildrequest_test.go @@ -5,7 +5,6 @@ import ( "testing" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" @@ -130,25 +129,6 @@ func TestBuildRequest(t *testing.T) { return opts }, }, - { - name: "MachineOSConfig-provided options override OSImageURLConfig defaults", - optsFunc: func() BuildRequestOpts { - opts := getBuildRequestOpts() - opts.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec = "base-os-image-from-machineosconfig" - opts.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec = "base-ext-image-from-machineosconfig" - opts.MachineOSConfig.Spec.BuildInputs.ReleaseVersion = "release-version-from-machineosconfig" - opts.MachineConfig.Spec.Extensions = []string{"usbguard"} - return opts - }, - expectedContainerfileContents: []string{ - "FROM base-os-image-from-machineosconfig AS extract", - "FROM base-os-image-from-machineosconfig AS configs", - "RUN --mount=type=bind,from=base-ext-image-from-machineosconfig", - "extensions=\"usbguard\"", - "LABEL releaseversion=release-version-from-machineosconfig", - }, - unexpectedContainerfileContents: expectedContents(), - }, } for _, testCase := range testCases { @@ -169,7 +149,7 @@ func TestBuildRequest(t *testing.T) { if len(testCase.expectedContainerfileContents) == 0 { testCase.expectedContainerfileContents = append(expectedContents(), 
[]string{ machineConfigJSONFilename, - opts.MachineOSConfig.Spec.BuildInputs.Containerfile[0].Content, + opts.MachineOSConfig.Spec.Containerfile[0].Content, }...) } @@ -321,7 +301,7 @@ RUN rpm-ostree install && \ layeredObjects := fixtures.NewObjectBuildersForTest("worker") layeredObjects.MachineOSConfigBuilder. - WithContainerfile(mcfgv1alpha1.NoArch, containerfileContents) + WithContainerfile(mcfgv1.NoArch, containerfileContents) layeredObjects.MachineOSBuildBuilder. // Note: This is set statically so that the test suite is less brittle. diff --git a/pkg/controller/build/buildrequest/buildrequestopts.go b/pkg/controller/build/buildrequest/buildrequestopts.go index 8293f22ac0..c44121aa3b 100644 --- a/pkg/controller/build/buildrequest/buildrequestopts.go +++ b/pkg/controller/build/buildrequest/buildrequestopts.go @@ -9,7 +9,6 @@ import ( "github.com/distribution/reference" configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" @@ -22,8 +21,8 @@ import ( // Holds all of the options used to produce a BuildRequest. type BuildRequestOpts struct { //nolint:revive // This name is fine. - MachineOSConfig *mcfgv1alpha1.MachineOSConfig - MachineOSBuild *mcfgv1alpha1.MachineOSBuild + MachineOSConfig *mcfgv1.MachineOSConfig + MachineOSBuild *mcfgv1.MachineOSBuild MachineConfig *mcfgv1.MachineConfig Images *ctrlcommon.Images OSImageURLConfig *ctrlcommon.OSImageURLConfig @@ -31,6 +30,8 @@ type BuildRequestOpts struct { //nolint:revive // This name is fine. 
BaseImagePullSecret *corev1.Secret FinalImagePushSecret *corev1.Secret + // Has user defined base image pull secret + hasUserDefinedBaseImagePullSecret bool // Has /etc/pki/entitlement HasEtcPkiEntitlementKeys bool // Has /etc/yum.repos.d configs @@ -44,36 +45,6 @@ type BuildRequestOpts struct { //nolint:revive // This name is fine. AdditionalTrustBundle []byte } -// Gets the extensions image pullspec from the MachineOSConfig if available. -// Otherwise, it defaults to the value from the osimageurl ConfigMap. -func (b BuildRequestOpts) getExtensionsImagePullspec() string { - if b.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec != "" { - return b.MachineOSConfig.Spec.BuildInputs.BaseOSExtensionsImagePullspec - } - - return b.OSImageURLConfig.BaseOSExtensionsContainerImage -} - -// Gets the base OS image pullspec from the MachineOSConfig if available. -// Otherwise, it defaults to the value from the osimageurl ConfigMap. -func (b BuildRequestOpts) getBaseOSImagePullspec() string { - if b.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec != "" { - return b.MachineOSConfig.Spec.BuildInputs.BaseOSImagePullspec - } - - return b.OSImageURLConfig.BaseOSContainerImage -} - -// Gets the release version value from the MachineOSConfig if available. -// Otherwise, it defaults to the value from the osimageurl ConfigMap. -func (b BuildRequestOpts) getReleaseVersion() string { - if b.MachineOSConfig.Spec.BuildInputs.ReleaseVersion != "" { - return b.MachineOSConfig.Spec.BuildInputs.ReleaseVersion - } - - return b.OSImageURLConfig.ReleaseVersion -} - // Gets the packages for the extensions from the MachineConfig, if available. func (b BuildRequestOpts) getExtensionsPackages() ([]string, error) { if len(b.MachineConfig.Spec.Extensions) == 0 { @@ -84,7 +55,7 @@ func (b BuildRequestOpts) getExtensionsPackages() ([]string, error) { } // Gets all of the image build request opts from the Kube API server. 
-func newBuildRequestOptsFromAPI(ctx context.Context, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (*BuildRequestOpts, error) { +func newBuildRequestOptsFromAPI(ctx context.Context, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (*BuildRequestOpts, error) { og := optsGetter{ kubeclient: kubeclient, mcfgclient: mcfgclient, @@ -134,45 +105,41 @@ type optsGetter struct { } // TODO: Deduplicate this. -func (o *optsGetter) validateMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig) error { +func (o *optsGetter) validateMachineOSConfig(mosc *mcfgv1.MachineOSConfig) error { if mosc == nil { return fmt.Errorf("expected MachineOSConfig not to be nil") } - if mosc.Spec.BuildInputs.BaseImagePullSecret.Name == "" { - return fmt.Errorf("baseImagePullSecret empty for MachineOSConfig %s", mosc.Name) - } - - if mosc.Spec.BuildInputs.RenderedImagePushSecret.Name == "" { + if mosc.Spec.RenderedImagePushSecret.Name == "" { return fmt.Errorf("renderedImagePushSecret empty for MachineOSConfig %s", mosc.Name) } - if mosc.Spec.BuildInputs.RenderedImagePushspec == "" { + if mosc.Spec.RenderedImagePushSpec == "" { return fmt.Errorf("renderedImagePushspec empty for MachineOSConfig %s", mosc.Name) } - if _, err := reference.ParseNamed(mosc.Spec.BuildInputs.RenderedImagePushspec); err != nil { - return fmt.Errorf("invalid renderedImagePushspec for MachineOSConfig %s: %w", mosc.Name, err) + if _, err := reference.ParseNamed(string(mosc.Spec.RenderedImagePushSpec)); err != nil { + return fmt.Errorf("invalid renderedImagePushSpec for MachineOSConfig %s: %w", mosc.Name, err) } return nil } // Validates that the required fields on a MachineOSBuild are set before beginning the build. 
-func (o *optsGetter) validateMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild) error { +func (o *optsGetter) validateMachineOSBuild(mosb *mcfgv1.MachineOSBuild) error { if mosb == nil { return fmt.Errorf("expected MachineOSBuild not to be nil") } - if mosb.Spec.DesiredConfig.Name == "" { - return fmt.Errorf("desiredConfig.name empty for MachineOSBuild %s", mosb.Name) + if mosb.Spec.MachineConfig.Name == "" { + return fmt.Errorf("machineConfig.name empty for MachineOSBuild %s", mosb.Name) } return nil } // Gets the BuildRequestOpts after making API queries to get all of the necessary info required. -func (o *optsGetter) getOpts(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (*BuildRequestOpts, error) { +func (o *optsGetter) getOpts(ctx context.Context, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (*BuildRequestOpts, error) { if err := o.validateMachineOSConfig(mosc); err != nil { return nil, fmt.Errorf("could not validate MachineOSConfig: %w", err) } @@ -196,19 +163,30 @@ func (o *optsGetter) getOpts(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBu return nil, fmt.Errorf("could not get osImageURL config: %w", err) } - baseImagePullSecret, err := o.getValidatedSecret(ctx, mosc.Spec.BuildInputs.BaseImagePullSecret.Name) + var baseImagePullSecretName string + // Check if a base image pull secret was provided + opts.hasUserDefinedBaseImagePullSecret = mosc.Spec.BaseImagePullSecret != nil + if opts.hasUserDefinedBaseImagePullSecret { + baseImagePullSecretName = mosc.Spec.BaseImagePullSecret.Name + } else { + // If not provided, fall back to the global pull secret copy in the MCO namespace + klog.Infof("BaseImagePullSecret not defined for MachineOSConfig %s, falling back to global pull secret", mosc.Name) + baseImagePullSecretName = ctrlcommon.GlobalPullSecretCopyName + } + + baseImagePullSecret, err := o.getValidatedSecret(ctx, baseImagePullSecretName) if err != nil { - return nil, fmt.Errorf("could not get 
base image pull secret %s: %w", mosc.Spec.BuildInputs.BaseImagePullSecret.Name, err) + return nil, fmt.Errorf("could not get base image pull secret %s: %w", baseImagePullSecretName, err) } - finalImagePushSecret, err := o.getValidatedSecret(ctx, mosc.Spec.BuildInputs.RenderedImagePushSecret.Name) + finalImagePushSecret, err := o.getValidatedSecret(ctx, mosc.Spec.RenderedImagePushSecret.Name) if err != nil { - return nil, fmt.Errorf("could not get final image push secret %s: %w", mosc.Spec.BuildInputs.RenderedImagePushSecret.Name, err) + return nil, fmt.Errorf("could not get final image push secret %s: %w", mosc.Spec.RenderedImagePushSecret.Name, err) } - mc, err := o.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(ctx, mosb.Spec.DesiredConfig.Name, metav1.GetOptions{}) + mc, err := o.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(ctx, mosb.Spec.MachineConfig.Name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("could not retrieve machineconfig %s: %w", mosb.Spec.DesiredConfig.Name, err) + return nil, fmt.Errorf("could not retrieve machineconfig %s: %w", mosb.Spec.MachineConfig.Name, err) } cc, err := o.mcfgclient.MachineconfigurationV1().ControllerConfigs().Get(ctx, ctrlcommon.ControllerConfigName, metav1.GetOptions{}) @@ -245,7 +223,7 @@ func (o *optsGetter) getValidatedSecret(ctx context.Context, name string) (*core // Determines whether the build makes use of entitlements based upon the // presence (or lack thereof) of specific configmaps and secrets. 
-func (o *optsGetter) resolveEntitlements(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) (*BuildRequestOpts, error) { +func (o *optsGetter) resolveEntitlements(ctx context.Context, mosc *mcfgv1.MachineOSConfig) (*BuildRequestOpts, error) { opts := &BuildRequestOpts{} etcPkiEntitlements, err := o.getOptionalSecret(ctx, constants.EtcPkiEntitlementSecretName+"-"+mosc.Spec.MachineConfigPool.Name) diff --git a/pkg/controller/build/buildrequest/buildrequestopts_test.go b/pkg/controller/build/buildrequest/buildrequestopts_test.go index 3d26f9fa8c..e1a6e81625 100644 --- a/pkg/controller/build/buildrequest/buildrequestopts_test.go +++ b/pkg/controller/build/buildrequest/buildrequestopts_test.go @@ -11,13 +11,16 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" ) func TestBuildRequestOpts(t *testing.T) { testCases := []struct { - name string - addlObjects []runtime.Object - addlAsserts func(*testing.T, BuildRequestOpts) + name string + addlObjects []runtime.Object + addlObjectSetup func(*testing.T, *fixtures.ObjectsForTest) + addlAsserts func(*testing.T, BuildRequestOpts) }{ { name: "no entitlement data", @@ -25,6 +28,7 @@ func TestBuildRequestOpts(t *testing.T) { assert.False(t, brOpts.HasEtcPkiRpmGpgKeys) assert.False(t, brOpts.HasEtcYumReposDConfigs) assert.False(t, brOpts.HasEtcPkiEntitlementKeys) + assert.False(t, brOpts.hasUserDefinedBaseImagePullSecret) }, }, { @@ -41,6 +45,7 @@ func TestBuildRequestOpts(t *testing.T) { assert.False(t, brOpts.HasEtcPkiRpmGpgKeys) assert.False(t, brOpts.HasEtcYumReposDConfigs) assert.True(t, brOpts.HasEtcPkiEntitlementKeys) + assert.False(t, brOpts.hasUserDefinedBaseImagePullSecret) }, }, { @@ -57,6 +62,7 @@ func TestBuildRequestOpts(t *testing.T) { assert.False(t, brOpts.HasEtcPkiRpmGpgKeys) assert.True(t, brOpts.HasEtcYumReposDConfigs) assert.False(t, brOpts.HasEtcPkiEntitlementKeys) + 
assert.False(t, brOpts.hasUserDefinedBaseImagePullSecret) }, }, { @@ -73,6 +79,7 @@ func TestBuildRequestOpts(t *testing.T) { assert.True(t, brOpts.HasEtcPkiRpmGpgKeys) assert.False(t, brOpts.HasEtcYumReposDConfigs) assert.False(t, brOpts.HasEtcPkiEntitlementKeys) + assert.False(t, brOpts.hasUserDefinedBaseImagePullSecret) }, }, { @@ -101,6 +108,16 @@ func TestBuildRequestOpts(t *testing.T) { assert.True(t, brOpts.HasEtcPkiRpmGpgKeys) assert.True(t, brOpts.HasEtcYumReposDConfigs) assert.True(t, brOpts.HasEtcPkiEntitlementKeys) + assert.False(t, brOpts.hasUserDefinedBaseImagePullSecret) + }, + }, + { + name: "with user defined base image pull secret", + addlObjectSetup: func(t *testing.T, lobj *fixtures.ObjectsForTest) { + lobj.MachineOSConfig.Spec.BaseImagePullSecret = &mcfgv1.ImageSecretObjectReference{Name: fixtures.BaseImagePullSecretName} + }, + addlAsserts: func(t *testing.T, brOpts BuildRequestOpts) { + assert.True(t, brOpts.hasUserDefinedBaseImagePullSecret) }, }, } @@ -113,7 +130,11 @@ func TestBuildRequestOpts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - kubeclient, mcfgclient, lobj, _ := fixtures.GetClientsForTestWithAdditionalObjects(t, testCase.addlObjects, []runtime.Object{}) + kubeclient, mcfgclient, _, _, lobj, _ := fixtures.GetClientsForTestWithAdditionalObjects(t, testCase.addlObjects, []runtime.Object{}) + + if testCase.addlObjectSetup != nil { + testCase.addlObjectSetup(t, lobj) + } brOpts, err := newBuildRequestOptsFromAPI(ctx, kubeclient, mcfgclient, lobj.MachineOSBuild, lobj.MachineOSConfig) assert.NoError(t, err) diff --git a/pkg/controller/build/buildrequest/interfaces.go b/pkg/controller/build/buildrequest/interfaces.go index bdb5c18af1..a844c11678 100644 --- a/pkg/controller/build/buildrequest/interfaces.go +++ b/pkg/controller/build/buildrequest/interfaces.go @@ -19,6 +19,7 @@ type Builder interface { MachineOSBuild() (string, error) MachineConfigPool() (string, error) RenderedMachineConfig() 
(string, error) + BuilderUID() (string, error) GetObject() metav1.Object metav1.Object } diff --git a/pkg/controller/build/buildrequest/machineosbuild.go b/pkg/controller/build/buildrequest/machineosbuild.go index 30a9da5dbb..3ac3f398da 100644 --- a/pkg/controller/build/buildrequest/machineosbuild.go +++ b/pkg/controller/build/buildrequest/machineosbuild.go @@ -9,7 +9,7 @@ import ( "github.com/distribution/reference" "github.com/ghodss/yaml" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,7 +32,7 @@ var ( // Holds the objects that are used to construct a MachineOSBuild with a hashed // name. type MachineOSBuildOpts struct { - MachineOSConfig *mcfgv1alpha1.MachineOSConfig + MachineOSConfig *mcfgv1.MachineOSConfig MachineConfigPool *mcfgv1.MachineConfigPool OSImageURLConfig *ctrlcommon.OSImageURLConfig } @@ -58,25 +58,8 @@ func (m *MachineOSBuildOpts) validateForHash() error { return nil } -// Makes a deep-copy of the MachineOSConfig and clears the data from it that -// can come from either the MachineOSConfig or from the OSImageURLConfig -// object. We do this to stabilize the hashing of the name so that whether the -// value comes from the MachineOSConfig or the OSImageURLConfig, the hash will -// be same, provided that the value is the same. 
-func (m *MachineOSBuildOpts) getMachineOSConfigForHashing() *mcfgv1alpha1.MachineOSConfig { - moscCopy := m.MachineOSConfig.DeepCopy() - moscCopy.Spec.BuildInputs.BaseOSImagePullspec = "" - moscCopy.Spec.BuildInputs.BaseOSExtensionsImagePullspec = "" - moscCopy.Spec.BuildInputs.ReleaseVersion = "" - return moscCopy -} - // Creates a list of objects that are consumed by the SHA256 hash. func (m *MachineOSBuildOpts) objectsForHash() []interface{} { - o := BuildRequestOpts{ - MachineOSConfig: m.MachineOSConfig, - OSImageURLConfig: m.OSImageURLConfig, - } // The objects considered for hashing described inline: out := []interface{}{ @@ -85,20 +68,10 @@ func (m *MachineOSBuildOpts) objectsForHash() []interface{} { // the individual MachineConfigs that went into that rendered // MachineConfig. m.MachineConfigPool.Spec.Configuration, - // The deep-copy of the MachineOSConfig with the multisource data fields - // removed for stability. - m.getMachineOSConfigForHashing().Spec, + // The MachineOSConfig Spec field. + m.MachineOSConfig.Spec, // The complete OSImageURLConfig object. m.OSImageURLConfig, - // The OS image extensions pullspec from either the MachineOSConfig or the - // OSImageURLConfig. - o.getExtensionsImagePullspec(), - // The base OS image pullspec from either the MachineOSConfig or the - // OSImageURLConfig. - o.getBaseOSImagePullspec(), - // The release version from either the MachineOSConfig or the - // OSImageURLConfig. - o.getReleaseVersion(), } return out @@ -147,7 +120,7 @@ func (m *MachineOSBuildOpts) getHashedName() (string, error) { // Constructs the MachineOSBuildOpts by retrieving the OSImageURLConfig from // the API server. 
-func NewMachineOSBuildOpts(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (MachineOSBuildOpts, error) { +func NewMachineOSBuildOpts(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (MachineOSBuildOpts, error) { // TODO: Consider an implementation that uses listers instead of API clients // just to cut down on API server traffic. osImageURLs, err := ctrlcommon.GetOSImageURLConfig(ctx, kubeclient) @@ -164,7 +137,7 @@ func NewMachineOSBuildOpts(ctx context.Context, kubeclient clientset.Interface, // Constructs a new MachineOSBuild object or panics trying. Useful for testing // scenarios. -func NewMachineOSBuildOrDie(opts MachineOSBuildOpts) *mcfgv1alpha1.MachineOSBuild { +func NewMachineOSBuildOrDie(opts MachineOSBuildOpts) *mcfgv1.MachineOSBuild { mosb, err := NewMachineOSBuild(opts) if err != nil { @@ -176,7 +149,7 @@ func NewMachineOSBuildOrDie(opts MachineOSBuildOpts) *mcfgv1alpha1.MachineOSBuil // Retrieves the MachineOSBuildOpts from the API and constructs a new // MachineOSBuild object or panics trying. Useful for testing scenarios. -func NewMachineOSBuildFromAPIOrDie(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) *mcfgv1alpha1.MachineOSBuild { +func NewMachineOSBuildFromAPIOrDie(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) *mcfgv1.MachineOSBuild { mosb, err := NewMachineOSBuildFromAPI(ctx, kubeclient, mosc, mcp) if err != nil { @@ -188,7 +161,7 @@ func NewMachineOSBuildFromAPIOrDie(ctx context.Context, kubeclient clientset.Int // Retrieves the MachineOSBuildOpts from the API and constructs a new // MachineOSBuild object. 
-func NewMachineOSBuildFromAPI(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSBuild, error) { +func NewMachineOSBuildFromAPI(ctx context.Context, kubeclient clientset.Interface, mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSBuild, error) { opts, err := NewMachineOSBuildOpts(ctx, kubeclient, mosc, mcp) if err != nil { @@ -200,7 +173,7 @@ func NewMachineOSBuildFromAPI(ctx context.Context, kubeclient clientset.Interfac // Constructs a new MachineOSBuild object with all of the labels, the tagged // image pushpsec, and a hashed name. -func NewMachineOSBuild(opts MachineOSBuildOpts) (*mcfgv1alpha1.MachineOSBuild, error) { +func NewMachineOSBuild(opts MachineOSBuildOpts) (*mcfgv1.MachineOSBuild, error) { mosbName, err := opts.getHashedNameWithConfig() if err != nil { return nil, fmt.Errorf("could not get hashed name for MachineOSBuild: %w", err) @@ -208,7 +181,7 @@ func NewMachineOSBuild(opts MachineOSBuildOpts) (*mcfgv1alpha1.MachineOSBuild, e now := metav1.Now() - namedRef, err := reference.ParseNamed(opts.MachineOSConfig.Spec.BuildInputs.RenderedImagePushspec) + namedRef, err := reference.ParseNamed(string(opts.MachineOSConfig.Spec.RenderedImagePushSpec)) if err != nil { return nil, err } @@ -218,27 +191,32 @@ func NewMachineOSBuild(opts MachineOSBuildOpts) (*mcfgv1alpha1.MachineOSBuild, e return nil, err } - mosb := &mcfgv1alpha1.MachineOSBuild{ + mosb := &mcfgv1.MachineOSBuild{ TypeMeta: metav1.TypeMeta{ Kind: "MachineOSBuild", - APIVersion: "machineconfiguration.openshift.io/v1alpha1", + APIVersion: "machineconfiguration.openshift.io/v1", }, ObjectMeta: metav1.ObjectMeta{ Name: mosbName, Labels: utils.GetMachineOSBuildLabels(opts.MachineOSConfig, opts.MachineConfigPool), + // Set finalzer on MOSB to ensure all it dependents are deleted before the MOSB + Finalizers: []string{ + metav1.FinalizerDeleteDependents, + }, + Annotations: 
map[string]string{ + constants.RenderedImagePushSecretAnnotationKey: opts.MachineOSConfig.Spec.RenderedImagePushSecret.Name, + }, }, - Spec: mcfgv1alpha1.MachineOSBuildSpec{ - RenderedImagePushspec: taggedRef.String(), - Version: 1, - ConfigGeneration: 1, - DesiredConfig: mcfgv1alpha1.RenderedMachineConfigReference{ + Spec: mcfgv1.MachineOSBuildSpec{ + RenderedImagePushSpec: mcfgv1.ImageTagFormat(taggedRef.String()), + MachineConfig: mcfgv1.MachineConfigReference{ Name: opts.MachineConfigPool.Spec.Configuration.Name, }, - MachineOSConfig: mcfgv1alpha1.MachineOSConfigReference{ + MachineOSConfig: mcfgv1.MachineOSConfigReference{ Name: opts.MachineOSConfig.Name, }, }, - Status: mcfgv1alpha1.MachineOSBuildStatus{ + Status: mcfgv1.MachineOSBuildStatus{ BuildStart: &now, }, } diff --git a/pkg/controller/build/buildrequest/machineosbuild_test.go b/pkg/controller/build/buildrequest/machineosbuild_test.go index a1f58e5fdf..5235d71332 100644 --- a/pkg/controller/build/buildrequest/machineosbuild_test.go +++ b/pkg/controller/build/buildrequest/machineosbuild_test.go @@ -8,7 +8,6 @@ import ( "k8s.io/apimachinery/pkg/labels" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" testhelpers "github.com/openshift/machine-config-operator/test/helpers" @@ -20,7 +19,7 @@ func TestMachineOSBuild(t *testing.T) { poolName := "worker" - getMachineOSConfig := func() *mcfgv1alpha1.MachineOSConfig { + getMachineOSConfig := func() *mcfgv1.MachineOSConfig { return testhelpers.NewMachineOSConfigBuilder(poolName).WithMachineConfigPool(poolName).MachineOSConfig() } @@ -29,7 +28,7 @@ func TestMachineOSBuild(t *testing.T) { } // Some of the test cases expect the hash name to be the same. This is that hash value. 
- expectedCommonHashName := "worker-d6e1cf069939c5cda06064edf431689c" + expectedCommonHashName := "worker-55592464e51104dcc274a300565fec9e" testCases := []struct { name string @@ -81,109 +80,6 @@ func TestMachineOSBuild(t *testing.T) { OSImageURLConfig: fixtures.OSImageURLConfig(), }, }, - { - name: "Base OS image pullspec provided by MachineOSConfig equal to OSImageURLConfig", - expectedName: expectedCommonHashName, - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithBaseOSImagePullspec(fixtures.BaseOSContainerImage). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "Extensions image provided by provided by MachineOSConfig equal to OSImageURLConfig", - expectedName: expectedCommonHashName, - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithExtensionsImagePullspec(fixtures.BaseOSExtensionsContainerImage). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "Release version provided by MachineOSConfig equal to OSImageURLConfig", - expectedName: expectedCommonHashName, - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithReleaseVersion(fixtures.ReleaseVersion). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "All values provided by MachineOSConfig equal to OSImageURLConfig values", - expectedName: expectedCommonHashName, - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithBaseOSImagePullspec(fixtures.BaseOSContainerImage). 
- WithExtensionsImagePullspec(fixtures.BaseOSExtensionsContainerImage). - WithReleaseVersion(fixtures.ReleaseVersion). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - // These cases ensure that should the value on the MachineOSConfig differ - // from what is in the OSImageURLConfig (provided it is not empty!), the - // hash will change. - { - name: "Custom base OS image pullspec provided by MachineOSConfig", - expectedName: "worker-45358521eec36e094dfba3d48f67bf2e", - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithBaseOSImagePullspec("registry.hostname.com/org/repo:custom-os-image"). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "Custom extensions image provided by provided by MachineOSConfig", - expectedName: "worker-e091d5caee71326bd29f9e30997eda11", - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithExtensionsImagePullspec("registry.hostname.com/org/repo:custom-extensions-image"). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "Custom release version provided by MachineOSConfig", - expectedName: "worker-33f019b45084bba3d6e6dfaf0a2335b0", - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithReleaseVersion("custom-release-version"). 
- MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, - { - name: "All custom values provided by MachineOSConfig", - expectedName: "worker-b9833dc380a7c5892856202669afae9c", - opts: MachineOSBuildOpts{ - MachineConfigPool: getMachineConfigPool(), - MachineOSConfig: testhelpers.NewMachineOSConfigBuilder(poolName). - WithMachineConfigPool(poolName). - WithBaseOSImagePullspec("registry.hostname.com/org/repo:custom-os-image"). - WithExtensionsImagePullspec("registry.hostname.com/org/repo:custom-extensions-image"). - WithReleaseVersion("custom-release-version"). - MachineOSConfig(), - OSImageURLConfig: fixtures.OSImageURLConfig(), - }, - }, // These cases ensure that pausing the MachineConfigPool does not affect the hash. { name: "Unpaused MachineConfigPool", @@ -211,7 +107,7 @@ func TestMachineOSBuild(t *testing.T) { t.Parallel() if testCase.opts.MachineOSConfig != nil { - testCase.opts.MachineOSConfig.Spec.BuildInputs.RenderedImagePushspec = "registry.hostname.com/org/repo:latest" + testCase.opts.MachineOSConfig.Spec.RenderedImagePushSpec = "registry.hostname.com/org/repo:latest" } mosb, err := NewMachineOSBuild(testCase.opts) @@ -226,8 +122,8 @@ func TestMachineOSBuild(t *testing.T) { assert.Equal(t, testCase.expectedName, mosb.Name) expectedPullspec := fmt.Sprintf("registry.hostname.com/org/repo:%s", testCase.expectedName) - assert.Equal(t, expectedPullspec, mosb.Spec.RenderedImagePushspec) - assert.Equal(t, testCase.opts.MachineConfigPool.Spec.Configuration.Name, mosb.Spec.DesiredConfig.Name) + assert.Equal(t, expectedPullspec, string(mosb.Spec.RenderedImagePushSpec)) + assert.Equal(t, testCase.opts.MachineConfigPool.Spec.Configuration.Name, mosb.Spec.MachineConfig.Name) assert.NotNil(t, mosb.Status.BuildStart) assert.True(t, utils.MachineOSBuildSelector(testCase.opts.MachineOSConfig, testCase.opts.MachineConfigPool).Matches(labels.Set(mosb.Labels))) diff --git a/pkg/controller/build/clients.go b/pkg/controller/build/clients.go index 
f0594907f7..36e85cf1f0 100644 --- a/pkg/controller/build/clients.go +++ b/pkg/controller/build/clients.go @@ -13,9 +13,9 @@ import ( mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" - mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" - mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + corelistersv1 "k8s.io/client-go/listers/core/v1" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -26,8 +26,9 @@ type informers struct { controllerConfigInformer mcfginformersv1.ControllerConfigInformer machineConfigPoolInformer mcfginformersv1.MachineConfigPoolInformer jobInformer batchinformersv1.JobInformer - machineOSBuildInformer mcfginformersv1alpha1.MachineOSBuildInformer - machineOSConfigInformer mcfginformersv1alpha1.MachineOSConfigInformer + machineOSBuildInformer mcfginformersv1.MachineOSBuildInformer + machineOSConfigInformer mcfginformersv1.MachineOSConfigInformer + nodeInformer coreinformersv1.NodeInformer toStart []interface{ Start(<-chan struct{}) } hasSynced []cache.InformerSynced } @@ -47,16 +48,18 @@ func (i *informers) listers() *listers { machineConfigPoolLister: i.machineConfigPoolInformer.Lister(), jobLister: i.jobInformer.Lister(), controllerConfigLister: i.controllerConfigInformer.Lister(), + nodeLister: i.nodeInformer.Lister(), } } // Holds all of the required listers so that they can be passed around and 
reused. type listers struct { - machineOSBuildLister mcfglistersv1alpha1.MachineOSBuildLister - machineOSConfigLister mcfglistersv1alpha1.MachineOSConfigLister + machineOSBuildLister mcfglistersv1.MachineOSBuildLister + machineOSConfigLister mcfglistersv1.MachineOSConfigLister machineConfigPoolLister mcfglistersv1.MachineConfigPoolLister jobLister batchlisterv1.JobLister controllerConfigLister mcfglistersv1.ControllerConfigLister + nodeLister corelistersv1.NodeLister } func (l *listers) utilListers() *utils.Listers { @@ -64,6 +67,7 @@ func (l *listers) utilListers() *utils.Listers { MachineOSBuildLister: l.machineOSBuildLister, MachineOSConfigLister: l.machineOSConfigLister, MachineConfigPoolLister: l.machineConfigPoolLister, + NodeLister: l.nodeLister, } } @@ -82,12 +86,14 @@ func newInformers(mcfgclient mcfgclientset.Interface, kubeclient clientset.Inter coreinformers.WithNamespace(ctrlcommon.MCONamespace), coreinformers.WithTweakListOptions(ephemeralBuildObjectsOpts), ) + coreInformerFactoryNodes := coreinformers.NewSharedInformerFactory(kubeclient, 0) controllerConfigInformer := mcoInformerFactory.Machineconfiguration().V1().ControllerConfigs() machineConfigPoolInformer := mcoInformerFactory.Machineconfiguration().V1().MachineConfigPools() - machineOSBuildInformer := mcoInformerFactory.Machineconfiguration().V1alpha1().MachineOSBuilds() - machineOSConfigInformer := mcoInformerFactory.Machineconfiguration().V1alpha1().MachineOSConfigs() + machineOSBuildInformer := mcoInformerFactory.Machineconfiguration().V1().MachineOSBuilds() + machineOSConfigInformer := mcoInformerFactory.Machineconfiguration().V1().MachineOSConfigs() jobInformer := coreInformerFactory.Batch().V1().Jobs() + nodeInformer := coreInformerFactoryNodes.Core().V1().Nodes() return &informers{ controllerConfigInformer: controllerConfigInformer, @@ -95,9 +101,11 @@ func newInformers(mcfgclient mcfgclientset.Interface, kubeclient clientset.Inter machineOSBuildInformer: machineOSBuildInformer, 
machineOSConfigInformer: machineOSConfigInformer, jobInformer: jobInformer, + nodeInformer: nodeInformer, toStart: []interface{ Start(<-chan struct{}) }{ mcoInformerFactory, coreInformerFactory, + coreInformerFactoryNodes, }, hasSynced: []cache.InformerSynced{ controllerConfigInformer.Informer().HasSynced, @@ -105,6 +113,7 @@ func newInformers(mcfgclient mcfgclientset.Interface, kubeclient clientset.Inter jobInformer.Informer().HasSynced, machineOSBuildInformer.Informer().HasSynced, machineOSConfigInformer.Informer().HasSynced, + nodeInformer.Informer().HasSynced, }, } } diff --git a/pkg/controller/build/constants/constants.go b/pkg/controller/build/constants/constants.go index e75c0d240f..2e8225eb68 100644 --- a/pkg/controller/build/constants/constants.go +++ b/pkg/controller/build/constants/constants.go @@ -15,10 +15,12 @@ const ( // Annotations added to all ephemeral build objects BuildController creates. const ( - MachineOSBuildNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-build" - MachineOSConfigNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-config" - MachineOSConfigNameLabelKey = MachineOSConfigNameAnnotationKey - MachineOSBuildNameLabelKey = MachineOSBuildNameAnnotationKey + MachineOSBuildNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-build" + MachineOSConfigNameAnnotationKey = "machineconfiguration.openshift.io/machine-os-config" + MachineOSConfigNameLabelKey = MachineOSConfigNameAnnotationKey + MachineOSBuildNameLabelKey = MachineOSBuildNameAnnotationKey + JobUIDAnnotationKey = "machineconfiguration.openshift.io/job-uid" + RenderedImagePushSecretAnnotationKey = "machineconfiguration.openshift.io/rendered-image-push-secret" ) // The MachineOSConfig will get updated with this annotation once a @@ -28,6 +30,12 @@ const ( CurrentMachineOSBuildAnnotationKey string = "machineconfiguration.openshift.io/current-machine-os-build" ) +// When this annotation is added to a MachineOSConfig, the current +// 
MachineOSBuild will be deleted, which will cause a rebuild to occur. +const ( + RebuildMachineOSConfigAnnotationKey string = "machineconfiguration.openshift.io/rebuild" +) + // Entitled build secret names const ( // Name of the etc-pki-entitlement secret from the openshift-config-managed namespace. @@ -57,3 +65,9 @@ const ( EtcYumReposDAnnotationKey = entitlementsAnnotationKeyBase + EtcYumReposDConfigMapName EtcPkiRpmGpgAnnotationKey = entitlementsAnnotationKeyBase + EtcPkiRpmGpgSecretName ) + +// batchv1.Job configuration +const ( + JobMaxRetries int32 = 3 + JobCompletions int32 = 1 +) diff --git a/pkg/controller/build/fixtures/fixtures.go b/pkg/controller/build/fixtures/fixtures.go index 4770960594..46b335611e 100644 --- a/pkg/controller/build/fixtures/fixtures.go +++ b/pkg/controller/build/fixtures/fixtures.go @@ -4,7 +4,9 @@ import ( "testing" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + fakeclientimagev1 "github.com/openshift/client-go/image/clientset/versioned/fake" fakeclientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" + fakeclientroutev1 "github.com/openshift/client-go/route/clientset/versioned/fake" testhelpers "github.com/openshift/machine-config-operator/test/helpers" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -14,19 +16,20 @@ import ( func GetEmptyClientsForTest(t *testing.T) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *testhelpers.Assertions) { kubeclient := fakecorev1client.NewSimpleClientset() mcfgclient := fakeclientmachineconfigv1.NewSimpleClientset() - return kubeclient, mcfgclient, testhelpers.Assert(t, kubeclient, mcfgclient) + imageclient := fakeclientimagev1.NewSimpleClientset() + return kubeclient, mcfgclient, testhelpers.Assert(t, kubeclient, mcfgclient, imageclient) } // Gets the kubeclient and mcfgclients needed for a test with the default Kube // objects in them. 
-func GetClientsForTest(t *testing.T) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *ObjectsForTest, *testhelpers.Assertions) { +func GetClientsForTest(t *testing.T) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *ObjectsForTest, *testhelpers.Assertions) { return GetClientsForTestWithAdditionalObjects(t, []runtime.Object{}, []runtime.Object{}) } // Gets the kubeclient and mcfgclient, adds any additional objects to them, and // also returns the ObjectsForTest which are instantiated assuming the // pool name "worker". -func GetClientsForTestWithAdditionalObjects(t *testing.T, addlKubeObjects, addlMcfgObjects []runtime.Object) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *ObjectsForTest, *testhelpers.Assertions) { +func GetClientsForTestWithAdditionalObjects(t *testing.T, addlKubeObjects, addlMcfgObjects []runtime.Object) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *ObjectsForTest, *testhelpers.Assertions) { obj := NewObjectsForTest("worker") mcfgObjects := append(addlMcfgObjects, obj.ToRuntimeObjects()...) //nolint:gocritic // It's not supposed to be assigned to the same slice. @@ -40,6 +43,8 @@ func GetClientsForTestWithAdditionalObjects(t *testing.T, addlKubeObjects, addlM kubeclient := fakecorev1client.NewSimpleClientset(addlKubeObjects...) mcfgclient := fakeclientmachineconfigv1.NewSimpleClientset(mcfgObjects...) 
+ imageclient := fakeclientimagev1.NewSimpleClientset() + routeclient := fakeclientroutev1.NewSimpleClientset() - return kubeclient, mcfgclient, &obj, testhelpers.Assert(t, kubeclient, mcfgclient) + return kubeclient, mcfgclient, imageclient, routeclient, &obj, testhelpers.Assert(t, kubeclient, mcfgclient, imageclient) } diff --git a/pkg/controller/build/fixtures/helpers.go b/pkg/controller/build/fixtures/helpers.go index d71dbe93b2..e6210d5eb1 100644 --- a/pkg/controller/build/fixtures/helpers.go +++ b/pkg/controller/build/fixtures/helpers.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/stretchr/testify/require" batchv1 "k8s.io/api/batch/v1" @@ -27,7 +27,7 @@ type JobStatus struct { } // Sets the provided job status on a given job under test. If successful, it will also insert the digestfile ConfigMap. 
-func SetJobStatus(ctx context.Context, t *testing.T, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, jobStatus JobStatus) { +func SetJobStatus(ctx context.Context, t *testing.T, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild, jobStatus JobStatus) { require.NoError(t, setJobStatusFields(ctx, kubeclient, mosb, jobStatus)) if jobStatus.Succeeded == 1 { @@ -37,7 +37,7 @@ func SetJobStatus(ctx context.Context, t *testing.T, kubeclient clientset.Interf } } -func SetJobDeletionTimestamp(ctx context.Context, t *testing.T, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, timestamp *metav1.Time) { +func SetJobDeletionTimestamp(ctx context.Context, t *testing.T, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild, timestamp *metav1.Time) { jobName := fmt.Sprintf("build-%s", mosb.Name) j, err := kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Get(ctx, jobName, metav1.GetOptions{}) @@ -49,7 +49,7 @@ func SetJobDeletionTimestamp(ctx context.Context, t *testing.T, kubeclient clien require.NoError(t, err) } -func setJobStatusFields(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, jobStatus JobStatus) error { +func setJobStatusFields(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild, jobStatus JobStatus) error { jobName := fmt.Sprintf("build-%s", mosb.Name) j, err := kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Get(ctx, jobName, metav1.GetOptions{}) @@ -67,11 +67,11 @@ func setJobStatusFields(ctx context.Context, kubeclient clientset.Interface, mos return err } -func createDigestfileConfigMap(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild) error { +func createDigestfileConfigMap(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild) error { return createDigestfileConfigMapWithDigest(ctx, kubeclient, mosb, getDigest(mosb.Name)) } -func createDigestfileConfigMapWithDigest(ctx 
context.Context, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, digest string) error { +func createDigestfileConfigMapWithDigest(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild, digest string) error { digestName := fmt.Sprintf("digest-%s", mosb.Name) cm := &corev1.ConfigMap{ @@ -92,7 +92,7 @@ func createDigestfileConfigMapWithDigest(ctx context.Context, kubeclient clients return nil } -func deleteDigestfileConfigMap(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild) error { +func deleteDigestfileConfigMap(ctx context.Context, kubeclient clientset.Interface, mosb *mcfgv1.MachineOSBuild) error { digestName := fmt.Sprintf("digest-%s", mosb.Name) err := kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, digestName, metav1.DeleteOptions{}) if err != nil && !k8serrors.IsNotFound(err) { diff --git a/pkg/controller/build/fixtures/objects.go b/pkg/controller/build/fixtures/objects.go index f22fe7b806..c3b28a5ed9 100644 --- a/pkg/controller/build/fixtures/objects.go +++ b/pkg/controller/build/fixtures/objects.go @@ -5,7 +5,6 @@ import ( ign3types "github.com/coreos/ignition/v2/config/v3_4/types" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" testhelpers "github.com/openshift/machine-config-operator/test/helpers" @@ -15,17 +14,17 @@ import ( ) const ( - baseImagePullSecretName string = "base-image-pull-secret" - finalImagePushSecretName string = "final-image-push-secret" - currentImagePullSecretName string = "current-image-pull-secret" + BaseImagePullSecretName string = "base-image-pull-secret" + finalImagePushSecretName string = "final-image-push-secret" + JobUID string = "bfc35cd0f874c9bfdc586e6ba39f1896" ) // Provides consistently 
instantiated objects for use in a given test. type ObjectsForTest struct { MachineConfigPool *mcfgv1.MachineConfigPool MachineConfigs []*mcfgv1.MachineConfig - MachineOSConfig *mcfgv1alpha1.MachineOSConfig - MachineOSBuild *mcfgv1alpha1.MachineOSBuild + MachineOSConfig *mcfgv1.MachineOSConfig + MachineOSBuild *mcfgv1.MachineOSBuild } // Provides the builders to create consistently instantiated objects for use in @@ -88,11 +87,9 @@ func NewObjectBuildersForTest(poolName string) ObjectBuildersForTest { moscBuilder := testhelpers.NewMachineOSConfigBuilder(moscName). WithMachineConfigPool(poolName). - WithBaseImagePullSecret(baseImagePullSecretName). WithRenderedImagePushSecret(finalImagePushSecretName). - WithCurrentImagePullSecret(currentImagePullSecretName). - WithRenderedImagePushspec("registry.hostname.com/org/repo:latest"). - WithContainerfile(mcfgv1alpha1.NoArch, "FROM configs AS final\n\nRUN echo 'hi' > /etc/hi") + WithRenderedImagePushSpec("registry.hostname.com/org/repo:latest"). + WithContainerfile(mcfgv1.NoArch, "FROM configs AS final\n\nRUN echo 'hi' > /etc/hi") mcpBuilder := testhelpers.NewMachineConfigPoolBuilder(poolName). WithChildConfigs(getChildConfigs(poolName, 5)). @@ -104,6 +101,9 @@ func NewObjectBuildersForTest(poolName string) ObjectBuildersForTest { constants.TargetMachineConfigPoolLabelKey: poolName, constants.RenderedMachineConfigLabelKey: renderedConfigName, constants.MachineOSConfigNameLabelKey: moscName, + }). 
+ WithAnnotations(map[string]string{ + constants.JobUIDAnnotationKey: JobUID, }) return ObjectBuildersForTest{ @@ -134,7 +134,7 @@ func defaultKubeObjects() []runtime.Object { }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: baseImagePullSecretName, + Name: BaseImagePullSecretName, Namespace: ctrlcommon.MCONamespace, }, Data: map[string][]byte{ @@ -144,7 +144,7 @@ func defaultKubeObjects() []runtime.Object { }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: currentImagePullSecretName, + Name: ctrlcommon.GlobalPullSecretCopyName, Namespace: ctrlcommon.MCONamespace, }, Data: map[string][]byte{ @@ -246,7 +246,7 @@ func OSImageURLConfig() *ctrlcommon.OSImageURLConfig { } } -func GetExpectedFinalImagePullspecForMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetExpectedFinalImagePullspecForMachineOSBuild(mosb *mcfgv1.MachineOSBuild) string { digest := getDigest(mosb.Name) return "registry.hostname.com/org/repo@" + digest } diff --git a/pkg/controller/build/helpers.go b/pkg/controller/build/helpers.go index 72e5bfc53a..5b9c315aec 100644 --- a/pkg/controller/build/helpers.go +++ b/pkg/controller/build/helpers.go @@ -4,10 +4,10 @@ import ( "context" "errors" "fmt" + "strings" "github.com/containers/image/v5/docker/reference" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/client-go/machineconfiguration/clientset/versioned" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" @@ -24,7 +24,7 @@ import ( // ValidateOnClusterBuildConfig validates the existence of the MachineOSConfig and the required build inputs. 
func ValidateOnClusterBuildConfig(kubeclient clientset.Interface, mcfgclient versioned.Interface, layeredMCPs []*mcfgv1.MachineConfigPool) error { // Validate the presence of the MachineOSConfig - machineOSConfigs, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + machineOSConfigs, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -34,7 +34,7 @@ func ValidateOnClusterBuildConfig(kubeclient clientset.Interface, mcfgclient ver } moscForPoolExists := false - var moscForPool *mcfgv1alpha1.MachineOSConfig + var moscForPool *mcfgv1.MachineOSConfig for _, pool := range layeredMCPs { moscForPoolExists = false for _, mosc := range machineOSConfigs.Items { @@ -61,7 +61,7 @@ func ValidateOnClusterBuildConfig(kubeclient clientset.Interface, mcfgclient ver return nil } -func validateMachineOSConfig(mcpGetter func(string) (*mcfgv1.MachineConfigPool, error), secretGetter func(string) (*corev1.Secret, error), mosc *mcfgv1alpha1.MachineOSConfig) error { +func validateMachineOSConfig(mcpGetter func(string) (*mcfgv1.MachineConfigPool, error), secretGetter func(string) (*corev1.Secret, error), mosc *mcfgv1.MachineOSConfig) error { _, err := mcpGetter(mosc.Spec.MachineConfigPool.Name) if err != nil && k8serrors.IsNotFound(err) { return fmt.Errorf("no MachineConfigPool named %s exists for MachineOSConfig %s", mosc.Spec.MachineConfigPool.Name, mosc.Name) @@ -72,9 +72,11 @@ func validateMachineOSConfig(mcpGetter func(string) (*mcfgv1.MachineConfigPool, } secretFields := map[string]string{ - mosc.Spec.BuildInputs.BaseImagePullSecret.Name: "baseImagePullSecret", - mosc.Spec.BuildInputs.RenderedImagePushSecret.Name: "renderedImagePushSecret", - mosc.Spec.BuildOutputs.CurrentImagePullSecret.Name: "currentImagePullSecret", + mosc.Spec.RenderedImagePushSecret.Name: "renderedImagePushSecret", + } + // Add base image pull secret if it has been 
defined in the MOSC + if mosc.Spec.BaseImagePullSecret != nil { + secretFields[mosc.Spec.BaseImagePullSecret.Name] = "baseImagePullSecret" } for secretName, fieldName := range secretFields { @@ -83,14 +85,14 @@ func validateMachineOSConfig(mcpGetter func(string) (*mcfgv1.MachineConfigPool, } } - if _, err := reference.ParseNamed(mosc.Spec.BuildInputs.RenderedImagePushspec); err != nil { - return fmt.Errorf("could not validate renderdImagePushspec %s for MachineOSConfig %s: %w", mosc.Spec.BuildInputs.RenderedImagePushspec, mosc.Name, err) + if _, err := reference.ParseNamed(string(mosc.Spec.RenderedImagePushSpec)); err != nil { + return fmt.Errorf("could not validate renderdImagePushspec %s for MachineOSConfig %s: %w", string(mosc.Spec.RenderedImagePushSpec), mosc.Name, err) } return nil } -func ValidateMachineOSConfigFromListers(mcpLister mcfglistersv1.MachineConfigPoolLister, secretLister corelisterv1.SecretLister, mosc *mcfgv1alpha1.MachineOSConfig) error { +func ValidateMachineOSConfigFromListers(mcpLister mcfglistersv1.MachineConfigPoolLister, secretLister corelisterv1.SecretLister, mosc *mcfgv1.MachineOSConfig) error { mcpGetter := func(name string) (*mcfgv1.MachineConfigPool, error) { return mcpLister.Get(name) } @@ -102,7 +104,7 @@ func ValidateMachineOSConfigFromListers(mcpLister mcfglistersv1.MachineConfigPoo return validateMachineOSConfig(mcpGetter, secretGetter, mosc) } -func validateSecret(secretGetter func(string) (*corev1.Secret, error), mosc *mcfgv1alpha1.MachineOSConfig, secretName string) error { +func validateSecret(secretGetter func(string) (*corev1.Secret, error), mosc *mcfgv1.MachineOSConfig, secretName string) error { if secretName == "" { return fmt.Errorf("no secret name provided") } @@ -123,7 +125,7 @@ func validateSecret(secretGetter func(string) (*corev1.Secret, error), mosc *mcf // Determines if a MachineOSBuild status update is needed. 
These are needed // primarily when we transition from the initial status -> transient state -> // terminal state. -func isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus mcfgv1alpha1.MachineOSBuildStatus) (bool, string) { +func isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus mcfgv1.MachineOSBuildStatus) (bool, string) { oldState := ctrlcommon.NewMachineOSBuildStateFromStatus(oldStatus) curState := ctrlcommon.NewMachineOSBuildStateFromStatus(curStatus) @@ -140,9 +142,11 @@ func isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus mcfgv1alpha1.Machin return true, fmt.Sprintf("transitioned from initial state -> transient state (%s)", curTransientState) } - // From pending -> building. + // From pending -> building, but not building -> pending. if oldState.IsInTransientState() && curState.IsInTransientState() && oldTransientState != curTransientState { - return true, fmt.Sprintf("transitioned from transient state (%s) -> transient state (%s)", oldTransientState, curTransientState) + reason := fmt.Sprintf("transitioned from transient state (%s) -> transient state (%s)", oldTransientState, curTransientState) + isValid := oldTransientState == mcfgv1.MachineOSBuildPrepared && curTransientState == mcfgv1.MachineOSBuilding + return isValid, reason } oldTerminalState := oldState.GetTerminalState() @@ -182,7 +186,7 @@ func isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus mcfgv1alpha1.Machin } // Converts a list of MachineOSConfigs into a list of their names. -func getMachineOSConfigNames(moscList []*mcfgv1alpha1.MachineOSConfig) []string { +func getMachineOSConfigNames(moscList []*mcfgv1.MachineOSConfig) []string { out := []string{} for _, mosc := range moscList { @@ -193,7 +197,7 @@ func getMachineOSConfigNames(moscList []*mcfgv1alpha1.MachineOSConfig) []string } // Converts a list of MachineOSBuilds into a list of their names. 
-func getMachineOSBuildNames(mosbList []*mcfgv1alpha1.MachineOSBuild) []string { +func getMachineOSBuildNames(mosbList []*mcfgv1.MachineOSBuild) []string { out := []string{} for _, mosc := range mosbList { @@ -205,7 +209,7 @@ func getMachineOSBuildNames(mosbList []*mcfgv1alpha1.MachineOSBuild) []string { // Determines if a MachineOSBuild is current for a given MachineOSConfig solely // by looking at the current build annotation on the MachineOSConfig. -func isMachineOSBuildCurrentForMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { +func isMachineOSBuildCurrentForMachineOSConfig(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { // If we don't have the current build annotation, then we cannot even make this determination. if !hasCurrentBuildAnnotation(mosc) { return false @@ -224,23 +228,22 @@ func isMachineOSBuildCurrentForMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfi // considering the current build annotation and the image pullspec. If the // MachineOSBuild has not (yet) set its final image pushspec, this will return // false. -func isMachineOSBuildCurrentForMachineOSConfigWithPullspec(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { +func isMachineOSBuildCurrentForMachineOSConfigWithPullspec(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { // If the MachineOSConfig has the same final image pullspec as // the MachineOSBuild and the MachineOSBuild's pushspec is populated, we know // they're the same. return isMachineOSBuildCurrentForMachineOSConfig(mosc, mosb) && - mosc.Status.CurrentImagePullspec == mosb.Status.FinalImagePushspec && - mosb.Status.FinalImagePushspec != "" + mosc.Status.CurrentImagePullSpec == mosb.Status.DigestedImagePushSpec } // Determines if a given MachineOSConfig has the current build annotation. 
-func hasCurrentBuildAnnotation(mosc *mcfgv1alpha1.MachineOSConfig) bool { - return metav1.HasAnnotation(mosc.ObjectMeta, constants.CurrentMachineOSBuildAnnotationKey) +func hasCurrentBuildAnnotation(mosc *mcfgv1.MachineOSConfig) bool { + return metav1.HasAnnotation(mosc.ObjectMeta, constants.CurrentMachineOSBuildAnnotationKey) && mosc.Annotations[constants.CurrentMachineOSBuildAnnotationKey] != "" } // Determines if a given MachineOSConfig has the current build annotation and // it matches the name of the given MachineOSBuild. -func isCurrentBuildAnnotationEqual(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { +func isCurrentBuildAnnotationEqual(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { if !hasCurrentBuildAnnotation(mosc) { return false } @@ -248,6 +251,11 @@ func isCurrentBuildAnnotationEqual(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcf return mosc.Annotations[constants.CurrentMachineOSBuildAnnotationKey] == mosb.Name } +// Determines if a given MachineOSConfig has the rebuild annotation. +func hasRebuildAnnotation(mosc *mcfgv1.MachineOSConfig) bool { + return metav1.HasAnnotation(mosc.ObjectMeta, constants.RebuildMachineOSConfigAnnotationKey) +} + // Looks at the error chain for the given error and determines if the error // should be ignored or not based upon whether it is a not found error. If it // should be ignored, this will log the error as well as the name and kind of @@ -270,3 +278,14 @@ func ignoreErrIsNotFound(err error) error { // If the error type somehow does not match k8serrors.StatusError, return it. return err } + +// Extracts the namespace and name:tag from an image reference. 
+func extractNSAndNameWithTag(imageRef string) (string, string, error) { + // Split the image reference to give an array of [registry, namespace, name:tag] + parts := strings.SplitN(imageRef, "/", 3) + if len(parts) < 3 { + return "", "", fmt.Errorf("invalid image reference: %s", imageRef) + } + + return parts[1], parts[2], nil +} diff --git a/pkg/controller/build/helpers_test.go b/pkg/controller/build/helpers_test.go index fb2ee11913..b633de80b2 100644 --- a/pkg/controller/build/helpers_test.go +++ b/pkg/controller/build/helpers_test.go @@ -2,10 +2,11 @@ package build import ( "context" + "fmt" "testing" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/stretchr/testify/assert" @@ -16,7 +17,7 @@ import ( func TestValidateOnClusterBuildConfig(t *testing.T) { t.Parallel() - newMosc := func() *mcfgv1alpha1.MachineOSConfig { + newMosc := func() *mcfgv1.MachineOSConfig { lobj := fixtures.NewObjectsForTest("worker") return lobj.MachineOSConfig } @@ -25,7 +26,7 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { name string errExpected bool secretsToDelete []string - mosc func() *mcfgv1alpha1.MachineOSConfig + mosc func() *mcfgv1.MachineOSConfig }{ { name: "happy path", @@ -33,13 +34,13 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { }, { name: "missing secret", - secretsToDelete: []string{"current-image-pull-secret"}, + secretsToDelete: []string{"final-image-push-secret"}, mosc: newMosc, errExpected: true, }, { name: "missing MachineOSConfig", - mosc: func() *mcfgv1alpha1.MachineOSConfig { + mosc: func() *mcfgv1.MachineOSConfig { mosc := newMosc() mosc.Name = "other-machineosconfig" mosc.Spec.MachineConfigPool.Name = "other-machineconfigpool" @@ 
-49,9 +50,9 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { }, { name: "malformed image pullspec", - mosc: func() *mcfgv1alpha1.MachineOSConfig { + mosc: func() *mcfgv1.MachineOSConfig { mosc := newMosc() - mosc.Spec.BuildInputs.RenderedImagePushspec = "malformed-image-pullspec" + mosc.Spec.RenderedImagePushSpec = "malformed-image-pullspec" return mosc }, errExpected: true, @@ -63,9 +64,8 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { t.Parallel() - kubeclient, mcfgclient, lobj, _ := fixtures.GetClientsForTest(t) - - _, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Create(context.TODO(), testCase.mosc(), metav1.CreateOptions{}) + kubeclient, mcfgclient, _, _, lobj, _ := fixtures.GetClientsForTest(t) + _, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Create(context.TODO(), testCase.mosc(), metav1.CreateOptions{}) require.NoError(t, err) for _, secret := range testCase.secretsToDelete { @@ -82,3 +82,119 @@ func TestValidateOnClusterBuildConfig(t *testing.T) { }) } } + +// This test validates that we have correctly identified if the MachineOSBuild +// should be updated based upon comparing the old and current status of the +// MachineOSBuild. It is worth noting that the current MachineOSBuild status +// can come from the imagebuilder.MachineOSBuildStatus() method which maps the +// current job state to the MachineOSBuild state. +func TestIsMachineOSBuildStatusUpdateNeeded(t *testing.T) { + t.Parallel() + + initialConditions := func() map[mcfgv1.BuildProgress][]metav1.Condition { + return map[mcfgv1.BuildProgress][]metav1.Condition{ + // This value is not part of the OCL API and is here solely for testing purposes. 
+ "Initial": apihelpers.MachineOSBuildInitialConditions(), + } + } + + testCases := []struct { + name string + old map[mcfgv1.BuildProgress][]metav1.Condition + current map[mcfgv1.BuildProgress][]metav1.Condition + expected bool + }{ + // These are valid state transitions. In other words, when one of these + // state transitions is identified, the MachineOSBuild status object should + // be updated. + { + name: "Initial -> Terminal", + old: initialConditions(), + current: ctrlcommon.MachineOSBuildTerminalStates(), + expected: true, + }, + { + name: "Initial -> Transient", + old: initialConditions(), + current: ctrlcommon.MachineOSBuildTransientStates(), + expected: true, + }, + { + name: "Transient -> Terminal", + old: ctrlcommon.MachineOSBuildTransientStates(), + current: ctrlcommon.MachineOSBuildTerminalStates(), + expected: true, + }, + { + name: "Pending -> Running", + old: map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuildPrepared: ctrlcommon.MachineOSBuildTransientStates()[mcfgv1.MachineOSBuildPrepared], + }, + current: map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuilding: ctrlcommon.MachineOSBuildTransientStates()[mcfgv1.MachineOSBuilding], + }, + expected: true, + }, + // These are invalid state transitions. In other words, when one of these + // state transitions is observed, the MachineOSBuild object should not be + // updated because they are invalid and make no sense. 
+ { + name: "Terminal -> Initial", + old: ctrlcommon.MachineOSBuildTerminalStates(), + current: initialConditions(), + expected: false, + }, + { + name: "Transient -> Initial", + old: ctrlcommon.MachineOSBuildTransientStates(), + current: initialConditions(), + expected: false, + }, + { + name: "Initial -> Initial", + old: initialConditions(), + current: initialConditions(), + expected: false, + }, + { + name: "Terminal -> Terminal", + old: ctrlcommon.MachineOSBuildTerminalStates(), + current: ctrlcommon.MachineOSBuildTerminalStates(), + expected: false, + }, + { + name: "Running -> Pending", + old: map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuilding: ctrlcommon.MachineOSBuildTransientStates()[mcfgv1.MachineOSBuilding], + }, + current: map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuildPrepared: ctrlcommon.MachineOSBuildTransientStates()[mcfgv1.MachineOSBuildPrepared], + }, + expected: false, + }, + } + + for _, testCase := range testCases { + for oldName, old := range testCase.old { + for currentName, current := range testCase.current { + t.Run(fmt.Sprintf("%s: %s -> %s", testCase.name, oldName, currentName), func(t *testing.T) { + oldStatus := mcfgv1.MachineOSBuildStatus{ + Conditions: old, + } + + curStatus := mcfgv1.MachineOSBuildStatus{ + Conditions: current, + } + + result, reason := isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus) + + if testCase.expected { + assert.True(t, result, reason) + } else { + assert.False(t, result, reason) + } + }) + } + } + } +} diff --git a/pkg/controller/build/imagebuilder/base.go b/pkg/controller/build/imagebuilder/base.go index 7ee978285a..668134e2ee 100644 --- a/pkg/controller/build/imagebuilder/base.go +++ b/pkg/controller/build/imagebuilder/base.go @@ -5,11 +5,13 @@ import ( "errors" "fmt" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgclientset 
"github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" @@ -19,14 +21,14 @@ import ( type baseImageBuilder struct { kubeclient clientset.Interface mcfgclient mcfgclientset.Interface - mosb *mcfgv1alpha1.MachineOSBuild - mosc *mcfgv1alpha1.MachineOSConfig + mosb *mcfgv1.MachineOSBuild + mosc *mcfgv1.MachineOSConfig builder buildrequest.Builder buildrequest buildrequest.BuildRequest } // Constructs a baseImageBuilder, deep-copying objects as needed. -func newBaseImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig, builder buildrequest.Builder) *baseImageBuilder { +func newBaseImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig, builder buildrequest.Builder) *baseImageBuilder { b := &baseImageBuilder{ kubeclient: kubeclient, mcfgclient: mcfgclient, @@ -45,229 +47,13 @@ func newBaseImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientse } // Constructs a baseImageBuilder and also instantiates a Cleaner instance based upon the object state. 
-func newBaseImageBuilderWithCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig, builder buildrequest.Builder) (*baseImageBuilder, Cleaner) { +func newBaseImageBuilderWithCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig, builder buildrequest.Builder) (*baseImageBuilder, Cleaner) { b := newBaseImageBuilder(kubeclient, mcfgclient, mosb, mosc, builder) return b, &cleanerImpl{ baseImageBuilder: b, } } -// Represents the successful conditions for a MachineOSBuild. -func (b *baseImageBuilder) succeededConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionFalse, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionFalse, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionFalse, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionFalse, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionTrue, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - -// Represents the pending conditions for a MachineOSBuild. 
-func (b *baseImageBuilder) pendingConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionTrue, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionFalse, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionFalse, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionFalse, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionFalse, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - -// Represents the running conditions for a MachineOSBuild. -func (b *baseImageBuilder) runningConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionFalse, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionTrue, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionFalse, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionFalse, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionFalse, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - -// Represents the failure conditions for a MachineOSBuild. 
-func (b *baseImageBuilder) failedConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionFalse, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionFalse, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionTrue, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionFalse, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionFalse, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - -// Represents the interrupted conditions for a MachineOSBuild. -func (b *baseImageBuilder) interruptedConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionFalse, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionFalse, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionFalse, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionTrue, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionFalse, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - -// Represents the initial MachineOSBuild state (all conditions false). 
-func (b *baseImageBuilder) initialConditions() []metav1.Condition { - return []metav1.Condition{ - { - Type: string(mcfgv1alpha1.MachineOSBuildPrepared), - Status: metav1.ConditionFalse, - Reason: "Prepared", - Message: "Build Prepared and Pending", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuilding), - Status: metav1.ConditionFalse, - Reason: "Building", - Message: "Image Build In Progress", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildFailed), - Status: metav1.ConditionFalse, - Reason: "Failed", - Message: "Build Failed", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildInterrupted), - Status: metav1.ConditionFalse, - Reason: "Interrupted", - Message: "Build Interrupted", - }, - { - Type: string(mcfgv1alpha1.MachineOSBuildSucceeded), - Status: metav1.ConditionFalse, - Reason: "Ready", - Message: "Build Ready", - }, - } -} - // Represents a builder object that has a GroupVersionKind method on it; which // anything that has metav1.TypeMeta instance included should have.. type kubeObject interface { @@ -278,35 +64,45 @@ type kubeObject interface { // Computes the MachineOSBuild status given the build status as well as the // conditions. Also fetches the final image pullspec from the digestfile // ConfigMap. 
-func (b *baseImageBuilder) getMachineOSBuildStatus(ctx context.Context, obj kubeObject, buildStatus mcfgv1alpha1.BuildProgress, conditions []metav1.Condition) (mcfgv1alpha1.MachineOSBuildStatus, error) { +func (b *baseImageBuilder) getMachineOSBuildStatus(ctx context.Context, obj kubeObject, buildStatus mcfgv1.BuildProgress, conditions []metav1.Condition) (mcfgv1.MachineOSBuildStatus, error) { now := metav1.Now() - out := mcfgv1alpha1.MachineOSBuildStatus{} + out := mcfgv1.MachineOSBuildStatus{} out.BuildStart = &now - if buildStatus == mcfgv1alpha1.MachineOSBuildSucceeded || buildStatus == mcfgv1alpha1.MachineOSBuildFailed || buildStatus == mcfgv1alpha1.MachineOSBuildInterrupted { + if buildStatus == mcfgv1.MachineOSBuildSucceeded || buildStatus == mcfgv1.MachineOSBuildFailed || buildStatus == mcfgv1.MachineOSBuildInterrupted { out.BuildEnd = &now } - if buildStatus == mcfgv1alpha1.MachineOSBuildSucceeded { + // In this scenario, the build is in a terminal state, but we don't know + // when it started since the machine-os-builder pod may have been offline. + // In this case, we should get the creation timestamp from the builder + // object and use that as the start time instead of now since the buildEnd + // must be after the buildStart time. + if out.BuildStart == &now && out.BuildEnd == &now { + jobCreationTimestamp := obj.GetCreationTimestamp() + out.BuildStart = &jobCreationTimestamp + } + + if buildStatus == mcfgv1.MachineOSBuildSucceeded { pullspec, err := b.getFinalImagePullspec(ctx) if err != nil { return out, err } - out.FinalImagePushspec = pullspec + out.DigestedImagePushSpec = mcfgv1.ImageDigestFormat(pullspec) } out.Conditions = conditions - out.BuilderReference = &mcfgv1alpha1.MachineOSBuilderReference{ - ImageBuilderType: mcfgv1alpha1.PodBuilder, + out.Builder = &mcfgv1.MachineOSBuilderReference{ + ImageBuilderType: mcfgv1.JobBuilder, // TODO: Should we clear this whenever the build is complete? 
- PodImageBuilder: &mcfgv1alpha1.ObjectReference{ + Job: &mcfgv1.ObjectReference{ Name: obj.GetName(), - Group: obj.GroupVersionKind().Group, + Group: batchv1.SchemeGroupVersion.Group, Namespace: obj.GetNamespace(), - Resource: obj.GetResourceVersion(), + Resource: "jobs", }, } @@ -351,9 +147,9 @@ func (b *baseImageBuilder) getFinalImagePullspec(ctx context.Context) (string, e return "", fmt.Errorf("could not get final image digest configmap %q: %w", name, err) } - sha, err := utils.ParseImagePullspec(b.mosc.Spec.BuildInputs.RenderedImagePushspec, digestConfigMap.Data["digest"]) + sha, err := utils.ParseImagePullspec(string(b.mosc.Spec.RenderedImagePushSpec), digestConfigMap.Data["digest"]) if err != nil { - return "", fmt.Errorf("could not create digested image pullspec from the pullspec %q and the digest %q: %w", b.mosc.Status.CurrentImagePullspec, digestConfigMap.Data["digest"], err) + return "", fmt.Errorf("could not create digested image pullspec from the pullspec %q and the digest %q: %w", b.mosc.Status.CurrentImagePullSpec, digestConfigMap.Data["digest"], err) } return sha, nil @@ -376,7 +172,17 @@ func (b *baseImageBuilder) getMachineOSConfigName() (string, error) { return b.mosc.Name, nil } - return b.builder.MachineOSBuild() + return b.builder.MachineOSConfig() +} + +// Gets the UID of the builder by either checking the MOSB annotation or +// getting it directly from the Builder object. 
+func (b *baseImageBuilder) getBuilderUID() (string, error) { + if b.mosb != nil { + return b.mosb.GetAnnotations()[constants.JobUIDAnnotationKey], nil + } + + return b.builder.BuilderUID() } // Gets the name of the builder execution unit by diff --git a/pkg/controller/build/imagebuilder/base_test.go b/pkg/controller/build/imagebuilder/base_test.go deleted file mode 100644 index b0ff14e9bc..0000000000 --- a/pkg/controller/build/imagebuilder/base_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package imagebuilder - -import ( - "context" - "testing" - "time" - - "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestBase(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - kubeclient, mcfgclient, lobj, kubeassert := fixtures.GetClientsForTest(t) - kubeassert = kubeassert.WithContext(ctx) - - base := newBaseImageBuilder(kubeclient, mcfgclient, lobj.MachineOSBuild, lobj.MachineOSConfig, nil) - - terminalConditions := [][]metav1.Condition{ - base.succeededConditions(), - base.failedConditions(), - base.interruptedConditions(), - } - - for _, terminalCondition := range terminalConditions { - mosb := lobj.MachineOSBuild.DeepCopy() - mosb.Status.Conditions = terminalCondition - mosbState := ctrlcommon.NewMachineOSBuildState(mosb) - assert.True(t, mosbState.IsInTerminalState()) - assert.False(t, mosbState.IsInTransientState()) - assert.False(t, mosbState.IsInInitialState()) - } - - transientConditions := [][]metav1.Condition{ - base.pendingConditions(), - base.runningConditions(), - } - - for _, transientCondition := range transientConditions { - mosb := lobj.MachineOSBuild.DeepCopy() - mosb.Status.Conditions = transientCondition - mosbState := ctrlcommon.NewMachineOSBuildState(mosb) - assert.True(t, 
mosbState.IsInTransientState()) - assert.False(t, mosbState.IsInInitialState()) - assert.False(t, mosbState.IsInTerminalState()) - } - - mosb := lobj.MachineOSBuild.DeepCopy() - mosb.Status.Conditions = base.initialConditions() - mosbState := ctrlcommon.NewMachineOSBuildState(mosb) - assert.False(t, mosbState.IsInTransientState()) - assert.True(t, mosbState.IsInInitialState()) - assert.False(t, mosbState.IsInTerminalState()) -} diff --git a/pkg/controller/build/imagebuilder/cleaner.go b/pkg/controller/build/imagebuilder/cleaner.go index 6541f2afe9..da9a0f8136 100644 --- a/pkg/controller/build/imagebuilder/cleaner.go +++ b/pkg/controller/build/imagebuilder/cleaner.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" @@ -26,7 +26,7 @@ type cleanerImpl struct { // Constructs an instance of the cleaner from the MachineOSBuild and // MachineOSConfig objects. It is possible that the MachineOSConfig can be nil, // which this tolerates. 
-func newCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) Cleaner { +func newCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) Cleaner { return &cleanerImpl{ baseImageBuilder: newBaseImageBuilder(kubeclient, mcfgclient, mosb, mosc, nil), } @@ -47,6 +47,10 @@ func (c *cleanerImpl) Clean(ctx context.Context) error { if err != nil { return err } + mosbJobUIDAnnotation, err := c.getBuilderUID() + if err != nil { + return err + } selector, err := c.getSelectorForDeletion() if err != nil { @@ -58,13 +62,12 @@ func (c *cleanerImpl) Clean(ctx context.Context) error { configmaps, err := c.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{ LabelSelector: selector.String(), }) - if err != nil { return fmt.Errorf("could not list configmaps: %w", err) } for _, configmap := range configmaps.Items { - if err := c.deleteConfigMap(ctx, configmap.Name, mosbName); err != nil { + if err := c.deleteConfigMap(ctx, configmap.Name, mosbName, mosbJobUIDAnnotation); err != nil { return fmt.Errorf("could not delete ephemeral configmap %s: %w", configmap.Name, err) } } @@ -72,13 +75,12 @@ func (c *cleanerImpl) Clean(ctx context.Context) error { secrets, err := c.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{ LabelSelector: selector.String(), }) - if err != nil { return fmt.Errorf("could not list secrets: %w", err) } for _, secret := range secrets.Items { - if err := c.deleteSecret(ctx, secret.Name, mosbName); err != nil { + if err := c.deleteSecret(ctx, secret.Name, mosbName, mosbJobUIDAnnotation); err != nil { return fmt.Errorf("could not delete ephemeral configmap %s: %w", secret.Name, err) } } @@ -88,29 +90,54 @@ func (c *cleanerImpl) Clean(ctx context.Context) error { // Deletes a given ConfigMap and tolerates that it was not found so that 
if // this is called more than once, it will not error. -func (c *cleanerImpl) deleteConfigMap(ctx context.Context, cmName, mosbName string) error { - err := c.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, cmName, metav1.DeleteOptions{}) - if err == nil { - klog.Infof("Deleted ephemeral ConfigMap %q for build %q", cmName, mosbName) +func (c *cleanerImpl) deleteConfigMap(ctx context.Context, cmName, mosbName, mosbJobUIDAnnotation string) error { + cm, err := c.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, cmName, metav1.GetOptions{}) + if err != nil { + return err + } + if k8serrors.IsNotFound(err) { return nil } + // Ensure that we delete the correct configmap for the Job we are cleaning up + if !hasOwnerRefWithUID(cm.ObjectMeta, mosbJobUIDAnnotation) { + return nil + } + + err = c.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, cm.Name, metav1.DeleteOptions{}) + if err == nil { + klog.Infof("Deleted ephemeral ConfigMap %q for build %q", cm.Name, mosbName) + return nil + } if k8serrors.IsNotFound(err) { return nil } return err + } // Deletes a given Secret and tolerates that it was not found so that if // this is called more than once, it will not error. 
-func (c *cleanerImpl) deleteSecret(ctx context.Context, secretName, mosbName string) error { - err := c.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Delete(ctx, secretName, metav1.DeleteOptions{}) - if err == nil { - klog.Infof("Deleted ephemeral secret %q for build %q", secretName, mosbName) +func (c *cleanerImpl) deleteSecret(ctx context.Context, secretName, mosbName, mosbJobUIDAnnotation string) error { + secret, err := c.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return err + } + if k8serrors.IsNotFound(err) { return nil } + // Ensure that we are deleting the correct secret for the Job we are cleaning up + if !hasOwnerRefWithUID(secret.ObjectMeta, mosbJobUIDAnnotation) { + return nil + } + + err = c.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Delete(ctx, secret.Name, metav1.DeleteOptions{}) + if err == nil { + klog.Infof("Deleted ephemeral Secret %q for build %q", secret.Name, mosbName) + return nil + } if k8serrors.IsNotFound(err) { return nil } @@ -153,3 +180,19 @@ func ephemeralBuildObjectSelectorForBuilder(builder buildrequest.Builder) (label constants.MachineOSConfigNameLabelKey: moscName, }), nil } + +// hasOwnerRefWithUID returns true if the provided object has an +// owner reference with the provided UID +func hasOwnerRefWithUID(obj metav1.ObjectMeta, uid string) bool { + if obj.OwnerReferences == nil { + return false + } + + for _, owner := range obj.OwnerReferences { + if string(owner.UID) == uid { + return true + } + } + + return false +} diff --git a/pkg/controller/build/imagebuilder/interfaces.go b/pkg/controller/build/imagebuilder/interfaces.go index ff9af52927..2383cd2e6b 100644 --- a/pkg/controller/build/imagebuilder/interfaces.go +++ b/pkg/controller/build/imagebuilder/interfaces.go @@ -3,7 +3,7 @@ package imagebuilder import ( "context" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 
"github.com/openshift/api/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" ) @@ -35,6 +35,6 @@ type Cleaner interface { // MachineOSBuildStatus object. type ImageBuildObserver interface { Exists(context.Context) (bool, error) - Status(context.Context) (mcfgv1alpha1.BuildProgress, error) - MachineOSBuildStatus(context.Context) (mcfgv1alpha1.MachineOSBuildStatus, error) + Status(context.Context) (mcfgv1.BuildProgress, error) + MachineOSBuildStatus(context.Context) (mcfgv1.MachineOSBuildStatus, error) } diff --git a/pkg/controller/build/imagebuilder/jobimagebuilder.go b/pkg/controller/build/imagebuilder/jobimagebuilder.go index caaa3af217..15b28fc588 100644 --- a/pkg/controller/build/imagebuilder/jobimagebuilder.go +++ b/pkg/controller/build/imagebuilder/jobimagebuilder.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" batchv1 "k8s.io/api/batch/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,7 +24,7 @@ type jobImageBuilder struct { cleaner Cleaner } -func newJobImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig, builder buildrequest.Builder) *jobImageBuilder { +func newJobImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig, builder buildrequest.Builder) *jobImageBuilder { b, c := 
newBaseImageBuilderWithCleaner(kubeclient, mcfgclient, mosb, mosc, builder) return &jobImageBuilder{ baseImageBuilder: b, @@ -31,23 +33,23 @@ func newJobImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset } // Instantiates a ImageBuildObserver using the MachineOSBuild and MachineOSConfig objects. -func NewJobImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) ImageBuilder { +func NewJobImageBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) ImageBuilder { return newJobImageBuilder(kubeclient, mcfgclient, mosb, mosc, nil) } // Instantiates an ImageBuildObserver using the MachineOSBuild and MachineOSConfig objects. -func NewJobImageBuildObserver(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) ImageBuildObserver { +func NewJobImageBuildObserver(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) ImageBuildObserver { return newJobImageBuilder(kubeclient, mcfgclient, mosb, mosc, nil) } // Instantiates an ImageBuildObserver which infers the MachineOSBuild state // from the provided builder object -func NewJobImageBuildObserverFromBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig, builder buildrequest.Builder) ImageBuildObserver { +func NewJobImageBuildObserverFromBuilder(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig, builder buildrequest.Builder) ImageBuildObserver { return newJobImageBuilder(kubeclient, mcfgclient, mosb, mosc, builder) } // Instantiates a Cleaner using only the MachineOSBuild object. 
-func NewJobImageBuildCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild) Cleaner { +func NewJobImageBuildCleaner(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild) Cleaner { return newJobImageBuilder(kubeclient, mcfgclient, mosb, nil, nil) } @@ -101,6 +103,45 @@ func (j *jobImageBuilder) start(ctx context.Context) (*batchv1.Job, error) { bj, err := j.kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Create(ctx, buildJob, metav1.CreateOptions{}) if err == nil { klog.Infof("Build job %q created for MachineOSBuild %q", bj.Name, mosbName) + + // Set the job UID as an annotation in the MOSB + if j.mosb != nil { + metav1.SetMetaDataAnnotation(&j.mosb.ObjectMeta, constants.JobUIDAnnotationKey, string(bj.UID)) + // Update the MOSB with the new annotations + _, err := j.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Update(ctx, j.mosb, metav1.UpdateOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return nil, fmt.Errorf("could not update MachineOSBuild %s with job UID annotation: %w", mosbName, err) + } + } + + // Set the owner reference of the configmaps and secrets created to be the Job + // Set blockOwnerDeletion and Controller to false as Job ownership doesn't work when set to true + oref := metav1.NewControllerRef(bj, batchv1.SchemeGroupVersion.WithKind("Job")) + falseBool := false + oref.BlockOwnerDeletion = &falseBool + oref.Controller = &falseBool + + cms, err := j.buildrequest.ConfigMaps() + if err != nil { + return nil, err + } + for _, cm := range cms { + cm.SetOwnerReferences([]metav1.OwnerReference{*oref}) + if _, err := j.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(ctx, cm, metav1.UpdateOptions{}); err != nil { + return nil, err + } + } + + secrets, err := j.buildrequest.Secrets() + if err != nil { + return nil, err + } + for _, secret := range secrets { + secret.SetOwnerReferences([]metav1.OwnerReference{*oref}) + if 
_, err := j.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Update(ctx, secret, metav1.UpdateOptions{}); err != nil { + return nil, err + } + } return bj, nil } @@ -149,7 +190,7 @@ func (j *jobImageBuilder) Exists(ctx context.Context) (bool, error) { } // Gets the MachineOSBuildStatus for the currently running build. -func (j *jobImageBuilder) MachineOSBuildStatus(ctx context.Context) (mcfgv1alpha1.MachineOSBuildStatus, error) { +func (j *jobImageBuilder) MachineOSBuildStatus(ctx context.Context) (mcfgv1.MachineOSBuildStatus, error) { status, err := j.machineOSBuildStatus(ctx) if err != nil { return status, j.addMachineOSBuildNameToError(fmt.Errorf("could not get MachineOSBuildStatus: %w", err)) @@ -161,54 +202,58 @@ func (j *jobImageBuilder) MachineOSBuildStatus(ctx context.Context) (mcfgv1alpha // Gets the build job from either the provided builder (if present) or the API server. func (j *jobImageBuilder) getBuildJobFromBuilderOrAPI(ctx context.Context) (*batchv1.Job, error) { if j.builder != nil { - klog.V(4).Infof("Using provided build job") - if err := j.validateBuilderType(j.builder); err != nil { return nil, fmt.Errorf("could not get build job from builder: %w", err) } job := j.builder.GetObject().(*batchv1.Job) - return job, nil + + // Ensure that the job UID matches the jobUID annotation in the MOSB so that + // we know that we are using the correct job to set the status of the MOSB + if jobIsForMOSB(job, j.mosb) { + klog.V(4).Infof("Using provided build job %s", string(job.UID)) + return job, nil + } } - klog.V(4).Infof("Using build job from API") job, err := j.getBuildJobStrict(ctx) if err != nil { - return nil, fmt.Errorf("could not get build pod from API: %w", err) + return nil, fmt.Errorf("could not get build job from API: %w", err) } + klog.V(4).Infof("Using build job from API %s", string(job.UID)) return job, nil } -func (j *jobImageBuilder) getStatus(ctx context.Context) (*batchv1.Job, mcfgv1alpha1.BuildProgress, []metav1.Condition, error) { 
+func (j *jobImageBuilder) getStatus(ctx context.Context) (*batchv1.Job, mcfgv1.BuildProgress, []metav1.Condition, error) { job, err := j.getBuildJobFromBuilderOrAPI(ctx) if err != nil { return nil, "", nil, err } - status, conditions := j.mapJobStatusToBuildStatus(job) + status, conditions := MapJobStatusToBuildStatus(job) klog.Infof("Build job %q status %+v mapped to MachineOSBuild progress %q", job.Name, job.Status, status) return job, status, conditions, nil } -func (j *jobImageBuilder) machineOSBuildStatus(ctx context.Context) (mcfgv1alpha1.MachineOSBuildStatus, error) { +func (j *jobImageBuilder) machineOSBuildStatus(ctx context.Context) (mcfgv1.MachineOSBuildStatus, error) { job, status, conditions, err := j.getStatus(ctx) if err != nil { - return mcfgv1alpha1.MachineOSBuildStatus{}, err + return mcfgv1.MachineOSBuildStatus{}, err } buildStatus, err := j.getMachineOSBuildStatus(ctx, job, status, conditions) if err != nil { - return mcfgv1alpha1.MachineOSBuildStatus{}, err + return mcfgv1.MachineOSBuildStatus{}, err } return buildStatus, nil } // Gets only the build progress field for a currently running build. 
-func (j *jobImageBuilder) Status(ctx context.Context) (mcfgv1alpha1.BuildProgress, error) { +func (j *jobImageBuilder) Status(ctx context.Context) (mcfgv1.BuildProgress, error) { _, status, _, err := j.getStatus(ctx) if err != nil { return status, j.addMachineOSBuildNameToError(fmt.Errorf("could not get BuildProgress: %w", err)) @@ -217,32 +262,6 @@ func (j *jobImageBuilder) Status(ctx context.Context) (mcfgv1alpha1.BuildProgres return status, nil } -func (j *jobImageBuilder) mapJobStatusToBuildStatus(job *batchv1.Job) (mcfgv1alpha1.BuildProgress, []metav1.Condition) { - // If the job is being deleted and it was not in either a successful or failed state - // then the MachineOSBuild should be considered "interrupted" - if job.DeletionTimestamp != nil && job.Status.Succeeded == 0 && job.Status.Failed == 0 { - return mcfgv1alpha1.MachineOSBuildInterrupted, j.interruptedConditions() - } - - if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 && job.Status.UncountedTerminatedPods == nil { - return mcfgv1alpha1.MachineOSBuildPrepared, j.pendingConditions() - } - // The build job is still running till it succeeds or maxes out it retries on failures - if job.Status.Active >= 0 && job.Status.Failed >= 0 && job.Status.Failed < 4 && job.Status.Succeeded == 0 { - return mcfgv1alpha1.MachineOSBuilding, j.runningConditions() - } - if job.Status.Succeeded > 0 { - return mcfgv1alpha1.MachineOSBuildSucceeded, j.succeededConditions() - } - // Only return failed if there have been 4 pod failures as the backoffLimit is set to 3 - if job.Status.Failed > 3 { - return mcfgv1alpha1.MachineOSBuildFailed, j.failedConditions() - - } - - return "", j.initialConditions() -} - // Stops the running build by deleting the build job. 
func (j *jobImageBuilder) Stop(ctx context.Context) error { if err := j.stop(ctx); err != nil { @@ -255,15 +274,26 @@ func (j *jobImageBuilder) Stop(ctx context.Context) error { func (j *jobImageBuilder) stop(ctx context.Context) error { mosbName, err := j.getMachineOSBuildName() if err != nil { - return fmt.Errorf("could not stop build job: %w", err) + return fmt.Errorf("could not get MachineOSBuild name to stop build job: %w", err) } - buildJobName := j.getBuilderName() + buildJob, err := j.getBuildJobFromBuilderOrAPI(ctx) + if k8serrors.IsNotFound(err) { + return nil + } + if err != nil { + return fmt.Errorf("could not get Job to stop build job: %w", err) + } + // Ensure that the job being deleted is for the MOSB we are currently reconciling + if !jobIsForMOSB(buildJob, j.mosb) { + klog.Infof("Build job %q with UID %s is not owned by MachineOSBuild %q, will not delete", buildJob.Name, buildJob.UID, mosbName) + return nil + } propagationPolicy := metav1.DeletePropagationForeground - err = j.kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Delete(ctx, buildJobName, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + err = j.kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Delete(ctx, buildJob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) if err == nil { - klog.Infof("Deleted build job %s for MachineOSBuild %s", buildJobName, mosbName) + klog.Infof("Deleted build job %s for MachineOSBuild %s", buildJob.Name, mosbName) return nil } @@ -271,7 +301,7 @@ func (j *jobImageBuilder) stop(ctx context.Context) error { return nil } - return fmt.Errorf("could not delete build job %s for MachineOSBuild %s", buildJobName, mosbName) + return fmt.Errorf("could not delete build job %s for MachineOSBuild %s", buildJob.Name, mosbName) } // Stops the running build by calling Stop() and also removes all of the @@ -294,3 +324,43 @@ func (j *jobImageBuilder) validateBuilderType(builder buildrequest.Builder) erro return fmt.Errorf("invalid type %T from 
builder, expected %T", j.builder, &batchv1.Job{}) } + +// Maps a given batchv1.Job to a given MachineOSBuild status. Exported so that it can be used in e2e tests. +func MapJobStatusToBuildStatus(job *batchv1.Job) (mcfgv1.BuildProgress, []metav1.Condition) { + // If the job is being deleted and it was not in either a successful or failed state + // then the MachineOSBuild should be considered "interrupted" + if job.DeletionTimestamp != nil && job.Status.Succeeded == 0 && job.Status.Failed < constants.JobMaxRetries+1 { + return mcfgv1.MachineOSBuildInterrupted, apihelpers.MachineOSBuildInterruptedConditions() + } + + if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 && job.Status.UncountedTerminatedPods == nil { + return mcfgv1.MachineOSBuildPrepared, apihelpers.MachineOSBuildPendingConditions() + } + // The build job is still running till it succeeds or maxes out it retries on failures + if job.Status.Active >= 0 && job.Status.Failed >= 0 && job.Status.Failed < constants.JobMaxRetries+1 && job.Status.Succeeded == 0 { + return mcfgv1.MachineOSBuilding, apihelpers.MachineOSBuildRunningConditions() + } + if job.Status.Succeeded > 0 { + return mcfgv1.MachineOSBuildSucceeded, apihelpers.MachineOSBuildSucceededConditions() + } + // Only return failed if there have been 4 pod failures as the backoffLimit is set to 3 + if job.Status.Failed > constants.JobMaxRetries { + return mcfgv1.MachineOSBuildFailed, apihelpers.MachineOSBuildFailedConditions() + + } + + return "", apihelpers.MachineOSBuildInitialConditions() +} + +// Returns true if the provided job UID matches the job UID annotation in the provided MachineOSBuild +func jobIsForMOSB(job *batchv1.Job, mosb *mcfgv1.MachineOSBuild) bool { + if mosb == nil { + return false + } + + if string(job.UID) != mosb.GetAnnotations()[constants.JobUIDAnnotationKey] { + return false + } + + return true +} diff --git a/pkg/controller/build/imagebuilder/jobimagebuilder_test.go 
b/pkg/controller/build/imagebuilder/jobimagebuilder_test.go index 7262051adc..b05c23304b 100644 --- a/pkg/controller/build/imagebuilder/jobimagebuilder_test.go +++ b/pkg/controller/build/imagebuilder/jobimagebuilder_test.go @@ -8,10 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -31,7 +33,7 @@ func TestJobImageBuilder(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - kubeclient, mcfgclient, lobj, kubeassert := fixtures.GetClientsForTest(t) + kubeclient, mcfgclient, _, _, lobj, kubeassert := fixtures.GetClientsForTest(t) kubeassert = kubeassert.WithContext(ctx) jim := NewJobImageBuilder(kubeclient, mcfgclient, lobj.MachineOSBuild, lobj.MachineOSConfig) @@ -100,7 +102,7 @@ func TestJobImageBuilder(t *testing.T) { js: fixtures.JobStatus{ Active: 0, Succeeded: 0, - Failed: 4, + Failed: constants.JobMaxRetries + 1, }, }, } @@ -127,8 +129,6 @@ func TestJobImageBuilder(t *testing.T) { kubeassert.Now().JobDoesNotExist(buildJobName) assertObjectsAreRemovedByCleaner(ctx, t, kubeassert, jim.(*jobImageBuilder).buildrequest) - - require.NoError(t, jim.Stop(ctx)) } // Ensures that the build states are appropriately mapped within the common @@ -146,27 +146,27 @@ func assertMachineOSBuildStateMapsToCommonState(ctx 
context.Context, t *testing. // These are states where the MachineOSBuild may transition from either these // states or to a terminal state. - transientBuildStates := map[mcfgv1alpha1.BuildProgress]struct{}{ - mcfgv1alpha1.MachineOSBuildPrepared: {}, - mcfgv1alpha1.MachineOSBuilding: {}, + transientBuildStates := map[mcfgv1.BuildProgress]struct{}{ + mcfgv1.MachineOSBuildPrepared: {}, + mcfgv1.MachineOSBuilding: {}, } // A terminal state is one where the MachineOSBuild cannot transition to any // other state. It is considered the "final" state. - terminalBuildStates := map[mcfgv1alpha1.BuildProgress]struct{}{ - mcfgv1alpha1.MachineOSBuildFailed: {}, - mcfgv1alpha1.MachineOSBuildSucceeded: {}, - mcfgv1alpha1.MachineOSBuildInterrupted: {}, + terminalBuildStates := map[mcfgv1.BuildProgress]struct{}{ + mcfgv1.MachineOSBuildFailed: {}, + mcfgv1.MachineOSBuildSucceeded: {}, + mcfgv1.MachineOSBuildInterrupted: {}, } // Map of the build state to each function that should return true when the // MachineOSBuild is in that particular state. - mosbStateFuncs := map[mcfgv1alpha1.BuildProgress]func() bool{ - mcfgv1alpha1.MachineOSBuildPrepared: mosbState.IsBuildPrepared, - mcfgv1alpha1.MachineOSBuilding: mosbState.IsBuilding, - mcfgv1alpha1.MachineOSBuildFailed: mosbState.IsBuildFailure, - mcfgv1alpha1.MachineOSBuildSucceeded: mosbState.IsBuildSuccess, - mcfgv1alpha1.MachineOSBuildInterrupted: mosbState.IsBuildInterrupted, + mosbStateFuncs := map[mcfgv1.BuildProgress]func() bool{ + mcfgv1.MachineOSBuildPrepared: mosbState.IsBuildPrepared, + mcfgv1.MachineOSBuilding: mosbState.IsBuilding, + mcfgv1.MachineOSBuildFailed: mosbState.IsBuildFailure, + mcfgv1.MachineOSBuildSucceeded: mosbState.IsBuildSuccess, + mcfgv1.MachineOSBuildInterrupted: mosbState.IsBuildInterrupted, } // Iterate through all of the known states and call the function from the helper library. @@ -192,11 +192,11 @@ func assertMachineOSBuildStateMapsToCommonState(ctx context.Context, t *testing. 
} func assertObserverCanGetJobStatus(ctx context.Context, t *testing.T, obs ImageBuildObserver, jobPhase string) { - buildprogressToJobPhases := map[mcfgv1alpha1.BuildProgress]string{ - mcfgv1alpha1.MachineOSBuildPrepared: jobPending, - mcfgv1alpha1.MachineOSBuilding: jobRunning, - mcfgv1alpha1.MachineOSBuildFailed: jobFailed, - mcfgv1alpha1.MachineOSBuildSucceeded: jobSucceeded, + buildprogressToJobPhases := map[mcfgv1.BuildProgress]string{ + mcfgv1.MachineOSBuildPrepared: jobPending, + mcfgv1.MachineOSBuilding: jobRunning, + mcfgv1.MachineOSBuildFailed: jobFailed, + mcfgv1.MachineOSBuildSucceeded: jobSucceeded, } buildprogress, err := obs.Status(ctx) @@ -209,11 +209,13 @@ func assertObserverCanGetJobStatus(ctx context.Context, t *testing.T, obs ImageB assert.True(t, apihelpers.IsMachineOSBuildConditionTrue(mosbStatus.Conditions, buildprogress)) - assert.NotNil(t, mosbStatus.BuilderReference) + assert.NotNil(t, mosbStatus.Builder) + + assert.NotNil(t, mosbStatus.BuildStart) if jobPhase == jobSucceeded { assert.NotNil(t, mosbStatus.BuildEnd) - assert.Equal(t, "registry.hostname.com/org/repo@sha256:e1992921cba73d9e74e46142eca5946df8a895bfd4419fc8b5c6422d5e7192e6", mosbStatus.FinalImagePushspec) + assert.Equal(t, "registry.hostname.com/org/repo@sha256:e1992921cba73d9e74e46142eca5946df8a895bfd4419fc8b5c6422d5e7192e6", string(mosbStatus.DigestedImagePushSpec)) } assertMachineOSBuildStateMapsToCommonState(ctx, t, obs) @@ -225,7 +227,7 @@ func TestJobImageBuilderCanCleanWithOnlyMachineOSBuild(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - kubeclient, mcfgclient, lobj, kubeassert := fixtures.GetClientsForTest(t) + kubeclient, mcfgclient, _, _, lobj, kubeassert := fixtures.GetClientsForTest(t) kubeassert = kubeassert.WithContext(ctx) jim := NewJobImageBuilder(kubeclient, mcfgclient, lobj.MachineOSBuild, lobj.MachineOSConfig) @@ -235,11 +237,54 @@ func TestJobImageBuilderCanCleanWithOnlyMachineOSBuild(t 
*testing.T) { buildJobName := utils.GetBuildJobName(lobj.MachineOSBuild) kubeassert.JobExists(buildJobName) - assertObjectsAreCreatedByPreparer(ctx, t, kubeassert, jim.(*jobImageBuilder).buildrequest) + buildReq := jim.(*jobImageBuilder).buildrequest + assertObjectsAreCreatedByPreparer(ctx, t, kubeassert, buildReq) + setOwnerForObjects(ctx, kubeclient, buildReq) + + job, err := kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Get(ctx, buildJobName, metav1.GetOptions{}) + require.NoError(t, err) + job.SetUID(types.UID(fixtures.JobUID)) + _, err = kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Update(ctx, job, metav1.UpdateOptions{}) + require.NoError(t, err) cleaner := NewJobImageBuildCleaner(kubeclient, mcfgclient, lobj.MachineOSBuild) assert.NoError(t, cleaner.Clean(ctx)) kubeassert.JobDoesNotExist(buildJobName) - assertObjectsAreRemovedByCleaner(ctx, t, kubeassert, jim.(*jobImageBuilder).buildrequest) + assertObjectsAreRemovedByCleaner(ctx, t, kubeassert, buildReq) +} + +func TestJobImageBuilderSetsBuildStartAndEndTimestamp(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + kubeclient, mcfgclient, _, _, lobj, _ := fixtures.GetClientsForTest(t) + + jim := NewJobImageBuilder(kubeclient, mcfgclient, lobj.MachineOSBuild, lobj.MachineOSConfig) + + assert.NoError(t, jim.Start(ctx)) + + job, err := kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Get(ctx, utils.GetBuildJobName(lobj.MachineOSBuild), metav1.GetOptions{}) + require.NoError(t, err) + + // Set -60 seconds so that we can get a time that is one minute before the current time. 
+ jobStartTime := time.Now().Add(time.Second * -60) + job.SetCreationTimestamp(metav1.NewTime(jobStartTime)) + + _, err = kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Update(ctx, job, metav1.UpdateOptions{}) + require.NoError(t, err) + + fixtures.SetJobStatus(ctx, t, kubeclient, lobj.MachineOSBuild, fixtures.JobStatus{Succeeded: 1}) + + status, err := jim.MachineOSBuildStatus(ctx) + require.NoError(t, err) + + assert.NotNil(t, status.BuildStart) + assert.NotNil(t, status.BuildEnd) + + assert.True(t, status.BuildStart.Before(status.BuildEnd)) + assert.GreaterOrEqual(t, status.BuildEnd.Time.Sub(status.BuildStart.Time), time.Second*60) + assert.Equal(t, status.BuildStart.Time, jobStartTime) } diff --git a/pkg/controller/build/imagebuilder/preparer.go b/pkg/controller/build/imagebuilder/preparer.go index 84cd15f705..22a6ad0880 100644 --- a/pkg/controller/build/imagebuilder/preparer.go +++ b/pkg/controller/build/imagebuilder/preparer.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" @@ -22,13 +22,13 @@ import ( // object knows how to destroy all of the objects that it creates. It does so // by using a specific label query. 
type preparerImpl struct { - mosb *mcfgv1alpha1.MachineOSBuild - mosc *mcfgv1alpha1.MachineOSConfig + mosb *mcfgv1.MachineOSBuild + mosc *mcfgv1.MachineOSConfig kubeclient clientset.Interface mcfgclient mcfgclientset.Interface } -func NewPreparer(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) Preparer { +func NewPreparer(kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) Preparer { return &preparerImpl{ kubeclient: kubeclient, mcfgclient: mcfgclient, diff --git a/pkg/controller/build/imagebuilder/preparer_test.go b/pkg/controller/build/imagebuilder/preparer_test.go index 0a8737890a..01f224c952 100644 --- a/pkg/controller/build/imagebuilder/preparer_test.go +++ b/pkg/controller/build/imagebuilder/preparer_test.go @@ -7,10 +7,14 @@ import ( "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" testhelpers "github.com/openshift/machine-config-operator/test/helpers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + fakecorev1client "k8s.io/client-go/kubernetes/fake" ) // This test ensures that cleanups for one build do not interfere with the @@ -26,7 +30,7 @@ func TestPreparer(t *testing.T) { obj3 := fixtures.NewObjectsForTest("third-worker") addlObjects := append(obj2.ToRuntimeObjects(), obj3.ToRuntimeObjects()...) 
- kubeclient, mcfgclient, _, kubeassert := fixtures.GetClientsForTestWithAdditionalObjects(t, []runtime.Object{}, addlObjects) + kubeclient, mcfgclient, _, _, _, kubeassert := fixtures.GetClientsForTestWithAdditionalObjects(t, []runtime.Object{}, addlObjects) kubeassert = kubeassert.WithContext(ctx).Now() // Create three preparers assigned to their own MachineOSBuild though sharing @@ -40,16 +44,22 @@ func TestPreparer(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, br1) assertObjectsAreCreatedByPreparer(ctx, t, kubeassert, br1) + err = setOwnerForObjects(ctx, kubeclient, br1) + assert.NoError(t, err) br2, err := p2.Prepare(ctx) assert.NoError(t, err) assert.NotNil(t, br2) assertObjectsAreCreatedByPreparer(ctx, t, kubeassert, br2) + err = setOwnerForObjects(ctx, kubeclient, br2) + assert.NoError(t, err) br3, err := p3.Prepare(ctx) assert.NoError(t, err) assert.NotNil(t, br3) assertObjectsAreCreatedByPreparer(ctx, t, kubeassert, br3) + err = setOwnerForObjects(ctx, kubeclient, br3) + assert.NoError(t, err) // Create three cleaners assigned to their own MachineOSBuilds though // sharing the same kubeclient and mcfgclient objects. @@ -60,7 +70,11 @@ func TestPreparer(t *testing.T) { // c3 uses the Builder object from the BuildRequest instead so that we can // ensure that ephemeral build objects will be removed even if only the Builder object // is available. - c3 := newCleanerFromBuilder(kubeclient, mcfgclient, br3.Builder()) + builder3 := br3.Builder() + // Set the UID for the builder so that we can ensure that ephemeral build objects + // will be removed even if only the Builder object is available. + builder3.SetUID(types.UID(fixtures.JobUID)) + c3 := newCleanerFromBuilder(kubeclient, mcfgclient, builder3) // Cleanup the ephemeral objects from the first MachineOSBuild. 
assert.NoError(t, c1.Clean(ctx)) @@ -122,3 +136,48 @@ func assertObjectsAreRemovedByCleaner(ctx context.Context, t *testing.T, kubeass kubeassert.WithContext(ctx).Now().SecretDoesNotExist(expectedSecret.Name) } } + +func setOwnerForObjects(ctx context.Context, kubeclient *fakecorev1client.Clientset, br buildrequest.BuildRequest) error { + // Add a dummy job as the owner for testing purposes + jobOwnerRef := metav1.OwnerReference{ + APIVersion: "batch/v1", + Kind: "Job", + Name: "test-job", + UID: types.UID(fixtures.JobUID), + } + + configmaps, err := br.ConfigMaps() + if err != nil { + return err + } + for _, configmap := range configmaps { + cm, err := kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Get(ctx, configmap.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + cm.SetOwnerReferences([]metav1.OwnerReference{jobOwnerRef}) + _, err = kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Update(ctx, cm, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + + secrets, err := br.Secrets() + if err != nil { + return err + } + for _, secret := range secrets { + secret, err := kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(ctx, secret.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + secret.SetOwnerReferences([]metav1.OwnerReference{jobOwnerRef}) + _, err = kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Update(ctx, secret, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/controller/build/osbuildcontroller.go b/pkg/controller/build/osbuildcontroller.go index 76c3cea751..14a578102b 100644 --- a/pkg/controller/build/osbuildcontroller.go +++ b/pkg/controller/build/osbuildcontroller.go @@ -11,9 +11,10 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + imagev1clientset 
"github.com/openshift/client-go/image/clientset/versioned" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + routeclientset "github.com/openshift/client-go/route/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -33,6 +34,8 @@ type OSBuildController struct { eventRecorder record.EventRecorder mcfgclient mcfgclientset.Interface kubeclient clientset.Interface + imageclient imagev1clientset.Interface + routeclient routeclientset.Interface config Config execQueue *ctrlcommon.WrappedQueue @@ -73,6 +76,8 @@ func NewOSBuildControllerFromControllerContextWithConfig(ctrlCtx *ctrlcommon.Con cfg, ctrlCtx.ClientBuilder.MachineConfigClientOrDie("machine-os-builder"), ctrlCtx.ClientBuilder.KubeClientOrDie("machine-os-builder"), + ctrlCtx.ClientBuilder.ImageClientOrDie("machine-os-builder"), + ctrlCtx.ClientBuilder.RouteClientOrDie("machine-os-builder"), ) } @@ -80,6 +85,8 @@ func newOSBuildController( ctrlConfig Config, mcfgclient mcfgclientset.Interface, kubeclient clientset.Interface, + imageclient imagev1clientset.Interface, + routeclient routeclientset.Interface, ) *OSBuildController { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) @@ -90,6 +97,8 @@ func newOSBuildController( ctrl := &OSBuildController{ kubeclient: kubeclient, mcfgclient: mcfgclient, + imageclient: imageclient, + routeclient: routeclient, informers: informers, listers: informers.listers(), eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineosbuilder"}), @@ -120,10 +129,11 @@ func newOSBuildController( }) ctrl.machineConfigPoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: ctrl.addMachineConfigPool, UpdateFunc: ctrl.updateMachineConfigPool, }) - ctrl.buildReconciler = 
newBuildReconciler(mcfgclient, kubeclient, ctrl.listers) + ctrl.buildReconciler = newBuildReconciler(mcfgclient, kubeclient, imageclient, routeclient, ctrl.listers) return ctrl } @@ -216,22 +226,22 @@ func (ctrl *OSBuildController) enqueueFuncForObject(obj kubeObject, toRun func(c } func (ctrl *OSBuildController) addMachineOSBuild(cur interface{}) { - mosb := cur.(*mcfgv1alpha1.MachineOSBuild) + mosb := cur.(*mcfgv1.MachineOSBuild) ctrl.enqueueFuncForObject(mosb, func(ctx context.Context) error { return ctrl.buildReconciler.AddMachineOSBuild(ctx, mosb) }) } func (ctrl *OSBuildController) updateMachineOSBuild(old, cur interface{}) { - oldMOSB := old.(*mcfgv1alpha1.MachineOSBuild) - curMOSB := cur.(*mcfgv1alpha1.MachineOSBuild) + oldMOSB := old.(*mcfgv1.MachineOSBuild) + curMOSB := cur.(*mcfgv1.MachineOSBuild) ctrl.enqueueFuncForObject(curMOSB, func(ctx context.Context) error { return ctrl.buildReconciler.UpdateMachineOSBuild(ctx, oldMOSB, curMOSB) }) } func (ctrl *OSBuildController) deleteMachineOSBuild(cur interface{}) { - mosb := cur.(*mcfgv1alpha1.MachineOSBuild) + mosb := cur.(*mcfgv1.MachineOSBuild) ctrl.enqueueFuncForObject(mosb, func(ctx context.Context) error { return ctrl.buildReconciler.DeleteMachineOSBuild(ctx, mosb) }) @@ -261,29 +271,29 @@ func (ctrl *OSBuildController) deleteJob(cur interface{}) { } func (ctrl *OSBuildController) addMachineOSConfig(newMOSC interface{}) { - m := newMOSC.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + m := newMOSC.(*mcfgv1.MachineOSConfig).DeepCopy() ctrl.enqueueFuncForObject(m, func(ctx context.Context) error { return ctrl.buildReconciler.AddMachineOSConfig(ctx, m) }) } func (ctrl *OSBuildController) updateMachineOSConfig(old, cur interface{}) { - oldMOSC := old.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() - curMOSC := cur.(*mcfgv1alpha1.MachineOSConfig).DeepCopy() + oldMOSC := old.(*mcfgv1.MachineOSConfig).DeepCopy() + curMOSC := cur.(*mcfgv1.MachineOSConfig).DeepCopy() ctrl.enqueueFuncForObject(curMOSC, func(ctx 
context.Context) error { return ctrl.buildReconciler.UpdateMachineOSConfig(ctx, oldMOSC, curMOSC) }) } func (ctrl *OSBuildController) deleteMachineOSConfig(cur interface{}) { - mosc, ok := cur.(*mcfgv1alpha1.MachineOSConfig) + mosc, ok := cur.(*mcfgv1.MachineOSConfig) if !ok { tombstone, ok := cur.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", cur)) return } - mosc, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSConfig) + mosc, ok = tombstone.Obj.(*mcfgv1.MachineOSConfig) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a MachineOSConfig %#v", cur)) return @@ -295,6 +305,13 @@ func (ctrl *OSBuildController) deleteMachineOSConfig(cur interface{}) { }) } +func (ctrl *OSBuildController) addMachineConfigPool(newMCP interface{}) { + mcp := newMCP.(*mcfgv1.MachineConfigPool).DeepCopy() + ctrl.enqueueFuncForObject(mcp, func(ctx context.Context) error { + return ctrl.buildReconciler.AddMachineConfigPool(ctx, mcp) + }) +} + func (ctrl *OSBuildController) updateMachineConfigPool(old, cur interface{}) { oldMCP := old.(*mcfgv1.MachineConfigPool).DeepCopy() curMCP := cur.(*mcfgv1.MachineConfigPool).DeepCopy() diff --git a/pkg/controller/build/osbuildcontroller_test.go b/pkg/controller/build/osbuildcontroller_test.go index a50efbeb47..ce622a1ede 100644 --- a/pkg/controller/build/osbuildcontroller_test.go +++ b/pkg/controller/build/osbuildcontroller_test.go @@ -9,9 +9,11 @@ import ( ign3types "github.com/coreos/ignition/v2/config/v3_4/types" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + fakeclientimagev1 "github.com/openshift/client-go/image/clientset/versioned/fake" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" fakeclientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" + fakeclientroutev1 
"github.com/openshift/client-go/route/clientset/versioned/fake" + "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/fixtures" @@ -20,6 +22,7 @@ import ( testhelpers "github.com/openshift/machine-config-operator/test/helpers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -36,18 +39,17 @@ type clients struct { // This test validates that the OSBuildController does nothing unless // there is a matching MachineOSConfig for a given MachineConfigPool. func TestOSBuildControllerDoesNothing(t *testing.T) { - t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) - _, mcfgclient, _, _ := setupOSBuildControllerForTest(ctx, t) + _, mcfgclient, _, _, _, _, _ := setupOSBuildControllerForTest(ctx, t) // i needs to be set to 2 because rendered-worker-1 already exists. for i := 2; i <= 10; i++ { insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, "worker", fmt.Sprintf("rendered-worker-%d", i)) - mosbList, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().List(ctx, metav1.ListOptions{}) + mosbList, err := mcfgclient.MachineconfigurationV1().MachineOSBuilds().List(ctx, metav1.ListOptions{}) require.NoError(t, err) assert.Len(t, mosbList.Items, 0) } @@ -57,7 +59,6 @@ func TestOSBuildControllerDoesNothing(t *testing.T) { // when a new MachineOSBuild for a givee MachineOSConfig is created or a new // rendered MachineConfig is detected on the associated MachineConfigPool. 
func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) { - t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -65,14 +66,13 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) poolName := "worker" t.Run("MachineOSConfig change", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, initialMosb, mcp, kubeassert := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) // Now that the build is in the running state, we update the MachineOSConfig. apiMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, "FROM configs AS final\nRUN echo 'helloworld' > /etc/helloworld") - apiMosc, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, apiMosc, mcp) @@ -97,9 +97,8 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) }) t.Run("MachineConfig change", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, initialMosb, mcp, kubeassert := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, initialMosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") @@ -124,20 +123,19 @@ func TestOSBuildControllerDeletesRunningBuildBeforeStartingANewOne(t *testing.T) // builds but will still clear running builds before statring a new build for // the same MachineOSConfig. 
func TestOSBuildControllerLeavesSuccessfulBuildAlone(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) t.Cleanup(cancel) poolName := "worker" - kubeclient, mcfgclient, firstMosc, firstMosb, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, firstMosc, firstMosb, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Ensures that we have detected the first build. isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, firstMosc, 1) // Creates a MachineOSBuild via a MachineOSConfig change. - createNewMachineOSBuildViaConfigChange := func(mosc *mcfgv1alpha1.MachineOSConfig, containerfileContents string) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild) { + createNewMachineOSBuildViaConfigChange := func(mosc *mcfgv1.MachineOSConfig, containerfileContents string) (*mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild) { // Modify the MachineOSConfig. newMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, containerfileContents) @@ -193,7 +191,6 @@ func TestOSBuildControllerLeavesSuccessfulBuildAlone(t *testing.T) { // behind unless someone makes a change to the MachineOSConfig or // MachineConfigPool. func TestOSBuildControllerFailure(t *testing.T) { - t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) t.Cleanup(cancel) @@ -201,18 +198,16 @@ func TestOSBuildControllerFailure(t *testing.T) { poolName := "worker" t.Run("Failed build objects remain", func(t *testing.T) { - t.Parallel() - _, _, _, failedMosb, _, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + _, _, _, _, _, failedMosb, _, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) // Ensure that even after failure, the build objects remain. 
assertBuildObjectsAreCreated(ctx, t, kubeassert, failedMosb) }) t.Run("MachineOSConfig change clears failed build", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) // Modify the MachineOSConfig to start a new build. newMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, mosc, "FROM configs AS final\nRUN echo 'helloworld' > /etc/helloworld") @@ -235,9 +230,8 @@ func TestOSBuildControllerFailure(t *testing.T) { }) t.Run("MachineConfig change clears failed build", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, failedMosb, mcp, kubeassert := setupOSBuildControllerForTestWithFailedBuild(ctx, t, poolName) apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, "rendered-worker-2") @@ -254,87 +248,6 @@ func TestOSBuildControllerFailure(t *testing.T) { }) } -// This test checks that a previously built MachineOSBuild can be reused -// without performing another build provided that the hashed name is the same. -func TestOSBuildControllerReusesPreviouslyBuiltImage(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - t.Cleanup(cancel) - - poolName := "worker" - - kubeclient, mcfgclient, firstMosc, firstMosb, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) - - // Get the first expected final image pullspec. - firstPullspec := fixtures.GetExpectedFinalImagePullspecForMachineOSBuild(firstMosb) - - // Ensure that the MachineOSConfig has the first image pullspec. 
- assertMachineOSConfigGetsBuiltImagePushspec(ctx, t, mcfgclient, firstMosc, firstPullspec) - - assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, firstMosc, firstMosb) - - // Ensures that we have detected the first build. - isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, firstMosc, 1) - - // Modify the MachineOSConfig to start a new build. - newMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, firstMosc, "FROM configs AS final\nRUN echo 'newbuild' > /etc/newbuild") - - // Compute the new MachineOSBuild. - newMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, newMosc, mcp) - - // Compute the second final image pullspec. - secondPullspec := fixtures.GetExpectedFinalImagePullspecForMachineOSBuild(newMosb) - - // Ensure they're not equal. - assert.NotEqual(t, firstPullspec, secondPullspec) - - assert.NotEqual(t, firstMosb.Name, newMosb.Name) - - // Ensure that the MachineOSBuild exists. - kubeassert.MachineOSBuildExists(newMosb) - // Ensure that the build job exists. - kubeassert.JobExists(utils.GetBuildJobName(newMosb)) - // Set the job status to succeeded. - fixtures.SetJobStatus(ctx, t, kubeclient, newMosb, fixtures.JobStatus{Succeeded: 1}) - // Ensure that the MachineOSBuild gets the successful status. - kubeassert.MachineOSBuildIsSuccessful(newMosb) - - // Ensure that the build count is incremented for our second build. - isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, firstMosc, 2) - - // Ensure that the MachineOSConfig gets the second image pullspec. - assertMachineOSConfigGetsBuiltImagePushspec(ctx, t, mcfgclient, firstMosc, secondPullspec) - - assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, firstMosc, newMosb) - - // Next, roll back to the first MachineOSConfig by resetting the containerfile contents back to the initial state. 
- finalMosc := testhelpers.SetContainerfileContentsOnMachineOSConfig(ctx, t, mcfgclient, newMosc, firstMosc.Spec.BuildInputs.Containerfile[0].Content) - - // Compute the "new" MachineOSBuild name. - finalMosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, finalMosc, mcp) - - // Ensure that the name equals the first MachineOSBuild name. - assert.Equal(t, finalMosb.Name, firstMosb.Name) - - // Compute the final pullspec. - finalPullspec := fixtures.GetExpectedFinalImagePullspecForMachineOSBuild(finalMosb) - - // The third pullspec should be the same as the first. - assert.Equal(t, firstPullspec, finalPullspec) - - // Ensure that no build objects were created. - assertBuildObjectsAreDeleted(ctx, t, kubeassert, finalMosb) - - // Ensure that the MachineOSBuild count did not increase at all. - isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, firstMosc, 2) - - // Ensure that the MachineOSConfig gets the oriignal image pullspec. - assertMachineOSConfigGetsBuiltImagePushspec(ctx, t, mcfgclient, firstMosc, firstPullspec) - - assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, firstMosc, finalMosb) -} - // This test validates that the OSBuildController does the following: // 1. Creates a new MachineOSBuild for a given MachineOSConfig whenever the // MachineOSConfig is updated. @@ -342,10 +255,10 @@ func TestOSBuildControllerReusesPreviouslyBuiltImage(t *testing.T) { // MachineConfigPool is changed. // 3. Removes all MachineOSBuilds associated with a given MachineOSConfig // whenever the MachineOSConfig itself is deleted. 
+ func TestOSBuildController(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*25) t.Cleanup(cancel) poolName := "worker" @@ -355,9 +268,8 @@ func TestOSBuildController(t *testing.T) { } t.Run("MachineOSConfig changes creates a new MachineOSBuild", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, _, _, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, _, _, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Update the BuildInputs section on the MachineOSConfig and verify that a // new MachineOSBuild is produced from it. We'll do this 10 times. @@ -389,20 +301,19 @@ func TestOSBuildController(t *testing.T) { // Now, we delete the MachineOSConfig and we expect that all // MachineOSBuilds that were created from it are also deleted. - err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Delete(ctx, mosc.Name, metav1.DeleteOptions{}) + err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Delete(ctx, mosc.Name, metav1.DeleteOptions{}) require.NoError(t, err) isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, mosc, 0) }) t.Run("MachineConfig changes creates a new MachineOSBuild", func(t *testing.T) { - t.Parallel() - kubeclient, mcfgclient, mosc, _, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) + kubeclient, mcfgclient, _, _, mosc, _, mcp, kubeassert := setupOSBuildControllerForTestWithSuccessfulBuild(ctx, t, poolName) // Update the rendered MachineConfig on the MachineConfigPool and verify that a new MachineOSBuild is produced. We'll do this 10 times. 
for i := 0; i <= 5; i++ { - apiMosc, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) require.NoError(t, err) apiMCP := insertNewRenderedMachineConfigAndUpdatePool(ctx, t, mcfgclient, mosc.Spec.MachineConfigPool.Name, getConfigNameForPool(i+2)) @@ -426,14 +337,221 @@ func TestOSBuildController(t *testing.T) { // Now, we delete the MachineOSConfig and we expect that all // MachineOSBuilds that were created from it are also deleted. - err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Delete(ctx, mosc.Name, metav1.DeleteOptions{}) + err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Delete(ctx, mosc.Name, metav1.DeleteOptions{}) require.NoError(t, err) isMachineOSBuildReachedExpectedCount(ctx, t, mcfgclient, mosc, 0) }) } -func assertBuildObjectsAreCreated(ctx context.Context, t *testing.T, kubeassert *testhelpers.Assertions, mosb *mcfgv1alpha1.MachineOSBuild) { +func TestOSBuildControllerBuildFailedDoesNotCascade(t *testing.T) { + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(cancel) + + poolName := "worker" + faultyMC := "rendered-undesiredFaultyMC" + + // Create a MOSC to enable OCL and let it produce a new MOSB in Running State + _, mcfgclient, _, _, mosc, mosb, mcp, _, ctrl := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + assertMachineOSConfigGetsCurrentBuildAnnotation(ctx, t, mcfgclient, mosc, mosb) + + found := func(item *mcfgv1.MachineOSBuild, list []mcfgv1.MachineOSBuild) bool { + for _, m := range list { + if m.Name == item.Name { + return true + } + } + return false + } + + mosbList, err := mcfgclient.MachineconfigurationV1().MachineOSBuilds().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + if !found(mosb, mosbList.Items) { + t.Errorf("Expected %v to be in the list %v", mosb.Name, 
+ // This faultyMC represents an older MachineConfig that passed through API validation checks, but if a MOSB (named oldMOSB) were to be built from it, it would fail to start a job. Hence a MC is added here, but the MCP is not targeting this MC.
completes before the reconciliation loop can run.
To simulate that, this test +// performs all of the setup steps and creates a successful Job before starting +// the controller. +func TestOSBuildControllerReconcilesJobsAfterRestart(t *testing.T) { + mainCtx, mainCancel := context.WithTimeout(context.Background(), time.Second*5) + t.Cleanup(mainCancel) + + testCases := []struct { + name string + jobStatus fixtures.JobStatus + conditions []metav1.Condition + assertions func(*testhelpers.Assertions, *mcfgv1.MachineOSBuild) + }{ + { + name: "Empty MOSB conditions -> Running", + jobStatus: fixtures.JobStatus{Active: 1}, + conditions: []metav1.Condition{}, + assertions: func(kubeassert *testhelpers.Assertions, mosb *mcfgv1.MachineOSBuild) { + kubeassert.MachineOSBuildIsRunning(mosb) + kubeassert.JobExists(utils.GetBuildJobName(mosb)) + }, + }, + { + name: "Initial MOSB -> Running", + jobStatus: fixtures.JobStatus{Active: 1}, + conditions: apihelpers.MachineOSBuildInitialConditions(), + assertions: func(kubeassert *testhelpers.Assertions, mosb *mcfgv1.MachineOSBuild) { + kubeassert.MachineOSBuildIsRunning(mosb) + kubeassert.JobExists(utils.GetBuildJobName(mosb)) + }, + }, + { + name: "Running MOSB -> Succeeded", + jobStatus: fixtures.JobStatus{Succeeded: 1}, + conditions: apihelpers.MachineOSBuildRunningConditions(), + assertions: func(kubeassert *testhelpers.Assertions, mosb *mcfgv1.MachineOSBuild) { + kubeassert.MachineOSBuildIsSuccessful(mosb) + kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(mainCtx) + t.Cleanup(cancel) + + poolName := "worker" + + kubeclient, mcfgclient, imageclient, routeclient, lobj, kubeassert := fixtures.GetClientsForTest(t) + + kubeassert = kubeassert.Eventually().WithContext(ctx).WithPollInterval(time.Millisecond) + mcp := lobj.MachineConfigPool + mosc := lobj.MachineOSConfig + mosc.Name = fmt.Sprintf("%s-os-config", poolName) + + _, err := 
mcfgclient.MachineconfigurationV1().MachineOSConfigs().Create(ctx, mosc, metav1.CreateOptions{}) + require.NoError(t, err) + + mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, mcp) + apiMosb, err := mcfgclient.MachineconfigurationV1().MachineOSBuilds().Create(ctx, mosb, metav1.CreateOptions{}) + require.NoError(t, err) + + // This represents the state of the MachineOSBuild before the build + // controller comes back up after a restart. A job that is in a terminal + // state will produce a different set of conditions which these conditions + // will be compared to. + apiMosb.Status.Conditions = testCase.conditions + + _, err = mcfgclient.MachineconfigurationV1().MachineOSBuilds().UpdateStatus(ctx, apiMosb, metav1.UpdateOptions{}) + require.NoError(t, err) + + br, err := buildrequest.NewBuildRequestFromAPI(ctx, kubeclient, mcfgclient, apiMosb, mosc) + require.NoError(t, err) + + buildJob := br.Builder().GetObject().(*batchv1.Job) + + _, err = kubeclient.BatchV1().Jobs(ctrlcommon.MCONamespace).Create(ctx, buildJob, metav1.CreateOptions{}) + require.NoError(t, err) + + fixtures.SetJobStatus(ctx, t, kubeclient, mosb, testCase.jobStatus) + + // Start the build controller + _, stop := startController(ctx, t, kubeclient, mcfgclient, imageclient, routeclient) + t.Cleanup(stop) + + kubeassert.MachineOSBuildExists(mosb) + testCase.assertions(kubeassert, mosb) + }) + } +} + +func assertBuildObjectsAreCreated(ctx context.Context, t *testing.T, kubeassert *testhelpers.Assertions, mosb *mcfgv1.MachineOSBuild) { t.Helper() kubeassert.JobExists(utils.GetBuildJobName(mosb)) @@ -443,7 +561,7 @@ func assertBuildObjectsAreCreated(ctx context.Context, t *testing.T, kubeassert kubeassert.SecretExists(utils.GetFinalPushSecretName(mosb)) } -func assertBuildObjectsAreDeleted(ctx context.Context, t *testing.T, kubeassert *testhelpers.Assertions, mosb *mcfgv1alpha1.MachineOSBuild) { +func assertBuildObjectsAreDeleted(ctx context.Context, t *testing.T, kubeassert 
*testhelpers.Assertions, mosb *mcfgv1.MachineOSBuild) { t.Helper() kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) @@ -453,45 +571,58 @@ func assertBuildObjectsAreDeleted(ctx context.Context, t *testing.T, kubeassert kubeassert.SecretDoesNotExist(utils.GetFinalPushSecretName(mosb)) } -func setupOSBuildControllerForTest(ctx context.Context, t *testing.T) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *testhelpers.Assertions, *fixtures.ObjectsForTest) { - kubeclient, mcfgclient, lobj, kubeassert := fixtures.GetClientsForTest(t) +// Creates a cancellable child context which is passed into the +// OSBuildController instance. Returns the cancel function for the child +// context as well as the OSBuildController instance. Useful for testing +// scenarios where it might be desirable to start and stop the +// OSBuildController. +func startController(ctx context.Context, t *testing.T, kubeclient *fakecorev1client.Clientset, mcfgclient *fakeclientmachineconfigv1.Clientset, imageclient *fakeclientimagev1.Clientset, routeclient *fakeclientroutev1.Clientset) (*OSBuildController, func()) { + ctrlCtx, ctrlCtxCancel := context.WithCancel(ctx) cfg := Config{ MaxRetries: 1, UpdateDelay: 0, } - ctrl := newOSBuildController(cfg, mcfgclient, kubeclient) + ctrl := newOSBuildController(cfg, mcfgclient, kubeclient, imageclient, routeclient) // Use a work queue which is tuned for testing. 
ctrl.execQueue = ctrlcommon.NewWrappedQueueForTesting(t) - go ctrl.Run(ctx, 5) + go ctrl.Run(ctrlCtx, 5) + + return ctrl, ctrlCtxCancel +} + +func setupOSBuildControllerForTest(ctx context.Context, t *testing.T) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *testhelpers.Assertions, *fixtures.ObjectsForTest, *OSBuildController) { + kubeclient, mcfgclient, imageclient, routeclient, lobj, kubeassert := fixtures.GetClientsForTest(t) + + ctrl, _ := startController(ctx, t, kubeclient, mcfgclient, imageclient, routeclient) kubeassert = kubeassert.Eventually().WithContext(ctx).WithPollInterval(time.Millisecond) - return kubeclient, mcfgclient, kubeassert, lobj + return kubeclient, mcfgclient, imageclient, routeclient, kubeassert, lobj, ctrl } -func setupOSBuildControllerForTestWithBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { - kubeclient, mcfgclient, kubeassert, lobj := setupOSBuildControllerForTest(ctx, t) +func setupOSBuildControllerForTestWithBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *OSBuildController) { + kubeclient, mcfgclient, imageclient, routeclient, kubeassert, lobj, ctrl := setupOSBuildControllerForTest(ctx, t) mcp := lobj.MachineConfigPool mosc := lobj.MachineOSConfig mosc.Name = fmt.Sprintf("%s-os-config", poolName) - _, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Create(ctx, mosc, metav1.CreateOptions{}) + _, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Create(ctx, mosc, metav1.CreateOptions{}) 
require.NoError(t, err) mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, kubeclient, mosc, mcp) - return kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert.WithPollInterval(time.Millisecond * 10).WithContext(ctx).Eventually() + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert.WithPollInterval(time.Millisecond * 10).WithContext(ctx).Eventually(), ctrl } -func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { +func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions, *OSBuildController) { t.Helper() - kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, ctrl := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) initialBuildJobName := utils.GetBuildJobName(mosb) @@ -507,13 +638,13 @@ func setupOSBuildControllerForTestWithRunningBuild(ctx context.Context, t *testi // The MachineOSBuild should be running. 
kubeassert.Eventually().WithContext(ctx).MachineOSBuildIsRunning(mosb, "Expected the MachineOSBuild %s status to be running", mosb.Name) - return kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, ctrl } -func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { +func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { t.Helper() - kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithRunningBuild(ctx, t, poolName) kubeassert.MachineOSBuildExists(mosb) kubeassert.JobExists(utils.GetBuildJobName(mosb)) @@ -521,13 +652,13 @@ func setupOSBuildControllerForTestWithSuccessfulBuild(ctx context.Context, t *te kubeassert.MachineOSBuildIsSuccessful(mosb) kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) - return kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert } -func setupOSBuildControllerForTestWithFailedBuild(ctx context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { +func setupOSBuildControllerForTestWithFailedBuild(ctx 
context.Context, t *testing.T, poolName string) (*fakecorev1client.Clientset, *fakeclientmachineconfigv1.Clientset, *fakeclientimagev1.Clientset, *fakeclientroutev1.Clientset, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, *mcfgv1.MachineConfigPool, *testhelpers.Assertions) { t.Helper() - kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) + kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert, _ := setupOSBuildControllerForTestWithBuild(ctx, t, poolName) initialBuildJobName := utils.GetBuildJobName(mosb) @@ -540,7 +671,7 @@ func setupOSBuildControllerForTestWithFailedBuild(ctx context.Context, t *testin // The MachineOSBuild should be running. kubeassert.MachineOSBuildIsRunning(mosb, "Expected the MachineOSBuild %s status to be running", mosb.Name) - return kubeclient, mcfgclient, mosc, mosb, mcp, kubeassert + return kubeclient, mcfgclient, imageclient, routeclient, mosc, mosb, mcp, kubeassert } func insertNewRenderedMachineConfigAndUpdatePool(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, poolName, renderedName string) *mcfgv1.MachineConfigPool { @@ -573,11 +704,11 @@ func insertNewRenderedMachineConfig(ctx context.Context, t *testing.T, mcfgclien require.NoError(t, err) } -func isMachineOSBuildReachedExpectedCount(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, expected int) { +func isMachineOSBuildReachedExpectedCount(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig, expected int) { t.Helper() err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { - mosbList, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().List(ctx, metav1.ListOptions{ + mosbList, err := mcfgclient.MachineconfigurationV1().MachineOSBuilds().List(ctx, metav1.ListOptions{ LabelSelector: 
utils.MachineOSBuildForPoolSelector(mosc).String(), }) if err != nil { @@ -590,42 +721,31 @@ func isMachineOSBuildReachedExpectedCount(ctx context.Context, t *testing.T, mcf require.NoError(t, err, "MachineOSBuild count did not reach expected value %d", expected) } -func setImagePushspecOnMachineOSBuild(ctx context.Context, mcfgclient mcfgclientset.Interface, mosb *mcfgv1alpha1.MachineOSBuild, pushspec string) error { - apiMosb, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Get(ctx, mosb.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - apiMosb.Status.FinalImagePushspec = pushspec - - _, err = mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(ctx, apiMosb, metav1.UpdateOptions{}) - return err -} - -func assertMachineOSConfigGetsBuiltImagePushspec(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, pullspec string) { +func assertMachineOSConfigGetsBuiltImagePushspec(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig, pullspec string) { t.Helper() - var foundMosc *mcfgv1alpha1.MachineOSConfig + var foundMosc *mcfgv1.MachineOSConfig err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { - apiMosc, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) if err != nil { return false, err } foundMosc = apiMosc - return apiMosc.Status.CurrentImagePullspec == pullspec, nil + return string(apiMosc.Status.CurrentImagePullSpec) == pullspec, nil }) - require.NoError(t, err, "expected: %q, got: %q", pullspec, foundMosc.Status.CurrentImagePullspec) + require.NoError(t, err) + require.Equal(t, pullspec, string(foundMosc.Status.CurrentImagePullSpec)) } -func 
assertMachineOSConfigGetsCurrentBuildAnnotation(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) { +func assertMachineOSConfigGetsCurrentBuildAnnotation(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) { t.Helper() err := wait.PollImmediateInfiniteWithContext(ctx, time.Millisecond, func(ctx context.Context) (bool, error) { - apiMosc, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/pkg/controller/build/reconciler.go b/pkg/controller/build/reconciler.go index 4f09321278..682c47633e 100644 --- a/pkg/controller/build/reconciler.go +++ b/pkg/controller/build/reconciler.go @@ -2,19 +2,28 @@ package build import ( "context" + "encoding/json" "fmt" + "os" + "path/filepath" "strings" "time" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + imagev1clientset "github.com/openshift/client-go/image/clientset/versioned" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + routeclientset "github.com/openshift/client-go/route/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/imagebuilder" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/controller/template" + "github.com/openshift/machine-config-operator/pkg/daemon" + 
daemonconstants "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/helpers" batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -29,21 +38,23 @@ const ( updatingVerb string = "Updating" deletingVerb string = "Deleting" syncingVerb string = "Syncing" + certsDir string = "/etc/docker/certs.d" ) type reconciler interface { - AddMachineOSBuild(context.Context, *mcfgv1alpha1.MachineOSBuild) error - UpdateMachineOSBuild(context.Context, *mcfgv1alpha1.MachineOSBuild, *mcfgv1alpha1.MachineOSBuild) error - DeleteMachineOSBuild(context.Context, *mcfgv1alpha1.MachineOSBuild) error + AddMachineOSBuild(context.Context, *mcfgv1.MachineOSBuild) error + UpdateMachineOSBuild(context.Context, *mcfgv1.MachineOSBuild, *mcfgv1.MachineOSBuild) error + DeleteMachineOSBuild(context.Context, *mcfgv1.MachineOSBuild) error - AddMachineOSConfig(context.Context, *mcfgv1alpha1.MachineOSConfig) error - UpdateMachineOSConfig(context.Context, *mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSConfig) error - DeleteMachineOSConfig(context.Context, *mcfgv1alpha1.MachineOSConfig) error + AddMachineOSConfig(context.Context, *mcfgv1.MachineOSConfig) error + UpdateMachineOSConfig(context.Context, *mcfgv1.MachineOSConfig, *mcfgv1.MachineOSConfig) error + DeleteMachineOSConfig(context.Context, *mcfgv1.MachineOSConfig) error AddJob(context.Context, *batchv1.Job) error UpdateJob(context.Context, *batchv1.Job, *batchv1.Job) error DeleteJob(context.Context, *batchv1.Job) error + AddMachineConfigPool(context.Context, *mcfgv1.MachineConfigPool) error UpdateMachineConfigPool(context.Context, *mcfgv1.MachineConfigPool, *mcfgv1.MachineConfigPool) error } @@ -51,27 +62,31 @@ type reconciler interface { // is to respond to incoming events in a specific way. 
By doing this, the // reconciliation process has a clear entrypoint for each incoming event. type buildReconciler struct { - mcfgclient mcfgclientset.Interface - kubeclient clientset.Interface + mcfgclient mcfgclientset.Interface + kubeclient clientset.Interface + imageclient imagev1clientset.Interface + routeclient routeclientset.Interface *listers } // Instantiates a new reconciler instance. This returns an interface to // disallow access to its private methods. -func newBuildReconciler(mcfgclient mcfgclientset.Interface, kubeclient clientset.Interface, l *listers) reconciler { - return newBuildReconcilerAsStruct(mcfgclient, kubeclient, l) +func newBuildReconciler(mcfgclient mcfgclientset.Interface, kubeclient clientset.Interface, imageclient imagev1clientset.Interface, routeclient routeclientset.Interface, l *listers) reconciler { + return newBuildReconcilerAsStruct(mcfgclient, kubeclient, imageclient, routeclient, l) } -func newBuildReconcilerAsStruct(mcfgclient mcfgclientset.Interface, kubeclient clientset.Interface, l *listers) *buildReconciler { +func newBuildReconcilerAsStruct(mcfgclient mcfgclientset.Interface, kubeclient clientset.Interface, imageclient imagev1clientset.Interface, routeclient routeclientset.Interface, l *listers) *buildReconciler { return &buildReconciler{ - mcfgclient: mcfgclient, - kubeclient: kubeclient, - listers: l, + mcfgclient: mcfgclient, + kubeclient: kubeclient, + imageclient: imageclient, + routeclient: routeclient, + listers: l, } } // Executes whenever a new MachineOSConfig is added. 
-func (b *buildReconciler) AddMachineOSConfig(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) AddMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { return b.timeObjectOperation(mosc, addingVerb, func() error { if err := b.addMachineOSConfig(ctx, mosc); err != nil { return err @@ -82,7 +97,7 @@ func (b *buildReconciler) AddMachineOSConfig(ctx context.Context, mosc *mcfgv1al } // Executes whenever an existing MachineOSConfig is updated. -func (b *buildReconciler) UpdateMachineOSConfig(ctx context.Context, old, cur *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) UpdateMachineOSConfig(ctx context.Context, old, cur *mcfgv1.MachineOSConfig) error { return b.timeObjectOperation(cur, updatingVerb, func() error { return b.updateMachineOSConfig(ctx, old, cur) }) @@ -90,32 +105,72 @@ func (b *buildReconciler) UpdateMachineOSConfig(ctx context.Context, old, cur *m // Executes whenever a MachineOSConfig is updated. If the build inputs have // changed, a new MachineOSBuild should be created. -func (b *buildReconciler) updateMachineOSConfig(ctx context.Context, old, cur *mcfgv1alpha1.MachineOSConfig) error { - // Whenever the build inputs have changed, create a new MachineOSBuild. - if !equality.Semantic.DeepEqual(old.Spec.BuildInputs, cur.Spec.BuildInputs) { +func (b *buildReconciler) updateMachineOSConfig(ctx context.Context, old, cur *mcfgv1.MachineOSConfig) error { + // If we have gained the rebuild annotation, we should delete the current MachineOSBuild associated with this MachineOSConfig. + if !hasRebuildAnnotation(old) && hasRebuildAnnotation(cur) { + if err := b.rebuildMachineOSConfig(ctx, cur); err != nil { + return fmt.Errorf("could not rebuild MachineOSConfig %q: %w", cur.Name, err) + } + + return nil + } + + // Whenever the MachineOSConfig spec has changed, create a new MachineOSBuild. 
+ if !equality.Semantic.DeepEqual(old.Spec, cur.Spec) { klog.Infof("Detected MachineOSConfig change for %s", cur.Name) - return b.createNewMachineOSBuildOrReuseExisting(ctx, cur) + return b.createNewMachineOSBuildOrReuseExisting(ctx, cur, false) } return b.syncMachineOSConfigs(ctx) } +// Rebuilds the most current build associated with a MachineOSConfig whenever +// the rebuild annotation is applied. This is done by deleting the current +// MachineOSBuild and allowing the controller to replace it with a new one. +func (b *buildReconciler) rebuildMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { + klog.Infof("MachineOSConfig %q has rebuild annotation (%q)", mosc.Name, constants.RebuildMachineOSConfigAnnotationKey) + + if !hasCurrentBuildAnnotation(mosc) { + klog.Infof("MachineOSConfig %q does not have current build annotation (%q) set, skipping rebuild", mosc.Name, constants.CurrentMachineOSBuildAnnotationKey) + return nil + } + + mosbName := mosc.Annotations[constants.CurrentMachineOSBuildAnnotationKey] + + mosb, err := b.machineOSBuildLister.Get(mosbName) + if err != nil { + return ignoreErrIsNotFound(fmt.Errorf("cannot rebuild MachineOSConfig %q: %w", mosc.Name, err)) + } + + if err := b.deleteMachineOSBuild(ctx, mosb); err != nil { + return fmt.Errorf("could not delete MachineOSBuild %q for MachineOSConfig %q: %w", mosb.Name, mosc.Name, err) + } + + if err := b.createNewMachineOSBuildOrReuseExisting(ctx, mosc, true); err != nil { + return fmt.Errorf("could not create new MachineOSBuild for MachineOSConfig %q: %w", mosc.Name, err) + } + + klog.Infof("MachineOSConfig %q is now rebuilding", mosc.Name) + + return nil +} + // Runs whenever a new MachineOSConfig is added. Determines if a new // MachineOSBuild should be created and then creates it, if needed. 
-func (b *buildReconciler) addMachineOSConfig(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) addMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { return b.syncMachineOSConfig(ctx, mosc) } // Executes whenever a MachineOSConfig is deleted. This deletes all // MachineOSBuilds (and the underlying associated build objects). -func (b *buildReconciler) DeleteMachineOSConfig(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) DeleteMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { return b.timeObjectOperation(mosc, deletingVerb, func() error { return b.deleteMachineOSConfig(ctx, mosc) }) } // Performs the deletion reconciliation of the MachineOSConfig. -func (b *buildReconciler) deleteMachineOSConfig(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) deleteMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { klog.Infof("Removing MachineOSBuild(s) associated with non-existent MachineOSConfig %s", mosc.Name) mosbList, err := b.machineOSBuildLister.List(utils.MachineOSBuildForPoolSelector(mosc)) @@ -137,6 +192,11 @@ func (b *buildReconciler) deleteMachineOSConfig(ctx context.Context, mosc *mcfgv func (b *buildReconciler) AddJob(ctx context.Context, job *batchv1.Job) error { return b.timeObjectOperation(job, addingVerb, func() error { klog.Infof("Adding build job %q", job.Name) + + if err := b.updateMachineOSBuildWithStatus(ctx, job); err != nil { + return fmt.Errorf("could not update job status for %q: %w", job.Name, err) + } + return b.syncAll(ctx) }) } @@ -153,6 +213,7 @@ func (b *buildReconciler) DeleteJob(ctx context.Context, job *batchv1.Job) error return b.timeObjectOperation(job, deletingVerb, func() error { // Set the DeletionTimestamp so that we can set the build status to interrupted job.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) + err := 
b.updateMachineOSBuildWithStatus(ctx, job) if err != nil { return err @@ -164,14 +225,14 @@ func (b *buildReconciler) DeleteJob(ctx context.Context, job *batchv1.Job) error // Executes whenever a new MachineOSBuild is added. It starts executing the // build in response to a new MachineOSBuild being created. -func (b *buildReconciler) AddMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) AddMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { return b.timeObjectOperation(mosb, addingVerb, func() error { return b.addMachineOSBuild(ctx, mosb) }) } // Executes whenever a MachineOSBuild is updated. -func (b *buildReconciler) UpdateMachineOSBuild(ctx context.Context, old, cur *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) UpdateMachineOSBuild(ctx context.Context, old, cur *mcfgv1.MachineOSBuild) error { return b.timeObjectOperation(cur, updatingVerb, func() error { if err := b.updateMachineOSBuild(ctx, old, cur); err != nil { return fmt.Errorf("could not update MachineOSBuild: %w", err) @@ -183,7 +244,7 @@ func (b *buildReconciler) UpdateMachineOSBuild(ctx context.Context, old, cur *mc // Performs the reconciliation whenever the MachineOSBuild is updated, such as // cleaning up the build artifacts upon success. -func (b *buildReconciler) updateMachineOSBuild(ctx context.Context, old, current *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) updateMachineOSBuild(ctx context.Context, old, current *mcfgv1.MachineOSBuild) error { mosc, err := utils.GetMachineOSConfigForMachineOSBuild(current, b.utilListers()) if err != nil { // If a MachineOSConfig is deleted before the MachineOSBuild is, we should @@ -231,43 +292,53 @@ func (b *buildReconciler) updateMachineOSBuild(ctx context.Context, old, current } // Updates the status on the MachineOSConfig object from the supplied MachineOSBuild object. 
-func (b *buildReconciler) updateMachineOSConfigStatus(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) updateMachineOSConfigStatus(ctx context.Context, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) error { mosc, err := b.getMachineOSConfigForUpdate(mosc) if err != nil { return err } + annoUpdateNeeded := false + + if hasRebuildAnnotation(mosc) { + delete(mosc.Annotations, constants.RebuildMachineOSConfigAnnotationKey) + annoUpdateNeeded = true + klog.Infof("Cleared rebuild annotation (%q) on MachineOSConfig %q", constants.RebuildMachineOSConfigAnnotationKey, mosc.Name) + } + if !isCurrentBuildAnnotationEqual(mosc, mosb) { - // If the current build annotation is not equal, do an update and reuse the - // returned MachineOSBuild for the status update, if needed. metav1.SetMetaDataAnnotation(&mosc.ObjectMeta, constants.CurrentMachineOSBuildAnnotationKey, mosb.Name) - updatedMosc, err := b.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Update(ctx, mosc, metav1.UpdateOptions{}) + annoUpdateNeeded = true + klog.Infof("Set current build on MachineOSConfig %q to MachineOSBuild %q", mosc.Name, mosb.Name) + } + + if annoUpdateNeeded { + updatedMosc, err := b.mcfgclient.MachineconfigurationV1().MachineOSConfigs().Update(ctx, mosc, metav1.UpdateOptions{}) if err != nil { - return fmt.Errorf("could not set current build annotation on MachineOSConfig %q: %w", mosc.Name, err) + return fmt.Errorf("could not update annotations on MachineOSConfig %q: %w", mosc.Name, err) } - klog.Infof("Set current build on MachineOSConfig %q to MachineOSBuild %q", mosc.Name, mosb.Name) + klog.Infof("Updated annotations on MachineOSConfig %q", mosc.Name) mosc = updatedMosc } - // Skip the status update if final image pushspec hasn't been set yet. 
- if mosb.Status.FinalImagePushspec == "" { - klog.Infof("MachineOSBuild %q has empty final image pushspec, skipping MachineOSConfig %q update", mosb.Name, mosc.Name) + // Skip the status update if digest image pushspec hasn't been set yet. + if mosb.Status.DigestedImagePushSpec == "" { + klog.Infof("MachineOSBuild %q has empty final image pushspec, skipping MachineOSConfig %q status update", mosb.Name, mosc.Name) return nil } - // skip the status update if the current image pullspec equals the final image pushspec. - if mosc.Status.CurrentImagePullspec == mosb.Status.FinalImagePushspec { + // skip the status update if the current image pullspec equals the digest image pushspec. + if mosc.Status.CurrentImagePullSpec == mosb.Status.DigestedImagePushSpec { klog.Infof("MachineOSConfig %q already has final image pushspec for MachineOSBuild %q", mosc.Name, mosb.Name) return nil } - mosc.Status.CurrentImagePullspec = mosb.Status.FinalImagePushspec - // TODO: Reconsider this. - mosc.Status.ObservedGeneration += mosc.GetGeneration() + mosc.Status.CurrentImagePullSpec = mosb.Status.DigestedImagePushSpec + mosc.Status.ObservedGeneration = mosc.GetGeneration() - _, err = b.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().UpdateStatus(ctx, mosc, metav1.UpdateOptions{}) + _, err = b.mcfgclient.MachineconfigurationV1().MachineOSConfigs().UpdateStatus(ctx, mosc, metav1.UpdateOptions{}) if err == nil { klog.Infof("Updated status on MachineOSConfig %s", mosc.Name) } @@ -276,13 +347,20 @@ func (b *buildReconciler) updateMachineOSConfigStatus(ctx context.Context, mosc } // Executes whenever a MachineOSBuild is deleted by cleaning up any remaining build artifacts that may be left behind. 
-func (b *buildReconciler) DeleteMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) DeleteMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { return b.timeObjectOperation(mosb, deletingVerb, func() error { return b.deleteBuilderForMachineOSBuild(ctx, mosb) }) } -// Executes whenever a MachineConfigPool is updated . +// Executes whenever a MachineConfigPool is added. +func (b *buildReconciler) AddMachineConfigPool(ctx context.Context, mcp *mcfgv1.MachineConfigPool) error { + return b.timeObjectOperation(mcp, addingVerb, func() error { + return b.syncMachineConfigPools(ctx) + }) +} + +// Executes whenever a MachineConfigPool is updated. func (b *buildReconciler) UpdateMachineConfigPool(ctx context.Context, oldMCP, curMCP *mcfgv1.MachineConfigPool) error { return b.timeObjectOperation(curMCP, updatingVerb, func() error { return b.updateMachineConfigPool(ctx, oldMCP, curMCP) @@ -301,16 +379,17 @@ func (b *buildReconciler) updateMachineConfigPool(ctx context.Context, oldMCP, c } // Not sure if we need to do this here yet or not. + // TODO: Determine if we should call b.syncMachineConfigPools() here or not. return b.syncAll(ctx) } // Adds a MachineOSBuild. -func (b *buildReconciler) addMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) addMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { return b.syncMachineOSBuild(ctx, mosb) } // Starts executing a build for a given MachineOSBuild. 
-func (b *buildReconciler) startBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) startBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { mosc, err := utils.GetMachineOSConfigForMachineOSBuild(mosb, b.utilListers()) if err != nil { return err @@ -336,7 +415,7 @@ func (b *buildReconciler) startBuild(ctx context.Context, mosb *mcfgv1alpha1.Mac } // Retrieves a deep-copy of the MachineOSConfig from the lister so that the cache is not mutated during the update. -func (b *buildReconciler) getMachineOSConfigForUpdate(mosc *mcfgv1alpha1.MachineOSConfig) (*mcfgv1alpha1.MachineOSConfig, error) { +func (b *buildReconciler) getMachineOSConfigForUpdate(mosc *mcfgv1.MachineOSConfig) (*mcfgv1.MachineOSConfig, error) { out, err := b.machineOSConfigLister.Get(mosc.Name) if err != nil { @@ -347,7 +426,7 @@ func (b *buildReconciler) getMachineOSConfigForUpdate(mosc *mcfgv1alpha1.Machine } // Retrieves a deep-copy of the MachineOSBuild from the lister so that the cache is not mutated during the update. -func (b *buildReconciler) getMachineOSBuildForUpdate(mosb *mcfgv1alpha1.MachineOSBuild) (*mcfgv1alpha1.MachineOSBuild, error) { +func (b *buildReconciler) getMachineOSBuildForUpdate(mosb *mcfgv1.MachineOSBuild) (*mcfgv1.MachineOSBuild, error) { out, err := b.machineOSBuildLister.Get(mosb.Name) if err != nil { @@ -370,15 +449,49 @@ func (b *buildReconciler) createNewMachineOSBuildOrReuseExistingForPoolChange(ct return err } - if err := b.createNewMachineOSBuildOrReuseExisting(ctx, mosc.DeepCopy()); err != nil { + if err := b.createNewMachineOSBuildOrReuseExisting(ctx, mosc.DeepCopy(), false); err != nil { return fmt.Errorf("could not create MachineOSBuild for MachineConfigPool %q change: %w", mcp.Name, err) } return nil } +// Executes whenever a MachineOSConfig has the rebuild annotation and a new MachineOSBuild needs to be created. 
+func (b *buildReconciler) createNewMachineOSBuildForRebuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild, moscName string) error { + // Verify that the MOSB is actually deleted before we try to create a new one + // The deletion process may take some time and if we try to create a new MOSB with the same name, a clash may happen + childCtx, cancel := context.WithTimeout(ctx, time.Second*90) + defer cancel() + for { + _, err := b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Get(childCtx, mosb.Name, metav1.GetOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return fmt.Errorf("could not check if MachineOSBuild %s exists: %w", mosb.Name, err) + } + if k8serrors.IsNotFound(err) { + break + } + time.Sleep(1 * time.Second) + } + + // Delete the digest configmap if it exists + // This is created by the wait-for-done container once the image has been built and pushed + // and stays around when the build is successful + err := b.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, utils.GetDigestConfigMapName(mosb), metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return fmt.Errorf("could not delete digest configmap for MachineOSBuild %s: %w", mosb.Name, err) + } + + // Now create the new MOSB + _, err = b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Create(ctx, mosb, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not create new MachineOSBuild from rebuild annotation for MachineOSConfig %q: %w", moscName, err) + } + klog.Infof("New MachineOSBuild created: %s", mosb.Name) + return nil +} + // Executes whenever a new MachineOSBuild is created. 
-func (b *buildReconciler) createNewMachineOSBuildOrReuseExisting(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) createNewMachineOSBuildOrReuseExisting(ctx context.Context, mosc *mcfgv1.MachineOSConfig, isRebuild bool) error { mcp, err := b.machineConfigPoolLister.Get(mosc.Spec.MachineConfigPool.Name) if err != nil { return fmt.Errorf("could not get MachineConfigPool %s for MachineOSConfig %s: %w", mosc.Spec.MachineConfigPool.Name, mosc.Name, err) @@ -407,34 +520,86 @@ func (b *buildReconciler) createNewMachineOSBuildOrReuseExisting(ctx context.Con return fmt.Errorf("could not instantiate new MachineOSBuild: %w", err) } + // Set owner reference of the machineOSBuild to the machineOSConfig that created this + oref := metav1.NewControllerRef(mosc, mcfgv1.SchemeGroupVersion.WithKind("MachineOSConfig")) + mosb.SetOwnerReferences([]metav1.OwnerReference{*oref}) + existingMosb, err := b.machineOSBuildLister.Get(mosb.Name) if err != nil && !k8serrors.IsNotFound(err) { return fmt.Errorf("could not get MachineOSBuild: %w", err) } + // If this is a rebuild based on the rebuild annotation, then we definitely need to create the MOSB again + if isRebuild { + return b.createNewMachineOSBuildForRebuild(ctx, mosb, mosc.Name) + } + // If err is nil, it means a MachineOSBuild with this name already exists. // What likely happened is that a config change was rolled back to the // previous state. Rather than performing another build, we should get the // previously built image pullspec and adjust the MachineOSConfig to use that // image instead. 
if err == nil && existingMosb != nil { - if err := b.reuseExistingMachineOSBuildIfPossible(ctx, mosc, existingMosb); err != nil { + imageNeedsRebuild, err := b.reuseExistingMachineOSBuildIfPossible(ctx, mosc, existingMosb) + if err != nil { return fmt.Errorf("could not reuse existing MachineOSBuild %q for MachineOSConfig %q: %w", existingMosb.Name, mosc.Name, err) } + // If we need to rebuild, then we need to create a new MachineOSBuild + if imageNeedsRebuild { + return b.createNewMachineOSBuildForRebuild(ctx, mosb, mosc.Name) + } + // If we did not need to rebuild, then we can reuse the existing MOSB and we are done return nil } // In this situation, we've determined that the MachineOSBuild does not // exist, so we need to create it. if k8serrors.IsNotFound(err) { - _, err := b.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Create(ctx, mosb, metav1.CreateOptions{}) - if err != nil { + _, err := b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Create(ctx, mosb, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { return fmt.Errorf("could not create new MachineOSBuild %q: %w", mosb.Name, err) } + klog.Infof("New MachineOSBuild created: %s", mosb.Name) + } + + return nil +} + +// getCerts created the certs directory and returns the path to the certs directory +func (b *buildReconciler) getCerts() error { + err := os.MkdirAll(certsDir, 0o755) + if err != nil { + return fmt.Errorf("could not create certs dir: %w", err) + } + controllerConfigs, err := b.listers.controllerConfigLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("could not list ControllerConfigs: %w", err) + } + if len(controllerConfigs) == 0 { + return fmt.Errorf("no ControllerConfigs found") + } + cc := controllerConfigs[0] + template.UpdateControllerConfigCerts(cc) + + // Copy the certs to /etc/docker/certs.d directory + for _, CA := range cc.Spec.ImageRegistryBundleData { + caFile := strings.ReplaceAll(CA.File, "..", ":") + if err := 
os.MkdirAll(filepath.Join(certsDir, caFile), 0o755); err != nil { + return err + } + if err := os.WriteFile(filepath.Join(certsDir, caFile, "ca.crt"), CA.Data, 0o644); err != nil { + return err + } + } - if err == nil { - klog.Infof("New MachineOSBuild created: %s", mosb.Name) + for _, CA := range cc.Spec.ImageRegistryBundleUserData { + caFile := strings.ReplaceAll(CA.File, "..", ":") + if err := os.MkdirAll(filepath.Join(certsDir, caFile), 0o755); err != nil { + return err + } + if err := os.WriteFile(filepath.Join(certsDir, caFile, "ca.crt"), CA.Data, 0o644); err != nil { + return err } } @@ -442,15 +607,44 @@ func (b *buildReconciler) createNewMachineOSBuildOrReuseExisting(ctx context.Con } // Determines if a preexising MachineOSBuild can be reused and if possible, does it. -func (b *buildReconciler) reuseExistingMachineOSBuildIfPossible(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig, existingMosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) reuseExistingMachineOSBuildIfPossible(ctx context.Context, mosc *mcfgv1.MachineOSConfig, existingMosb *mcfgv1.MachineOSBuild) (bool, error) { existingMosbState := ctrlcommon.NewMachineOSBuildState(existingMosb) canBeReused := false - + imageNeedsRebuild := false // If the existing build is a success and has the image pushspec set, it can be reused. 
- if existingMosbState.IsBuildSuccess() && existingMosb.Status.FinalImagePushspec != "" { - klog.Infof("Existing MachineOSBuild %q found, reusing image %q by assigning to MachineOSConfig %q", existingMosb.Name, existingMosb.Status.FinalImagePushspec, mosc.Name) - canBeReused = true + if existingMosbState.IsBuildSuccess() && existingMosb.Status.DigestedImagePushSpec != "" { + klog.Infof("Existing MachineOSBuild %q found, checking if image %q still exists", existingMosb.Name, existingMosb.Status.DigestedImagePushSpec) + // Get the certs + if err := b.getCerts(); err != nil { + klog.Warningf("Could not get certs: %v", err) + } + defer os.RemoveAll(certsDir) + + // Get the auth file + authfilePath, err := b.getAuthFilePath(existingMosb, mosc.Name) + if err != nil { + klog.Warningf("Could not get auth file path: %v", err) + } + defer os.RemoveAll(authfilePath) + + image := string(existingMosb.Spec.RenderedImagePushSpec) + inspect, _, err := daemon.ImageInspect(image, authfilePath) + if inspect != nil && err == nil { + klog.Infof("Existing MachineOSBuild %q found, reusing image %q by assigning to MachineOSConfig %q", existingMosb.Name, image, mosc.Name) + canBeReused = true + } else { + klog.Infof("Existing MachineOSBuild image %q no longer exists, skipping reuse. Got error: %v", image, err) + imageNeedsRebuild = true + + // Delete the MOSB so that we can rebuild since the image associated with it doesn't exist anymore + klog.Infof("Deleting MachineOSBuild %q so we can rebuild it to create a new image", existingMosb.Name) + err := b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Delete(ctx, existingMosb.Name, metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return imageNeedsRebuild, fmt.Errorf("could not delete MachineOSBuild %q: %w", existingMosb.Name, err) + } + return imageNeedsRebuild, nil + } } // If the existing build is in a transient state, it can be reused. 
@@ -462,29 +656,29 @@ func (b *buildReconciler) reuseExistingMachineOSBuildIfPossible(ctx context.Cont if canBeReused { // Stop any other running builds. if err := b.deleteOtherBuildsForMachineOSConfig(ctx, existingMosb, mosc); err != nil { - return fmt.Errorf("could not delete running builds for MachineOSConfig %q after reusing existing MachineOSBuild %q: %w", mosc.Name, existingMosb.Name, err) + return canBeReused, fmt.Errorf("could not delete running builds for MachineOSConfig %q after reusing existing MachineOSBuild %q: %w", mosc.Name, existingMosb.Name, err) } // Update the MachineOSConfig to use the preexisting MachineOSBuild. if err := b.updateMachineOSConfigStatus(ctx, mosc, existingMosb); err != nil { - return fmt.Errorf("could not update MachineOSConfig %q status to reuse preexisting MachineOSBuild %q: %w", mosc.Name, existingMosb.Name, err) + return canBeReused, fmt.Errorf("could not update MachineOSConfig %q status to reuse preexisting MachineOSBuild %q: %w", mosc.Name, existingMosb.Name, err) } } - return nil + return imageNeedsRebuild, nil } // Gets the MachineOSBuild status from the provided metav1.Object which can be // converted into a Builder. 
-func (b *buildReconciler) getMachineOSBuildStatusForBuilder(ctx context.Context, obj metav1.Object) (mcfgv1alpha1.MachineOSBuildStatus, *mcfgv1alpha1.MachineOSBuild, error) { +func (b *buildReconciler) getMachineOSBuildStatusForBuilder(ctx context.Context, obj metav1.Object) (mcfgv1.MachineOSBuildStatus, *mcfgv1.MachineOSBuild, error) { builder, err := buildrequest.NewBuilder(obj) if err != nil { - return mcfgv1alpha1.MachineOSBuildStatus{}, nil, fmt.Errorf("could not instantiate builder: %w", err) + return mcfgv1.MachineOSBuildStatus{}, nil, fmt.Errorf("could not instantiate builder: %w", err) } mosc, mosb, err := b.getMachineOSConfigAndMachineOSBuildForBuilder(builder) if err != nil { - return mcfgv1alpha1.MachineOSBuildStatus{}, nil, fmt.Errorf("could not get MachineOSConfig or MachineOSBuild for builder: %w", err) + return mcfgv1.MachineOSBuildStatus{}, nil, fmt.Errorf("could not get MachineOSConfig or MachineOSBuild for builder: %w", err) } observer := imagebuilder.NewJobImageBuildObserverFromBuilder(b.kubeclient, b.mcfgclient, mosb, mosc, builder) @@ -548,7 +742,7 @@ func (b *buildReconciler) updateMachineOSBuildWithStatusIfNeeded(ctx context.Con } // Sets the status on the MachineOSBuild object after comparing the statuses according to very specific state transitions. -func (b *buildReconciler) setStatusOnMachineOSBuildIfNeeded(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild, oldStatus, curStatus mcfgv1alpha1.MachineOSBuildStatus) error { +func (b *buildReconciler) setStatusOnMachineOSBuildIfNeeded(ctx context.Context, mosb *mcfgv1.MachineOSBuild, oldStatus, curStatus mcfgv1.MachineOSBuildStatus) error { // Compare the old status and the current status to determine if an update is // needed. This is handled according to very specific state transitions. 
isUpdateNeeded, reason := isMachineOSBuildStatusUpdateNeeded(oldStatus, curStatus) @@ -571,7 +765,7 @@ func (b *buildReconciler) setStatusOnMachineOSBuildIfNeeded(ctx context.Context, bs.SetBuildConditions(curStatus.Conditions) - bs.Build.Status.FinalImagePushspec = curStatus.FinalImagePushspec + bs.Build.Status.DigestedImagePushSpec = curStatus.DigestedImagePushSpec if bs.Build.Status.BuildStart == nil && curStatus.BuildStart != nil { bs.Build.Status.BuildStart = curStatus.BuildStart @@ -581,9 +775,9 @@ func (b *buildReconciler) setStatusOnMachineOSBuildIfNeeded(ctx context.Context, bs.Build.Status.BuildEnd = curStatus.BuildEnd } - bs.Build.Status.BuilderReference = curStatus.BuilderReference + bs.Build.Status.Builder = curStatus.Builder - _, err = b.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().UpdateStatus(ctx, bs.Build, metav1.UpdateOptions{}) + _, err = b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().UpdateStatus(ctx, bs.Build, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("could not update status on MachineOSBuild %q: %w", mosb.Name, err) } @@ -614,7 +808,7 @@ func (b *buildReconciler) updateMachineOSBuildWithStatus(ctx context.Context, ob } // Resolves the MachineOSBuild for a given builder. -func (b *buildReconciler) getMachineOSBuildForBuilder(builder buildrequest.Builder) (*mcfgv1alpha1.MachineOSBuild, error) { +func (b *buildReconciler) getMachineOSBuildForBuilder(builder buildrequest.Builder) (*mcfgv1.MachineOSBuild, error) { mosbName, err := builder.MachineOSBuild() if err != nil { return nil, fmt.Errorf("could not get MachineOSBuild name from builder %q: %w", builder.GetName(), err) @@ -629,7 +823,7 @@ func (b *buildReconciler) getMachineOSBuildForBuilder(builder buildrequest.Build } // Resolves both the MachineOSConfig and MachienOSBuild for a given Builder. 
-func (b *buildReconciler) getMachineOSConfigAndMachineOSBuildForBuilder(builder buildrequest.Builder) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { +func (b *buildReconciler) getMachineOSConfigAndMachineOSBuildForBuilder(builder buildrequest.Builder) (*mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, error) { mosb, err := b.getMachineOSBuildForBuilder(builder) if err != nil { return nil, nil, err @@ -644,7 +838,7 @@ func (b *buildReconciler) getMachineOSConfigAndMachineOSBuildForBuilder(builder } // Resolves the MachineOSConfig for a given builder. -func (b *buildReconciler) getMachineOSConfigForBuilder(builder buildrequest.Builder) (*mcfgv1alpha1.MachineOSConfig, error) { +func (b *buildReconciler) getMachineOSConfigForBuilder(builder buildrequest.Builder) (*mcfgv1.MachineOSConfig, error) { moscName, err := builder.MachineOSConfig() if err != nil { return nil, fmt.Errorf("could not get MachineOSConfig name from builder %q: %w", builder.GetName(), err) @@ -659,22 +853,49 @@ func (b *buildReconciler) getMachineOSConfigForBuilder(builder buildrequest.Buil } // Deletes the underlying build objects for a given MachineOSBuild. 
-func (b *buildReconciler) deleteBuilderForMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) deleteBuilderForMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { if err := imagebuilder.NewJobImageBuildCleaner(b.kubeclient, b.mcfgclient, mosb).Clean(ctx); err != nil { return fmt.Errorf("could not clean build %s: %w", mosb.Name, err) } - + // Delete the image associated with the MOSB first + moscName, err := utils.GetRequiredLabelValueFromObject(mosb, constants.MachineOSConfigNameLabelKey) + if err != nil { + klog.Warningf("could not get MachineOSConfig name for MachineOSBuild %s: %v, cannot delete image", mosb.Name, err) + return nil + } + if err := b.deleteMOSBImage(mosb, moscName); err != nil { + klog.Warningf("could not delete image for MachineOSBuild %s: %v", mosb.Name, err) + } + // Delete the digest configmap if it exists + // This is created by the wait-for-done container once the image has been built and pushed + // and stays around when the build is successful + err = b.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, utils.GetDigestConfigMapName(mosb), metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return fmt.Errorf("could not delete digest configmap for MachineOSBuild %s for MachineOSConfig %s: %w", mosb.Name, moscName, err) + } return nil } // Deletes the MachineOSBuild. 
-func (b *buildReconciler) deleteMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) deleteMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { moscName, err := utils.GetRequiredLabelValueFromObject(mosb, constants.MachineOSConfigNameLabelKey) if err != nil { moscName = "" } + // Delete the image associated with the MOSB first + if err := b.deleteMOSBImage(mosb, moscName); err != nil { + klog.Warningf("could not delete image for MachineOSBuild %s for MachineOSConfig %s: %v", mosb.Name, moscName, err) + } + + // Delete the digest configmap if it exists + // This is created by the wait-for-done container once the image has been built and pushed + // and stays around when the build is successful + err = b.kubeclient.CoreV1().ConfigMaps(ctrlcommon.MCONamespace).Delete(ctx, utils.GetDigestConfigMapName(mosb), metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + return fmt.Errorf("could not delete digest configmap for MachineOSBuild %s for MachineOSConfig %s: %w", mosb.Name, moscName, err) + } - err = b.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Delete(ctx, mosb.Name, metav1.DeleteOptions{}) + err = b.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Delete(ctx, mosb.Name, metav1.DeleteOptions{}) if err == nil { klog.Infof("Deleted MachineOSBuild %s for MachineOSConfig %s", mosb.Name, moscName) return nil @@ -688,8 +909,151 @@ func (b *buildReconciler) deleteMachineOSBuild(ctx context.Context, mosb *mcfgv1 return fmt.Errorf("could not delete MachineOSBuild %s for MachineOSConfig %s: %w", mosb.Name, moscName, err) } +func (b *buildReconciler) getAuthFilePath(mosb *mcfgv1.MachineOSBuild, moscName string) (string, error) { + pushSecret := mosb.GetAnnotations()[constants.RenderedImagePushSecretAnnotationKey] + secret, err := b.kubeclient.CoreV1().Secrets(ctrlcommon.MCONamespace).Get(context.TODO(), pushSecret, metav1.GetOptions{}) + if err != nil { + return "", 
fmt.Errorf("could not get rendered push secret for MachineOSConfig %q: %w", moscName, err) + } + if secret.Type != corev1.SecretTypeDockerConfigJson { + return "", fmt.Errorf("rendered push secret for MachineOSConfig %q is not of type %q, cannot delete image", moscName, corev1.SecretTypeDockerConfigJson) + } + var authConfig map[string]interface{} + data, ok := secret.Data[corev1.DockerConfigJsonKey] + if !ok { + return "", fmt.Errorf("rendered push secret for MachineOSConfig %q does not have key %q, cannot delete image", moscName, corev1.DockerConfigJsonKey) + } + if err := json.Unmarshal(data, &authConfig); err != nil { + return "", fmt.Errorf("could not unmarshal rendered push secret for MachineOSConfig %q: %w, cannot delete image", moscName, err) + } + // Create a temp auth.json file + authFile, err := os.CreateTemp("", "auth-*.json") + if err != nil { + return "", fmt.Errorf("could not create temp file for rendered push secret for MachineOSConfig %q: %w, cannot delete image", moscName, err) + } + if err := os.WriteFile(authFile.Name(), data, 0o644); err != nil { + return "", fmt.Errorf("could not write temp auth file for rendered push secret for MachineOSConfig %q: %w, cannot delete image", moscName, err) + } + return authFile.Name(), nil +} + +func (b *buildReconciler) deleteMOSBImage(mosb *mcfgv1.MachineOSBuild, moscName string) error { + moscExists := true + _, err := b.listers.machineOSConfigLister.Get(moscName) + if k8serrors.IsNotFound(err) { + moscExists = false + } else if err != nil { + return fmt.Errorf("could not get MachineOSConfig for MachineOSBuild %q: %w", mosb.Name, err) + } + + if moscExists { + pool, err := b.listers.machineConfigPoolLister.Get(mosb.ObjectMeta.Labels[constants.TargetMachineConfigPoolLabelKey]) + if err != nil { + return fmt.Errorf("could not get MachineConfigPool from MachineOSBuild %q: %w", mosb.Name, err) + } + + nodes, err := helpers.GetNodesForPool(b.listers.machineConfigPoolLister, b.listers.nodeLister, pool) + if err != 
nil { + return fmt.Errorf("could not get nodes for MachineConfigPool %q: %w", pool.Name, err) + } + + for _, node := range nodes { + if node.GetAnnotations()[daemonconstants.CurrentImageAnnotationKey] == string(mosb.Status.DigestedImagePushSpec) || + node.GetAnnotations()[daemonconstants.DesiredImageAnnotationKey] == string(mosb.Status.DigestedImagePushSpec) { + // the image we are trying to delete is currently on a node or desired by a node + klog.Warningf("Image %s is currently applied on a node or desired by a node, will not delete", string(mosb.Status.DigestedImagePushSpec)) + return nil + } + } + } + + image := string(mosb.Spec.RenderedImagePushSpec) + isOpenShiftRegistry, err := b.isOpenShiftRegistry(image) + if err != nil { + return err + } + if isOpenShiftRegistry { + klog.Infof("Deleting image %s from internal registry for MachineOSBuild %s", image, mosb.Name) + // Use the openshift API to delete the image + ns, img, err := extractNSAndNameWithTag(image) + if err != nil { + return err + } + if err := b.imageclient.ImageV1().ImageStreamTags(ns).Delete(context.TODO(), img, metav1.DeleteOptions{}); err != nil { + return fmt.Errorf("could not delete image %s from internal registry for MachineOSBuild %s: %w", image, mosb.Name, err) + } + return nil + } + + klog.Infof("Deleting image %s from external registry using skopeo for MachineOSBuild %s", image, mosb.Name) + // Create the authfile for the rendered push secret + authFile, err := b.getAuthFilePath(mosb, moscName) + if err != nil { + return err + } + defer os.RemoveAll(authFile) + // Create the certs directory to be used by skopeo + if err := b.getCerts(); err != nil { + return err + } + defer os.RemoveAll(certsDir) + + if err := daemon.DeleteImage(image, authFile); err != nil { + return fmt.Errorf("could not delete image %s from registry for MachineOSBuild %s: %w", image, mosb.Name, err) + } + klog.Infof("Deleted image %s from registry for MachineOSBuild %s", image, mosb.Name) + return nil +} + +// 
getInternalRegistryHostnames discovers OpenShift internal registry hostnames +func (b *buildReconciler) getInternalRegistryHostnames(ctx context.Context) ([]string, error) { + var hostnames []string + + // Get the list of services in the openshift-image-registry namespace (cluster-local) + services, err := b.kubeclient.CoreV1().Services("openshift-image-registry").List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, svc := range services.Items { + clusterHostname := fmt.Sprintf("%s.%s.svc", svc.Name, svc.Namespace) + if len(svc.Spec.Ports) > 0 { + port := svc.Spec.Ports[0].Port + hostnames = append(hostnames, fmt.Sprintf("%s:%d", clusterHostname, port)) + } else { + hostnames = append(hostnames, clusterHostname) + } + } + + // Get the list of routes in the openshift-image-registry namespace (external access) + routes, err := b.routeclient.RouteV1().Routes("openshift-image-registry").List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, route := range routes.Items { + if route.Spec.Host != "" { + hostnames = append(hostnames, route.Spec.Host) + } + } + + return hostnames, nil +} + +// isOpenShiftRegistry checks if the imageRef points to one of the known internal hostnames +func (b *buildReconciler) isOpenShiftRegistry(imageRef string) (bool, error) { + registryHosts, err := b.getInternalRegistryHostnames(context.TODO()) + if err != nil { + return false, err + } + for _, host := range registryHosts { + if strings.HasPrefix(imageRef, host) { + return true, nil + } + } + return false, nil +} + // Finds and deletes any other running builds for a given MachineOSConfig. 
-func (b *buildReconciler) deleteOtherBuildsForMachineOSConfig(ctx context.Context, newMosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) deleteOtherBuildsForMachineOSConfig(ctx context.Context, newMosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) error { mosbList, err := b.getMachineOSBuildsForMachineOSConfig(mosc) if err != nil { return fmt.Errorf("could not get MachineOSBuilds for MachineOSConfig %s: %w", mosc.Name, err) @@ -716,7 +1080,7 @@ func (b *buildReconciler) deleteOtherBuildsForMachineOSConfig(ctx context.Contex } // Gets a list of MachineOSBuilds for a given MachineOSConfig. -func (b *buildReconciler) getMachineOSBuildsForMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig) ([]*mcfgv1alpha1.MachineOSBuild, error) { +func (b *buildReconciler) getMachineOSBuildsForMachineOSConfig(mosc *mcfgv1.MachineOSConfig) ([]*mcfgv1.MachineOSBuild, error) { sel := utils.MachineOSBuildForPoolSelector(mosc) mosbList, err := b.machineOSBuildLister.List(sel) @@ -771,11 +1135,11 @@ func (b *buildReconciler) timeSyncOperation(name string, toRun func() error) err func (b *buildReconciler) syncAll(ctx context.Context) error { err := b.timeSyncOperation("MachineOSConfigs and MachineOSBuilds", func() error { if err := b.syncMachineOSConfigs(ctx); err != nil { - return err + return fmt.Errorf("could not sync MachineOSConfigs: %w", err) } if err := b.syncMachineOSBuilds(ctx); err != nil { - return err + return fmt.Errorf("could not sync MachineOSBuilds: %w", err) } return nil @@ -815,8 +1179,21 @@ func (b *buildReconciler) syncMachineOSBuilds(ctx context.Context) error { // Syncs a given MachineOSBuild. In this case, sync means that if the // MachineOSBuild is not in a terminal or transient state and does not have a // builder associated with it that one should be created. 
-func (b *buildReconciler) syncMachineOSBuild(ctx context.Context, mosb *mcfgv1alpha1.MachineOSBuild) error { +func (b *buildReconciler) syncMachineOSBuild(ctx context.Context, mosb *mcfgv1.MachineOSBuild) error { return b.timeObjectOperation(mosb, syncingVerb, func() error { + + // It could be the case that the MCP the mosb in queue was targeting no longer is valid + mcp, err := b.machineConfigPoolLister.Get(mosb.ObjectMeta.Labels[constants.TargetMachineConfigPoolLabelKey]) + if err != nil { + return fmt.Errorf("could not get MachineConfigPool from MachineOSBuild %q: %w", mosb.Name, err) + } + + // An mosb which had previously been forgotten by the queue and is no longer desired by the mcp should not build + if mosb.ObjectMeta.Labels[constants.RenderedMachineConfigLabelKey] != mcp.Spec.Configuration.Name { + klog.Infof("The MachineOSBuild %q which builds the rendered Machine Config %q is no longer desired by the MCP %q", mosb.Name, mosb.ObjectMeta.Labels[constants.RenderedMachineConfigLabelKey], mosb.ObjectMeta.Labels[constants.TargetMachineConfigPoolLabelKey]) + return nil + } + mosbState := ctrlcommon.NewMachineOSBuildState(mosb) if mosbState.IsInTerminalState() { @@ -886,7 +1263,7 @@ func (b *buildReconciler) syncMachineOSConfigs(ctx context.Context) error { // MachineOSConfig does not have any MachineOSBuilds associated with it or the // one it thinks is its current build does not exist, then a new MachineOSBuild // should be created. 
-func (b *buildReconciler) syncMachineOSConfig(ctx context.Context, mosc *mcfgv1alpha1.MachineOSConfig) error { +func (b *buildReconciler) syncMachineOSConfig(ctx context.Context, mosc *mcfgv1.MachineOSConfig) error { return b.timeObjectOperation(mosc, syncingVerb, func() error { mosbs, err := b.getMachineOSBuildsForMachineOSConfig(mosc) if err != nil { @@ -899,7 +1276,7 @@ func (b *buildReconciler) syncMachineOSConfig(ctx context.Context, mosc *mcfgv1a // If we found the currently-associated MachineOSBuild for this // MachineOSConfig, we're done. We prefer ones with the full image pullspec. if isMachineOSBuildCurrentForMachineOSConfigWithPullspec(mosc, mosb) { - klog.Infof("MachineOSConfig %q has current build annotation and current image pullspec %q for MachineOSBuild %q", mosc.Name, mosc.Status.CurrentImagePullspec, mosb.Name) + klog.Infof("MachineOSConfig %q has current build annotation and current image pullspec %q for MachineOSBuild %q", mosc.Name, mosc.Status.CurrentImagePullSpec, mosb.Name) return nil } } @@ -914,10 +1291,43 @@ func (b *buildReconciler) syncMachineOSConfig(ctx context.Context, mosc *mcfgv1a } klog.Infof("No matching MachineOSBuild found for MachineOSConfig %q, will create one", mosc.Name) - if err := b.createNewMachineOSBuildOrReuseExisting(ctx, mosc); err != nil { + if err := b.createNewMachineOSBuildOrReuseExisting(ctx, mosc, false); err != nil { return fmt.Errorf("could not create new or reuse existing MachineOSBuild for MachineOSConfig %q: %w", mosc.Name, err) } return nil }) } + +// Syncs all existing and opted-in MachineConfigPools. 
+func (b *buildReconciler) syncMachineConfigPools(ctx context.Context) error { + err := b.timeSyncOperation("MachineConfigPools", func() error { + mcps, err := b.machineConfigPoolLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("could not list MachineConfigPools: %w", err) + } + + for _, mcp := range mcps { + if err := b.syncMachineConfigPool(ctx, mcp); err != nil { + return fmt.Errorf("could not sync MachineConfigPool %q: %w", mcp.Name, err) + } + } + + return nil + }) + + if err != nil { + return fmt.Errorf("could not sync MachineConfigPools: %w", err) + } + + return nil +} + +// Syncs a given MachineConfigPool by cross-checking it against known +// MachineOSConfigs and MachineOSBuilds, which will create a new MachineOSBuild, +// if needed. +func (b *buildReconciler) syncMachineConfigPool(ctx context.Context, mcp *mcfgv1.MachineConfigPool) error { + return b.timeObjectOperation(mcp, syncingVerb, func() error { + return b.createNewMachineOSBuildOrReuseExistingForPoolChange(ctx, mcp) + }) +} diff --git a/pkg/controller/build/utils/helpers.go b/pkg/controller/build/utils/helpers.go index 149845eca9..f86c8cca4c 100644 --- a/pkg/controller/build/utils/helpers.go +++ b/pkg/controller/build/utils/helpers.go @@ -6,7 +6,6 @@ import ( "github.com/distribution/reference" "github.com/opencontainers/go-digest" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -19,7 +18,7 @@ func GetKindForObject(obj runtime.Object) (string, error) { corev1.AddToScheme(s) batchv1.AddToScheme(s) mcfgv1.AddToScheme(s) - mcfgv1alpha1.AddToScheme(s) + mcfgv1.AddToScheme(s) gvks, _, err := s.ObjectKinds(obj) if err != nil { @@ -56,41 +55,49 @@ func parseImagePullspecWithDigest(pullspec string, imageDigest digest.Digest) (s } // Computes the AdditionalTrustBundle ConfigMap name based upon the 
MachineConfigPool name. -func GetAdditionalTrustBundleConfigMapName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetAdditionalTrustBundleConfigMapName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("additionaltrustbundle-%s", getFieldFromMachineOSBuild(mosb)) } // Computes the Containerfile ConfigMap name. -func GetContainerfileConfigMapName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetContainerfileConfigMapName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("containerfile-%s", getFieldFromMachineOSBuild(mosb)) } // Computes the MachineConfig ConfigMap name. -func GetMCConfigMapName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetMCConfigMapName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("mc-%s", getFieldFromMachineOSBuild(mosb)) } +func GetEtcPolicyConfigMapName(mosb *mcfgv1.MachineOSBuild) string { + return fmt.Sprintf("etc-policy-%s", getFieldFromMachineOSBuild(mosb)) +} + +func GetEtcRegistriesConfigMapName(mosb *mcfgv1.MachineOSBuild) string { + return fmt.Sprintf("etc-registries-%s", getFieldFromMachineOSBuild(mosb)) +} + // Computes the build job name. -func GetBuildJobName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetBuildJobName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("build-%s", getFieldFromMachineOSBuild(mosb)) } // Computes the digest configmap name. -func GetDigestConfigMapName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetDigestConfigMapName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("digest-%s", getFieldFromMachineOSBuild(mosb)) } // Computes the base image pull secret name. -func GetBasePullSecretName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetBasePullSecretName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("base-%s", getFieldFromMachineOSBuild(mosb)) } // Computes the final image push secret name. 
-func GetFinalPushSecretName(mosb *mcfgv1alpha1.MachineOSBuild) string { +func GetFinalPushSecretName(mosb *mcfgv1.MachineOSBuild) string { return fmt.Sprintf("final-%s", getFieldFromMachineOSBuild(mosb)) } -func getFieldFromMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild) string { +func getFieldFromMachineOSBuild(mosb *mcfgv1.MachineOSBuild) string { return mosb.Name } diff --git a/pkg/controller/build/utils/lookups.go b/pkg/controller/build/utils/lookups.go index 21c267c3a2..e5465c2d3b 100644 --- a/pkg/controller/build/utils/lookups.go +++ b/pkg/controller/build/utils/lookups.go @@ -4,21 +4,21 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" - mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" + corelistersv1 "k8s.io/client-go/listers/core/v1" ) // Holds a group of listers used for resolving OCL objects to other OCL objects // and to MachineConfigPools. type Listers struct { // TODO: Consider adding full mcfgclients too. - MachineOSBuildLister mcfglistersv1alpha1.MachineOSBuildLister - MachineOSConfigLister mcfglistersv1alpha1.MachineOSConfigLister + MachineOSBuildLister mcfglistersv1.MachineOSBuildLister + MachineOSConfigLister mcfglistersv1.MachineOSConfigLister MachineConfigPoolLister mcfglistersv1.MachineConfigPoolLister + NodeLister corelistersv1.NodeLister } // Gets a MachineConfigPool after first ensuring that the lister is not nil. @@ -41,7 +41,7 @@ func (l *Listers) listMachineConfigPools(sel labels.Selector) ([]*mcfgv1.Machine } // Gets a MachineOSConfig after first ensuring that the lister is not nil. 
-func (l *Listers) getMachineOSConfig(name string) (*mcfgv1alpha1.MachineOSConfig, error) { +func (l *Listers) getMachineOSConfig(name string) (*mcfgv1.MachineOSConfig, error) { if l.MachineOSConfigLister == nil { return nil, fmt.Errorf("required MachineOSConfigLister is nil") } @@ -51,7 +51,7 @@ func (l *Listers) getMachineOSConfig(name string) (*mcfgv1alpha1.MachineOSConfig // Lists all MachineOSConfigs matching the given selector after first // ensuring that the lister is not nil. -func (l *Listers) listMachineOSConfigs(sel labels.Selector) ([]*mcfgv1alpha1.MachineOSConfig, error) { +func (l *Listers) listMachineOSConfigs(sel labels.Selector) ([]*mcfgv1.MachineOSConfig, error) { if l.MachineOSConfigLister == nil { return nil, fmt.Errorf("required MachineOSConfigLister is nil") } @@ -60,7 +60,7 @@ func (l *Listers) listMachineOSConfigs(sel labels.Selector) ([]*mcfgv1alpha1.Mac } // Gets a MachineOSBuild after first ensuring that the lister is not nil. -func (l *Listers) getMachineOSBuild(name string) (*mcfgv1alpha1.MachineOSBuild, error) { +func (l *Listers) getMachineOSBuild(name string) (*mcfgv1.MachineOSBuild, error) { if l.MachineOSBuildLister == nil { return nil, fmt.Errorf("required MachineOSBuildLister is nil") } @@ -70,7 +70,7 @@ func (l *Listers) getMachineOSBuild(name string) (*mcfgv1alpha1.MachineOSBuild, // Lists all MachineOSBuilds matching the given selector after first // ensuring that the lister is not nil. -func (l *Listers) listMachineOSBuilds(sel labels.Selector) ([]*mcfgv1alpha1.MachineOSBuild, error) { +func (l *Listers) listMachineOSBuilds(sel labels.Selector) ([]*mcfgv1.MachineOSBuild, error) { if l.MachineOSBuildLister == nil { return nil, fmt.Errorf("required MachineOSBuildLister is nil") } @@ -81,7 +81,7 @@ func (l *Listers) listMachineOSBuilds(sel labels.Selector) ([]*mcfgv1alpha1.Mach // Gets the first MachineOSConfig found for a given MachineConfigPool. 
Use // GetMachineOSConfigForMachineConfigPoolStrict() if one wants to ensure that // only a single MachineOSConfig is found for a MachineConfigPool. -func GetMachineOSConfigForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1alpha1.MachineOSConfig, error) { +func GetMachineOSConfigForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1.MachineOSConfig, error) { moscList, err := listers.listMachineOSConfigs(labels.Everything()) if err != nil { return nil, fmt.Errorf("could not list MachineOSConfigs: %w", err) @@ -93,7 +93,7 @@ func GetMachineOSConfigForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, liste } } - errNotFound := k8serrors.NewNotFound(mcfgv1alpha1.GroupVersion.WithResource("machineosconfigs").GroupResource(), "") + errNotFound := k8serrors.NewNotFound(mcfgv1.GroupVersion.WithResource("machineosconfigs").GroupResource(), "") return nil, fmt.Errorf("could not find MachineOSConfig for MachineConfigPool %q: %w", mcp.Name, errNotFound) } @@ -101,13 +101,13 @@ func GetMachineOSConfigForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, liste // GetMachineOSConfigForMachineConfigPool(), this version will return an error // if more than one MachineOSConfig is found associated with a given // MachineConfigPool. 
-func GetMachineOSConfigForMachineConfigPoolStrict(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1alpha1.MachineOSConfig, error) { +func GetMachineOSConfigForMachineConfigPoolStrict(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1.MachineOSConfig, error) { moscList, err := listers.listMachineOSConfigs(labels.Everything()) if err != nil { return nil, fmt.Errorf("could not list MachineOSConfigs: %w", err) } - found := &mcfgv1alpha1.MachineOSConfig{} + found := &mcfgv1.MachineOSConfig{} others := []string{} for _, mosc := range moscList { @@ -121,7 +121,7 @@ func GetMachineOSConfigForMachineConfigPoolStrict(mcp *mcfgv1.MachineConfigPool, } if found == nil { - errNotFound := k8serrors.NewNotFound(mcfgv1alpha1.GroupVersion.WithResource("machineosconfigs").GroupResource(), "") + errNotFound := k8serrors.NewNotFound(mcfgv1.GroupVersion.WithResource("machineosconfigs").GroupResource(), "") return nil, fmt.Errorf("could not find MachineOSConfig for MachineConfigPool %q: %w", mcp.Name, errNotFound) } @@ -133,7 +133,7 @@ func GetMachineOSConfigForMachineConfigPoolStrict(mcp *mcfgv1.MachineConfigPool, } // Gets the MachineOSConfig for a given MachineOSBuild. -func GetMachineOSConfigForMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, listers *Listers) (*mcfgv1alpha1.MachineOSConfig, error) { +func GetMachineOSConfigForMachineOSBuild(mosb *mcfgv1.MachineOSBuild, listers *Listers) (*mcfgv1.MachineOSConfig, error) { moscName, err := GetRequiredLabelValueFromObject(mosb, constants.MachineOSConfigNameLabelKey) if err != nil { return nil, fmt.Errorf("could not identify MachineOSConfig for MachineOSBuild %q: %w", mosb.Name, err) @@ -148,7 +148,7 @@ func GetMachineOSConfigForMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, list } // Gets the MachineOSBuild for a given MachineConfigPool. 
-func GetMachineOSBuildForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1alpha1.MachineOSBuild, error) { +func GetMachineOSBuildForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1.MachineOSBuild, error) { mosc, err := GetMachineOSConfigForMachineConfigPool(mcp, listers) if err != nil { return nil, err @@ -158,7 +158,7 @@ func GetMachineOSBuildForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, lister } // Gets the MachineOSBuild which matches a given image pullspec. -func GetMachineOSBuildForImagePullspec(pullspec string, listers *Listers) (*mcfgv1alpha1.MachineOSBuild, error) { +func GetMachineOSBuildForImagePullspec(pullspec string, listers *Listers) (*mcfgv1.MachineOSBuild, error) { if pullspec == "" { return nil, fmt.Errorf("required pullspec empty") } @@ -169,17 +169,17 @@ func GetMachineOSBuildForImagePullspec(pullspec string, listers *Listers) (*mcfg } for _, mosb := range mosbList { - if mosb.Status.FinalImagePushspec == pullspec { + if string(mosb.Status.DigestedImagePushSpec) == pullspec { return mosb, nil } } - errNotFound := k8serrors.NewNotFound(mcfgv1alpha1.GroupVersion.WithResource("machineosbuilds").GroupResource(), "") + errNotFound := k8serrors.NewNotFound(mcfgv1.GroupVersion.WithResource("machineosbuilds").GroupResource(), "") return nil, fmt.Errorf("could not find MachineOSBuild with image pullspec %q: %w", pullspec, errNotFound) } // Gets both the MachineOSConfig and the MachineOSBuild for a given MachineConfigPool. 
-func GetMachineOSConfigAndMachineOSBuildForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { +func GetMachineOSConfigAndMachineOSBuildForMachineConfigPool(mcp *mcfgv1.MachineConfigPool, listers *Listers) (*mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, error) { mosc, err := GetMachineOSConfigForMachineConfigPoolStrict(mcp, listers) if err != nil { return nil, nil, err @@ -194,7 +194,7 @@ func GetMachineOSConfigAndMachineOSBuildForMachineConfigPool(mcp *mcfgv1.Machine } // Gets the MachineOSBuild that belongs to the given MachineConfigPool and MachineOSConfig. Ensures that only a single MachineOSBuild is returned. -func getMachineOSBuildForMachineConfigPoolAndMachineOSConfig(mcp *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, listers *Listers) (*mcfgv1alpha1.MachineOSBuild, error) { +func getMachineOSBuildForMachineConfigPoolAndMachineOSConfig(mcp *mcfgv1.MachineConfigPool, mosc *mcfgv1.MachineOSConfig, listers *Listers) (*mcfgv1.MachineOSBuild, error) { sel := MachineOSBuildSelector(mosc, mcp) mosbList, err := listers.listMachineOSBuilds(sel) @@ -207,7 +207,7 @@ func getMachineOSBuildForMachineConfigPoolAndMachineOSConfig(mcp *mcfgv1.Machine } if len(mosbList) == 0 { - errNotFound := k8serrors.NewNotFound(mcfgv1alpha1.GroupVersion.WithResource("machineosbuilds").GroupResource(), "") + errNotFound := k8serrors.NewNotFound(mcfgv1.GroupVersion.WithResource("machineosbuilds").GroupResource(), "") return nil, fmt.Errorf("could not find MachineOSBuilds for MachineConfigPool %q and MachineOSConfig %q: %w", mcp.Name, mosc.Name, errNotFound) } @@ -221,7 +221,7 @@ func getMachineOSBuildForMachineConfigPoolAndMachineOSConfig(mcp *mcfgv1.Machine } // Gets the MachineConfigPool for a given MachineOSBuild. 
-func GetMachineConfigPoolForMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, listers *Listers) (*mcfgv1.MachineConfigPool, error) { +func GetMachineConfigPoolForMachineOSBuild(mosb *mcfgv1.MachineOSBuild, listers *Listers) (*mcfgv1.MachineConfigPool, error) { mcpName, err := GetRequiredLabelValueFromObject(mosb, constants.TargetMachineConfigPoolLabelKey) if err != nil { return nil, fmt.Errorf("could not identify MachineConfigPool from MachineOSBuild %q: %w", mosb.Name, err) @@ -236,7 +236,7 @@ func GetMachineConfigPoolForMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, li } // Gets the MachineConfigPool for a given MachineOSConfig. -func GetMachineConfigPoolForMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig, listers *Listers) (*mcfgv1.MachineConfigPool, error) { +func GetMachineConfigPoolForMachineOSConfig(mosc *mcfgv1.MachineOSConfig, listers *Listers) (*mcfgv1.MachineConfigPool, error) { mcp, err := listers.getMachineConfigPool(mosc.Spec.MachineConfigPool.Name) if err != nil { return nil, fmt.Errorf("could not get MachineConfigPool %q for MachineOSConfig %q: %w", mosc.Spec.MachineConfigPool.Name, mosc.Name, err) diff --git a/pkg/controller/build/utils/selectors.go b/pkg/controller/build/utils/selectors.go index 590eaf8e48..2f4a393f19 100644 --- a/pkg/controller/build/utils/selectors.go +++ b/pkg/controller/build/utils/selectors.go @@ -4,7 +4,6 @@ import ( "strings" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/controller/build/constants" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,7 +13,7 @@ import ( // Creates the labels for a given MachineOSBuild from the provided // MachineOSConfig and MachineConfigPool. 
-func GetMachineOSBuildLabels(mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) map[string]string { +func GetMachineOSBuildLabels(mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) map[string]string { return map[string]string{ constants.TargetMachineConfigPoolLabelKey: mcp.Name, constants.RenderedMachineConfigLabelKey: mcp.Spec.Configuration.Name, @@ -24,13 +23,13 @@ func GetMachineOSBuildLabels(mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.Mac // Creates a selector for a MachineOSBuild that matches the given // MachineOSConfig and MachineConfigPool. -func MachineOSBuildSelector(mosc *mcfgv1alpha1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) labels.Selector { +func MachineOSBuildSelector(mosc *mcfgv1.MachineOSConfig, mcp *mcfgv1.MachineConfigPool) labels.Selector { return labels.SelectorFromSet(GetMachineOSBuildLabels(mosc, mcp)) } // Creates a selector for all MachineOSBuilds which are associated with a given // MachineOSConfig. -func MachineOSBuildForPoolSelector(mosc *mcfgv1alpha1.MachineOSConfig) labels.Selector { +func MachineOSBuildForPoolSelector(mosc *mcfgv1.MachineOSConfig) labels.Selector { return labels.SelectorFromSet(map[string]string{ constants.TargetMachineConfigPoolLabelKey: mosc.Spec.MachineConfigPool.Name, constants.MachineOSConfigNameLabelKey: mosc.Name, @@ -60,13 +59,13 @@ func EphemeralBuildObjectSelector() labels.Selector { // Creates a selector for looking up a builder, configmap, or secret associated // with a given MachineOSBuild and MachineOSConfig. 
-func EphemeralBuildObjectSelectorForSpecificBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (labels.Selector, error) { +func EphemeralBuildObjectSelectorForSpecificBuild(mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (labels.Selector, error) { selector := labelsToSelector([]string{ constants.EphemeralBuildObjectLabelKey, constants.OnClusterLayeringLabelKey, }) - renderedMCReq, err := labels.NewRequirement(constants.RenderedMachineConfigLabelKey, selection.Equals, []string{mosb.Spec.DesiredConfig.Name}) + renderedMCReq, err := labels.NewRequirement(constants.RenderedMachineConfigLabelKey, selection.Equals, []string{mosb.Spec.MachineConfig.Name}) if err != nil { return nil, err } @@ -96,7 +95,7 @@ func EphemeralBuildObjectSelectorForSpecificBuild(mosb *mcfgv1alpha1.MachineOSBu // Fetches the MachineConfigPool name from either the MachineOSBuild or the // MachineOSConfig. For MachineOSBuilds, this value is found as a label. -func getMachineConfigPoolNameFromMachineOSConfigOrMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (string, error) { +func getMachineConfigPoolNameFromMachineOSConfigOrMachineOSBuild(mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (string, error) { if mosc != nil { return mosc.Spec.MachineConfigPool.Name, nil } @@ -106,7 +105,7 @@ func getMachineConfigPoolNameFromMachineOSConfigOrMachineOSBuild(mosb *mcfgv1alp // Fetches the MachineOSBuild name from either the MachineOSBuild or the // MachineOSConfig. For MachineOSConfigs, this value is found as a label. 
-func getMachineOSBuildNameFromMachineOSConfigOrMachineOSBuild(mosb *mcfgv1alpha1.MachineOSBuild, mosc *mcfgv1alpha1.MachineOSConfig) (string, error) { +func getMachineOSBuildNameFromMachineOSConfigOrMachineOSBuild(mosb *mcfgv1.MachineOSBuild, mosc *mcfgv1.MachineOSConfig) (string, error) { if mosb != nil { return mosb.Name, nil } @@ -161,7 +160,7 @@ func IsObjectCreatedByBuildController(obj metav1.Object) bool { return true } - if _, ok := obj.(*mcfgv1alpha1.MachineOSBuild); ok { + if _, ok := obj.(*mcfgv1.MachineOSBuild); ok { return true } diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index 9c6078bca9..61d786b349 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -10,6 +10,15 @@ const ( // SimpleContentAccessSecret is the secret that holds the SimpleContentAccess cert which provides RHEL entitlements SimpleContentAccessSecretName = "etc-pki-entitlement" + // GlobalPullSecretName is the name of the global pull secret + GlobalPullSecretName = "pull-secret" + + // OpenshiftConfigNamespace is the namespace that has the global pull secret + OpenshiftConfigNamespace = "openshift-config" + + // GlobalPullSecretCopyName is a copy of the cluster wide pull secret. In OCL, this is used if the base image pull secret is not provided. + GlobalPullSecretCopyName = "global-pull-secret-copy" + + // GeneratedByControllerVersionAnnotationKey is used to tag the machineconfigs generated by the controller with the version of the controller. 
GeneratedByControllerVersionAnnotationKey = "machineconfiguration.openshift.io/generated-by-controller-version" diff --git a/pkg/controller/common/controller_context.go b/pkg/controller/common/controller_context.go index 411e5847b9..4a8ff95111 100644 --- a/pkg/controller/common/controller_context.go +++ b/pkg/controller/common/controller_context.go @@ -6,10 +6,11 @@ import ( "time" configinformers "github.com/openshift/client-go/config/informers/externalversions" + imageinformers "github.com/openshift/client-go/image/informers/externalversions" machineinformersv1beta1 "github.com/openshift/client-go/machine/informers/externalversions" mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" - operatorinformers "github.com/openshift/client-go/operator/informers/externalversions" + routeinformers "github.com/openshift/client-go/route/informers/externalversions" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/machine-config-operator/internal/clients" @@ -60,6 +61,8 @@ type ControllerContext struct { OperatorInformerFactory operatorinformers.SharedInformerFactory KubeMAOSharedInformer informers.SharedInformerFactory MachineInformerFactory machineinformersv1beta1.SharedInformerFactory + ImageInformerFactory imageinformers.SharedInformerFactory + RouteInformerFactory routeinformers.SharedInformerFactory FeatureGateAccess featuregates.FeatureGateAccess @@ -76,6 +79,8 @@ type ControllerContext struct { func CreateControllerContext(ctx context.Context, cb *clients.Builder) *ControllerContext { client := cb.MachineConfigClientOrDie("machine-config-shared-informer") kubeClient := cb.KubeClientOrDie("kube-shared-informer") + imageClient := cb.ImageClientOrDie("image-shared-informer") + routeClient := cb.RouteClientOrDie("route-shared-informer") apiExtClient := cb.APIExtClientOrDie("apiext-shared-informer") configClient := 
cb.ConfigClientOrDie("config-shared-informer") operatorClient := cb.OperatorClientOrDie("operator-shared-informer") @@ -96,6 +101,8 @@ func CreateControllerContext(ctx context.Context, cb *clients.Builder) *Controll ) // this is needed to listen for changes in MAO user data secrets to re-apply the ones we define in the MCO (since we manage them) kubeMAOSharedInformer := informers.NewFilteredSharedInformerFactory(kubeClient, resyncPeriod()(), "openshift-machine-api", nil) + imageSharedInformer := imageinformers.NewSharedInformerFactory(imageClient, resyncPeriod()()) + routeSharedInformer := routeinformers.NewSharedInformerFactory(routeClient, resyncPeriod()()) // filter out CRDs that do not have the MCO label assignFilterLabels := func(opts *metav1.ListOptions) { @@ -150,5 +157,7 @@ func CreateControllerContext(ctx context.Context, cb *clients.Builder) *Controll ResyncPeriod: resyncPeriod(), KubeMAOSharedInformer: kubeMAOSharedInformer, FeatureGateAccess: featureGateAccessor, + ImageInformerFactory: imageSharedInformer, + RouteInformerFactory: routeSharedInformer, } } diff --git a/pkg/controller/common/layered_node_state.go b/pkg/controller/common/layered_node_state.go index bc2e413cd5..70b225dd56 100644 --- a/pkg/controller/common/layered_node_state.go +++ b/pkg/controller/common/layered_node_state.go @@ -4,7 +4,6 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" @@ -51,20 +50,20 @@ func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool, l return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp, layered) } -func (l *LayeredNodeState) IsDesiredEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { +func (l *LayeredNodeState) IsDesiredEqualToBuild(mosc 
*mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { return l.isDesiredImageEqualToBuild(mosc) && l.isDesiredMachineConfigEqualToBuild(mosb) } -func (l *LayeredNodeState) isDesiredImageEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig) bool { +func (l *LayeredNodeState) isDesiredImageEqualToBuild(mosc *mcfgv1.MachineOSConfig) bool { return l.isImageAnnotationEqualToBuild(daemonconsts.DesiredImageAnnotationKey, mosc) } -func (l *LayeredNodeState) IsCurrentImageEqualToBuild(mosc *mcfgv1alpha1.MachineOSConfig) bool { +func (l *LayeredNodeState) IsCurrentImageEqualToBuild(mosc *mcfgv1.MachineOSConfig) bool { return l.isImageAnnotationEqualToBuild(daemonconsts.CurrentImageAnnotationKey, mosc) } -func (l *LayeredNodeState) isDesiredMachineConfigEqualToBuild(mosb *mcfgv1alpha1.MachineOSBuild) bool { - return l.node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] == mosb.Spec.DesiredConfig.Name +func (l *LayeredNodeState) isDesiredMachineConfigEqualToBuild(mosb *mcfgv1.MachineOSBuild) bool { + return l.node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] == mosb.Spec.MachineConfig.Name } @@ -108,7 +107,7 @@ func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1 return val == "" || !ok } -func (l *LayeredNodeState) isImageAnnotationEqualToBuild(anno string, mosc *mcfgv1alpha1.MachineOSConfig) bool { +func (l *LayeredNodeState) isImageAnnotationEqualToBuild(anno string, mosc *mcfgv1.MachineOSConfig) bool { mosbs := NewMachineOSConfigState(mosc) val, ok := l.node.Annotations[anno] @@ -158,13 +157,13 @@ func (l *LayeredNodeState) SetDesiredStateFromPool(layered bool, mcp *mcfgv1.Mac l.node = node } -func (l *LayeredNodeState) SetDesiredStateFromMachineOSConfig(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) { +func (l *LayeredNodeState) SetDesiredStateFromMachineOSConfig(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) { node := l.Node() if node.Annotations == nil { node.Annotations = 
map[string]string{} } - node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] = mosb.Spec.DesiredConfig.Name + node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] = mosb.Spec.MachineConfig.Name moscs := NewMachineOSConfigState(mosc) if moscs.HasOSImage() { diff --git a/pkg/controller/common/mos_state.go b/pkg/controller/common/mos_state.go index 9bd13745b0..474a563bb6 100644 --- a/pkg/controller/common/mos_state.go +++ b/pkg/controller/common/mos_state.go @@ -2,7 +2,6 @@ package common import ( mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/apihelpers" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -13,28 +12,28 @@ import ( // single and consistent interface for that purpose. In this current state, we // do not perform any mutations. type MachineOSBuildState struct { - Build *mcfgv1alpha1.MachineOSBuild + Build *mcfgv1.MachineOSBuild } type MachineOSConfigState struct { - Config *mcfgv1alpha1.MachineOSConfig + Config *mcfgv1.MachineOSConfig } -func NewMachineOSConfigState(mosc *mcfgv1alpha1.MachineOSConfig) *MachineOSConfigState { +func NewMachineOSConfigState(mosc *mcfgv1.MachineOSConfig) *MachineOSConfigState { return &MachineOSConfigState{ Config: mosc, } } -func NewMachineOSBuildState(mosb *mcfgv1alpha1.MachineOSBuild) *MachineOSBuildState { +func NewMachineOSBuildState(mosb *mcfgv1.MachineOSBuild) *MachineOSBuildState { return &MachineOSBuildState{ Build: mosb, } } -func NewMachineOSBuildStateFromStatus(status mcfgv1alpha1.MachineOSBuildStatus) *MachineOSBuildState { +func NewMachineOSBuildStateFromStatus(status mcfgv1.MachineOSBuildStatus) *MachineOSBuildState { return &MachineOSBuildState{ - Build: &mcfgv1alpha1.MachineOSBuild{ + Build: &mcfgv1.MachineOSBuild{ Status: status, }, } @@ -42,21 +41,25 @@ func NewMachineOSBuildStateFromStatus(status mcfgv1alpha1.MachineOSBuildStatus) // Returns the OS 
image, if one is present. func (c *MachineOSConfigState) GetOSImage() string { - osImage := c.Config.Status.CurrentImagePullspec + osImage := string(c.Config.Status.CurrentImagePullSpec) return osImage } +func (c *MachineOSConfigState) MachineOSBuildIsCurrent(mosb *mcfgv1.MachineOSBuild) bool { + return mosb.Status.DigestedImagePushSpec == c.Config.Status.CurrentImagePullSpec +} + // Determines if a given MachineConfigPool has an available OS image. Returns // false if the annotation is missing or set to an empty string. func (c *MachineOSConfigState) HasOSImage() bool { - val := c.Config.Status.CurrentImagePullspec + val := string(c.Config.Status.CurrentImagePullSpec) return val != "" } // Clears the image pullspec annotation. func (c *MachineOSConfigState) ClearImagePullspec() { - c.Config.Spec.BuildInputs.RenderedImagePushspec = "" - c.Config.Status.CurrentImagePullspec = "" + c.Config.Spec.RenderedImagePushSpec = "" + c.Config.Status.CurrentImagePullSpec = "" } // Clears all build object conditions. @@ -66,32 +69,32 @@ func (b *MachineOSBuildState) ClearAllBuildConditions() { // Determines if an OS image build is a success. func (b *MachineOSBuildState) IsBuildSuccess() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildSucceeded) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuildSucceeded) } // Determines if an OS image build is pending. func (b *MachineOSBuildState) IsBuildPending() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuilding) } // Determines if an OS image build is prepared. 
func (b *MachineOSBuildState) IsBuildPrepared() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildPrepared) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuildPrepared) } // Determines if an OS image build is in progress. func (b *MachineOSBuildState) IsBuilding() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuilding) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuilding) } // Determines if an OS image build has failed. func (b *MachineOSBuildState) IsBuildFailure() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildFailed) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuildFailed) } // Determines if an OS image build has failed. func (b *MachineOSBuildState) IsBuildInterrupted() bool { - return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1alpha1.MachineOSBuildInterrupted) + return apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, mcfgv1.MachineOSBuildInterrupted) } // Determines if an OS image build has build conditions set on it. @@ -121,13 +124,8 @@ func (b *MachineOSBuildState) IsInTransientState() bool { } // Gets the transient state, if any is set. Otherwise, returns an empty string. 
-func (b *MachineOSBuildState) GetTransientState() mcfgv1alpha1.BuildProgress { - transientStates := []mcfgv1alpha1.BuildProgress{ - mcfgv1alpha1.MachineOSBuilding, - mcfgv1alpha1.MachineOSBuildPrepared, - } - - for _, transientState := range transientStates { +func (b *MachineOSBuildState) GetTransientState() mcfgv1.BuildProgress { + for transientState := range MachineOSBuildTransientStates() { if apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, transientState) { return transientState } @@ -137,14 +135,8 @@ func (b *MachineOSBuildState) GetTransientState() mcfgv1alpha1.BuildProgress { } // Gets the current terminal state, if any is set. Otherwise, returns an empty string. -func (b *MachineOSBuildState) GetTerminalState() mcfgv1alpha1.BuildProgress { - terminalStates := []mcfgv1alpha1.BuildProgress{ - mcfgv1alpha1.MachineOSBuildSucceeded, - mcfgv1alpha1.MachineOSBuildFailed, - mcfgv1alpha1.MachineOSBuildInterrupted, - } - - for _, terminalState := range terminalStates { +func (b *MachineOSBuildState) GetTerminalState() mcfgv1.BuildProgress { + for terminalState := range MachineOSBuildTerminalStates() { if apihelpers.IsMachineOSBuildConditionTrue(b.Build.Status.Conditions, terminalState) { return terminalState } @@ -154,9 +146,9 @@ func (b *MachineOSBuildState) GetTerminalState() mcfgv1alpha1.BuildProgress { } func (b *MachineOSBuildState) IsAnyDegraded() bool { - condTypes := []mcfgv1alpha1.BuildProgress{ - mcfgv1alpha1.MachineOSBuildFailed, - mcfgv1alpha1.MachineOSBuildInterrupted, + condTypes := []mcfgv1.BuildProgress{ + mcfgv1.MachineOSBuildFailed, + mcfgv1.MachineOSBuildInterrupted, } for _, condType := range condTypes { @@ -172,7 +164,7 @@ func (b *MachineOSBuildState) IsAnyDegraded() bool { func (b *MachineOSBuildState) SetBuildConditions(conditions []metav1.Condition) { for _, condition := range conditions { condition := condition - currentCondition := apihelpers.GetMachineOSBuildCondition(b.Build.Status, 
mcfgv1alpha1.BuildProgress(condition.Type)) + currentCondition := apihelpers.GetMachineOSBuildCondition(b.Build.Status, mcfgv1.BuildProgress(condition.Type)) if currentCondition != nil && isConditionEqual(*currentCondition, condition) { continue } @@ -182,6 +174,27 @@ func (b *MachineOSBuildState) SetBuildConditions(conditions []metav1.Condition) } } +// Returns a map of the buildprogress states to their expected conditions for +// transient states. That is, the MachineOSBuild is expected to transition from +// one state to another. +func MachineOSBuildTransientStates() map[mcfgv1.BuildProgress][]metav1.Condition { + return map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuilding: apihelpers.MachineOSBuildRunningConditions(), + mcfgv1.MachineOSBuildPrepared: apihelpers.MachineOSBuildPendingConditions(), + } +} + +// Returns a map of the buildprogress states to their expected conditions for +// terminal states; meaning that the MachineOSBuild cannot transition from one +// state to another. +func MachineOSBuildTerminalStates() map[mcfgv1.BuildProgress][]metav1.Condition { + return map[mcfgv1.BuildProgress][]metav1.Condition{ + mcfgv1.MachineOSBuildSucceeded: apihelpers.MachineOSBuildSucceededConditions(), + mcfgv1.MachineOSBuildFailed: apihelpers.MachineOSBuildFailedConditions(), + mcfgv1.MachineOSBuildInterrupted: apihelpers.MachineOSBuildInterruptedConditions(), + } +} + // Determines if two conditions are equal. Note: I purposely do not include the // timestamp in the equality test, since we do not directly set it. 
func isConditionEqual(cond1, cond2 metav1.Condition) bool { @@ -211,13 +224,13 @@ func clearAllBuildConditions(inConditions []metav1.Condition) []metav1.Condition return conditions } -func getMachineConfigBuildConditions() []mcfgv1alpha1.BuildProgress { - return []mcfgv1alpha1.BuildProgress{ - mcfgv1alpha1.MachineOSBuildFailed, - mcfgv1alpha1.MachineOSBuildInterrupted, - mcfgv1alpha1.MachineOSBuildPrepared, - mcfgv1alpha1.MachineOSBuildSucceeded, - mcfgv1alpha1.MachineOSBuilding, +func getMachineConfigBuildConditions() []mcfgv1.BuildProgress { + return []mcfgv1.BuildProgress{ + mcfgv1.MachineOSBuildFailed, + mcfgv1.MachineOSBuildInterrupted, + mcfgv1.MachineOSBuildPrepared, + mcfgv1.MachineOSBuildSucceeded, + mcfgv1.MachineOSBuilding, } } @@ -242,13 +255,13 @@ func IsPoolConfigChange(oldPool, curPool *mcfgv1.MachineConfigPool) bool { return oldPool.Spec.Configuration.Name != curPool.Spec.Configuration.Name } -func HasBuildObjectForCurrentMachineConfig(pool *mcfgv1.MachineConfigPool, mosb *mcfgv1alpha1.MachineOSBuild) bool { - return pool.Spec.Configuration.Name == mosb.Spec.DesiredConfig.Name +func HasBuildObjectForCurrentMachineConfig(pool *mcfgv1.MachineConfigPool, mosb *mcfgv1.MachineOSBuild) bool { + return pool.Spec.Configuration.Name == mosb.Spec.MachineConfig.Name } // Determines if we should do a build based upon the state of our // MachineConfigPool, the presence of a build pod, etc. 
-func BuildDueToPoolChange(oldPool, curPool *mcfgv1.MachineConfigPool, moscNew *mcfgv1alpha1.MachineOSConfig, mosbNew *mcfgv1alpha1.MachineOSBuild) bool { +func BuildDueToPoolChange(oldPool, curPool *mcfgv1.MachineConfigPool, moscNew *mcfgv1.MachineOSConfig, mosbNew *mcfgv1.MachineOSBuild) bool { moscState := NewMachineOSConfigState(moscNew) mosbState := NewMachineOSBuildState(mosbNew) @@ -286,6 +299,6 @@ func canPoolBuild(pool *mcfgv1.MachineConfigPool, moscNewState *MachineOSConfigS return true } -func IsLayeredPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) bool { +func IsLayeredPool(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { return (mosc != nil || mosb != nil) } diff --git a/pkg/controller/common/mos_state_test.go b/pkg/controller/common/mos_state_test.go index afe7027c45..2b23e4e06a 100644 --- a/pkg/controller/common/mos_state_test.go +++ b/pkg/controller/common/mos_state_test.go @@ -3,18 +3,125 @@ package common import ( "testing" - "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + v1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/stretchr/testify/assert" ) func TestMachineOSConfigState(t *testing.T) { t.Parallel() - mosc := NewMachineOSConfigState(&v1alpha1.MachineOSConfig{ - Status: v1alpha1.MachineOSConfigStatus{ - CurrentImagePullspec: "registry.host.com/org/repo:tag", + mosc := NewMachineOSConfigState(&v1.MachineOSConfig{ + Status: v1.MachineOSConfigStatus{ + CurrentImagePullSpec: "registry.host.com/org/repo:tag", }, }) assert.Equal(t, "registry.host.com/org/repo:tag", mosc.GetOSImage()) } + +// This test validates that the MachineOSBuild conditions are correctly +// identified as initial, transient, or terminal. 
+func TestMachineOSBuildState(t *testing.T) { + t.Parallel() + + // Determines if the helper associated with the given state returns true + // while others associated with other states returns false. If an empty state + // is given, it will not match any of these states and will therefore be + // false. + assertBuildState := func(t *testing.T, mosbState *MachineOSBuildState, givenState mcfgv1.BuildProgress) { + t.Helper() + + states := map[mcfgv1.BuildProgress]func() bool{ + mcfgv1.MachineOSBuildPrepared: mosbState.IsBuildPrepared, + mcfgv1.MachineOSBuilding: mosbState.IsBuilding, + mcfgv1.MachineOSBuildSucceeded: mosbState.IsBuildSuccess, + mcfgv1.MachineOSBuildFailed: mosbState.IsBuildFailure, + mcfgv1.MachineOSBuildInterrupted: mosbState.IsBuildInterrupted, + } + + for state, helper := range states { + // If the current state matches the given state, then that helper should + // return true. Otherwise, it should be false. + if state == givenState { + assert.True(t, helper()) + } else { + assert.False(t, helper()) + } + } + + degradedStates := map[mcfgv1.BuildProgress]struct{}{ + mcfgv1.MachineOSBuildFailed: struct{}{}, + mcfgv1.MachineOSBuildInterrupted: struct{}{}, + } + + if _, isDegradedState := degradedStates[givenState]; isDegradedState { + assert.True(t, mosbState.IsAnyDegraded()) + } else { + assert.False(t, mosbState.IsAnyDegraded()) + } + } + + // For the initial condition, ensure that it is correctly identified as an + // initial condition and not a transient or terminal condition. 
+ t.Run("Initial Conditions", func(t *testing.T) { + t.Parallel() + + mosbState := NewMachineOSBuildState(&mcfgv1.MachineOSBuild{}) + mosbState.SetBuildConditions(apihelpers.MachineOSBuildInitialConditions()) + + assert.True(t, mosbState.IsInInitialState()) + assert.False(t, mosbState.IsInTransientState()) + assert.False(t, mosbState.IsInTerminalState()) + + assert.Equal(t, mosbState.GetTerminalState(), mcfgv1.BuildProgress("")) + assert.Equal(t, mosbState.GetTransientState(), mcfgv1.BuildProgress("")) + assertBuildState(t, mosbState, mcfgv1.BuildProgress("")) + }) + + // For each transient condition, ensure that it is correctly identified as + // a transient condition and not an initial or terminal condition. + // Additionally, check that each helper associated with a given condition + // correctly identifies that condition as being true and all others false. + t.Run("Transient Conditions", func(t *testing.T) { + t.Parallel() + + mosbState := NewMachineOSBuildState(&mcfgv1.MachineOSBuild{}) + + for transientState, transientCondition := range MachineOSBuildTransientStates() { + mosbState.SetBuildConditions(transientCondition) + + assert.False(t, mosbState.IsInInitialState()) + assert.True(t, mosbState.IsInTransientState()) + assert.False(t, mosbState.IsInTerminalState()) + + assert.Equal(t, mosbState.GetTransientState(), transientState) + assert.Equal(t, mosbState.GetTerminalState(), mcfgv1.BuildProgress("")) + assertBuildState(t, mosbState, transientState) + } + }) + + // For each terminal condition, ensure that it is correctly identified as a + // terminal condition and not as a transient or initial condition. + // Additionally, check that each helper associated with a given condition + // correctly identifies that condition as being true and all others false. 
+ t.Run("Terminal Conditions", func(t *testing.T) { + t.Parallel() + + mosbState := NewMachineOSBuildState(&mcfgv1.MachineOSBuild{}) + + for terminalState, terminalCondition := range MachineOSBuildTerminalStates() { + mosbState.SetBuildConditions(terminalCondition) + + assert.False(t, mosbState.IsInInitialState()) + assert.False(t, mosbState.IsInTransientState()) + assert.True(t, mosbState.IsInTerminalState()) + + assert.Equal(t, mosbState.GetTransientState(), mcfgv1.BuildProgress("")) + assert.Equal(t, mosbState.GetTerminalState(), terminalState) + + assertBuildState(t, mosbState, terminalState) + } + }) +} diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go index 5186d5d82c..d0364c16f2 100644 --- a/pkg/controller/node/node_controller.go +++ b/pkg/controller/node/node_controller.go @@ -14,15 +14,12 @@ import ( configv1 "github.com/openshift/api/config/v1" features "github.com/openshift/api/features" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" - mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" cligoinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" cligolistersv1 "github.com/openshift/client-go/config/listers/config/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" - mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" "github.com/openshift/library-go/pkg/operator/v1helpers" @@ -51,6 +48,8 @@ import ( clientretry 
"k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + + buildconstants "github.com/openshift/machine-config-operator/pkg/controller/build/constants" ) const ( @@ -92,7 +91,6 @@ type Controller struct { mcpLister mcfglistersv1.MachineConfigPoolLister nodeLister corelisterv1.NodeLister podLister corelisterv1.PodLister - mosbLister mcfglistersv1alpha1.MachineOSBuildLister ccListerSynced cache.InformerSynced mcListerSynced cache.InformerSynced @@ -119,7 +117,7 @@ func New( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, - moscInformer mcfginformersv1alpha1.MachineOSConfigInformer, + moscInformer mcfginformersv1.MachineOSConfigInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -146,7 +144,7 @@ func NewWithCustomUpdateDelay( mcpInformer mcfginformersv1.MachineConfigPoolInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, - moscInformer mcfginformersv1alpha1.MachineOSConfigInformer, + moscInformer mcfginformersv1.MachineOSConfigInformer, schedulerInformer cligoinformersv1.SchedulerInformer, kubeClient clientset.Interface, mcfgClient mcfgclientset.Interface, @@ -173,7 +171,7 @@ func newController( ccInformer mcfginformersv1.ControllerConfigInformer, mcInformer mcfginformersv1.MachineConfigInformer, mcpInformer mcfginformersv1.MachineConfigPoolInformer, - moscInformer mcfginformersv1alpha1.MachineOSConfigInformer, + moscInformer mcfginformersv1.MachineOSConfigInformer, nodeInformer coreinformersv1.NodeInformer, podInformer coreinformersv1.PodInformer, schedulerInformer cligoinformersv1.SchedulerInformer, @@ -383,7 +381,7 @@ func (ctrl *Controller) makeMasterNodeSchedulable(node *corev1.Node) error { } func (ctrl *Controller) addMachineOSConfig(obj interface{}) { - curMOSC := obj.(*mcfgv1alpha1.MachineOSConfig) + curMOSC := 
obj.(*mcfgv1.MachineOSConfig) klog.V(4).Infof("Adding MachineOSConfig %s", curMOSC.Name) mcp, err := ctrl.mcpLister.Get(curMOSC.Spec.MachineConfigPool.Name) if err != nil { @@ -395,9 +393,9 @@ func (ctrl *Controller) addMachineOSConfig(obj interface{}) { } func (ctrl *Controller) updateMachineOSConfig(old, cur interface{}) { - oldMOSC := old.(*mcfgv1alpha1.MachineOSConfig) - curMOSC := cur.(*mcfgv1alpha1.MachineOSConfig) - if equality.Semantic.DeepEqual(oldMOSC.Status.CurrentImagePullspec, curMOSC.Status.CurrentImagePullspec) { + oldMOSC := old.(*mcfgv1.MachineOSConfig) + curMOSC := cur.(*mcfgv1.MachineOSConfig) + if equality.Semantic.DeepEqual(oldMOSC.Status.CurrentImagePullSpec, curMOSC.Status.CurrentImagePullSpec) { // we do not want to trigger an update func just if the image is not ready return } @@ -412,14 +410,14 @@ func (ctrl *Controller) updateMachineOSConfig(old, cur interface{}) { } func (ctrl *Controller) deleteMachineOSConfig(cur interface{}) { - curMOSC, ok := cur.(*mcfgv1alpha1.MachineOSConfig) + curMOSC, ok := cur.(*mcfgv1.MachineOSConfig) if !ok { tombstone, ok := cur.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", cur)) return } - curMOSC, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSConfig) + curMOSC, ok = tombstone.Obj.(*mcfgv1.MachineOSConfig) if !ok { utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MachineOSConfig %#v", cur)) return @@ -436,14 +434,14 @@ func (ctrl *Controller) deleteMachineOSConfig(cur interface{}) { } func (ctrl *Controller) deleteMachineOSBuild(obj interface{}) { - curMOSB, ok := obj.(*mcfgv1alpha1.MachineOSBuild) + curMOSB, ok := obj.(*mcfgv1.MachineOSBuild) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj)) return } - curMOSB, ok = tombstone.Obj.(*mcfgv1alpha1.MachineOSBuild) + curMOSB, ok = 
tombstone.Obj.(*mcfgv1.MachineOSBuild) if !ok { utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MOSB %#v", obj)) return @@ -892,16 +890,37 @@ func (ctrl *Controller) handleErr(err error, key string) { // 2. If a MachineConfig changes, we should wait for the OS image build to be // ready so we can update both the nodes' desired MachineConfig and desired // image annotations simultaneously. +func (ctrl *Controller) getConfigAndBuildAndLayeredStatus(pool *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, bool, error) { + mosc, mosb, err := ctrl.getConfigAndBuild(pool) + // If we attempt to list resources which are not present either because none + // exist or they're behind an inactive feature gate, they will return an + // IsNotFound error. Any other errors should be returned to the caller. + if err != nil && !errors.IsNotFound(err) { + return nil, nil, false, err + } + + isLayered, err := ctrl.isLayeredPool(mosc, mosb) + if err != nil { + return nil, nil, false, fmt.Errorf("Failed to determine whether pool %s opts in to OCL due to an error: %s", pool.Name, err) + } -func (ctrl *Controller) GetConfigAndBuild(pool *mcfgv1.MachineConfigPool) (*mcfgv1alpha1.MachineOSConfig, *mcfgv1alpha1.MachineOSBuild, error) { - var ourConfig *mcfgv1alpha1.MachineOSConfig - var ourBuild *mcfgv1alpha1.MachineOSBuild - configList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + return mosc, mosb, isLayered, nil +} + +func (ctrl *Controller) getConfigAndBuild(pool *mcfgv1.MachineConfigPool) (*mcfgv1.MachineOSConfig, *mcfgv1.MachineOSBuild, error) { + // TODO: We should use the selectors from the build controller since they are + // well-tested and makes querying for this information significantly easier. + // Additionally, this should use listers instead of API clients in order to + // reduce the impact on the API server. 
+ var ourConfig *mcfgv1.MachineOSConfig + var ourBuild *mcfgv1.MachineOSBuild + configList, err := ctrl.client.MachineconfigurationV1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, nil, err } for _, config := range configList.Items { + config := config if config.Spec.MachineConfigPool.Name == pool.Name { ourConfig = &config break @@ -912,30 +931,33 @@ func (ctrl *Controller) GetConfigAndBuild(pool *mcfgv1.MachineConfigPool) (*mcfg return nil, nil, nil } - buildList, err := ctrl.client.MachineconfigurationV1alpha1().MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + buildList, err := ctrl.client.MachineconfigurationV1().MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) if err != nil { return nil, nil, err } for _, build := range buildList.Items { - if build.Spec.MachineOSConfig.Name == ourConfig.Name { - if build.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name { - ourBuild = &build - break - } + build := build + if build.Spec.MachineOSConfig.Name == ourConfig.Name && build.Spec.MachineConfig.Name == pool.Spec.Configuration.Name { + ourBuild = &build + break } } return ourConfig, ourBuild, nil - } -func (ctrl *Controller) canLayeredPoolContinue(pool *mcfgv1.MachineConfigPool) (string, bool, error) { - - mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) +func (ctrl *Controller) canLayeredContinue(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) (string, bool, error) { + // This is an edgecase which we should ideally never hit. However, it is + // better to anticipate it and have an error message ready vs. the + // alternative. 
+ if mosc == nil && mosb != nil { + msg := fmt.Sprintf("orphaned MachineOSBuild %q found, but MachineOSConfig %q not found", mosb.Name, mosb.Labels[buildconstants.MachineOSConfigNameLabelKey]) + return msg, false, fmt.Errorf("%s", msg) + } - if mosc == nil || mosb == nil { - return "No MachineOSConfig or Build for this pool", false, nil + if !ctrl.isConfigAndBuildPresent(mosc, mosb) { + return "No MachineOSConfig or MachineOSBuild for this pool", false, nil } cs := ctrlcommon.NewMachineOSConfigState(mosc) @@ -945,15 +967,18 @@ func (ctrl *Controller) canLayeredPoolContinue(pool *mcfgv1.MachineConfigPool) ( pullspec := cs.GetOSImage() if !hasImage { - return "Desired Image not set in MachineOSBuild", false, nil + return "Desired image not set in MachineOSConfig", false, nil } switch { - // If the build is successful and we have the image pullspec, we can proceed + // If the build is successful and the MachineOSConfig has the matching pullspec, we can proceed // with rolling out the new OS image. 
- case bs.IsBuildSuccess() && hasImage: + case bs.IsBuildSuccess() && hasImage && cs.MachineOSBuildIsCurrent(mosb): msg := fmt.Sprintf("Image built successfully, pullspec: %s", pullspec) return msg, true, nil + case bs.IsBuildSuccess() && hasImage && !cs.MachineOSBuildIsCurrent(mosb): + msg := fmt.Sprintf("Image built successfully, pullspec: %s, but MachineOSConfig %q has not updated yet", pullspec, mosc.Name) + return msg, false, nil case bs.IsBuildPending(): return "Image build pending", false, nil case bs.IsBuilding(): @@ -1019,14 +1044,13 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { return ctrl.syncStatusOnly(pool) } - mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) - layered, err := ctrl.IsLayeredPool(mosc, mosb) + mosc, mosb, layered, err := ctrl.getConfigAndBuildAndLayeredStatus(pool) if err != nil { - return fmt.Errorf("Failed to determine whether pool %s opts in to OCL due to an error: %s", pool.Name, err) + return fmt.Errorf("could not get config and build: %w", err) } if layered { - reason, canApplyUpdates, err := ctrl.canLayeredPoolContinue(pool) + reason, canApplyUpdates, err := ctrl.canLayeredContinue(mosc, mosb) if err != nil { klog.Infof("Layered pool %s encountered an error: %s", pool.Name, err) return err @@ -1099,7 +1123,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } } ctrl.logPool(pool, "%d candidate nodes in %d zones for update, capacity: %d", len(candidates), len(zones), capacity) - if err := ctrl.updateCandidateMachines(pool, candidates, capacity); err != nil { + if err := ctrl.updateCandidateMachines(layered, mosc, mosb, pool, candidates, capacity); err != nil { if syncErr := ctrl.syncStatusOnly(pool); syncErr != nil { errs := kubeErrs.NewAggregate([]error{syncErr, err}) return fmt.Errorf("error setting annotations for pool %q, sync error: %w", pool.Name, errs) @@ -1176,7 +1200,7 @@ func (ctrl *Controller) setClusterConfigAnnotation(nodes []*corev1.Node) error { // updateCandidateNode needs to 
understand MOSB // specifically, the LayeredNodeState probably needs to understand mosb -func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, nodeName string, pool *mcfgv1.MachineConfigPool) error { +func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild, nodeName string, pool *mcfgv1.MachineConfigPool) error { return clientretry.RetryOnConflict(constants.NodeUpdateBackoff, func() error { oldNode, err := ctrl.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) if err != nil { @@ -1188,7 +1212,7 @@ func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1alpha1.MachineOSConfig, } lns := ctrlcommon.NewLayeredNodeState(oldNode) - layered, err := ctrl.IsLayeredPool(mosc, mosb) + layered, err := ctrl.isLayeredPool(mosc, mosb) if err != nil { return fmt.Errorf("Failed to determine whether pool %s opts in to OCL due to an error: %s", pool.Name, err) } @@ -1233,13 +1257,16 @@ func (ctrl *Controller) updateCandidateNode(mosc *mcfgv1alpha1.MachineOSConfig, // getAllCandidateMachines returns all possible nodes which can be updated to the target config, along with a maximum // capacity. It is the reponsibility of the caller to choose a subset of the nodes given the capacity. 
-func getAllCandidateMachines(layered bool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { +func getAllCandidateMachines(layered bool, config *mcfgv1.MachineOSConfig, build *mcfgv1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) ([]*corev1.Node, uint) { unavail := getUnavailableMachines(nodesInPool, pool, layered, build) if len(unavail) >= maxUnavailable { - klog.V(4).Infof("Pool %s: No nodes available for updates", pool.Name) + klog.V(4).Infof("getAllCandidateMachines: No capacity left for pool %s (unavail=%d >= maxUnavailable=%d)", + pool.Name, len(unavail), maxUnavailable) return nil, 0 } capacity := maxUnavailable - len(unavail) + klog.V(4).Infof("getAllCandidateMachines: Computed capacity=%d for pool %s", capacity, pool.Name) + failingThisConfig := 0 // We only look at nodes which aren't already targeting our desired config var nodes []*corev1.Node @@ -1278,7 +1305,7 @@ func getAllCandidateMachines(layered bool, config *mcfgv1alpha1.MachineOSConfig, } // getCandidateMachines returns the maximum subset of nodes which can be updated to the target config given availability constraints. 
-func getCandidateMachines(pool *mcfgv1.MachineConfigPool, config *mcfgv1alpha1.MachineOSConfig, build *mcfgv1alpha1.MachineOSBuild, nodesInPool []*corev1.Node, maxUnavailable int, layered bool) []*corev1.Node { +func getCandidateMachines(pool *mcfgv1.MachineConfigPool, config *mcfgv1.MachineOSConfig, build *mcfgv1.MachineOSBuild, nodesInPool []*corev1.Node, maxUnavailable int, layered bool) []*corev1.Node { nodes, capacity := getAllCandidateMachines(layered, config, build, pool, nodesInPool, maxUnavailable) if uint(len(nodes)) < capacity { return nodes @@ -1327,9 +1354,9 @@ func (ctrl *Controller) filterControlPlaneCandidateNodes(pool *mcfgv1.MachineCon } // SetDesiredStateFromPool in old mco explains how this works. Somehow you need to NOT FAIL if the mosb doesn't exist. So -// we still need to base this whole things on pools but IsLayeredPool == does mosb exist +// we still need to base this whole thing on pools but isLayeredPool == does mosb exist // updateCandidateMachines sets the desiredConfig annotation the candidate machines -func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) error { +func (ctrl *Controller) updateCandidateMachines(layered bool, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node, capacity uint) error { if pool.Name == ctrlcommon.MachineConfigPoolMaster { var err error candidates, capacity, err = ctrl.filterControlPlaneCandidateNodes(pool, candidates, capacity) @@ -1348,25 +1375,20 @@ func (ctrl *Controller) updateCandidateMachines(pool *mcfgv1.MachineConfigPool, candidates = candidates[:capacity] } - return ctrl.setDesiredAnnotations(pool, candidates) + return ctrl.setDesiredAnnotations(layered, mosc, mosb, pool, candidates) } -func (ctrl *Controller) setDesiredAnnotations(pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node) error { +func (ctrl *Controller) setDesiredAnnotations(layered bool, mosc 
*mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild, pool *mcfgv1.MachineConfigPool, candidates []*corev1.Node) error { eventName := "SetDesiredConfig" updateName := fmt.Sprintf("MachineConfig: %s", pool.Spec.Configuration.Name) - config, build, _ := ctrl.GetConfigAndBuild(pool) - layered, err := ctrl.IsLayeredPool(config, build) - if err != nil { - return fmt.Errorf("Failed to determine whether pool %s opts in to OCL due to an error: %s", pool.Name, err) - } if layered { eventName = "SetDesiredConfigAndOSImage" - updateName = fmt.Sprintf("%s / Image: %s", updateName, ctrlcommon.NewMachineOSConfigState(config).GetOSImage()) + updateName = fmt.Sprintf("%s / Image: %s", updateName, ctrlcommon.NewMachineOSConfigState(mosc).GetOSImage()) klog.Infof("Continuing to sync layered MachineConfigPool %s", pool.Name) } for _, node := range candidates { - if err := ctrl.updateCandidateNode(config, build, node.Name, pool); err != nil { + if err := ctrl.updateCandidateNode(mosc, mosb, node.Name, pool); err != nil { return fmt.Errorf("setting desired %s for node %s: %w", pool.Spec.Configuration.Name, node.Name, err) } } @@ -1504,10 +1526,18 @@ func getErrorString(err error) string { return "" } -func (ctrl *Controller) IsLayeredPool(mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) (bool, error) { +func (ctrl *Controller) isLayeredPool(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) (bool, error) { fg, err := ctrl.fgAcessor.CurrentFeatureGates() if err != nil { return false, err } - return (mosc != nil || mosb != nil) && fg.Enabled(features.FeatureGateOnClusterBuild), nil + return ctrl.isConfigOrBuildPresent(mosc, mosb) && fg.Enabled(features.FeatureGateOnClusterBuild), nil +} + +func (ctrl *Controller) isConfigOrBuildPresent(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { + return (mosc != nil || mosb != nil) +} + +func (ctrl *Controller) isConfigAndBuildPresent(mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) bool { + 
return (mosc != nil && mosb != nil) } diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go index 80bc2f0338..51c9dfc922 100644 --- a/pkg/controller/node/node_controller_test.go +++ b/pkg/controller/node/node_controller_test.go @@ -12,6 +12,7 @@ import ( mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + "github.com/openshift/machine-config-operator/pkg/apihelpers" pkghelpers "github.com/openshift/machine-config-operator/pkg/helpers" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,20 +79,22 @@ type fixture struct { fgAccess featuregates.FeatureGateAccess } -func newFixture(t *testing.T) *fixture { +func newFixtureWithFeatureGates(t *testing.T, enabled, disabled []configv1.FeatureGateName) *fixture { f := &fixture{} f.t = t f.objects = []runtime.Object{} f.kubeobjects = []runtime.Object{} f.fgAccess = featuregates.NewHardcodedFeatureGateAccess( - []configv1.FeatureGateName{ - features.FeatureGatePinnedImages, - }, - []configv1.FeatureGateName{}, + enabled, + disabled, ) return f } +func newFixture(t *testing.T) *fixture { + return newFixtureWithFeatureGates(t, []configv1.FeatureGateName{features.FeatureGatePinnedImages}, []configv1.FeatureGateName{}) +} + func (f *fixture) newControllerWithStopChan(stopCh <-chan struct{}) *Controller { f.client = fake.NewSimpleClientset(f.objects...) f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
@@ -101,7 +104,7 @@ func (f *fixture) newControllerWithStopChan(stopCh <-chan struct{}) *Controller k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) ci := configv1informer.NewSharedInformerFactory(f.schedulerClient, noResyncPeriodFunc()) c := NewWithCustomUpdateDelay(i.Machineconfiguration().V1().ControllerConfigs(), i.Machineconfiguration().V1().MachineConfigs(), i.Machineconfiguration().V1().MachineConfigPools(), k8sI.Core().V1().Nodes(), - k8sI.Core().V1().Pods(), i.Machineconfiguration().V1alpha1().MachineOSConfigs(), ci.Config().V1().Schedulers(), f.kubeclient, f.client, time.Millisecond, f.fgAccess) + k8sI.Core().V1().Pods(), i.Machineconfiguration().V1().MachineOSConfigs(), ci.Config().V1().Schedulers(), f.kubeclient, f.client, time.Millisecond, f.fgAccess) c.ccListerSynced = alwaysReady c.mcpListerSynced = alwaysReady @@ -1021,7 +1024,8 @@ func TestUpdateCandidates(t *testing.T) { c := f.newController() - err := c.setDesiredAnnotations(test.pool, []*corev1.Node{test.node}) + // TODO: Add additional test cases to handle layered workflows. + err := c.setDesiredAnnotations(false, nil, nil, test.pool, []*corev1.Node{test.node}) if !assert.Nil(t, err) { return } @@ -1528,6 +1532,168 @@ func TestControlPlaneTopology(t *testing.T) { f.run(getKey(mcp, t)) } +// Checks that the layered pool can / should continue. This is based upon the +// results of querying the API for the MachineOSConfig and MachineOSBuild. 
+func TestCanLayeredPoolContinue(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + mosc *mcfgv1.MachineOSConfig + mosb *mcfgv1.MachineOSBuild + expected bool + errExpected bool + }{ + { + name: "No MOSC or MOSB", + }, + { + name: "With MOSC no MOSB", + mosc: &mcfgv1.MachineOSConfig{}, + }, + { + name: "With MOSB no MOSC", + mosb: &mcfgv1.MachineOSBuild{}, + errExpected: true, + }, + { + name: "MOSC and MOSB", + mosc: &mcfgv1.MachineOSConfig{}, + mosb: &mcfgv1.MachineOSBuild{}, + }, + { + name: "MOSC has image, but MOSB has different image", + mosc: &mcfgv1.MachineOSConfig{ + Status: mcfgv1.MachineOSConfigStatus{ + CurrentImagePullSpec: "image-pullspec", + }, + }, + mosb: &mcfgv1.MachineOSBuild{ + Status: mcfgv1.MachineOSBuildStatus{ + DigestedImagePushSpec: "other-pullspec", + Conditions: apihelpers.MachineOSBuildSucceededConditions(), + }, + }, + }, + { + name: "MOSC has image, and MOSB does too", + mosc: &mcfgv1.MachineOSConfig{ + Status: mcfgv1.MachineOSConfigStatus{ + CurrentImagePullSpec: "image-pullspec", + }, + }, + mosb: &mcfgv1.MachineOSBuild{ + Status: mcfgv1.MachineOSBuildStatus{ + DigestedImagePushSpec: "image-pullspec", + Conditions: apihelpers.MachineOSBuildSucceededConditions(), + }, + }, + expected: true, + }, + { + name: "MOSB pending", + mosc: &mcfgv1.MachineOSConfig{}, + mosb: &mcfgv1.MachineOSBuild{ + Status: mcfgv1.MachineOSBuildStatus{ + Conditions: apihelpers.MachineOSBuildPendingConditions(), + }, + }, + }, + { + name: "MOSB building", + mosc: &mcfgv1.MachineOSConfig{ + Status: mcfgv1.MachineOSConfigStatus{ + CurrentImagePullSpec: "image-pullspec", + }, + }, + mosb: &mcfgv1.MachineOSBuild{ + Status: mcfgv1.MachineOSBuildStatus{ + Conditions: apihelpers.MachineOSBuildRunningConditions(), + }, + }, + }, + { + name: "MOSB failure", + mosc: &mcfgv1.MachineOSConfig{ + Status: mcfgv1.MachineOSConfigStatus{ + CurrentImagePullSpec: "image-pullspec", + }, + }, + mosb: &mcfgv1.MachineOSBuild{ + Status: 
mcfgv1.MachineOSBuildStatus{ + Conditions: apihelpers.MachineOSBuildFailedConditions(), + }, + }, + errExpected: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + f := newFixture(t) + c := f.newController() + + reason, canContinue, err := c.canLayeredContinue(testCase.mosc, testCase.mosb) + t.Log(reason) + if testCase.errExpected { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.Equal(t, testCase.expected, canContinue) + assert.Equal(t, testCase.expected, canContinue, c.isConfigOrBuildPresent(testCase.mosc, testCase.mosb)) + }) + } +} + +// Normally, I wouldn't bother to test trivial methods like this, but these +// primarily exist to make the NodeController's assumptions about layering +// explicit. +func TestIsConfigAndOrBuildPresent(t *testing.T) { + testCases := []struct { + name string + mosc *mcfgv1.MachineOSConfig + mosb *mcfgv1.MachineOSBuild + orExpected bool + andExpected bool + }{ + { + name: "Both nil", + }, + { + name: "Only MachineOSConfig", + mosc: &mcfgv1.MachineOSConfig{}, + orExpected: true, + }, + { + name: "Only MachineOSBuild", + mosb: &mcfgv1.MachineOSBuild{}, + orExpected: true, + }, + { + name: "Both", + mosc: &mcfgv1.MachineOSConfig{}, + mosb: &mcfgv1.MachineOSBuild{}, + orExpected: true, + andExpected: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + c := newFixture(t).newController() + assert.Equal(t, testCase.orExpected, c.isConfigOrBuildPresent(testCase.mosc, testCase.mosb)) + assert.Equal(t, testCase.andExpected, c.isConfigAndBuildPresent(testCase.mosc, testCase.mosb)) + }) + } +} + // adds annotation to the node func addNodeAnnotations(node *corev1.Node, annotations map[string]string) { if node.Annotations == nil { diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 4af57d4768..13ddca2d1a 100644 --- 
a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -12,7 +12,6 @@ import ( features "github.com/openshift/api/features" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/machine-config-operator/pkg/apihelpers" @@ -33,7 +32,7 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { return err } - machineConfigStates := []*mcfgalphav1.MachineConfigNode{} + machineConfigStates := []*mcfgv1alpha1.MachineConfigNode{} fg, err := ctrl.fgAcessor.CurrentFeatureGates() list := fg.KnownFeatures() mcnExists := false @@ -55,7 +54,10 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { } } - mosc, mosb, _ := ctrl.GetConfigAndBuild(pool) + mosc, mosb, l, err := ctrl.getConfigAndBuildAndLayeredStatus(pool) + if err != nil { + return fmt.Errorf("could not get MachineOSConfig or MachineOSBuild: %w", err) + } newStatus := ctrl.calculateStatus(fg, machineConfigStates, cc, pool, nodes, mosc, mosb) if equality.Semantic.DeepEqual(pool.Status, newStatus) { @@ -69,11 +71,6 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { return fmt.Errorf("could not update MachineConfigPool %q: %w", newPool.Name, err) } - l, err := ctrl.IsLayeredPool(mosc, mosb) - if err != nil { - return fmt.Errorf("Failed to determine whether pool %s opts in to OCL due to an error: %s", pool.Name, err) - } - if pool.Spec.Configuration.Name != newPool.Spec.Configuration.Name { ctrl.eventRecorder.Eventf(pool, corev1.EventTypeNormal, "Updating", "Pool %s now targeting %s", pool.Name, getPoolUpdateLine(newPool, mosc, l)) } @@ -84,7 +81,7 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { } //nolint:gocyclo -func (ctrl *Controller) calculateStatus(fg 
featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgalphav1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild) mcfgv1.MachineConfigPoolStatus { +func (ctrl *Controller) calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgv1alpha1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild) mcfgv1.MachineConfigPoolStatus { certExpirys := []mcfgv1.CertExpiry{} if cconfig != nil { for _, cert := range cconfig.Status.ControllerCertificates { @@ -101,7 +98,11 @@ func (ctrl *Controller) calculateStatus(fg featuregates.FeatureGate, mcs []*mcfg machineCount := int32(len(nodes)) poolSynchronizer := newPoolSynchronizer(machineCount) - l, _ := ctrl.IsLayeredPool(mosc, mosb) + l, err := ctrl.isLayeredPool(mosc, mosb) + if err != nil { + // TODO: Handle this error better. + klog.Warningf("Error when checking isLayeredPool: %s", err) + } var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node degradedReasons := []string{} @@ -139,7 +140,7 @@ func (ctrl *Controller) calculateStatus(fg featuregates.FeatureGate, mcs []*mcfg degradedMachines = append(degradedMachines, ourNode) // populate the degradedReasons from the MachineConfigNodePinnedImageSetsDegraded condition if fg.Enabled(features.FeatureGatePinnedImages) { - if mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded && cond.Status == metav1.ConditionTrue { + if mcfgv1alpha1.StateProgress(cond.Type) == mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded && cond.Status == metav1.ConditionTrue { degradedReasons = append(degradedReasons, fmt.Sprintf("Node %s is reporting: %q", ourNode.Name, cond.Message)) } } @@ -163,27 +164,27 @@ func (ctrl *Controller) calculateStatus(fg featuregates.FeatureGate, mcs []*mcfg if 
cond.Status == metav1.ConditionUnknown { // This switch case will cause a node to be double counted, maybe use a hash for node count - switch mcfgalphav1.StateProgress(cond.Type) { - case mcfgalphav1.MachineConfigNodeUpdatePrepared: + switch mcfgv1alpha1.StateProgress(cond.Type) { + case mcfgv1alpha1.MachineConfigNodeUpdatePrepared: updatingMachines = append(updatedMachines, ourNode) //nolint:gocritic - case mcfgalphav1.MachineConfigNodeUpdateExecuted: + case mcfgv1alpha1.MachineConfigNodeUpdateExecuted: updatingMachines = append(updatingMachines, ourNode) - case mcfgalphav1.MachineConfigNodeUpdatePostActionComplete: + case mcfgv1alpha1.MachineConfigNodeUpdatePostActionComplete: updatingMachines = append(updatingMachines, ourNode) - case mcfgalphav1.MachineConfigNodeUpdateComplete: + case mcfgv1alpha1.MachineConfigNodeUpdateComplete: updatingMachines = append(updatingMachines, ourNode) - case mcfgalphav1.MachineConfigNodeResumed: + case mcfgv1alpha1.MachineConfigNodeResumed: updatingMachines = append(updatedMachines, ourNode) //nolint:gocritic readyMachines = append(readyMachines, ourNode) - case mcfgalphav1.MachineConfigNodeUpdateCompatible: + case mcfgv1alpha1.MachineConfigNodeUpdateCompatible: updatingMachines = append(updatedMachines, ourNode) //nolint:gocritic - case mcfgalphav1.MachineConfigNodeUpdateDrained: + case mcfgv1alpha1.MachineConfigNodeUpdateDrained: unavailableMachines = append(unavailableMachines, ourNode) updatingMachines = append(updatingMachines, ourNode) - case mcfgalphav1.MachineConfigNodeUpdateCordoned: + case mcfgv1alpha1.MachineConfigNodeUpdateCordoned: unavailableMachines = append(unavailableMachines, ourNode) updatingMachines = append(updatingMachines, ourNode) - case mcfgalphav1.MachineConfigNodeUpdated: + case mcfgv1alpha1.MachineConfigNodeUpdated: updatedMachines = append(updatedMachines, ourNode) readyMachines = append(readyMachines, ourNode) } @@ -328,7 +329,7 @@ func (ctrl *Controller) calculateStatus(fg featuregates.FeatureGate, 
mcs []*mcfg return status } -func isPinnedImageSetNodeUpdating(mcs *mcfgalphav1.MachineConfigNode) bool { +func isPinnedImageSetNodeUpdating(mcs *mcfgv1alpha1.MachineConfigNode) bool { var updating int32 for _, set := range mcs.Status.PinnedImageSets { if set.CurrentGeneration != set.DesiredGeneration { @@ -338,7 +339,7 @@ func isPinnedImageSetNodeUpdating(mcs *mcfgalphav1.MachineConfigNode) bool { return updating > 0 } -func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.MachineOSConfig, layered bool) string { +func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1.MachineOSConfig, layered bool) string { targetConfig := pool.Spec.Configuration.Name mcLine := fmt.Sprintf("MachineConfig %s", targetConfig) @@ -346,8 +347,8 @@ func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool, mosc *mcfgv1alpha1.Machin return mcLine } - targetImage := mosc.Status.CurrentImagePullspec - if targetImage == "" { + targetImage := mosc.Status.CurrentImagePullSpec + if string(targetImage) == "" { return mcLine } @@ -384,13 +385,13 @@ func isNodeDone(node *corev1.Node, layered bool) bool { return false } - if layered { - // The MachineConfig annotations are loaded on boot-up by the daemon which - // isn't currently done for the image annotations, so the comparisons here - // are a bit more nuanced. - cimage, cok := node.Annotations[daemonconsts.CurrentImageAnnotationKey] - dimage, dok := node.Annotations[daemonconsts.DesiredImageAnnotationKey] + // The MachineConfig annotations are loaded on boot-up by the daemon which + // isn't currently done for the image annotations, so the comparisons here + // are a bit more nuanced. + cimage, cok := node.Annotations[daemonconsts.CurrentImageAnnotationKey] + dimage, dok := node.Annotations[daemonconsts.DesiredImageAnnotationKey] + if layered { // If desired image is not set, but the pool is layered, this node can // be considered ready for an update. This is the very first time node // is being opted into layering. 
@@ -409,6 +410,15 @@ func isNodeDone(node *corev1.Node, layered bool) bool { } + // If not in layered mode, we also need to consider the case when the node is rolling back + // from layered to non-layered. In those cases, cconfig==dconfig, but the node + // will still need to do an update back to dconfig's OSImageURL. We can detect a + // rolling back node by checking if the cimage stills exists but the dimage does not exist. + if cok && !dok { + // The node is not "done" in this case, as the current image annotation still exists. + return false + } + return cconfig == dconfig && isNodeMCDState(node, daemonconsts.MachineConfigDaemonStateDone) } @@ -439,14 +449,14 @@ func isNodeMCDFailing(node *corev1.Node) bool { // getUpdatedMachines filters the provided nodes to return the nodes whose // current config matches the desired config, which also matches the target config, // and the "done" flag is set. -func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { +func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild, layered bool) []*corev1.Node { var updated []*corev1.Node for _, node := range nodes { lns := ctrlcommon.NewLayeredNodeState(node) if mosb != nil && mosc != nil { mosbState := ctrlcommon.NewMachineOSBuildState(mosb) // It seems like pool image annotations are no longer being used, so node specific checks were required here - if layered && mosbState.IsBuildSuccess() && mosb.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name && isNodeDoneAt(node, pool, layered) && lns.IsCurrentImageEqualToBuild(mosc) { + if layered && mosbState.IsBuildSuccess() && mosb.Spec.MachineConfig.Name == pool.Spec.Configuration.Name && isNodeDoneAt(node, pool, layered) && lns.IsCurrentImageEqualToBuild(mosc) { updated = append(updated, node) } } else if lns.IsDoneAt(pool, 
layered) { @@ -458,7 +468,7 @@ func getUpdatedMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mo // getReadyMachines filters the provided nodes to return the nodes // that are updated and marked ready -func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1alpha1.MachineOSConfig, mosb *mcfgv1alpha1.MachineOSBuild, layered bool) []*corev1.Node { +func getReadyMachines(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node, mosc *mcfgv1.MachineOSConfig, mosb *mcfgv1.MachineOSBuild, layered bool) []*corev1.Node { updated := getUpdatedMachines(pool, nodes, mosc, mosb, layered) var ready []*corev1.Node for _, node := range updated { @@ -522,14 +532,14 @@ func isNodeUnavailable(node *corev1.Node, layered bool) bool { // node *may* go unschedulable in the future, so we don't want to // potentially start another node update exceeding our maxUnavailable. // Somewhat the opposite of getReadyNodes(). -func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool, layered bool, mosb *mcfgv1alpha1.MachineOSBuild) []*corev1.Node { +func getUnavailableMachines(nodes []*corev1.Node, pool *mcfgv1.MachineConfigPool, layered bool, mosb *mcfgv1.MachineOSBuild) []*corev1.Node { var unavail []*corev1.Node for _, node := range nodes { if mosb != nil { mosbState := ctrlcommon.NewMachineOSBuildState(mosb) // if node is unavail, desiredConfigs match, and the build is a success, then we are unavail. // not sure on this one honestly - if layered && isNodeUnavailable(node, layered) && mosb.Spec.DesiredConfig.Name == pool.Spec.Configuration.Name && mosbState.IsBuildSuccess() { + if layered && isNodeUnavailable(node, layered) && mosb.Spec.MachineConfig.Name == pool.Spec.Configuration.Name && mosbState.IsBuildSuccess() { unavail = append(unavail, node) } } else if isNodeUnavailable(node, layered) { @@ -605,7 +615,7 @@ func (p *poolSynchronizer) GetStatus(sType mcfgv1.PoolSynchronizerType) *mcfgv1. 
} // isPinnedImageSetNodeUpdated checks if the pinned image sets are updated for the node. -func isPinnedImageSetsUpdated(mcn *mcfgalphav1.MachineConfigNode) bool { +func isPinnedImageSetsUpdated(mcn *mcfgv1alpha1.MachineConfigNode) bool { updated := 0 for _, set := range mcn.Status.PinnedImageSets { if set.DesiredGeneration > 0 && set.CurrentGeneration == set.DesiredGeneration { diff --git a/pkg/controller/template/template_controller.go b/pkg/controller/template/template_controller.go index 8dff83fc56..fad740198c 100644 --- a/pkg/controller/template/template_controller.go +++ b/pkg/controller/template/template_controller.go @@ -486,7 +486,7 @@ func (ctrl *Controller) handleErr(err error, key string) { } // updateControllerConfigCerts parses the raw cert data and places key information about the certs into the controllerconfig status -func updateControllerConfigCerts(config *mcfgv1.ControllerConfig) bool { +func UpdateControllerConfigCerts(config *mcfgv1.ControllerConfig) bool { modified := false names := []string{ "KubeAPIServerServingCAData", "CloudProviderCAData", "RootCAData", "AdditionalTrustBundle", @@ -606,7 +606,7 @@ func (ctrl *Controller) syncControllerConfig(key string) error { } } - modified := updateControllerConfigCerts(cfg) + modified := UpdateControllerConfigCerts(cfg) if modified { if err := ctrl.syncCertificateStatus(cfg); err != nil { diff --git a/pkg/daemon/certificate_writer.go b/pkg/daemon/certificate_writer.go index 31b46a68d9..aa341b3651 100644 --- a/pkg/daemon/certificate_writer.go +++ b/pkg/daemon/certificate_writer.go @@ -5,7 +5,6 @@ import ( "context" "crypto/x509" "encoding/base64" - "encoding/json" "encoding/pem" "errors" "fmt" @@ -29,14 +28,6 @@ import ( var ccRequeueDelay = 1 * time.Minute -const ( - // Where the OS image secrets are mounted into the MCD pod. 
Note: This is put - // under /run/secrets so that the PrepareNamespace function will include it - // when it bind-mounts /run/secrets to keep access to the MCD service - // account. - osImagePullSecretDir string = "/run/secrets/os-image-pull-secrets" -) - func (dn *Daemon) handleControllerConfigEvent(obj interface{}) { controllerConfig := obj.(*mcfgv1.ControllerConfig) klog.V(4).Infof("Updating ControllerConfig %s", controllerConfig.Name) @@ -137,7 +128,7 @@ func (dn *Daemon) syncControllerConfigHandler(key string) error { // If the ControllerConfig version changed, we should sync our OS image // pull secrets, since they could have changed. - if err := dn.syncOSImagePullSecrets(controllerConfig); err != nil { + if err := dn.syncInternalRegistryPullSecrets(controllerConfig); err != nil { return err } @@ -349,16 +340,12 @@ func (dn *Daemon) syncControllerConfigHandler(key string) error { } // Syncs the OS image pull secrets to disk under -// /etc/mco/internal-registry-pull-secret.json. Uses the internal image -// registry as well as secrets mounted into the MCD pod under a specific path. -// If the mounted secrets are not present, it will default to using solely the -// contents of the ControllerConfig. This will run during the -// certificate_writer sync loop as well as during an OS update. We may also -// want to wire it up to fsnotify so that it runs whenever the mounted secrets -// do. Because this can execute across multiple Goroutines, a Daemon-level -// mutex (osImageMux) is used to ensure that only one call can execute at any -// given time. -func (dn *Daemon) syncOSImagePullSecrets(controllerConfig *mcfgv1.ControllerConfig) error { +// /etc/mco/internal-registry-pull-secret.json using the contents of the +// ControllerConfig. This will run during the certificate_writer sync loop +// as well as during an OS update. 
Because this can execute across multiple +// Goroutines, a Daemon-level mutex (osImageMux) is used to ensure that only +// one call can execute at any given time. +func (dn *Daemon) syncInternalRegistryPullSecrets(controllerConfig *mcfgv1.ControllerConfig) error { dn.osImageMux.Lock() defer dn.osImageMux.Unlock() @@ -371,16 +358,11 @@ func (dn *Daemon) syncOSImagePullSecrets(controllerConfig *mcfgv1.ControllerConf controllerConfig = cfg } - merged, err := reconcileOSImageRegistryPullSecretData(dn.node, controllerConfig, osImagePullSecretDir) - if err != nil { - return fmt.Errorf("could not reconcile OS image registry pull secret data: %w", err) - } - - if err := writeToDisk(map[string][]byte{imageRegistryAuthFile: merged}); err != nil { + if err := writeToDisk(map[string][]byte{internalRegistryAuthFile: controllerConfig.Spec.InternalRegistryPullSecret}); err != nil { return fmt.Errorf("could not write image pull secret data to node filesystem: %w", err) } - klog.V(4).Infof("Synced image registry secrets to node filesystem in %s", imageRegistryAuthFile) + klog.V(4).Infof("Synced image registry secrets to node filesystem in %s", internalRegistryAuthFile) return nil } @@ -425,164 +407,3 @@ func writeToDisk(pathToData map[string][]byte) error { } return nil } - -// Reconciles and merges the secrets provided by the ControllerConfig along -// with any mounted image pull secrets that the MCO may have configured the MCD -// to use in a layered OS image scenario. -func reconcileOSImageRegistryPullSecretData(node *corev1.Node, controllerCfg *mcfgv1.ControllerConfig, secretDirPath string) ([]byte, error) { - // First, get all of the node-roles that this node might have so we can - // resolve them to MachineConfigPools. - nodeRoles := getNodeRoles(node) - - // This isn't likely to happen, but a guard clause here is nice. 
- if len(nodeRoles) == 0 { - return nil, fmt.Errorf("node %s has no node-role.kubernetes.io label", node.Name) - } - - // Next, we need to read the secret from disk, if we can find one. - mountedSecret, err := readMountedSecretByNodeRole(nodeRoles, secretDirPath) - if err != nil { - return nil, err - } - - // If there are no mounted secrets, just use the internal image pull secrets - // as-is. - if mountedSecret == nil { - klog.V(4).Infof("Did not find a mounted secret") - return controllerCfg.Spec.InternalRegistryPullSecret, nil - } - - // Next, we need to merge the secret we just found with the contents of the - // InternalImagePullSecret field on the ControllerConfig object, ensuring - // that the values from the mounted secret take precedent over any values - // provided by the InternalImagePullSecret field. - merged, err := mergeMountedSecretsWithControllerConfig(mountedSecret, controllerCfg) - if err != nil { - return nil, fmt.Errorf("could not merge on-disk secrets with ControllerConfig: %w", err) - } - - return merged, nil -} - -// This iterates through all of the node roles until it finds a secret that -// matches. We do this because we have to mount all of the secrets for all of -// the MachineOSConfigs into each MCD pod. Additionally, we want to support the -// case where someone uses wither a .dockercfg or .dockerconfigjson style -// secret. However, it is possible that a different MachineOSConfig can specify -// a different image pull secret. And it is also possible that a node can -// belong to two MachineConfigPools. -func readMountedSecretByNodeRole(nodeRoles []string, secretDirPath string) ([]byte, error) { - // If this directory does not exist, it means that the MCD DaemonSet does not - // have the secrets mounted at all. In this case, just return an empty byte - // array. 
- _, err := os.Stat(secretDirPath) - if errors.Is(err, os.ErrNotExist) { - klog.V(4).Infof("Path %s does not exist", secretDirPath) - return nil, nil - } - - if err != nil { - return nil, err - } - - // Support both image secret types. - imagePullSecretKeys := []string{ - corev1.DockerConfigJsonKey, - corev1.DockerConfigKey, - } - - for _, key := range imagePullSecretKeys { - for _, nodeRole := range nodeRoles { - // This ends up being a concatenation of - // /run/secrets/os-image-pull-secrets//.dockerconfigjson or - // /run/secrets/os-image-pull-secrets//.dockercfg - path := filepath.Join(secretDirPath, nodeRole, key) - - klog.V(4).Infof("Checking path %s for mounted image pull secret", path) - - _, err := os.Stat(path) - - // If the file exists, we've found it. Read it and stop here. - if err == nil { - klog.V(4).Infof("Found mounted image pull secret in %s", path) - return os.ReadFile(path) - } - - // If the file does not exist, keep going until we find one that does or - // we've exhausted all options. - if errors.Is(err, os.ErrNotExist) { - continue - } - - // If an unknown error has occurred, stop here and bail out. - if err != nil { - return nil, fmt.Errorf("unknown error while reading %s: %w", path, err) - } - } - } - - // If we got this far, it means we've exhausted all of our node roles and our - // secret keys without finding a suitable image pull secret. The most likely - // reason is because the MachineConfigPool(s) this node belongs to does not - // have a MachineOSConfig associated with it, which means there is nothing to - // do here, except return nil. - return nil, nil -} - -// Merges the mounted secret with the ones from the ControllerConfig, with the mounted ones taking priority. -func mergeMountedSecretsWithControllerConfig(mountedSecret []byte, controllerCfg *mcfgv1.ControllerConfig) ([]byte, error) { - // Unmarshal the mounted secrets, converting to new-style secrets, if necessary. 
- mountedSecrets, err := ctrlcommon.ToDockerConfigJSON(mountedSecret) - if err != nil { - return nil, fmt.Errorf("could not parse mounted secret: %w", err) - } - - // Unmarshal the ControllerConfig secrets, converting to new-style secrets, if necessary. - internalRegistryPullSecrets, err := ctrlcommon.ToDockerConfigJSON(controllerCfg.Spec.InternalRegistryPullSecret) - if err != nil { - return nil, fmt.Errorf("could not parse internal registry pull secret from ControllerConfig: %w", err) - } - - out := &ctrlcommon.DockerConfigJSON{ - Auths: ctrlcommon.DockerConfig{}, - } - - // Copy all of the secrets from the ControllerConfig. - for key, internalRegistryAuth := range internalRegistryPullSecrets.Auths { - out.Auths[key] = internalRegistryAuth - } - - // Copy all of the secrets from the mounted secret, overwriting any secrets - // provided by ControllerConfig. - for key, mountedSecretAuth := range mountedSecrets.Auths { - if _, ok := out.Auths[key]; ok { - klog.V(4).Infof("Overriding image pull secret for %s with mounted secret", key) - } - - out.Auths[key] = mountedSecretAuth - } - - return json.Marshal(out) -} - -// Iterates through all of the labels on a given node, searching for the -// "node-role.kubernetes.io" label, and extracting the role name from the -// label. It is possible for a single node to have more than one node-role -// label, so we extract them all. 
-func getNodeRoles(node *corev1.Node) []string { - if node.Labels == nil { - return []string{} - } - - nodeRoleLabel := "node-role.kubernetes.io/" - - out := []string{} - - for key := range node.Labels { - if strings.Contains(key, nodeRoleLabel) { - out = append(out, strings.ReplaceAll(key, nodeRoleLabel, "")) - } - } - - return out -} diff --git a/pkg/daemon/certificate_writer_test.go b/pkg/daemon/certificate_writer_test.go deleted file mode 100644 index 8063cb9b4f..0000000000 --- a/pkg/daemon/certificate_writer_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package daemon - -import ( - "os" - "path/filepath" - "testing" - - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestImagePullSecretReconciliation(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - nodeRoles []string - writeMountedSecretBytes bool - mountedSecretBytes []byte - controllerConfigSecretBytes []byte - expectedBytes []byte - errExpected bool - }{ - // Tests that secrets from both the mounted secrets and ControllerConfig - // can be merged together without issue. - { - name: "simple concatenation", - nodeRoles: []string{"node-role.kubernetes.io/worker"}, - writeMountedSecretBytes: true, - mountedSecretBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "user", "password": "secret"}}}`), - controllerConfigSecretBytes: []byte(`{"auths": {"other-registry.hostname.com": {"username": "user", "password": "secret"}}}`), - // TODO(zzlotnik): Can we omit the unused fields? 
- expectedBytes: []byte(`{"auths":{"other-registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""},"registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""}}}`), - }, - // Tests that secrets from the mounted secret path take precedence over - // the ones from the ControllerConfig. - { - name: "mounted secret takes precedence", - nodeRoles: []string{"node-role.kubernetes.io/worker"}, - writeMountedSecretBytes: true, - mountedSecretBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "mounted-secret-user", "password": "mounted-secret-secret"}}}`), - controllerConfigSecretBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "other-user", "password": "other-secret"}}}`), - expectedBytes: []byte(`{"auths":{"registry.hostname.com":{"username":"mounted-secret-user","password":"mounted-secret-secret","email":"","auth":""}}}`), - }, - // Tests that in the absence of mounted secrets, only the ones found on - // the ControllerConfig will be used. - { - name: "no mounted secrets", - nodeRoles: []string{"node-role.kubernetes.io/worker"}, - controllerConfigSecretBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "user", "password": "secret"}}}`), - expectedBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "user", "password": "secret"}}}`), - }, - // Tests that if multiple node roles are found on a node (e.g., for - // control-plane nodes) and only one of them has a mounted secret, that it - // will be found. - { - name: "multiple node roles", - nodeRoles: []string{"node-role.kubernetes.io/master", "node-role.kubernetes.io/control-plane"}, - writeMountedSecretBytes: true, - mountedSecretBytes: []byte(`{"auths": {"registry.hostname.com": {"username": "user", "password": "secret"}}}`), - controllerConfigSecretBytes: []byte(`{"auths": {"other-registry.hostname.com": {"username": "user", "password": "secret"}}}`), - // TODO(zzlotnik): Can we omit the unused fields? 
- expectedBytes: []byte(`{"auths":{"other-registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""},"registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""}}}`), - }, - // Tests that legacy-style secrets (that is, secrets without a top-level - // {"auths": ...} key) are converted to a new-style secret and that any - // other secrets are correctly merged with them. - { - name: "converts and merges legacy-style secrets", - nodeRoles: []string{"node-role.kubernetes.io/worker"}, - writeMountedSecretBytes: true, - mountedSecretBytes: []byte(`{"registry.hostname.com": {"username": "user", "password": "secret"}}`), - controllerConfigSecretBytes: []byte(`{"auths":{"other-registry.hostname.com": {"username": "user", "password": "secret"}}}`), - // TODO(zzlotnik): Can we omit the unused fields? - expectedBytes: []byte(`{"auths":{"other-registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""},"registry.hostname.com":{"username":"user","password":"secret","email":"","auth":""}}}`), - }, - // Tests that we get an error when a node does not have any node roles. - { - name: "zero node roles", - nodeRoles: []string{}, - errExpected: true, - }, - // Tests that we get an error when mounted secrets are composed of - // invalid JSON. We don't have to worry about a similar case from - // ControllerConfig since by the time we've reached this point, the image - // pull secrets contained therein have already been parsed and validated. 
- { - name: "invalid JSON for mounted secrets", - nodeRoles: []string{"node-role.kubernetes.io/worker"}, - writeMountedSecretBytes: true, - mountedSecretBytes: []byte(``), - controllerConfigSecretBytes: []byte(`{"auths":{"other-registry.hostname.com": {"username": "user", "password": "secret"}}}`), - errExpected: true, - }, - } - - imagePullSecretKeys := []string{ - corev1.DockerConfigJsonKey, - corev1.DockerConfigKey, - } - - for _, testCase := range testCases { - testCase := testCase - - for _, imagePullSecretKey := range imagePullSecretKeys { - imagePullSecretKey := imagePullSecretKey - t.Run(imagePullSecretKey, func(t *testing.T) { - t.Parallel() - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - node := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{}, - }, - } - - for _, nodeRole := range testCase.nodeRoles { - node.Labels[nodeRole] = "" - } - - ctrlCfg := &mcfgv1.ControllerConfig{ - Spec: mcfgv1.ControllerConfigSpec{ - InternalRegistryPullSecret: testCase.controllerConfigSecretBytes, - }, - } - - tmpDir := t.TempDir() - - secretRoot := "/doesnt/exist" - - if testCase.writeMountedSecretBytes { - nodeRoles := getNodeRoles(node) - nodeRole := "" - if len(nodeRoles) == 1 { - nodeRole = nodeRoles[0] - } else { - // Get the last node role instead of the first. 
- nodeRole = nodeRoles[len(nodeRoles)-1] - } - - secretDir := filepath.Join(tmpDir, nodeRole) - secretPath := filepath.Join(secretDir, imagePullSecretKey) - - require.NoError(t, os.MkdirAll(secretDir, 0o755)) - require.NoError(t, os.WriteFile(secretPath, testCase.mountedSecretBytes, 0o755)) - - secretRoot = tmpDir - } - - result, err := reconcileOSImageRegistryPullSecretData(node, ctrlCfg, secretRoot) - - if testCase.errExpected { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, testCase.expectedBytes, result) - }) - }) - } - } -} diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go index d92b7bc2f6..1389940cf8 100644 --- a/pkg/daemon/daemon.go +++ b/pkg/daemon/daemon.go @@ -749,12 +749,11 @@ func (dn *Daemon) syncNode(key string) error { } // Sync our OS image pull secrets here. This will account for any changes to - // either the mounted secrets (which can change without a pod restart) or the - // ControllerConfig. + // the ControllerConfig. // // I'm not sure if this needs to be done right here or as frequently as this, // but it shouldn't cause too much impact. - if err := dn.syncOSImagePullSecrets(nil); err != nil { + if err := dn.syncInternalRegistryPullSecrets(nil); err != nil { return err } @@ -2463,7 +2462,7 @@ func (dn *Daemon) completeUpdate(desiredConfigName string) error { func (dn *Daemon) triggerUpdate(currentConfig, desiredConfig *mcfgv1.MachineConfig, currentImage, desiredImage string) error { // Before we do any updates, ensure that the image pull secrets that rpm-ostree uses are up-to-date. 
- if err := dn.syncOSImagePullSecrets(nil); err != nil { + if err := dn.syncInternalRegistryPullSecrets(nil); err != nil { return err } @@ -2477,7 +2476,7 @@ func (dn *Daemon) triggerUpdate(currentConfig, desiredConfig *mcfgv1.MachineConf dn.stopConfigDriftMonitor() klog.Infof("Performing layered OS update") - return dn.updateOnClusterBuild(currentConfig, desiredConfig, currentImage, desiredImage, true) + return dn.updateOnClusterLayering(currentConfig, desiredConfig, currentImage, desiredImage, true) } // triggerUpdateWithMachineConfig starts the update. It queries the cluster for @@ -2632,7 +2631,7 @@ func (dn *Daemon) checkOS(osImageURL string) bool { // TODO(jkyros): the header for this functions says "if the digests match" // so I'm wondering if at one point this used to work this way.... - inspection, _, err := imageInspect(osImageURL) + inspection, _, err := ImageInspect(osImageURL, "") if err != nil { klog.Warningf("Unable to check manifest for matching hash: %s", err) } else if ostreeCommit, ok := inspection.Labels["ostree.commit"]; ok { diff --git a/pkg/daemon/image-inspect.go b/pkg/daemon/image-inspect.go index 7372131c8a..a03d0d079a 100644 --- a/pkg/daemon/image-inspect.go +++ b/pkg/daemon/image-inspect.go @@ -6,9 +6,9 @@ import ( "strings" "github.com/containers/common/pkg/retry" - "github.com/containers/image/v5/docker" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" ) @@ -18,21 +18,40 @@ const ( cmdRetriesCount = 2 ) -// newDockerImageSource creates an image source for an image reference. -// The caller must call .Close() on the returned ImageSource. 
-func newDockerImageSource(ctx context.Context, sys *types.SystemContext, name string) (types.ImageSource, error) { - var imageName string - if !strings.HasPrefix(name, "//") { - imageName = "//" + name - } else { - imageName = name +func parseImageName(imgName string) (types.ImageReference, error) { + // Keep this in sync with TransportFromImageName! + transportName, withinTransport, valid := strings.Cut(imgName, ":") + if !valid { + return nil, fmt.Errorf(`invalid image name "%s", expected colon-separated transport:reference`, imgName) } - ref, err := docker.ParseReference(imageName) + transport := transports.Get(transportName) + if transport == nil { + return nil, fmt.Errorf(`invalid image name "%s", unknown transport "%s"`, imgName, transportName) + } + return transport.ParseReference(withinTransport) +} + +//nolint:unparam +func DeleteImage(imageName, authFilePath string) error { + imageName = "docker://" + imageName + retryOpts := retry.RetryOptions{ + MaxRetry: cmdRetriesCount, + } + + ctx := context.Background() + sys := &types.SystemContext{AuthFilePath: authFilePath} + + ref, err := parseImageName(imageName) if err != nil { - return nil, err + return err } - return ref.NewImageSource(ctx, sys) + if err := retry.IfNecessary(ctx, func() error { + return ref.DeleteImage(ctx, sys) + }, &retryOpts); err != nil { + return err + } + return nil } // This function has been inspired from upstream skopeo inspect, see https://github.com/containers/skopeo/blob/master/cmd/skopeo/inspect.go @@ -41,7 +60,7 @@ func newDockerImageSource(ctx context.Context, sys *types.SystemContext, name st // to know what the error is by using the libraries directly :) // //nolint:unparam -func imageInspect(imageName string) (*types.ImageInspectInfo, *digest.Digest, error) { +func ImageInspect(imageName, authfile string) (*types.ImageInspectInfo, *digest.Digest, error) { var ( src types.ImageSource imgInspect *types.ImageInspectInfo @@ -51,26 +70,36 @@ func imageInspect(imageName string) 
(*types.ImageInspectInfo, *digest.Digest, er retryOpts := retry.RetryOptions{ MaxRetry: cmdRetriesCount, } + imageName = "docker://" + imageName ctx := context.Background() - sys := &types.SystemContext{AuthFilePath: ostreeAuthFile} + authfilePath := ostreeAuthFile + if authfile != "" { + authfilePath = authfile + } + sys := &types.SystemContext{AuthFilePath: authfilePath} + + ref, err := parseImageName(imageName) + if err != nil { + return nil, nil, fmt.Errorf("error parsing image name %q: %w", imageName, err) + } // retry.IfNecessary takes into account whether the error is "retryable" // so we don't keep looping on errors that will never resolve if err := retry.RetryIfNecessary(ctx, func() error { - src, err = newDockerImageSource(ctx, sys, imageName) + src, err = ref.NewImageSource(ctx, sys) return err }, &retryOpts); err != nil { - return nil, nil, fmt.Errorf("error parsing image name %q: %w", imageName, err) + return nil, nil, fmt.Errorf("error getting image source %q: %w", imageName, err) } var rawManifest []byte - if err := retry.RetryIfNecessary(ctx, func() error { - rawManifest, _, err = src.GetManifest(ctx, nil) - + unparsedInstance := image.UnparsedInstance(src, nil) + if err := retry.IfNecessary(ctx, func() error { + rawManifest, _, err = unparsedInstance.Manifest(ctx) return err }, &retryOpts); err != nil { - return nil, nil, fmt.Errorf("error retrieving image manifest %q: %w", imageName, err) + return nil, nil, fmt.Errorf("error retrieving manifest for image: %w", err) } // get the digest here because it's not part of the image inspection @@ -81,7 +110,7 @@ func imageInspect(imageName string) (*types.ImageInspectInfo, *digest.Digest, er defer src.Close() - img, err := image.FromUnparsedImage(ctx, sys, image.UnparsedInstance(src, nil)) + img, err := image.FromUnparsedImage(ctx, sys, unparsedInstance) if err != nil { return nil, nil, fmt.Errorf("error parsing manifest for image %q: %w", imageName, err) } diff --git a/pkg/daemon/image_manager_helper.go 
b/pkg/daemon/image_manager_helper.go index d5cba4a52b..33ad45ff08 100644 --- a/pkg/daemon/image_manager_helper.go +++ b/pkg/daemon/image_manager_helper.go @@ -28,7 +28,7 @@ const ( // Pull secret. Written by the machine-config-operator kubeletAuthFile = "/var/lib/kubelet/config.json" // Internal Registry Pull secret + Global Pull secret. Written by the machine-config-operator. - imageRegistryAuthFile = "/etc/mco/internal-registry-pull-secret.json" + internalRegistryAuthFile = "/etc/mco/internal-registry-pull-secret.json" ) type imageSystem string @@ -126,7 +126,7 @@ func useMergedPullSecrets(system imageSystem) error { } // check if merged secret file exists - if _, err := os.Stat(imageRegistryAuthFile); err != nil { + if _, err := os.Stat(internalRegistryAuthFile); err != nil { if !errors.Is(err, os.ErrNotExist) { return err } @@ -134,7 +134,7 @@ func useMergedPullSecrets(system imageSystem) error { return linkAuthFile(system, kubeletAuthFile) } // Check that merged secret file is valid JSON - if file, err := os.ReadFile(imageRegistryAuthFile); err != nil { + if file, err := os.ReadFile(internalRegistryAuthFile); err != nil { klog.Errorf("Merged secret file could not be read; defaulting to cluster pull secret %v", err) return linkAuthFile(system, kubeletAuthFile) } else if !json.Valid(file) { @@ -142,7 +142,7 @@ func useMergedPullSecrets(system imageSystem) error { return linkAuthFile(system, kubeletAuthFile) } - return linkAuthFile(system, imageRegistryAuthFile) + return linkAuthFile(system, internalRegistryAuthFile) } func validateImageSystem(imgSys imageSystem) error { diff --git a/pkg/daemon/on_disk_validation.go b/pkg/daemon/on_disk_validation.go index 871b7f8738..37be3b3ae4 100644 --- a/pkg/daemon/on_disk_validation.go +++ b/pkg/daemon/on_disk_validation.go @@ -170,7 +170,7 @@ func getFilesToIgnore() sets.Set[string] { return sets.New[string]( caBundleFilePath, cloudCABundleFilePath, - imageRegistryAuthFile, + internalRegistryAuthFile, ) } diff --git 
a/pkg/daemon/runtimeassets/machine-config-daemon-revert.service.yaml b/pkg/daemon/runtimeassets/machine-config-daemon-revert.service.yaml index c02b4597c3..30ec81b094 100644 --- a/pkg/daemon/runtimeassets/machine-config-daemon-revert.service.yaml +++ b/pkg/daemon/runtimeassets/machine-config-daemon-revert.service.yaml @@ -21,8 +21,10 @@ contents: | # Run this via podman because we want to use the nmstatectl binary in our container ExecStart=/usr/bin/podman run --authfile=/var/lib/kubelet/config.json --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon '{{ .MCOImage }}' firstboot-complete-machineconfig --persist-nics --machineconfig-file {{ .RevertServiceMachineConfigFile }} ExecStart=/usr/bin/podman run --authfile=/var/lib/kubelet/config.json --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon '{{ .MCOImage }}' firstboot-complete-machineconfig --machineconfig-file {{ .RevertServiceMachineConfigFile }} + ExecStartPost=rm {{ .RevertServiceMachineConfigFile }} {{if .Proxy -}} - EnvironmentFile=/etc/mco/proxy.env + EnvironmentFile={{ .ProxyFile }} + ExecStartPost=rm {{ .ProxyFile }} {{end -}} [Install] diff --git a/pkg/daemon/runtimeassets/revertservice.go b/pkg/daemon/runtimeassets/revertservice.go index a26089d6bd..2cf07a81f2 100644 --- a/pkg/daemon/runtimeassets/revertservice.go +++ b/pkg/daemon/runtimeassets/revertservice.go @@ -3,6 +3,7 @@ package runtimeassets import ( "bytes" _ "embed" + "encoding/json" "fmt" "html/template" @@ -17,6 +18,7 @@ var _ RuntimeAsset = &revertService{} const ( RevertServiceName string = "machine-config-daemon-revert.service" RevertServiceMachineConfigFile string = "/etc/mco/machineconfig-revert.json" + RevertServiceProxyFile string = "/etc/mco/proxy.env.backup" ) //go:embed machine-config-daemon-revert.service.yaml @@ -27,11 +29,15 @@ type revertService struct { MCOImage string // Whether the proxy file exists and should be considered. 
Proxy bool + // The current MachineConfig to write to disk. + mc *mcfgv1.MachineConfig + ctrlcfg *mcfgv1.ControllerConfig } -// Constructs a revertService instance from a ControllerConfig. Returns an -// error if the provided ControllerConfig cannot be used. -func NewRevertService(ctrlcfg *mcfgv1.ControllerConfig) (RuntimeAsset, error) { +// Constructs a revertService instance from a ControllerConfig and +// MachineConfig. Returns an error if the provided ControllerConfig or +// MachineConfig cannot be used. +func NewRevertService(ctrlcfg *mcfgv1.ControllerConfig, mc *mcfgv1.MachineConfig) (RuntimeAsset, error) { mcoImage, ok := ctrlcfg.Spec.Images["machineConfigOperator"] if !ok { return nil, fmt.Errorf("controllerconfig Images does not have machineConfigOperator image") @@ -41,52 +47,65 @@ func NewRevertService(ctrlcfg *mcfgv1.ControllerConfig) (RuntimeAsset, error) { return nil, fmt.Errorf("controllerconfig Images has machineConfigOperator but it is empty") } - hasProxy := false - if ctrlcfg.Spec.Proxy != nil { - hasProxy = true - } - return &revertService{ MCOImage: mcoImage, - Proxy: hasProxy, + Proxy: ctrlcfg.Spec.Proxy != nil, + ctrlcfg: ctrlcfg, + mc: mc, }, nil } // Returns an Ignition config containing the -// machine-config-daemon-revert.service systemd unit. +// machine-config-daemon-revert.service systemd unit, the proxy config file, +// and the on-disk MachineConfig needed by the systemd unit. 
func (r *revertService) Ignition() (*ign3types.Config, error) { - rendered, err := r.render() + unit, err := r.renderServiceTemplate() if err != nil { return nil, err } - out := &ign3types.Unit{} - - if err := yaml.Unmarshal(rendered, out); err != nil { - return nil, err + mcOnDisk, err := r.getMachineConfigJSONFile() + if err != nil { + return nil, fmt.Errorf("could not create Ignition file %q for MachineConfig %q: %w", RevertServiceMachineConfigFile, r.mc.Name, err) } ignConfig := ctrlcommon.NewIgnConfig() + ignConfig.Storage.Files = []ign3types.File{*mcOnDisk} ignConfig.Systemd = ign3types.Systemd{ - Units: []ign3types.Unit{ - *out, - }, + Units: []ign3types.Unit{*unit}, + } + + if r.Proxy { + // TODO: Should we fall back to the ControllerConfig if this file is not + // present? + proxyfile, err := findFileInMachineConfig(r.mc, "/etc/mco/proxy.env") + if err != nil { + return nil, err + } + + proxyfile.Path = RevertServiceProxyFile + ignConfig.Storage.Files = append(ignConfig.Storage.Files, *proxyfile) } return &ignConfig, nil } -// Renders the embedded template with the provided values. -func (r *revertService) render() ([]byte, error) { - if r.MCOImage == "" { - return nil, fmt.Errorf("MCOImage field must be provided") +// Converts a MachineConfig to a JSON representation and returns an Ignition +// file containing the appropriate path for the file. +func (r *revertService) getMachineConfigJSONFile() (*ign3types.File, error) { + outBytes, err := json.Marshal(r.mc) + if err != nil { + return nil, fmt.Errorf("could not marshal MachineConfig %q to JSON: %w", r.mc.Name, err) } - tmpl := template.New(RevertServiceName) + ignFile := ctrlcommon.NewIgnFileBytes(RevertServiceMachineConfigFile, outBytes) + return &ignFile, nil +} - tmpl, err := tmpl.Parse(mcdRevertServiceIgnYAML) - if err != nil { - return nil, err +// Renders the embedded service template with the provided values. 
+func (r *revertService) renderServiceTemplate() (*ign3types.Unit, error) { + if r.MCOImage == "" { + return nil, fmt.Errorf("MCOImage field must be provided") } // Golang templates must be rendered using exported fields. However, we want @@ -96,17 +115,71 @@ func (r *revertService) render() ([]byte, error) { data := struct { ServiceName string RevertServiceMachineConfigFile string + ProxyFile string revertService }{ ServiceName: RevertServiceName, RevertServiceMachineConfigFile: RevertServiceMachineConfigFile, + ProxyFile: RevertServiceProxyFile, revertService: *r, } + out := &ign3types.Unit{} + + if err := renderTemplate(RevertServiceName, mcdRevertServiceIgnYAML, data, out); err != nil { + return nil, err + } + + return out, nil +} + +// Finds a given file by path in a given MachineConfig. Returns an error if the +// file cannot be found. +func findFileInMachineConfig(mc *mcfgv1.MachineConfig, path string) (*ign3types.File, error) { + ignConfig, err := ctrlcommon.ParseAndConvertConfig(mc.Spec.Config.Raw) + if err != nil { + return nil, err + } + + ignFile := findFileInIgnitionConfig(&ignConfig, path) + if ignFile == nil { + return nil, fmt.Errorf("file %q not found in MachineConfig %q", path, mc.Name) + } + + return ignFile, nil +} + +// Finds a given file by path in a given Ignition config. Returns nil if the +// file is not found. +func findFileInIgnitionConfig(ignConfig *ign3types.Config, path string) *ign3types.File { + for _, file := range ignConfig.Storage.Files { + if file.Path == path { + out := file + return &out + } + } + + return nil +} + +// Renders the given data into the given YAML template source. Then attempts to +// decode it into the given struct instance. Returns any errors encountered. 
+func renderTemplate(name, src string, data, out interface{}) error { + tmpl := template.New(name) + + tmpl, err := tmpl.Parse(src) + if err != nil { + return err + } + buf := bytes.NewBuffer([]byte{}) if err := tmpl.Execute(buf, data); err != nil { - return nil, err + return err + } + + if err := yaml.Unmarshal(buf.Bytes(), out); err != nil { + return err } - return buf.Bytes(), nil + return nil } diff --git a/pkg/daemon/runtimeassets/revertservice_test.go b/pkg/daemon/runtimeassets/revertservice_test.go index e71fe46839..b71fb20267 100644 --- a/pkg/daemon/runtimeassets/revertservice_test.go +++ b/pkg/daemon/runtimeassets/revertservice_test.go @@ -4,25 +4,39 @@ import ( "fmt" "testing" + ign3types "github.com/coreos/ignition/v2/config/v3_4/types" configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/test/helpers" "github.com/stretchr/testify/assert" ) func TestRevertService(t *testing.T) { mcoImagePullspec := "mco.image.pullspec" - proxyContents := "EnvironmentFile=/etc/mco/proxy.env" + proxyContents := []string{ + "EnvironmentFile=/etc/mco/proxy.env.backup", + "ExecStartPost=rm /etc/mco/proxy.env.backup", + } + + proxyStatus := configv1.ProxyStatus{ + HTTPProxy: "http://1.2.3.4", + HTTPSProxy: "https://5.6.7.8", + NoProxy: "no.proxy.local", + } alwaysExpectedContents := []string{ fmt.Sprintf("ConditionPathExists=%s", RevertServiceMachineConfigFile), fmt.Sprintf("podman run --authfile=/var/lib/kubelet/config.json --rm --privileged --net=host -v /:/rootfs --entrypoint machine-config-daemon '%s' firstboot-complete-machineconfig --persist-nics --machineconfig-file %s", mcoImagePullspec, RevertServiceMachineConfigFile), fmt.Sprintf("podman run --authfile=/var/lib/kubelet/config.json --rm --privileged --pid=host --net=host -v /:/rootfs --entrypoint machine-config-daemon '%s' 
firstboot-complete-machineconfig --machineconfig-file %s", mcoImagePullspec, RevertServiceMachineConfigFile), + fmt.Sprintf("ExecStartPost=rm %s", RevertServiceMachineConfigFile), } testCases := []struct { name string ctrlcfg *mcfgv1.ControllerConfig + mc *mcfgv1.MachineConfig expectedContents []string unexpectedContents []string errExpected bool @@ -36,28 +50,39 @@ func TestRevertService(t *testing.T) { }, }, }, - expectedContents: alwaysExpectedContents, - unexpectedContents: []string{ - proxyContents, - }, + expectedContents: alwaysExpectedContents, + unexpectedContents: proxyContents, }, { name: "with proxy", ctrlcfg: &mcfgv1.ControllerConfig{ Spec: mcfgv1.ControllerConfigSpec{ - Proxy: &configv1.ProxyStatus{}, + Proxy: &proxyStatus, Images: map[string]string{ "machineConfigOperator": mcoImagePullspec, }, }, }, - expectedContents: append(alwaysExpectedContents, proxyContents), + expectedContents: append(alwaysExpectedContents, proxyContents...), }, { name: "no mco image found", ctrlcfg: &mcfgv1.ControllerConfig{}, errExpected: true, }, + { + name: "no proxy file found in MachineConfig despite proxy being enabled", + mc: helpers.NewMachineConfig("", map[string]string{}, "", []ign3types.File{}), + ctrlcfg: &mcfgv1.ControllerConfig{ + Spec: mcfgv1.ControllerConfigSpec{ + Proxy: &proxyStatus, + Images: map[string]string{ + "machineConfigOperator": mcoImagePullspec, + }, + }, + }, + errExpected: true, + }, } for _, testCase := range testCases { @@ -65,8 +90,21 @@ func TestRevertService(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { t.Parallel() - rs, err := NewRevertService(testCase.ctrlcfg) + mc := helpers.NewMachineConfig("", map[string]string{}, "", []ign3types.File{ctrlcommon.NewIgnFile("/etc/mco/proxy.env", "proxycontents")}) + if testCase.mc != nil { + mc = testCase.mc + } + + rs, err := NewRevertService(testCase.ctrlcfg, mc) if testCase.errExpected { + // If the returned error is nil, try calling the Ignition() method to + // see if it returns an 
error. + if err == nil { + ign, err := rs.Ignition() + assert.Error(t, err) + assert.Nil(t, ign) + return + } assert.Error(t, err) assert.Nil(t, rs) return @@ -91,6 +129,22 @@ func TestRevertService(t *testing.T) { assert.Equal(t, unit.Name, RevertServiceName) assert.True(t, *unit.Enabled) + + if testCase.ctrlcfg.Spec.Proxy != nil { + assertFileExistsInIgnConfig(t, ign, RevertServiceProxyFile) + } + + assertFileExistsInIgnConfig(t, ign, RevertServiceMachineConfigFile) }) } } + +func assertFileExistsInIgnConfig(t *testing.T, igncfg *ign3types.Config, filename string) { + t.Helper() + + file := findFileInIgnitionConfig(igncfg, filename) + assert.NotNil(t, file, "expected to find file %q in ignition config", filename) + + _, err := ctrlcommon.DecodeIgnitionFileContents(file.Contents.Source, file.Contents.Compression) + assert.NoError(t, err, "expected %s to be encoded", file.Path) +} diff --git a/pkg/daemon/update.go b/pkg/daemon/update.go index 7546995f6f..cfaf71d8c8 100644 --- a/pkg/daemon/update.go +++ b/pkg/daemon/update.go @@ -512,7 +512,7 @@ func removePendingDeployment() error { // applyOSChanges extracts the OS image and adds coreos-extensions repo if we have either OS update or package layering to perform func (dn *CoreOSDaemon) applyOSChanges(mcDiff machineConfigDiff, oldConfig, newConfig *mcfgv1.MachineConfig) (retErr error) { // We previously did not emit this event when kargs changed, so we still don't - if mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType { + if mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType || mcDiff.oclEnabled { // We emitted this event before, so keep it if dn.nodeWriter != nil { dn.nodeWriter.Eventf(corev1.EventTypeNormal, "InClusterUpgrade", fmt.Sprintf("Updating from oscontainer %s", newConfig.Spec.OSImageURL)) @@ -521,13 +521,14 @@ func (dn *CoreOSDaemon) applyOSChanges(mcDiff machineConfigDiff, oldConfig, newC // Only check the image type and execute OS changes if: // - machineconfig changed - // - we're 
staying on a realtime/64k kernel ( need to run rpm-ostree update ) - // we have a diff in extensions ( need to run rpm-ostree update ) + // - we're staying on a realtime/64k kernel ( need to run rpm-ostree update ) + // - we have extensions ( need to run rpm-ostree update ) + // - OCL is enabled ( need to run rpm-ostree update ) // We have at least one customer that removes the pull secret from the cluster to "shrinkwrap" it for distribution and we want // to make sure we don't break that use case, but realtime kernel update and extensions update always ran // if they were in use, so we also need to preserve that behavior. // https://issues.redhat.com/browse/OCPBUGS-4049 - if mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType || mcDiff.kargs || + if mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType || mcDiff.kargs || mcDiff.oclEnabled || canonicalizeKernelType(newConfig.Spec.KernelType) == ctrlcommon.KernelTypeRealtime || canonicalizeKernelType(newConfig.Spec.KernelType) == ctrlcommon.KernelType64kPages { @@ -812,9 +813,14 @@ func (dn *Daemon) calculatePostConfigChangeNodeDisruptionAction(diff *machineCon // This function should be consolidated with dn.update() and dn.updateHypershift(). See: https://issues.redhat.com/browse/MCO-810 for further discussion.
// //nolint:gocyclo -func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfig, oldImage, newImage string, skipCertificateWrite bool) (retErr error) { +func (dn *Daemon) updateOnClusterLayering(oldConfig, newConfig *mcfgv1.MachineConfig, oldImage, newImage string, skipCertificateWrite bool) (retErr error) { oldConfig = canonicalizeEmptyMC(oldConfig) + mcDiff, err := newMachineConfigDiffFromLayered(oldConfig, newConfig, oldImage, newImage) + if err != nil { + return fmt.Errorf("could not get layered diff from MachineConfig(s) %q / %q and images %q / %q: %w", oldConfig.Name, newConfig.Name, oldImage, newImage, err) + } + if dn.nodeWriter != nil { state, err := getNodeAnnotationExt(dn.node, constants.MachineConfigDaemonStateAnnotationKey, true) if err != nil { @@ -846,7 +852,7 @@ func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfi return fmt.Errorf("parsing new Ignition config failed: %w", err) } - klog.Infof("Checking Reconcilable for config %v to %v", oldConfigName, newConfigName) + klog.Infof("Checking Reconcilable for config %s to %s", oldConfigName, newConfigName) // Make sure we can actually reconcile this state. In the future, this check should be moved to the BuildController and performed prior to the build occurring. This addresses the following bugs: // - https://issues.redhat.com/browse/OCPBUGS-18670 @@ -862,42 +868,49 @@ func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfi return &unreconcilableErr{wrappedErr} } - if oldImage == newImage && newImage != "" { - if oldImage == "" { - logSystem("Starting transition to %q", newImage) - } else { - logSystem("Starting transition from %q to %q", oldImage, newImage) - } - } - if err := dn.performDrain(); err != nil { return err } - // If the new image pullspec is already on disk, do not attempt to re-apply - // it. rpm-ostree will throw an error as a result. - // See: https://issues.redhat.com/browse/OCPBUGS-18414. 
- if oldImage != newImage { - // If the new image field is empty, set it to the OS image URL value - // provided by the MachineConfig to do a rollback. - if newImage == "" { - klog.Infof("%s empty, reverting to osImageURL %s from MachineConfig %s", constants.DesiredImageAnnotationKey, newConfig.Spec.OSImageURL, newConfig.Name) - newImage = newConfig.Spec.OSImageURL - } - if err := dn.updateLayeredOSToPullspec(newImage); err != nil { - return err - } - } else { - klog.Infof("Image pullspecs equal, skipping rpm-ostree rebase") + if mcDiff.revertFromOCL { + klog.Infof("%s empty, reverting to osImageURL %s from MachineConfig %s", constants.DesiredImageAnnotationKey, newConfig.Spec.OSImageURL, newConfig.Name) } - // If the new OS image equals the OS image URL value, this means we're in a - // revert-from-layering situation. This also means we can return early after - // taking a different path. - if newImage == newConfig.Spec.OSImageURL { - return dn.finalizeRevertToNonLayering(newConfig) + if !dn.os.IsCoreOSVariant() { + return fmt.Errorf("on-cluster layering on non-CoreOS nodes is not supported") + } + + // We have a separate path for OS images and MachineConfigs because we needed + a way to handle the case where a node was either opting into or opting out + of OCL. If either the oldImage or newImage is empty, this has a special + meaning for OCL depending on which one is empty: + - If oldImage is empty, this means that we are transitioning into OCL operation. + - If newImage is empty, this means that we are transitioning out of OCL operation. + + The code paths that apply an OS image to the node do not handle the case + where the image is empty. So here, we "canonicalize" the OSImageURL field + on both the oldConfig and newConfig. These copies are only used for the OS + update itself and should not be used for anything else.
+ oldConfigCopy := canonicalizeMachineConfigImage(oldImage, oldConfig) + newConfigCopy := canonicalizeMachineConfigImage(newImage, newConfig) + + klog.Infof("Old MachineConfig %s / Image %s -> New MachineConfig %s / Image %s", oldConfigName, oldConfigCopy.Spec.OSImageURL, newConfigName, newConfigCopy.Spec.OSImageURL) + + coreOSDaemon := CoreOSDaemon{dn} + if err := coreOSDaemon.applyOSChanges(*mcDiff, oldConfigCopy, newConfigCopy); err != nil { + return err } + defer func() { + if retErr != nil { + if err := coreOSDaemon.applyOSChanges(*mcDiff, newConfigCopy, oldConfigCopy); err != nil { + errs := kubeErrs.NewAggregate([]error{err, retErr}) + retErr = fmt.Errorf("error rolling back changes to OS: %w", errs) + return + } + } + }() + // update files on disk that need updating if err := dn.updateFiles(oldIgnConfig, newIgnConfig, skipCertificateWrite); err != nil { return err @@ -957,7 +970,6 @@ func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfi // Update the kernal args if there is a difference if diff.kargs && dn.os.IsCoreOSVariant() { - coreOSDaemon := CoreOSDaemon{dn} if err := coreOSDaemon.updateKernelArguments(oldConfig.Spec.KernelArguments, newConfig.Spec.KernelArguments); err != nil { return err } @@ -974,6 +986,40 @@ func (dn *Daemon) updateOnClusterBuild(oldConfig, newConfig *mcfgv1.MachineConfi currentConfig: newConfig, } + if mcDiff.revertFromOCL { + odc.currentImage = "" + + // Finalizes the revert process by enabling a special systemd unit prior to + // rebooting the node. + // + // After we write the original factory image to the node, none of the files + // specified in the MachineConfig will be written due to how rpm-ostree handles + // file writes. Because those files are owned by the layered container image, + // they are not present after reboot; even if we were to write them to the node + // before rebooting. 
Consequently, after reverting back to the original image, + the node will lose contact with the control plane and the easiest way to + reestablish contact is to rebootstrap the node. + // + // By comparison, if we write a file that is _not_ owned by the layered + container image, this file will persist after reboot. So what we do is write + a special systemd unit that will rebootstrap the node upon reboot. + Unfortunately, this will incur a second reboot during the rollback process, + so there is room for improvement here. + if err := dn.enableRevertSystemdUnit(newConfig); err != nil { + return fmt.Errorf("could not enable revert systemd service: %w", err) + } + + defer func() { + if retErr != nil { + if err := dn.disableRevertSystemdUnit(); err != nil { + errs := kubeErrs.NewAggregate([]error{err, retErr}) + retErr = fmt.Errorf("error rolling back systemd unit %q on disk: %w", runtimeassets.RevertServiceName, errs) + return + } + } + }() + } + + if err := dn.storeCurrentConfigOnDisk(odc); err != nil { + return err + } @@ -990,66 +1036,12 @@ } }() - return dn.reboot(fmt.Sprintf("Node will reboot into image %s / MachineConfig %s", newImage, newConfigName)) -} - -// Finalizes the revert process by enabling a special systemd unit prior to -// rebooting the node. -// -// After we write the original factory image to the node, none of the files -// specified in the MachineConfig will be written due to how rpm-ostree handles -// file writes. Because those files are owned by the layered container image, -// they are not present after reboot; even if we were to write them to the node -// before rebooting. Consequently, after reverting back to the original image, -// the node will lose contact with the control plane and the easiest way to -// reestablish contact is to rebootstrap the node.
-// -// By comparison, if we write a file that is _not_ owned by the layered -// container image, this file will persist after reboot. So what we do is write -// a special systemd unit that will rebootstrap the node upon reboot. -// Unfortunately, this will incur a second reboot during the rollback process, -// so there is room for improvement here. -func (dn *Daemon) finalizeRevertToNonLayering(newConfig *mcfgv1.MachineConfig) error { - // First, we write the new MachineConfig to a file. This is both the signal - // that the revert systemd unit should fire as well as the desired source of - // truth. - outBytes, err := json.Marshal(newConfig) - if err != nil { - return fmt.Errorf("could not marshal MachineConfig %q to JSON: %w", newConfig.Name, err) - } - - if err := writeFileAtomicallyWithDefaults(runtimeassets.RevertServiceMachineConfigFile, outBytes); err != nil { - return fmt.Errorf("could not write MachineConfig %q to %q: %w", newConfig.Name, runtimeassets.RevertServiceMachineConfigFile, err) - } - - klog.Infof("Wrote MachineConfig %q to %q", newConfig.Name, runtimeassets.RevertServiceMachineConfigFile) - - // Next, we enable the revert systemd unit. This renders and writes the - // machine-config-daemon-revert.service systemd unit, clones it, and writes - // it to disk. The reason for doing it this way is because it will persist - // after the reboot since it was not written or mutated by the rpm-ostree - // process. - if err := dn.enableRevertSystemdUnit(); err != nil { - return err - } - - // Clear the current image field so that after reboot, the node will clear - // the currentImage annotation. 
- odc := &onDiskConfig{ - currentImage: "", - currentConfig: newConfig, - } - - if err := dn.storeCurrentConfigOnDisk(odc); err != nil { - return err - } - - return dn.reboot(fmt.Sprintf("Node will reboot into image %s / MachineConfig %s", newConfig.Spec.OSImageURL, newConfig.Name)) + return dn.reboot(fmt.Sprintf("Node will reboot into image %s / MachineConfig %s", canonicalizeMachineConfigImage(newImage, newConfig).Spec.OSImageURL, newConfigName)) } // Update the node to the provided node configuration. // This function should be de-duped with dn.updateHypershift() and -// dn.updateOnClusterBuild(). See: https://issues.redhat.com/browse/MCO-810 for +// dn.updateOnClusterLayering(). See: https://issues.redhat.com/browse/MCO-810 for // discussion. // //nolint:gocyclo @@ -1454,14 +1446,16 @@ func (dn *Daemon) removeRollback() error { // and the MCO would just operate on that. For now we're just doing this to get // improved logging. type machineConfigDiff struct { - osUpdate bool - kargs bool - fips bool - passwd bool - files bool - units bool - kernelType bool - extensions bool + osUpdate bool + kargs bool + fips bool + passwd bool + files bool + units bool + kernelType bool + extensions bool + oclEnabled bool + revertFromOCL bool } // isEmpty returns true if the machineConfigDiff has no changes, or @@ -1532,6 +1526,19 @@ func newMachineConfigDiff(oldConfig, newConfig *mcfgv1.MachineConfig) (*machineC }, nil } +func newMachineConfigDiffFromLayered(oldConfig, newConfig *mcfgv1.MachineConfig, oldImage, newImage string) (*machineConfigDiff, error) { + mcDiff, err := newMachineConfigDiff(oldConfig, newConfig) + if err != nil { + return mcDiff, err + } + + mcDiff.oclEnabled = true + // If the new OS image is empty, that means we are in a revert-from-OCL situation. 
+ mcDiff.revertFromOCL = newImage == "" + mcDiff.osUpdate = oldImage != newImage || forceFileExists() + return mcDiff, nil +} + // reconcilable checks the configs to make sure that the only changes requested // are ones we know how to do in-place. If we can reconcile, (nil, nil) is returned. // Otherwise, if we can't do it in place, the node is marked as degraded; @@ -2634,10 +2641,7 @@ func (dn *Daemon) queueRevertKernelSwap() error { func (dn *Daemon) updateLayeredOS(config *mcfgv1.MachineConfig) error { newURL := config.Spec.OSImageURL klog.Infof("Updating OS to layered image %q", newURL) - return dn.updateLayeredOSToPullspec(newURL) -} -func (dn *Daemon) updateLayeredOSToPullspec(newURL string) error { newEnough, err := dn.NodeUpdaterClient.IsNewEnoughForLayering() if err != nil { return err @@ -2822,8 +2826,8 @@ func (dn *CoreOSDaemon) applyLayeredOSChanges(mcDiff machineConfigDiff, oldConfi var osExtensionsContentDir string var err error - if newConfig.Spec.BaseOSExtensionsContainerImage != "" && (mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType) { + if newConfig.Spec.BaseOSExtensionsContainerImage != "" && (mcDiff.osUpdate || mcDiff.extensions || mcDiff.kernelType) && !mcDiff.oclEnabled { // TODO(jkyros): the original intent was that we use the extensions container as a service, but that currently results // in a lot of complexity due to boostrap and firstboot where the service isn't easily available, so for now we are going // to extract them to disk like we did previously. @@ -2861,12 +2865,14 @@ func (dn *CoreOSDaemon) applyLayeredOSChanges(mcDiff machineConfigDiff, oldConfi } }() - // If we have an OS update *or* a kernel type change, then we must undo the kernel swap - // enablement. - if mcDiff.osUpdate || mcDiff.kernelType { - if err := dn.queueRevertKernelSwap(); err != nil { - mcdPivotErr.Inc() - return err + if !mcDiff.oclEnabled { + // If we have an OS update *or* a kernel type change, then we must undo the kernel swap + // enablement. 
+ if mcDiff.osUpdate || mcDiff.kernelType { + if err := dn.queueRevertKernelSwap(); err != nil { + mcdPivotErr.Inc() + return err + } } } @@ -2895,6 +2901,11 @@ func (dn *CoreOSDaemon) applyLayeredOSChanges(mcDiff machineConfigDiff, oldConfi } } + // If on-cluster layering is enabled, we can skip the rest of this process. + if mcDiff.oclEnabled { + return nil + } + // Switch to real time kernel if mcDiff.osUpdate || mcDiff.kernelType { if err := dn.switchKernel(oldConfig, newConfig); err != nil { @@ -2927,24 +2938,28 @@ func (dn *Daemon) hasImageRegistryDrainOverrideConfigMap() (bool, error) { // // To enable the unit, we perform the following operations: // 1. Retrieve the ControllerConfig. -// 2. Generate the Ignition config from the ControllerConfig. -// 3. Writes the new systemd unit to disk and enables it. -func (dn *Daemon) enableRevertSystemdUnit() error { +// 2. Generate the Ignition config from the ControllerConfig and the supplied MachineConfig. +// 3. Writes the new systemd unit and its needed files to disk and enable it. 
+func (dn *Daemon) enableRevertSystemdUnit(newConfig *mcfgv1.MachineConfig) error { ctrlcfg, err := dn.ccLister.Get(ctrlcommon.ControllerConfigName) if err != nil { return fmt.Errorf("could not get controllerconfig %s: %w", ctrlcommon.ControllerConfigName, err) } - revertService, err := runtimeassets.NewRevertService(ctrlcfg) + rs, err := runtimeassets.NewRevertService(ctrlcfg, newConfig) if err != nil { return err } - revertIgn, err := revertService.Ignition() + revertIgn, err := rs.Ignition() if err != nil { return fmt.Errorf("could not create %s: %w", runtimeassets.RevertServiceName, err) } + if err := dn.writeFiles(revertIgn.Storage.Files, false); err != nil { + return fmt.Errorf("could not write files for %s: %w", runtimeassets.RevertServiceName, err) + } + if err := dn.writeUnits(revertIgn.Systemd.Units); err != nil { return fmt.Errorf("could not write %s: %w", runtimeassets.RevertServiceName, err) } @@ -2969,7 +2984,7 @@ func (dn *Daemon) disableRevertSystemdUnit() error { return fmt.Errorf("could not determine if service %q exists: %w", runtimeassets.RevertServiceName, err) } - // If the unit path does not exist, there is nothing left to do. + // If the unit path does not exist, there is nothing to do. if !unitPathExists { return nil } @@ -2983,6 +2998,7 @@ func (dn *Daemon) disableRevertSystemdUnit() error { filesToRemove := []string{ unitPath, runtimeassets.RevertServiceMachineConfigFile, + runtimeassets.RevertServiceProxyFile, } // systemd removes the unit file, but there is no harm in calling @@ -2995,3 +3011,19 @@ func (dn *Daemon) disableRevertSystemdUnit() error { return nil } + +// If the provided image is empty, then the OSImageURL value on the +// MachineConfig should take precedence. Otherwise, if the provided image is +// set, then it should take precedence over the OSImageURL value. This is only +// used for OCL OS updates and should not be used for anything else. 
+func canonicalizeMachineConfigImage(img string, mc *mcfgv1.MachineConfig) *mcfgv1.MachineConfig { + copied := mc.DeepCopy() + + if img == "" { + return copied + } + + copied.Spec.OSImageURL = img + + return copied +} diff --git a/pkg/operator/bootstrap.go b/pkg/operator/bootstrap.go index 2f178f183d..6f84d31f68 100644 --- a/pkg/operator/bootstrap.go +++ b/pkg/operator/bootstrap.go @@ -9,7 +9,6 @@ import ( "k8s.io/klog/v2" configv1 "github.com/openshift/api/config/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" configscheme "github.com/openshift/client-go/config/clientset/versioned/scheme" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -155,7 +154,7 @@ func RenderBootstrap( templatectrl.KubeRbacProxyKey: imgs.KubeRbacProxy, } - config := getRenderConfig("", string(filesData[kubeAPIServerServingCA]), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, nil, []*mcfgv1alpha1.MachineOSConfig{}, nil) + config := getRenderConfig("", string(filesData[kubeAPIServerServingCA]), spec, &imgs.RenderConfigImages, infra, nil, nil) manifests := []manifest{ { diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 69d13bb463..0082dec2e0 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -106,7 +106,7 @@ type Operator struct { mckLister mcfglistersv1.KubeletConfigLister crcLister mcfglistersv1.ContainerRuntimeConfigLister nodeClusterLister configlistersv1.NodeLister - moscLister mcfglistersalphav1.MachineOSConfigLister + moscLister mcfglistersv1.MachineOSConfigLister apiserverLister configlistersv1.APIServerLister crdListerSynced cache.InformerSynced @@ -380,7 +380,7 @@ func (optr *Operator) Run(workers int, stopCh <-chan struct{}) { if isOCBEnabled { klog.Infof("On-cluster layering featuregate enabled, starting MachineOSConfig informer") - moscInformer := optr.ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().MachineOSConfigs() + moscInformer := 
optr.ctrlctx.InformerFactory.Machineconfiguration().V1().MachineOSConfigs() optr.moscLister = moscInformer.Lister() optr.moscListerSynced = moscInformer.Informer().HasSynced cacheSynced = append(cacheSynced, optr.moscListerSynced) diff --git a/pkg/operator/render.go b/pkg/operator/render.go index 73996eb4e0..42aa4731de 100644 --- a/pkg/operator/render.go +++ b/pkg/operator/render.go @@ -16,7 +16,6 @@ import ( ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/manifests" "github.com/openshift/machine-config-operator/pkg/constants" utilrand "k8s.io/apimachinery/pkg/util/rand" @@ -42,7 +41,6 @@ type renderConfig struct { Infra configv1.Infrastructure Constants map[string]string PointerConfig string - MachineOSConfigs []*mcfgv1alpha1.MachineOSConfig TLSMinVersion string TLSCipherSuites []string } diff --git a/pkg/operator/render_test.go b/pkg/operator/render_test.go index d161db8172..2cbaff6cb3 100644 --- a/pkg/operator/render_test.go +++ b/pkg/operator/render_test.go @@ -7,7 +7,6 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/library-go/pkg/operator/resource/resourceread" mcfgv1resourceread "github.com/openshift/machine-config-operator/lib/resourceread" @@ -259,7 +258,6 @@ func TestRenderAsset(t *testing.T) { // Test that machineconfigdaemon DaemonSets are rendered correctly with proxy config Path: "manifests/machineconfigdaemon/daemonset.yaml", RenderConfig: &renderConfig{ - MachineOSConfigs: nil, TargetNamespace: "testing-namespace", ReleaseVersion: "4.8.0-rc.0", Images: &ctrlcommon.RenderConfigImages{ @@ -280,12 +278,6 @@ func TestRenderAsset(t *testing.T) { "- name: NO_PROXY\n value: \"*\"", // Ensure 
the * is quoted: "*": https://bugzilla.redhat.com/show_bug.cgi?id=1947066 "--payload-version=4.8.0-rc.0", }, - NotFindExpected: []string{ - "- mountPath: /run/pool-1/secret-1\n name: secret-1", - "- mountPath: /run/pool-2/secret-2\n name: secret-2", - "- secret:\n secretName: secret-1\n name: secret-1", - "- secret:\n secretName: secret-2\n name: secret-2", - }, }, { // Bad path, will cause asset error @@ -305,50 +297,6 @@ func TestRenderAsset(t *testing.T) { "--payload-version=4.8.0-rc.0", }, }, - // Tests that the MCD DaemonSet gets MachineOSConfig secrets mounted into it. - { - Path: "manifests/machineconfigdaemon/daemonset.yaml", - RenderConfig: &renderConfig{ - TargetNamespace: "testing-namespace", - ReleaseVersion: "4.16.0-rc.1", - Images: &ctrlcommon.RenderConfigImages{ - MachineConfigOperator: "mco-operator-image", - KubeRbacProxy: "kube-rbac-proxy-image", - }, - MachineOSConfigs: []*mcfgv1alpha1.MachineOSConfig{ - { - Spec: mcfgv1alpha1.MachineOSConfigSpec{ - MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{ - Name: "pool-1", - }, - BuildOutputs: mcfgv1alpha1.BuildOutputs{ - CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ - Name: "secret-1", - }, - }, - }, - }, - { - Spec: mcfgv1alpha1.MachineOSConfigSpec{ - MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{ - Name: "pool-2", - }, - BuildOutputs: mcfgv1alpha1.BuildOutputs{ - CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ - Name: "secret-2", - }, - }, - }, - }, - }, - }, - FindExpected: []string{ - "- mountPath: /run/secrets/os-image-pull-secrets/pool-1\n name: secret-1", - "- mountPath: /run/secrets/os-image-pull-secrets/pool-2\n name: secret-2", - "- secret:\n secretName: secret-1\n name: secret-1", - "- secret:\n secretName: secret-2\n name: secret-2", - }, - }, } for idx, test := range tests { diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index fda9138aa9..24d407aec8 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -13,7 
+13,6 @@ import ( "net/url" "os" "reflect" - "sort" "strconv" "strings" "time" @@ -37,7 +36,6 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" v1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" opv1 "github.com/openshift/api/operator/v1" @@ -638,22 +636,7 @@ func (optr *Operator) syncRenderConfig(_ *renderConfig, _ *configv1.ClusterOpera return err } - isOnClusterBuildEnabled, err := optr.isOnClusterBuildFeatureGateEnabled() - if err != nil { - return err - } - - if isOnClusterBuildEnabled { - moscs, err := optr.getAndValidateMachineOSConfigs() - if err != nil { - return err - } - - // create renderConfig - optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, pointerConfigData, moscs, apiServer) - } else { - optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra.Status.APIServerInternalURL, pointerConfigData, nil, apiServer) - } + optr.renderConfig = getRenderConfig(optr.namespace, string(kubeAPIServerServingCABytes), spec, &imgs.RenderConfigImages, infra, pointerConfigData, apiServer) return nil } @@ -734,7 +717,7 @@ func (optr *Operator) syncMachineConfigPools(config *renderConfig, _ *configv1.C // Work around https://github.com/kubernetes/kubernetes/issues/3030 and https://github.com/kubernetes/kubernetes/issues/80609 pool.APIVersion = mcfgv1.GroupVersion.String() - pool.Kind = "MachineConfigPool" + pool.Kind = "MachineConfigPool" //nolint:goconst p.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ { @@ -1282,6 +1265,11 @@ func (optr *Operator) reconcileMachineOSBuilder(mob *appsv1.Deployment) error { return fmt.Errorf("could not reconcile etc-pki-entitlement secrets: %w", err) } + // Create/Deletes the global pull secret copy in the 
MCO namespace, depending on layered pool count. + if err := optr.reconcileGlobalPullSecretCopy(layeredMCPs); err != nil { + return fmt.Errorf("could not reconcile global pull secret copy: %w", err) + } + // If we have opted-in pools and the Machine OS Builder deployment is either // not running or doesn't have the correct replica count, scale it up. correctReplicaCount := optr.hasCorrectReplicaCount(mob) @@ -1441,10 +1429,26 @@ func (optr *Operator) reconcileSimpleContentAccessSecrets(layeredMCPs []*mcfgv1. secretName := ctrlcommon.SimpleContentAccessSecretName + "-" + poolName // Create a clone of simpleContentAccessSecret, modify it to be in the MCO namespace with the new name + currentPool, err := optr.mcpLister.Get(poolName) + if err != nil { + return err + } + + // Work around https://github.com/kubernetes/kubernetes/issues/3030 and https://github.com/kubernetes/kubernetes/issues/80609 + currentPool.APIVersion = mcfgv1.GroupVersion.String() + currentPool.Kind = "MachineConfigPool" clonedSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: ctrlcommon.MCONamespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: currentPool.APIVersion, + Kind: currentPool.Kind, + Name: currentPool.ObjectMeta.Name, + UID: currentPool.ObjectMeta.UID, + }, + }, }, Data: simpleContentAccessSecret.Data, Type: corev1.SecretTypeOpaque, @@ -1472,6 +1476,57 @@ func (optr *Operator) reconcileSimpleContentAccessSecrets(layeredMCPs []*mcfgv1. 
return nil } +func (optr *Operator) reconcileGlobalPullSecretCopy(layeredMCPs []*mcfgv1.MachineConfigPool) error { + secretCopyExists := true + currentSecretCopy, err := optr.mcoSecretLister.Secrets(ctrlcommon.MCONamespace).Get(ctrlcommon.GlobalPullSecretCopyName) + if apierrors.IsNotFound(err) { + secretCopyExists = false + } else if err != nil { + return err + } + + if len(layeredMCPs) == 0 { + // If the secret copy doesn't exist, nothing to do here + if !secretCopyExists { + return nil + } + klog.Infof("deleting %s", ctrlcommon.GlobalPullSecretCopyName) + return optr.kubeClient.CoreV1().Secrets(ctrlcommon.MCONamespace).Delete(context.TODO(), ctrlcommon.GlobalPullSecretCopyName, metav1.DeleteOptions{}) + } + + // At least one pool is opted-in, let's create or update the copy if needed. First, grab the global pull secret. + globalPullSecret, err := optr.ocSecretLister.Secrets(ctrlcommon.OpenshiftConfigNamespace).Get("pull-secret") + if err != nil { + return fmt.Errorf("error fetching cluster pull secret: %w", err) + } + + // Create a clone of clusterPullSecret, and modify it to be in the MCO namespace. + globalPullSecretCopy := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ctrlcommon.GlobalPullSecretCopyName, + Namespace: ctrlcommon.MCONamespace, + }, + Data: globalPullSecret.Data, + Type: corev1.SecretTypeDockerConfigJson, + } + + // If the secret copy doesn't exist, create it. + if !secretCopyExists { + klog.Infof("creating %s", ctrlcommon.GlobalPullSecretCopyName) + _, err := optr.kubeClient.CoreV1().Secrets(ctrlcommon.MCONamespace).Create(context.TODO(), globalPullSecretCopy, metav1.CreateOptions{}) + return err + } + + // If it does exist, check if an update is required before making the update call.
+ if !reflect.DeepEqual(currentSecretCopy.Data, globalPullSecret.Data) { + klog.Infof("updating %s", ctrlcommon.GlobalPullSecretCopyName) + _, err := optr.kubeClient.CoreV1().Secrets(ctrlcommon.MCONamespace).Update(context.TODO(), globalPullSecretCopy, metav1.UpdateOptions{}) + return err + } + + return nil +} + // Updates the Machine OS Builder Deployment, creating it if it does not exist. func (optr *Operator) startMachineOSBuilderDeployment(mob *appsv1.Deployment, layeredMCPs []*mcfgv1.MachineConfigPool) error { if err := build.ValidateOnClusterBuildConfig(optr.kubeClient, optr.client, layeredMCPs); err != nil { @@ -1512,11 +1567,11 @@ func (optr *Operator) getLayeredMachineConfigPools() ([]*mcfgv1.MachineConfigPoo if len(pools) == 0 { moscPools := []*mcfgv1.MachineConfigPool{} - machineosconfigs, err := optr.client.MachineconfigurationV1alpha1().MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + machineosconfigs, err := optr.moscLister.List(labels.Everything()) if err != nil { return []*mcfgv1.MachineConfigPool{}, err } - for _, mosc := range machineosconfigs.Items { + for _, mosc := range machineosconfigs { mcp, err := optr.mcpLister.Get(mosc.Spec.MachineConfigPool.Name) if err != nil { return []*mcfgv1.MachineConfigPool{}, err @@ -2051,7 +2106,7 @@ func setGVK(obj runtime.Object, scheme *runtime.Scheme) error { return nil } -func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *ctrlcommon.RenderConfigImages, apiServerURL string, pointerConfigData []byte, moscs []*mcfgv1alpha1.MachineOSConfig, apiServer *configv1.APIServer) *renderConfig { +func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.ControllerConfigSpec, imgs *ctrlcommon.RenderConfigImages, infra *configv1.Infrastructure, pointerConfigData []byte, apiServer *configv1.APIServer) *renderConfig { tlsMinVersion, tlsCipherSuites := ctrlcommon.GetSecurityProfileCiphersFromAPIServer(apiServer) return &renderConfig{ 
TargetNamespace: tnamespace, @@ -2060,9 +2115,9 @@ func getRenderConfig(tnamespace, kubeAPIServerServingCA string, ccSpec *mcfgv1.C ControllerConfig: *ccSpec, Images: imgs, KubeAPIServerServingCA: kubeAPIServerServingCA, - APIServerURL: apiServerURL, + APIServerURL: infra.Status.APIServerInternalURL, PointerConfig: string(pointerConfigData), - MachineOSConfigs: moscs, + Infra: *infra, TLSMinVersion: tlsMinVersion, TLSCipherSuites: tlsCipherSuites, } @@ -2275,49 +2330,6 @@ func (optr *Operator) syncMachineConfiguration(_ *renderConfig, _ *configv1.Clus return nil } -// Gets MachineOSConfigs from the lister, assuming that the OnClusterBuild -// FeatureGate is enabled. Will return nil if the FeatureGate is not enabled. -func (optr *Operator) getMachineOSConfigs() ([]*mcfgv1alpha1.MachineOSConfig, error) { - isOnClusterBuildEnabled, err := optr.isOnClusterBuildFeatureGateEnabled() - if err != nil { - return nil, err - } - - if !isOnClusterBuildEnabled { - return nil, nil - } - - moscs, err := optr.moscLister.List(labels.Everything()) - if err != nil { - return nil, err - } - - sort.Slice(moscs, func(i, j int) bool { return moscs[i].Name < moscs[j].Name }) - return moscs, nil -} - -// Fetches and validates the MachineOSConfigs. For now, validation consists of -// ensuring that the secrets the MachineOSConfig was configured with exist. -func (optr *Operator) getAndValidateMachineOSConfigs() ([]*mcfgv1alpha1.MachineOSConfig, error) { - moscs, err := optr.getMachineOSConfigs() - - if err != nil { - return nil, err - } - - if moscs == nil { - return nil, nil - } - - for _, mosc := range moscs { - if err := build.ValidateMachineOSConfigFromListers(optr.mcpLister, optr.mcoSecretLister, mosc); err != nil { - return nil, fmt.Errorf("invalid MachineOSConfig %s: %w", mosc.Name, err) - } - } - - return moscs, nil -} - // Determines if the OnclusterBuild FeatureGate is enabled. Returns any errors encountered. 
func (optr *Operator) isOnClusterBuildFeatureGateEnabled() (bool, error) { fg, err := optr.fgAccessor.CurrentFeatureGates() diff --git a/pkg/operator/sync_test.go b/pkg/operator/sync_test.go index 7cce826f74..0dbd4e14bd 100644 --- a/pkg/operator/sync_test.go +++ b/pkg/operator/sync_test.go @@ -161,56 +161,73 @@ func withCABundle(caBundle string) kubeCloudConfigOption { } } -func TestReconcileSimpleContentAccessSecret(t *testing.T) { +func TestMachineOSBuilderSecretReconciliation(t *testing.T) { masterPool := helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0") workerPool := helpers.NewMachineConfigPool("worker", nil, helpers.MasterSelector, "v0") infraPool := helpers.NewMachineConfigPool("infra", nil, helpers.MasterSelector, "v0") entitlementSecret := helpers.NewOpaqueSecret(ctrlcommon.SimpleContentAccessSecretName, ctrlcommon.OpenshiftConfigManagedNamespace, "abc") - workerEntitlementSecret := helpers.NewOpaqueSecret(ctrlcommon.SimpleContentAccessSecretName+"-"+workerPool.Name, ctrlcommon.MCONamespace, "abc") - infraEntitlementSecret := helpers.NewOpaqueSecret(ctrlcommon.SimpleContentAccessSecretName+"-"+infraPool.Name, ctrlcommon.MCONamespace, "abc") - outOfDateInfraEntitlementSecret := helpers.NewOpaqueSecret(ctrlcommon.SimpleContentAccessSecretName+"-"+infraPool.Name, ctrlcommon.MCONamespace, "123") + workerEntitlementSecret := helpers.NewOpaqueSecretWithOwnerPool(ctrlcommon.SimpleContentAccessSecretName+"-"+workerPool.Name, ctrlcommon.MCONamespace, "abc", *workerPool) + infraEntitlementSecret := helpers.NewOpaqueSecretWithOwnerPool(ctrlcommon.SimpleContentAccessSecretName+"-"+infraPool.Name, ctrlcommon.MCONamespace, "abc", *infraPool) + outOfDateInfraEntitlementSecret := helpers.NewOpaqueSecretWithOwnerPool(ctrlcommon.SimpleContentAccessSecretName+"-"+infraPool.Name, ctrlcommon.MCONamespace, "123", *infraPool) + globalPullSecret := helpers.NewDockerCfgJSONSecret(ctrlcommon.GlobalPullSecretName, ctrlcommon.OpenshiftConfigNamespace, "abc") + 
outOfDateGlobalPullSecretCopy := helpers.NewDockerCfgJSONSecret(ctrlcommon.GlobalPullSecretCopyName, ctrlcommon.MCONamespace, "123") + globalPullSecretCopy := helpers.NewDockerCfgJSONSecret(ctrlcommon.GlobalPullSecretCopyName, ctrlcommon.MCONamespace, "abc") cases := []struct { name string mcoSecrets []*corev1.Secret + ocSecrets []*corev1.Secret ocManagedSecrets []*corev1.Secret expectedMCOSecrets []corev1.Secret layeredMCPs []*mcfgv1.MachineConfigPool }{ { name: "no entitlement secret on cluster, with opted-in pool", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, ocManagedSecrets: []*corev1.Secret{}, mcoSecrets: []*corev1.Secret{}, - expectedMCOSecrets: []corev1.Secret{}, layeredMCPs: []*mcfgv1.MachineConfigPool{infraPool.DeepCopy()}, + expectedMCOSecrets: []corev1.Secret{*globalPullSecretCopy.DeepCopy()}, }, { name: "entitlement secret on cluster, with opted-in pool", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, ocManagedSecrets: []*corev1.Secret{entitlementSecret.DeepCopy()}, mcoSecrets: []*corev1.Secret{}, layeredMCPs: []*mcfgv1.MachineConfigPool{infraPool.DeepCopy()}, - expectedMCOSecrets: []corev1.Secret{*infraEntitlementSecret.DeepCopy()}, + expectedMCOSecrets: []corev1.Secret{*infraEntitlementSecret.DeepCopy(), *globalPullSecretCopy.DeepCopy()}, }, { name: "entitlement secret on cluster, with multiple opted-in pools", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, ocManagedSecrets: []*corev1.Secret{entitlementSecret.DeepCopy()}, mcoSecrets: []*corev1.Secret{}, layeredMCPs: []*mcfgv1.MachineConfigPool{workerPool.DeepCopy(), infraPool.DeepCopy()}, - expectedMCOSecrets: []corev1.Secret{*workerEntitlementSecret.DeepCopy(), *infraEntitlementSecret.DeepCopy()}, + expectedMCOSecrets: []corev1.Secret{*workerEntitlementSecret.DeepCopy(), *infraEntitlementSecret.DeepCopy(), *globalPullSecretCopy.DeepCopy()}, }, { - name: "entitlement and cloned secret on cluster, with no opted-in pools", + name: "entitlement, cloned secret and 
global pull secret copy on cluster, with no opted-in pools", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, ocManagedSecrets: []*corev1.Secret{entitlementSecret.DeepCopy()}, - mcoSecrets: []*corev1.Secret{infraEntitlementSecret.DeepCopy()}, + mcoSecrets: []*corev1.Secret{infraEntitlementSecret.DeepCopy(), globalPullSecretCopy.DeepCopy()}, layeredMCPs: []*mcfgv1.MachineConfigPool{}, expectedMCOSecrets: []corev1.Secret{}, }, { name: "entitlement and cloned secret on cluster, with an outdated cloned secret", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, ocManagedSecrets: []*corev1.Secret{entitlementSecret.DeepCopy()}, mcoSecrets: []*corev1.Secret{outOfDateInfraEntitlementSecret.DeepCopy()}, layeredMCPs: []*mcfgv1.MachineConfigPool{infraPool.DeepCopy()}, - expectedMCOSecrets: []corev1.Secret{*infraEntitlementSecret.DeepCopy()}, + expectedMCOSecrets: []corev1.Secret{*infraEntitlementSecret.DeepCopy(), *globalPullSecretCopy.DeepCopy()}, + }, + { + name: "outdated global pull secret copy on cluster", + ocSecrets: []*corev1.Secret{globalPullSecret.DeepCopy()}, + ocManagedSecrets: []*corev1.Secret{}, + mcoSecrets: []*corev1.Secret{outOfDateGlobalPullSecretCopy.DeepCopy()}, + layeredMCPs: []*mcfgv1.MachineConfigPool{infraPool.DeepCopy()}, + expectedMCOSecrets: []corev1.Secret{*globalPullSecretCopy.DeepCopy()}, }, } for _, tc := range cases { @@ -222,6 +239,7 @@ func TestReconcileSimpleContentAccessSecret(t *testing.T) { sharedInformerFactory := informers.NewSharedInformerFactory(kubeClient, 0) mcoSecretInformer := sharedInformerFactory.Core().V1().Secrets() ocManagedSecretInformer := sharedInformerFactory.Core().V1().Secrets() + ocSecretInformer := sharedInformerFactory.Core().V1().Secrets() // Add secrets to informer and client for _, secret := range tc.mcoSecrets { @@ -234,6 +252,11 @@ func TestReconcileSimpleContentAccessSecret(t *testing.T) { _, err := 
kubeClient.CoreV1().Secrets(ctrlcommon.OpenshiftConfigManagedNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) assert.NoError(t, err) } + for _, secret := range tc.ocSecrets { + ocSecretInformer.Informer().GetIndexer().Add(secret) + _, err := kubeClient.CoreV1().Secrets(ctrlcommon.OpenshiftConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + assert.NoError(t, err) + } // Create MCO specific clients mcfgClient := fakeclientmachineconfigv1.NewSimpleClientset() @@ -250,11 +273,15 @@ func TestReconcileSimpleContentAccessSecret(t *testing.T) { kubeClient: kubeClient, mcpLister: mcpInformer.Lister(), mcoSecretLister: mcoSecretInformer.Lister(), + ocSecretLister: ocSecretInformer.Lister(), ocManagedSecretLister: ocManagedSecretInformer.Lister(), } err := optr.reconcileSimpleContentAccessSecrets(tc.layeredMCPs) assert.NoError(t, err) + err = optr.reconcileGlobalPullSecretCopy(tc.layeredMCPs) + assert.NoError(t, err) + // Verify secrets in MCO namespace are as expected secrets, err := kubeClient.CoreV1().Secrets(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{}) assert.NoError(t, err) diff --git a/test/e2e-bootstrap/bootstrap_test.go b/test/e2e-bootstrap/bootstrap_test.go index e1449ef444..06fa57e2c9 100644 --- a/test/e2e-bootstrap/bootstrap_test.go +++ b/test/e2e-bootstrap/bootstrap_test.go @@ -521,7 +521,7 @@ func createControllers(ctx *ctrlcommon.ControllerContext) []ctrlcommon.Controlle ctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), ctx.KubeInformerFactory.Core().V1().Nodes(), ctx.KubeInformerFactory.Core().V1().Pods(), - ctx.InformerFactory.Machineconfiguration().V1alpha1().MachineOSConfigs(), + ctx.InformerFactory.Machineconfiguration().V1().MachineOSConfigs(), ctx.ConfigInformerFactory.Config().V1().Schedulers(), ctx.ClientBuilder.KubeClientOrDie("node-update-controller"), ctx.ClientBuilder.MachineConfigClientOrDie("node-update-controller"), diff --git 
a/test/e2e-techpreview/Containerfile.cowsay b/test/e2e-ocl/Containerfile.cowsay similarity index 100% rename from test/e2e-techpreview/Containerfile.cowsay rename to test/e2e-ocl/Containerfile.cowsay diff --git a/test/e2e-techpreview/Containerfile.entitled b/test/e2e-ocl/Containerfile.entitled similarity index 100% rename from test/e2e-techpreview/Containerfile.entitled rename to test/e2e-ocl/Containerfile.entitled diff --git a/test/e2e-techpreview/Containerfile.okd-fcos b/test/e2e-ocl/Containerfile.okd-fcos similarity index 100% rename from test/e2e-techpreview/Containerfile.okd-fcos rename to test/e2e-ocl/Containerfile.okd-fcos diff --git a/test/e2e-techpreview/Containerfile.yum-repos-d b/test/e2e-ocl/Containerfile.yum-repos-d similarity index 100% rename from test/e2e-techpreview/Containerfile.yum-repos-d rename to test/e2e-ocl/Containerfile.yum-repos-d diff --git a/test/e2e-techpreview/helpers_test.go b/test/e2e-ocl/helpers_test.go similarity index 70% rename from test/e2e-techpreview/helpers_test.go rename to test/e2e-ocl/helpers_test.go index 5994a228f1..e3af425b2d 100644 --- a/test/e2e-techpreview/helpers_test.go +++ b/test/e2e-ocl/helpers_test.go @@ -1,4 +1,4 @@ -package e2e_techpreview_test +package e2e_ocl_test import ( "bytes" @@ -10,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "strings" "testing" "time" @@ -20,7 +21,7 @@ import ( "github.com/distribution/reference" imagev1 "github.com/openshift/api/image/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/framework" @@ -33,23 +34,34 @@ import ( "k8s.io/apimachinery/pkg/runtime" aggerrs "k8s.io/apimachinery/pkg/util/errors" 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" ) -const ( - clonedSecretLabelKey string = "machineconfiguration.openshift.io/cloned-secret" +var ( + InspectMC = "inspect-mc" ) -func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1alpha1.MachineOSConfig) func() { +func applyMC(t *testing.T, cs *framework.ClientSet, mc *mcfgv1.MachineConfig) func() { + cleanupFunc := helpers.ApplyMC(t, cs, mc) + t.Logf("Created new MachineConfig %q", mc.Name) + + return makeIdempotentAndRegister(t, func() { + cleanupFunc() + t.Logf("Deleted MachineConfig %q", mc.Name) + }) +} + +func createMachineOSConfig(t *testing.T, cs *framework.ClientSet, mosc *mcfgv1.MachineOSConfig) func() { helpers.SetMetadataOnObject(t, mosc) - _, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) + _, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Create(context.TODO(), mosc, metav1.CreateOptions{}) require.NoError(t, err) t.Logf("Created MachineOSConfig %q", mosc.Name) return makeIdempotentAndRegister(t, func() { - require.NoError(t, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Delete(context.TODO(), mosc.Name, metav1.DeleteOptions{})) + require.NoError(t, cs.MachineconfigurationV1Interface.MachineOSConfigs().Delete(context.TODO(), mosc.Name, metav1.DeleteOptions{})) t.Logf("Deleted MachineOSConfig %q", mosc.Name) }) } @@ -196,22 +208,6 @@ func createSecret(t *testing.T, cs *framework.ClientSet, secret *corev1.Secret) }) } -// Copies the global pull secret from openshift-config/pull-secret into the MCO -// namespace so that it can be used by the build processes. 
-func copyGlobalPullSecret(t *testing.T, cs *framework.ClientSet) func() { - src := metav1.ObjectMeta{ - Name: "pull-secret", - Namespace: "openshift-config", - } - - dst := metav1.ObjectMeta{ - Name: globalPullSecretCloneName, - Namespace: ctrlcommon.MCONamespace, - } - - return cloneSecret(t, cs, src, dst) -} - // Computes the name of the currently-running MachineOSBuild given a MachineConfigPool and MachineOSConfig. func getMachineOSBuildNameForPool(cs *framework.ClientSet, poolName, moscName string) (string, error) { mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(context.TODO(), poolName, metav1.GetOptions{}) @@ -219,12 +215,12 @@ func getMachineOSBuildNameForPool(cs *framework.ClientSet, poolName, moscName st return "", err } - mosc, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Get(context.TODO(), moscName, metav1.GetOptions{}) + mosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(context.TODO(), moscName, metav1.GetOptions{}) if err != nil { return "", err } - mosbs, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{ + mosbs, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{ LabelSelector: utils.MachineOSBuildSelector(mosc, mcp).String(), }) @@ -257,16 +253,27 @@ func waitForPoolToReachState(t *testing.T, cs *framework.ClientSet, poolName str require.NoError(t, err, "MachineConfigPool %q did not reach desired state", poolName) } -// Registers a cleanup function, making it idempotent, and wiring up the -// skip-cleanup flag to it which will cause cleanup to be skipped, if set. +// Registers a cleanup function, making it idempotent, and wiring up the skip +// cleanup checks which will cause cleanup to be skipped under certain +// conditions. 
func makeIdempotentAndRegister(t *testing.T, cleanupFunc func()) func() { - out := helpers.MakeIdempotent(func() { - if !skipCleanup { - cleanupFunc() - } - }) - t.Cleanup(out) - return out + cfg := helpers.IdempotentConfig{ + SkipAlways: skipCleanupAlways, + SkipOnlyOnFailure: skipCleanupOnlyAfterFailure, + } + + return helpers.MakeConfigurableIdempotentAndRegister(t, cfg, cleanupFunc) +} + +// Registers a cleanup function, making it idempotent and ensures that it will +// always be run, regardless of skip cleanup opts or whether we're in CI. +// +// Note: Use this wrapper only in cases where you want to ensure that a +// function is only called once despite there being multiple calls to the +// returned function. If there is only one call to the returned function +// anyway, use t.Cleanup() instead for clarity. +func makeIdempotentAndRegisterAlwaysRun(t *testing.T, cleanupFunc func()) func() { + return helpers.MakeIdempotentAndRegister(t, cleanupFunc) } // TOOD: Refactor into smaller functions. 
@@ -287,18 +294,26 @@ func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { require.NoError(t, err) + jobList, err := cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + + require.NoError(t, err) + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: labelSelector, }) require.NoError(t, err) - mosbList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + mosbList, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) require.NoError(t, err) - moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + moscList, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) require.NoError(t, err) + kubeassert := helpers.AssertClientSet(t, cs).WithContext(context.TODO()) + if len(secretList.Items) == 0 { t.Logf("No build-time secrets to clean up") } @@ -307,6 +322,10 @@ func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { t.Logf("No ephemeral ConfigMaps to clean up") } + if len(jobList.Items) == 0 { + t.Logf("No ephemeral Jobs to clean up") + } + if len(podList.Items) == 0 { t.Logf("No build pods to clean up") } @@ -322,44 +341,88 @@ func cleanupEphemeralBuildObjects(t *testing.T, cs *framework.ClientSet) { for _, item := range secretList.Items { t.Logf("Cleaning up build-time Secret %s", item.Name) require.NoError(t, deleteObject(context.TODO(), t, &item, cs.CoreV1Interface.Secrets(ctrlcommon.MCONamespace))) + kubeassert.SecretDoesNotExist(item.Name) } for _, item := range cmList.Items { t.Logf("Cleaning up ephemeral ConfigMap %q", item.Name) require.NoError(t, deleteObject(context.TODO(), t, &item, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace))) + 
kubeassert.ConfigMapDoesNotExist(item.Name) + } + + for _, item := range jobList.Items { + jobUID := string(item.UID) + t.Logf("Cleaning up build job %q", item.Name) + bgDeletion := metav1.DeletePropagationBackground + require.NoError(t, deleteObjectWithOpts(context.TODO(), t, &item, cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace), metav1.DeleteOptions{ + PropagationPolicy: &bgDeletion, + })) + kubeassert.JobDoesNotExist(item.Name) + + // Delete any pods that were created by the job + pods, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: "controller-uid=" + jobUID, + }) + require.NoError(t, err) + for _, pod := range pods.Items { + require.NoError(t, deleteObject(context.TODO(), t, &pod, cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace))) + kubeassert.PodDoesNotExist(pod.Name) + } } for _, item := range podList.Items { t.Logf("Cleaning up build pod %q", item.Name) require.NoError(t, deleteObject(context.TODO(), t, &item, cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace))) + kubeassert.PodDoesNotExist(item.Name) } for _, item := range moscList.Items { t.Logf("Cleaning up MachineOSConfig %q", item.Name) - require.NoError(t, deleteObject(context.TODO(), t, &item, cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs())) + require.NoError(t, deleteObject(context.TODO(), t, &item, cs.MachineconfigurationV1Interface.MachineOSConfigs())) + kubeassert.MachineOSConfigDoesNotExist(&item) } for _, item := range mosbList.Items { t.Logf("Cleaning up MachineOSBuild %q", item.Name) - require.NoError(t, deleteObject(context.TODO(), t, &item, cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds())) + require.NoError(t, deleteObject(context.TODO(), t, &item, cs.MachineconfigurationV1Interface.MachineOSBuilds())) + kubeassert.MachineOSBuildDoesNotExist(&item) + + // Also clean up the digest ConfigMap + t.Logf("Cleaning up ephemeral digest ConfigMap %q", utils.GetDigestConfigMapName(&item)) + require.NoError(t, 
cleanupDigestConfigMap(t, cs, &item)) + kubeassert.ConfigMapDoesNotExist(utils.GetDigestConfigMapName(&item)) + } + + // Clean up inspect MC if it exists + machineConfig, err := cs.MachineConfigs().Get(context.TODO(), InspectMC, metav1.GetOptions{}) + if err == nil { + t.Logf("Cleaning up MachineConfig %q", InspectMC) + require.NoError(t, deleteObject(context.TODO(), t, machineConfig, cs.MachineConfigs())) + kubeassert.MachineConfigDoesNotExist(machineConfig) } } +type deleter interface { + Delete(context.Context, string, metav1.DeleteOptions) error +} + type kubeObject interface { runtime.Object GetName() string } -func deleteObject(ctx context.Context, t *testing.T, obj kubeObject, deleter interface { - Delete(context.Context, string, metav1.DeleteOptions) error -}) error { - err := deleter.Delete(ctx, obj.GetName(), metav1.DeleteOptions{}) +func deleteObject(ctx context.Context, t *testing.T, obj kubeObject, deleter deleter) error { + return deleteObjectWithOpts(ctx, t, obj, deleter, metav1.DeleteOptions{}) +} +func deleteObjectWithOpts(ctx context.Context, t *testing.T, obj kubeObject, deleter deleter, opts metav1.DeleteOptions) error { kind, err := utils.GetKindForObject(obj) if err != nil && kind == "" { kind = "" } + err = deleter.Delete(ctx, obj.GetName(), opts) + if err == nil { t.Logf("Cleaned up %s %q", kind, obj.GetName()) return nil @@ -373,12 +436,15 @@ func deleteObject(ctx context.Context, t *testing.T, obj kubeObject, deleter int return err } -func ignoreErrNotFound(t *testing.T, err error) error { +func cleanupDigestConfigMap(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) error { + cm, err := cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace).Get(context.TODO(), utils.GetDigestConfigMapName(mosb), metav1.GetOptions{}) + if err == nil { + return deleteObject(context.TODO(), t, cm, cs.CoreV1Interface.ConfigMaps(ctrlcommon.MCONamespace)) + } if k8serrors.IsNotFound(err) { - t.Logf("") + t.Logf("%s already cleaned up", 
utils.GetDigestConfigMapName(mosb)) return nil } - return err } @@ -425,7 +491,7 @@ func writeBuildArtifactsToFiles(t *testing.T, cs *framework.ClientSet, poolName // Writes all MachineOSBuilds to a file. func writeMachineOSBuildsToFile(t *testing.T, cs *framework.ClientSet, archiveDir string) error { - mosbList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) + mosbList, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -440,7 +506,7 @@ func writeMachineOSBuildsToFile(t *testing.T, cs *framework.ClientSet, archiveDi // Writes all MachineOSConfigs to a file. func writeMachineOSConfigsToFile(t *testing.T, cs *framework.ClientSet, archiveDir string) error { - moscList, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) + moscList, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -515,8 +581,8 @@ func streamMachineOSBuilderPodLogsToFile(ctx context.Context, t *testing.T, cs * // Streams the logs for all of the containers running in the build pod. The pod // logs can provide a valuable window into how / why a given build failed. 
-func streamBuildPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, mosb *mcfgv1alpha1.MachineOSBuild, dirPath string) error { - jobName := mosb.Status.BuilderReference.PodImageBuilder.Name +func streamBuildPodLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild, dirPath string) error { + jobName := mosb.Status.Builder.Job.Name pod, err := getPodFromJob(ctx, cs, jobName) if err != nil { @@ -532,7 +598,7 @@ func getPodFromJob(ctx context.Context, cs *framework.ClientSet, jobName string) return nil, fmt.Errorf("could not get job %s: %w", job, err) } - podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", jobName)}) + podList, err := cs.CoreV1Interface.Pods(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", job.UID)}) if err != nil { return nil, fmt.Errorf("could not get pods with job label %s: %w", jobName, err) } @@ -553,6 +619,40 @@ func getPodFromJob(ctx context.Context, cs *framework.ClientSet, jobName string) return nil, fmt.Errorf("no pod found for job %s", jobName) } +// getJobForMOSB returns the name of the job that was created for the given MOSB by comparing the job UID +// to the UID stored in the MOSB annotation +func getJobForMOSB(ctx context.Context, cs *framework.ClientSet, build *mcfgv1.MachineOSBuild) (string, error) { + jobName := "" + mosbJobUID := "" + + for mosbJobUID == "" { + mosb, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(ctx, build.Name, metav1.GetOptions{}) + if err != nil { + return jobName, fmt.Errorf("could not get MachineOSBuild %s: %w", build.Name, err) + } + if mosb.GetAnnotations()[constants.JobUIDAnnotationKey] != "" { + mosbJobUID = mosb.GetAnnotations()[constants.JobUIDAnnotationKey] + break + } + time.Sleep(1 * time.Second) + } + + for jobName == "" { + jobs, err := 
cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return jobName, fmt.Errorf("could not get list of jobs: %w", err) + } + for _, job := range jobs.Items { + if string(job.UID) == mosbJobUID { + jobName = job.Name + break + } + } + time.Sleep(1 * time.Second) + } + return jobName, nil +} + // Attaches a follower to each of the containers within a given pod in order to // stream their logs to disk for future debugging. func streamPodContainerLogsToFile(ctx context.Context, t *testing.T, cs *framework.ClientSet, pod *corev1.Pod, dirPath string) error { @@ -677,34 +777,17 @@ func convertFilesFromContainerImageToBytesMap(t *testing.T, pullspec, containerF return out } -// Copy the entitlement certificates into the MCO namespace. If the secrets -// cannot be found, calls t.Skip() to skip the test. -// -// Registers and returns a cleanup function to remove the certificate(s) after test completion. -func copyEntitlementCerts(t *testing.T, cs *framework.ClientSet) func() { - src := metav1.ObjectMeta{ - Name: "etc-pki-entitlement", - Namespace: "openshift-config-managed", - } - - dst := metav1.ObjectMeta{ - Name: src.Name, - Namespace: ctrlcommon.MCONamespace, - } - - _, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) - if err == nil { - return cloneSecret(t, cs, src, dst) - } +// Skips the test if the entitlement secret is not present. 
+func skipIfEntitlementNotPresent(t *testing.T, cs *framework.ClientSet) { + _, err := cs.CoreV1Interface.Secrets(constants.EtcPkiEntitlementSecretName).Get(context.TODO(), ctrlcommon.OpenshiftConfigManagedNamespace, metav1.GetOptions{}) if k8serrors.IsNotFound(err) { - t.Logf("Secret %q not found in %q, skipping test", src.Name, src.Namespace) + t.Logf("Secret %q not found in %q, skipping test", constants.EtcPkiEntitlementSecretName, ctrlcommon.OpenshiftConfigManagedNamespace) t.Skip() - return func() {} + return } - - t.Fatalf("could not get %q from %q: %s", src.Name, src.Namespace, err) - return func() {} + // No other errors are expected. + require.NoError(t, err) } // Uses the centos stream 9 container and extracts the contents of both the @@ -749,34 +832,6 @@ func injectYumRepos(t *testing.T, cs *framework.ClientSet) func() { }) } -// Clones a given secret from a given namespace into the MCO namespace. -// Registers and returns a cleanup function to delete the secret upon test -// completion. 
-func cloneSecret(t *testing.T, cs *framework.ClientSet, src, dst metav1.ObjectMeta) func() { - secret, err := cs.CoreV1Interface.Secrets(src.Namespace).Get(context.TODO(), src.Name, metav1.GetOptions{}) - require.NoError(t, err) - - secretCopy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: dst.Name, - Namespace: dst.Namespace, - Labels: map[string]string{ - clonedSecretLabelKey: "", - }, - }, - Data: secret.Data, - Type: secret.Type, - } - - cleanup := createSecret(t, cs, secretCopy) - t.Logf("Cloned \"%s/%s\" to \"%s/%s\"", src.Namespace, src.Name, dst.Namespace, dst.Name) - - return makeIdempotentAndRegister(t, func() { - cleanup() - t.Logf("Deleted cloned secret \"%s/%s\"", dst.Namespace, dst.Name) - }) -} - func newMachineConfig(name, pool string) *mcfgv1.MachineConfig { mode := 420 testfiledata := fmt.Sprintf("data:,%s-%s", name, pool) @@ -828,6 +883,13 @@ func getImagePullspecForFailureTest(ctx context.Context, cs *framework.ClientSet } } +func getBadContainerFileForFailureTest() []mcfgv1.MachineOSContainerfile { + return []mcfgv1.MachineOSContainerfile{{ + ContainerfileArch: mcfgv1.NoArch, + Content: "THIS IS A BAD CONTAINERFILE", + }} +} + // Talks to an image registry to get the digested image pullspec for the // supplied image pullspec. Note: Only supports public image registries. This // is the same as doing: @@ -852,3 +914,93 @@ func resolveTaggedPullspecToDigestedPullspec(ctx context.Context, pullspec strin return canonical.String(), nil } + +// TODO: Deduplicate this definition from machine-config-operator/devex/internal/pkg/rollout/rollout.go +// Having "internal" in the module path prevents us from reusing it here since +// it is internal to the devex directory. 
+func setDeploymentReplicas(t *testing.T, cs *framework.ClientSet, deployment metav1.ObjectMeta, replicas int32) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + t.Logf("Setting replicas for %s/%s to %d", deployment.Namespace, deployment.Name, replicas) + scale, err := cs.AppsV1Interface.Deployments(deployment.Namespace).GetScale(context.TODO(), deployment.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + scale.Spec.Replicas = replicas + + _, err = cs.AppsV1Interface.Deployments(deployment.Namespace).UpdateScale(context.TODO(), deployment.Name, scale, metav1.UpdateOptions{}) + return err + }) +} + +// Scales down the machine-os-builder, machine-config-opreator, and +// cluster-version-operator deployments. Registers and returns an idempotent +// function that will scale the deployments back to their original values. +func scaleDownDeployments(t *testing.T, cs *framework.ClientSet) func() { + deployments := []metav1.ObjectMeta{ + // Scale down the cluster-version-operator since it could set the desired + // replicas for the MCO to 1. + { + Name: "cluster-version-operator", + Namespace: "openshift-cluster-version", + }, + // Scale down the machine-config-operator since it could set the desired + // replicas for the build controller to 1. + { + Name: "machine-config-operator", + Namespace: ctrlcommon.MCONamespace, + }, + // Scale down the machine-os-builder since we want to simulate its pod + // being rescheduled. + { + Name: "machine-os-builder", + Namespace: ctrlcommon.MCONamespace, + }, + } + + restoreFuncs := []func(){} + + for _, deployment := range deployments { + restoreFuncs = append(restoreFuncs, scaleDownDeployment(t, cs, deployment)) + } + + return helpers.MakeIdempotentAndRegister(t, func() { + // Restore the deployments in the reverse order by which we disabled them. + // Not really necessary, but we want to ensure that the machine-os-builder + // deployment starts back up as soon as possible. 
+ slices.Reverse(restoreFuncs) + + for _, restoreFunc := range restoreFuncs { + restoreFunc() + } + }) +} + +// Scales down a given deployment unless that deployment is already set to zero +// replicas, in which case it no-ops. Registers and returns an idempotent +// restoral function that will revert the deployment back to its original +// setting. +func scaleDownDeployment(t *testing.T, cs *framework.ClientSet, deployment metav1.ObjectMeta) func() { + ctx := context.TODO() + + originalDeployment, err := cs.AppsV1Interface.Deployments(deployment.Namespace).Get(ctx, deployment.Name, metav1.GetOptions{}) + require.NoError(t, err) + + originalReplicas := *originalDeployment.Spec.Replicas + + // We check if the original replica count is zero. This is because it is very + // common for a dev sandbox cluster to at least have the CVO disabled. + if originalReplicas == 0 { + t.Logf("Original replica count for deployment %s/%s set to 0, skipping scale down", deployment.Namespace, deployment.Name) + + return helpers.MakeIdempotentAndRegister(t, func() { + t.Logf("Original replica count for deployment %s/%s set to 0, skipping restore", deployment.Namespace, deployment.Name) + }) + } + + require.NoError(t, setDeploymentReplicas(t, cs, deployment, 0)) + + return helpers.MakeIdempotentAndRegister(t, func() { + require.NoError(t, setDeploymentReplicas(t, cs, deployment, originalReplicas)) + }) +} diff --git a/test/e2e-ocl/main_test.go b/test/e2e-ocl/main_test.go new file mode 100644 index 0000000000..e6f51b52b0 --- /dev/null +++ b/test/e2e-ocl/main_test.go @@ -0,0 +1,16 @@ +package e2e_ocl_test + +import ( + "flag" + "os" + "testing" + + "k8s.io/klog/v2" +) + +func TestMain(m *testing.M) { + flag.Parse() + klog.Infof("-skip-cleanup: %v", skipCleanupAlways) + klog.Infof("-skip-cleanup-on-failure: %v", skipCleanupOnlyAfterFailure) + os.Exit(m.Run()) +} diff --git a/test/e2e-techpreview/onclusterlayering_test.go b/test/e2e-ocl/onclusterlayering_test.go similarity index 56% rename 
from test/e2e-techpreview/onclusterlayering_test.go rename to test/e2e-ocl/onclusterlayering_test.go index 210513bb7d..b250cfa523 100644 --- a/test/e2e-techpreview/onclusterlayering_test.go +++ b/test/e2e-ocl/onclusterlayering_test.go @@ -1,4 +1,4 @@ -package e2e_techpreview_test +package e2e_ocl_test import ( "context" @@ -12,11 +12,11 @@ import ( "testing" "time" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" "github.com/openshift/machine-config-operator/pkg/daemon/runtimeassets" "github.com/openshift/machine-config-operator/test/framework" @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/require" "github.com/openshift/machine-config-operator/pkg/controller/build/buildrequest" + "github.com/openshift/machine-config-operator/pkg/controller/build/constants" + "github.com/openshift/machine-config-operator/pkg/controller/build/imagebuilder" "github.com/openshift/machine-config-operator/pkg/controller/build/utils" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -62,17 +64,20 @@ var ( okdFcosDockerfile string ) -var skipCleanup bool +var skipCleanupAlways bool +var skipCleanupOnlyAfterFailure bool func init() { // Skips running the cleanup functions. Useful for debugging tests. - flag.BoolVar(&skipCleanup, "skip-cleanup", false, "Skips running the cleanup functions") + flag.BoolVar(&skipCleanupAlways, "skip-cleanup", false, "Skips running cleanups regardless of outcome") + // Skips running the cleanup function only when the test fails. + flag.BoolVar(&skipCleanupOnlyAfterFailure, "skip-cleanup-on-failure", false, "Skips running cleanups only after failure") } // Holds elements common for each on-cluster build tests. 
type onClusterLayeringTestOpts struct { // Which image builder type to use for the test. - imageBuilderType mcfgv1alpha1.MachineOSImageBuilderType + imageBuilderType mcfgv1.MachineOSImageBuilderType // The custom Dockerfiles to use for the test. This is a map of MachineConfigPool name to Dockerfile content. customDockerfiles map[string]string @@ -84,16 +89,13 @@ type onClusterLayeringTestOpts struct { poolName string // Use RHEL entitlements - useEtcPkiEntitlement bool + entitlementRequired bool // Inject YUM repo information from a Centos 9 stream container useYumRepos bool - - // Add Extensions for testing - useExtensions bool } -func TestOnClusterBuildsOnOKD(t *testing.T) { +func TestOnClusterLayeringOnOKD(t *testing.T) { skipOnOCP(t) runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ @@ -105,42 +107,161 @@ func TestOnClusterBuildsOnOKD(t *testing.T) { } // Tests that an on-cluster build can be performed with the Custom Pod Builder. -func TestOnClusterBuildsCustomPodBuilder(t *testing.T) { - runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ +func TestOnClusterLayering(t *testing.T) { + + _, mosb := runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, }, }) + + // Test rebuild annotation works + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + t.Logf("Applying rebuild annotation (%q) to MachineOSConfig (%q) to cause a rebuild", constants.RebuildMachineOSConfigAnnotationKey, layeredMCPName) + + cs := framework.NewClientSet("") + + mosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(ctx, layeredMCPName, metav1.GetOptions{}) + require.NoError(t, err) + + helpers.SetRebuildAnnotationOnMachineOSConfig(ctx, t, cs.GetMcfgclient(), mosc) + + // Use the UID of the previous MOSB to ensure it is deleted as the rebuild will trigger a MOSB with the same name + t.Logf("Waiting for the previous MachineOSBuild with UID %q 
to be deleted", mosb.UID) + waitForMOSBToBeDeleted(t, cs, mosb) + + // Wait for the build to start + waitForBuildToStartForPoolAndConfig(t, cs, layeredMCPName, mosc.Name) + } // Tests that an on-cluster build can be performed and that the resulting image // is rolled out to an opted-in node. -func TestOnClusterBuildRollsOutImageWithExtensionsInstalled(t *testing.T) { - imagePullspec := runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ +func TestOnClusterBuildRollsOutImage(t *testing.T) { + imagePullspec, _ := runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ layeredMCPName: cowsayDockerfile, }, - useExtensions: true, }) cs := framework.NewClientSet("") node := helpers.GetRandomNode(t, cs, "worker") - unlabelFunc := makeIdempotentAndRegister(t, helpers.LabelNode(t, cs, node, helpers.MCPNameToRole(layeredMCPName))) + unlabelFunc := makeIdempotentAndRegisterAlwaysRun(t, helpers.LabelNode(t, cs, node, helpers.MCPNameToRole(layeredMCPName))) helpers.WaitForNodeImageChange(t, cs, node, imagePullspec) helpers.AssertNodeBootedIntoImage(t, cs, node, imagePullspec) t.Logf("Node %s is booted into image %q", node.Name, imagePullspec) - assertExtensionInstalledOnNode(t, cs, node, true) t.Log(helpers.ExecCmdOnNode(t, cs, node, "chroot", "/rootfs", "cowsay", "Moo!")) unlabelFunc() assertNodeRevertsToNonLayered(t, cs, node) - assertExtensionInstalledOnNode(t, cs, node, false) +} + +func TestMissingImageIsRebuilt(t *testing.T) { + cs := framework.NewClientSet("") + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) + + firstImagePullspec, firstMOSB := runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ + poolName: layeredMCPName, + customDockerfiles: map[string]string{ + layeredMCPName: cowsayDockerfile, + }, + }) + + moscName := layeredMCPName + t.Logf("Waiting for MachineOSConfig %q to have a new pullspec", 
moscName) + waitForMOSCToGetNewPullspec(ctx, t, cs, moscName, firstImagePullspec) + + // Create a MC to create another MOSB + testMC := newMachineConfig(InspectMC, layeredMCPName) + t.Logf("Creating MachineConfig %q", testMC.Name) + firstMC, err := cs.MachineConfigs().Create(ctx, testMC, metav1.CreateOptions{}) + require.NoError(t, err) + t.Logf("Created MachineConfig %q", firstMC.Name) + kubeassert.MachineConfigExists(firstMC) + + // Wait for the build to start + t.Logf("Waiting for 2nd build to start...") + secondMOSBName := waitForMOSCToUpdateCurrentMOSB(ctx, t, cs, moscName, firstMOSB.Name) + secondMOSB, err := cs.GetMcfgclient().MachineconfigurationV1().MachineOSBuilds().Get(ctx, secondMOSBName, metav1.GetOptions{}) + require.NoError(t, err) + secondMOSB = waitForBuildToStart(t, cs, secondMOSB) + t.Logf("MachineOSBuild %q has started", secondMOSB.Name) + assertBuildJobIsAsExpected(t, cs, secondMOSB) + + // Wait for the build to finish + t.Logf("Waiting for 2nd build completion...") + secondFinishedBuild := waitForBuildToComplete(t, cs, secondMOSB) + secondImagePullspec := string(secondFinishedBuild.Status.DigestedImagePushSpec) + t.Logf("MachineOSBuild %q has completed and produced image: %s", secondFinishedBuild.Name, secondImagePullspec) + waitForMOSCToGetNewPullspec(ctx, t, cs, moscName, secondImagePullspec) + + // Delete the first image + t.Logf("Deleting image %q", firstImagePullspec) + istName := fmt.Sprintf("os-image:%s", firstMOSB.Name) + err = cs.ImageStreamTags(ctrlcommon.MCONamespace).Delete(ctx, istName, metav1.DeleteOptions{}) + require.NoError(t, err) + kubeassert.ImageDoesNotExist(istName) + t.Logf("Deleted image %q", firstImagePullspec) + // Delete the MC + t.Logf("Deleting MachineConfig %q to retrigger build", firstMC.Name) + err = cs.MachineConfigs().Delete(ctx, firstMC.Name, metav1.DeleteOptions{}) + require.NoError(t, err) + kubeassert.MachineConfigDoesNotExist(firstMC) + t.Logf("Deleted MachineConfig %q", firstMC.Name) + + // Wait for the 
build to start + t.Logf("Waiting for 3rd build to start...") + thirdMOSBName := waitForMOSCToUpdateCurrentMOSB(ctx, t, cs, moscName, secondMOSB.Name) + thirdMOSB, err := cs.GetMcfgclient().MachineconfigurationV1().MachineOSBuilds().Get(ctx, thirdMOSBName, metav1.GetOptions{}) + require.NoError(t, err) + thirdMOSB = waitForBuildToStart(t, cs, thirdMOSB) + t.Logf("MachineOSBuild %q has started", thirdMOSB.Name) + assertBuildJobIsAsExpected(t, cs, thirdMOSB) + + // Wait for the build to finish + t.Logf("Waiting for 3rd build completion...") + thirdFinishedBuild := waitForBuildToComplete(t, cs, thirdMOSB) + thirdImagePullspec := string(thirdFinishedBuild.Status.DigestedImagePushSpec) + t.Logf("MachineOSBuild %q has completed and produced image: %s", thirdFinishedBuild.Name, thirdImagePullspec) + waitForMOSCToGetNewPullspec(ctx, t, cs, moscName, thirdImagePullspec) + + // Apply the MC again + t.Logf("Applying MachineConfig %q", firstMC.Name) + secondMC, err := cs.MachineConfigs().Create(ctx, testMC, metav1.CreateOptions{}) + require.NoError(t, err) + kubeassert.MachineConfigExists(secondMC) + t.Logf("Created MachineConfig %q", secondMC.Name) + t.Logf("Reusing previous MachineOSBuild %q that has produced image: %s", secondFinishedBuild.Name, secondImagePullspec) + waitForMOSCToGetNewPullspec(ctx, t, cs, moscName, secondImagePullspec) + + // Delete the third MOSB and the image should be deleted also + t.Logf("Deleting MachineOSBuild %q", thirdMOSB.Name) + err = cs.MachineconfigurationV1Interface.MachineOSBuilds().Delete(ctx, thirdMOSB.Name, metav1.DeleteOptions{}) + require.NoError(t, err) + kubeassert.MachineOSBuildDoesNotExist(thirdMOSB) + t.Logf("Deleted MachineOSBuild %q", thirdMOSB.Name) + isiName := fmt.Sprintf("os-image:%s", thirdMOSBName) + kubeassert.ImageDoesNotExist(isiName) + t.Logf("Image %q belonging to MachineOSBuild %q has been deleted", thirdImagePullspec, thirdMOSB.Name) + + // Delete the MC for cleanup + t.Logf("Deleting MachineConfig %q for cleanup", 
firstMC.Name) + err = cs.MachineConfigs().Delete(ctx, firstMC.Name, metav1.DeleteOptions{}) + require.NoError(t, err) + kubeassert.MachineConfigDoesNotExist(firstMC) + t.Logf("Deleted MachineConfig %q", firstMC.Name) } func assertNodeRevertsToNonLayered(t *testing.T, cs *framework.ClientSet, node corev1.Node) { @@ -157,28 +278,14 @@ func assertNodeRevertsToNonLayered(t *testing.T, cs *framework.ClientSet, node c helpers.AssertFileNotOnNode(t, cs, node, runtimeassets.RevertServiceMachineConfigFile) } -func assertExtensionInstalledOnNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, shouldExist bool) { - foundPkg, err := helpers.ExecCmdOnNodeWithError(cs, node, "chroot", "/rootfs", "rpm", "-q", "usbguard") - if shouldExist { - require.NoError(t, err, "usbguard extension not found") - if strings.Contains(foundPkg, "package usbguard is not installed") { - t.Fatalf("usbguard package not installed on node %s, got %s", node.Name, foundPkg) - } - t.Logf("usbguard extension installed, got %s", foundPkg) - } else { - if !strings.Contains(foundPkg, "package usbguard is not installed") { - t.Fatalf("usbguard package is installed on node %s, got %s", node.Name, foundPkg) - } - t.Logf("usbguard extension not installed as expected, got %s", foundPkg) - } -} - // This test extracts the /etc/yum.repos.d and /etc/pki/rpm-gpg content from a // Centos Stream 9 image and injects them into the MCO namespace. It then // performs a build with the expectation that these artifacts will be used, // simulating a build where someone has added this content; usually a Red Hat // Satellite user. 
func TestYumReposBuilds(t *testing.T) { + // Skipping this test as it is having a package conflict issue unrelated to MCO + t.Skip() runOnClusterLayeringTest(t, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ @@ -188,9 +295,8 @@ func TestYumReposBuilds(t *testing.T) { }) } -// Clones the etc-pki-entitlement certificate from the openshift-config-managed -// namespace into the MCO namespace. Then performs an on-cluster layering build -// which should consume the entitlement certificates. +// Then performs an on-cluster layering build which should consume the +// etc-pki-entitlement certificates. func TestEntitledBuilds(t *testing.T) { skipOnOKD(t) @@ -199,7 +305,7 @@ func TestEntitledBuilds(t *testing.T) { customDockerfiles: map[string]string{ layeredMCPName: entitledDockerfile, }, - useEtcPkiEntitlement: true, + entitlementRequired: true, }) } @@ -208,7 +314,7 @@ func TestEntitledBuilds(t *testing.T) { // being built. func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - cancel = makeIdempotentAndRegister(t, cancel) + t.Cleanup(cancel) cs := framework.NewClientSet("") @@ -219,7 +325,7 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { }, }) - t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) @@ -246,12 +352,11 @@ func TestMachineOSConfigChangeRestartsBuild(t *testing.T) { waitForBuildToStart(t, cs, moscChangeMosb) t.Logf("Waiting for initial MachineOSBuild %s to be deleted", firstMosb.Name) - // Wait for the first build to be deleted. waitForBuildToBeDeleted(t, cs, firstMosb) // Ensure that the second build still exists. 
- _, err = cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(context.TODO(), moscChangeMosb.Name, metav1.GetOptions{}) + _, err = cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(context.TODO(), moscChangeMosb.Name, metav1.GetOptions{}) require.NoError(t, err) } @@ -268,7 +373,7 @@ func TestMachineConfigPoolChangeRestartsBuild(t *testing.T) { }, }) - t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) // Wait for the first build to start. firstMosb := waitForBuildToStartForPoolAndConfig(t, cs, layeredMCPName, mosc.Name) @@ -277,15 +382,8 @@ func TestMachineConfigPoolChangeRestartsBuild(t *testing.T) { // the rendered config to appear, then we check that a new MachineOSBuild has // started for that new MachineConfig. mcName := "new-machineconfig" - mc := newMachineConfig("new-machineconfig", layeredMCPName) - - mcCleanupFunc := helpers.ApplyMC(t, cs, mc) - t.Cleanup(func() { - mcCleanupFunc() - t.Logf("Deleted MachineConfig %s", mc.Name) - }) - - t.Logf("Created new MachineConfig %q", mcName) + mc := newMachineConfig(mcName, layeredMCPName) + applyMC(t, cs, mc) _, err := helpers.WaitForRenderedConfig(t, cs, layeredMCPName, mcName) require.NoError(t, err) @@ -296,15 +394,16 @@ func TestMachineConfigPoolChangeRestartsBuild(t *testing.T) { // Next, we wait for the new build to be started. secondMosb := waitForBuildToStartForPoolAndConfig(t, cs, layeredMCPName, mosc.Name) - _, err = cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(context.TODO(), secondMosb.Name, metav1.GetOptions{}) + _, err = cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(context.TODO(), secondMosb.Name, metav1.GetOptions{}) require.NoError(t, err) } -// This test starts a build with an image that is known to fail because it does -// not have the necessary binaries within it. 
After failure, it edits the -// MachineOSConfig with the expectation that the failed build and its objects -// will be deleted and a new build will start in its place. +// This test starts a build with an image that is known to fail because it uses +// an invalid containerfile. After failure, it edits the MachineOSConfig +// with the expectation that the failed build and its will be deleted and a new +// build will start in its place. func TestGracefulBuildFailureRecovery(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -317,16 +416,12 @@ func TestGracefulBuildFailureRecovery(t *testing.T) { }, }) - // Override the base OS container image to pull an invalid (and smaller) - // image that should produce a failure faster. - pullspec, err := getImagePullspecForFailureTest(ctx, cs) - require.NoError(t, err) - - t.Logf("Overriding BaseImagePullspec for MachineOSConfig %s with %q to cause a build failure", mosc.Name, pullspec) + // Add a bad containerfile so that we can cause a build failure + t.Logf("Adding a bad containerfile for MachineOSConfig %s to cause a build failure", mosc.Name) - mosc.Spec.BuildInputs.BaseOSImagePullspec = pullspec + mosc.Spec.Containerfile = getBadContainerFileForFailureTest() - t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) // Wait for the build to start. firstMosb := waitForBuildToStartForPoolAndConfig(t, cs, layeredMCPName, mosc.Name) @@ -338,15 +433,15 @@ func TestGracefulBuildFailureRecovery(t *testing.T) { kubeassert.Eventually().MachineOSBuildIsFailure(firstMosb) // Clear the overridden image pullspec. 
- apiMosc, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) require.NoError(t, err) - apiMosc.Spec.BuildInputs.BaseOSImagePullspec = "" + apiMosc.Spec.Containerfile = []mcfgv1.MachineOSContainerfile{} - updated, err := cs.MachineconfigurationV1alpha1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + updated, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) - t.Logf("Cleared BaseImagePullspec to use default value") + t.Logf("Cleared out bad containerfile") mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) @@ -364,6 +459,7 @@ func TestGracefulBuildFailureRecovery(t *testing.T) { // Ensure that the second build is still running. kubeassert.MachineOSBuildExists(secondMosb) assertBuildObjectsAreCreated(t, kubeassert, secondMosb) + } // This test validates that when a running builder is deleted, the @@ -385,19 +481,26 @@ func TestDeletedBuilderInterruptsMachineOSBuild(t *testing.T) { // Create our MachineOSConfig and ensure that it is deleted after the test is // finished. 
- t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) // Wait for the build to start startedBuild := waitForBuildToStartForPoolAndConfig(t, cs, poolName, mosc.Name) t.Logf("MachineOSBuild %q has started", startedBuild.Name) + pod, err := getPodFromJob(ctx, cs, utils.GetBuildJobName(startedBuild)) + require.NoError(t, err) + // Delete the builder - err := cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).Delete(ctx, utils.GetBuildJobName(startedBuild), metav1.DeleteOptions{}) + bgDeletion := metav1.DeletePropagationBackground + err = cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).Delete(ctx, utils.GetBuildJobName(startedBuild), metav1.DeleteOptions{PropagationPolicy: &bgDeletion}) require.NoError(t, err) // Wait for the build to be interrupted. kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx).Eventually() - kubeassert.MachineOSBuildIsInterrupted(startedBuild) + waitForBuildToBeInterrupted(t, cs, startedBuild) + // Ensure that the pod and job are deleted + kubeassert.Eventually().JobDoesNotExist(utils.GetBuildJobName(startedBuild)) + kubeassert.Eventually().PodDoesNotExist(pod.Name) } // This test validates that when a running build pod is deleted, the @@ -420,7 +523,7 @@ func TestDeletedPodDoesNotInterruptMachineOSBuild(t *testing.T) { // Create our MachineOSConfig and ensure that it is deleted after the test is // finished. - t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) // Wait for the build to start startedBuild := waitForBuildToStartForPoolAndConfig(t, cs, poolName, mosc.Name) @@ -465,7 +568,7 @@ func TestDeletedTransientMachineOSBuildIsRecreated(t *testing.T) { // Create our MachineOSConfig and ensure that it is deleted after the test is // finished. 
- t.Cleanup(createMachineOSConfig(t, cs, mosc)) + createMachineOSConfig(t, cs, mosc) // Wait for the build to start firstMosb := waitForBuildToStartForPoolAndConfig(t, cs, poolName, mosc.Name) @@ -474,9 +577,11 @@ func TestDeletedTransientMachineOSBuildIsRecreated(t *testing.T) { require.NoError(t, err) // Delete the MachineOSBuild. - err = cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Delete(ctx, firstMosb.Name, metav1.DeleteOptions{}) + err = cs.MachineconfigurationV1Interface.MachineOSBuilds().Delete(ctx, firstMosb.Name, metav1.DeleteOptions{}) require.NoError(t, err) + t.Logf("MachineOSBuild %q deleted", firstMosb.Name) + // Wait a few seconds for the MachineOSBuild deletion to complete. time.Sleep(time.Second * 5) // Ensure that the Job is deleted as this might take some time @@ -496,245 +601,112 @@ func TestDeletedTransientMachineOSBuildIsRecreated(t *testing.T) { assert.NotEqual(t, firstJob.UID, secondJob.UID) } -func assertBuildObjectsAreCreated(t *testing.T, kubeassert *helpers.Assertions, mosb *mcfgv1alpha1.MachineOSBuild) { - t.Helper() - - kubeassert.JobExists(utils.GetBuildJobName(mosb)) - kubeassert.ConfigMapExists(utils.GetContainerfileConfigMapName(mosb)) - kubeassert.ConfigMapExists(utils.GetMCConfigMapName(mosb)) - kubeassert.SecretExists(utils.GetBasePullSecretName(mosb)) - kubeassert.SecretExists(utils.GetFinalPushSecretName(mosb)) -} - -func assertBuildObjectsAreDeleted(t *testing.T, kubeassert *helpers.Assertions, mosb *mcfgv1alpha1.MachineOSBuild) { - t.Helper() - - kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) - kubeassert.ConfigMapDoesNotExist(utils.GetContainerfileConfigMapName(mosb)) - kubeassert.ConfigMapDoesNotExist(utils.GetMCConfigMapName(mosb)) - kubeassert.SecretDoesNotExist(utils.GetBasePullSecretName(mosb)) - kubeassert.SecretDoesNotExist(utils.GetFinalPushSecretName(mosb)) -} +// This test verifies that if the rebuild annotation is added to a given MachineOSConfig, that +// the build is restarted +func 
TestRebuildAnnotationRestartsBuild(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) -// This test asserts that any secrets attached to the MachineOSConfig are made -// available to the MCD and get written to the node. Note: In this test, the -// built image should not make it to the node before we've verified that the -// MCD has performed the desired actions. -func TestMCDGetsMachineOSConfigSecrets(t *testing.T) { cs := framework.NewClientSet("") - secretName := "mosc-image-pull-secret" - - // Create a dummy secret with a known hostname which will be assigned to the - // MachineOSConfig. This secret does not actually have to work for right now; - // we just need to make sure it lands on the node. - t.Cleanup(createSecret(t, cs, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: ctrlcommon.MCONamespace, - }, - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte(`{"auths": {"registry.hostname.com": {"username": "user", "password": "password"}}}`), - }, - })) - - // Select a random node that we will opt into the layered pool. Note: If - // successful, this node will never actually get the new image because we - // will have halted the build process before that happens. - node := helpers.GetRandomNode(t, cs, "worker") - - // Set up all of the objects needed for the build, including getting (but not - // yet applying) the MachineOSConfig. mosc := prepareForOnClusterLayeringTest(t, cs, onClusterLayeringTestOpts{ poolName: layeredMCPName, customDockerfiles: map[string]string{ - layeredMCPName: yumReposDockerfile, + layeredMCPName: cowsayDockerfile, }, - targetNode: &node, - useYumRepos: true, - }) - - // Assign the secret name to the MachineOSConfig. - mosc.Spec.BuildOutputs.CurrentImagePullSecret.Name = secretName - - // Create the MachineOSConfig which will start the build process. 
- t.Cleanup(createMachineOSConfig(t, cs, mosc)) - - // Wait for the build to start. We don't need to wait for it to complete - // since this test is primarily concerned about whether the MCD on the node - // gets our dummy secret or not. In the future, we should use a real secret - // and validate that the node can push and pull the image from it. We can - // simulate that by using an imagestream that lives in a different namespace. - waitForBuildToStartForPoolAndConfig(t, cs, layeredMCPName, mosc.Name) - - // Verifies that the MCD pod gets the appropriate secret volume and volume mount. - err := wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) { - mcdPod, err := helpers.MCDForNode(cs, &node) - - // If we can ignore this error, it means that the MCD is not ready yet, so - // return false here and try again later. - if err != nil && canIgnoreMCDForNodeError(err) { - return false, nil - } - - if err != nil { - return false, err - } - - // Return true when the following conditions are met: - // 1. The MCD pod has the expected secret volume. - // 2. The MCD pod has the expected secret volume mount. - // 3. All of the containers within the MCD pod are ready and running. - return podHasExpectedSecretVolume(mcdPod, secretName) && - podHasExpectedSecretVolumeMount(mcdPod, layeredMCPName, secretName) && - isMcdPodRunning(mcdPod), nil }) - require.NoError(t, err) - - t.Logf("MCD pod is running and has secret volume mount for %s", secretName) + createMachineOSConfig(t, cs, mosc) - // Get the internal image registry hostnames that we will ensure are present - // on the target node. - internalRegistryHostnames, err := getInternalRegistryHostnamesFromControllerConfig(cs) + mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, layeredMCPName, metav1.GetOptions{}) require.NoError(t, err) - // Adds the dummy hostname to the expected internal registry hostname list. 
- // At this point, the ControllerConfig may already have our dummy - // hostname, but there is no harm if it is present in this list twice. - internalRegistryHostnames = append(internalRegistryHostnames, "registry.hostname.com") - - // Wait for the MCD pod to write the dummy secret to the nodes' filesystem - // and validate that our dummy hostname is in there along with all of the - // ones from the ControllerConfig. - err = wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) { - filename := "/etc/mco/internal-registry-pull-secret.json" + mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp) - contents := helpers.ExecCmdOnNode(t, cs, node, "cat", filepath.Join("/rootfs", filename)) - - for _, hostname := range internalRegistryHostnames { - if !strings.Contains(contents, hostname) { - return false, nil - } - } + // First, we get a MachineOSBuild started as usual. + waitForBuildToStart(t, cs, mosb) - t.Logf("All hostnames %v found in %q on node %q", internalRegistryHostnames, filename, node.Name) - return true, nil - }) + kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) + assertBuildObjectsAreCreated(t, kubeassert, mosb) + pod, err := getPodFromJob(ctx, cs, utils.GetBuildJobName(mosb)) + require.NoError(t, err) + t.Logf("Initial build has started, delete the job to interrupt the build...") + // Delete the builder + bgDeletion := metav1.DeletePropagationBackground + err = cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).Delete(ctx, utils.GetBuildJobName(mosb), metav1.DeleteOptions{PropagationPolicy: &bgDeletion}) require.NoError(t, err) - t.Logf("Node filesystem %s got secret from MachineOSConfig", node.Name) -} - -// Returns a list of the hostnames provided by the InternalRegistryPullSecret -// field on the ControllerConfig object. 
-func getInternalRegistryHostnamesFromControllerConfig(cs *framework.ClientSet) ([]string, error) { - cfg, err := cs.ControllerConfigs().Get(context.TODO(), "machine-config-controller", metav1.GetOptions{}) - if err != nil { - return nil, err - } - - imagePullCfg, err := ctrlcommon.ToDockerConfigJSON(cfg.Spec.InternalRegistryPullSecret) - if err != nil { - return nil, err - } + // Wait for the build to be interrupted. + waitForBuildToBeInterrupted(t, cs, mosb) - hostnames := []string{} + // Wait for the job and pod to be deleted. + kubeassert.Eventually().JobDoesNotExist(utils.GetBuildJobName(mosb)) + kubeassert.Eventually().PodDoesNotExist(pod.Name) - for key := range imagePullCfg.Auths { - hostnames = append(hostnames, key) - } + t.Logf("Add rebuild annotation to the MOSC...") + helpers.SetRebuildAnnotationOnMachineOSConfig(ctx, t, cs.GetMcfgclient(), mosc) - return hostnames, nil -} + // Wait for the MOSB to be deleted + t.Logf("Waiting for MachineOSBuild with UID %s to be deleted", mosb.UID) + waitForMOSBToBeDeleted(t, cs, mosb) -// Determines if we can ignore the error returned by helpers.MCDForNode(). This -// checks for a very specific condition that is encountered by this test. In -// order for the MCD to get the secret from the MachineOSConfig, it must be -// restarted. While it is restarting, it is possible that the node will -// temporarily have two MCD pods associated with it; one is being created while -// the other is being terminated. -// -// The helpers.MCDForNode() function cannot -// distinguish between those scenarios, which is fine. But for the purposes of -// this test, we should ignore that specific error because it means that the -// MCD pod is not ready yet. -// -// Finally, it is worth noting that this is not the best way to compare errors, -// but its acceptable for our purposes here. 
-func canIgnoreMCDForNodeError(err error) bool { - return strings.Contains(err.Error(), "too many") && - strings.Contains(err.Error(), "MCDs for node") + t.Logf("Annotation is updated, waiting for new build %s to start", mosb.Name) + // Wait for the build to start. + waitForBuildToStart(t, cs, mosb) } -func podHasExpectedSecretVolume(pod *corev1.Pod, secretName string) bool { - for _, volume := range pod.Spec.Volumes { - if volume.Secret != nil && volume.Secret.SecretName == secretName { - return true - } - } - - return false -} +func assertBuildObjectsAreCreated(t *testing.T, kubeassert *helpers.Assertions, mosb *mcfgv1.MachineOSBuild) { + t.Helper() -func podHasExpectedSecretVolumeMount(pod *corev1.Pod, poolName, secretName string) bool { - for _, container := range pod.Spec.Containers { - for _, volumeMount := range container.VolumeMounts { - if volumeMount.Name == secretName && volumeMount.MountPath == filepath.Join("/run/secrets/os-image-pull-secrets", poolName) { - return true - } - } - } + kubeassert.JobExists(utils.GetBuildJobName(mosb)) + kubeassert.ConfigMapExists(utils.GetContainerfileConfigMapName(mosb)) + kubeassert.ConfigMapExists(utils.GetMCConfigMapName(mosb)) + kubeassert.ConfigMapExists(utils.GetEtcPolicyConfigMapName(mosb)) + kubeassert.ConfigMapExists(utils.GetEtcRegistriesConfigMapName(mosb)) + kubeassert.SecretExists(utils.GetBasePullSecretName(mosb)) + kubeassert.SecretExists(utils.GetFinalPushSecretName(mosb)) - return false + // Check that ownerReferences are set as well + kubeassert.ConfigMapHasOwnerSet(utils.GetContainerfileConfigMapName(mosb)) + kubeassert.ConfigMapHasOwnerSet(utils.GetMCConfigMapName(mosb)) + kubeassert.ConfigMapHasOwnerSet(utils.GetEtcPolicyConfigMapName(mosb)) + kubeassert.ConfigMapHasOwnerSet(utils.GetEtcRegistriesConfigMapName(mosb)) + kubeassert.SecretHasOwnerSet(utils.GetBasePullSecretName(mosb)) + kubeassert.SecretHasOwnerSet(utils.GetFinalPushSecretName(mosb)) } -// Determines whether a given MCD pod is running. 
Returns true only once all of -// the container statuses are in a running state. -func isMcdPodRunning(pod *corev1.Pod) bool { - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.Ready != true { - return false - } - - if containerStatus.Started == nil { - return false - } - - if *containerStatus.Started != true { - return false - } - - if containerStatus.State.Running == nil { - return false - } - } +func assertBuildObjectsAreDeleted(t *testing.T, kubeassert *helpers.Assertions, mosb *mcfgv1.MachineOSBuild) { + t.Helper() - return true + kubeassert.JobDoesNotExist(utils.GetBuildJobName(mosb)) + kubeassert.ConfigMapDoesNotExist(utils.GetContainerfileConfigMapName(mosb)) + kubeassert.ConfigMapDoesNotExist(utils.GetMCConfigMapName(mosb)) + kubeassert.ConfigMapDoesNotExist(utils.GetEtcPolicyConfigMapName(mosb)) + kubeassert.ConfigMapDoesNotExist(utils.GetEtcRegistriesConfigMapName(mosb)) + kubeassert.SecretDoesNotExist(utils.GetBasePullSecretName(mosb)) + kubeassert.SecretDoesNotExist(utils.GetFinalPushSecretName(mosb)) } // Sets up and performs an on-cluster build for a given set of parameters. // Returns the built image pullspec for later consumption. -func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) string { +func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) (string, *mcfgv1.MachineOSBuild) { ctx, cancel := context.WithCancel(context.Background()) - cancel = makeIdempotentAndRegister(t, cancel) + t.Cleanup(cancel) cs := framework.NewClientSet("") imageBuilder := testOpts.imageBuilderType if testOpts.imageBuilderType == "" { - imageBuilder = mcfgv1alpha1.PodBuilder + imageBuilder = mcfgv1.JobBuilder } t.Logf("Running with ImageBuilder type: %s", imageBuilder) mosc := prepareForOnClusterLayeringTest(t, cs, testOpts) - // Create our MachineOSConfig and ensure that it is deleted after the test is - // finished. 
- t.Cleanup(createMachineOSConfig(t, cs, mosc)) + // Create our MachineOSConfig. + createMachineOSConfig(t, cs, mosc) // Create a child context for the machine-os-builder pod log streamer. We // create it here because we want the cancellation to run before the @@ -759,7 +731,7 @@ func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) // We wire this to both t.Cleanup() as well as defer because we want to // cancel this context either at the end of this function or when the test // fails, whichever comes first. - buildPodWatcherShutdown := makeIdempotentAndRegister(t, buildPodStreamerCancel) + buildPodWatcherShutdown := makeIdempotentAndRegisterAlwaysRun(t, buildPodStreamerCancel) defer buildPodWatcherShutdown() dirPath, err := helpers.GetBuildArtifactDir(t) @@ -796,11 +768,11 @@ func runOnClusterLayeringTest(t *testing.T, testOpts onClusterLayeringTestOpts) // Wait for the build to complete. finishedBuild := waitForBuildToComplete(t, cs, startedBuild) - t.Logf("MachineOSBuild %q has completed and produced image: %s", finishedBuild.Name, finishedBuild.Status.FinalImagePushspec) + t.Logf("MachineOSBuild %q has completed and produced image: %s", finishedBuild.Name, finishedBuild.Status.DigestedImagePushSpec) require.NoError(t, archiveBuildPodLogs(t, podLogsDirPath)) - return finishedBuild.Status.FinalImagePushspec + return string(finishedBuild.Status.DigestedImagePushSpec), startedBuild } func archiveBuildPodLogs(t *testing.T, podLogsDirPath string) error { @@ -814,7 +786,7 @@ func archiveBuildPodLogs(t *testing.T, podLogsDirPath string) error { cmd := exec.Command("mv", podLogsDirPath, archive.StagingDir()) output, err := cmd.CombinedOutput() if err != nil { - t.Logf(string(output)) + t.Log(string(output)) return err } @@ -822,7 +794,7 @@ func archiveBuildPodLogs(t *testing.T, podLogsDirPath string) error { } // Waits for the build to start and returns the started MachineOSBuild object. 
-func waitForBuildToStartForPoolAndConfig(t *testing.T, cs *framework.ClientSet, poolName, moscName string) *mcfgv1alpha1.MachineOSBuild { +func waitForBuildToStartForPoolAndConfig(t *testing.T, cs *framework.ClientSet, poolName, moscName string) *mcfgv1.MachineOSBuild { t.Helper() var mosbName string @@ -840,7 +812,7 @@ func waitForBuildToStartForPoolAndConfig(t *testing.T, cs *framework.ClientSet, // Create a "dummy" MachineOSBuild object with just the name field set so // that waitForMachineOSBuildToReachState() can use it. - mosb := &mcfgv1alpha1.MachineOSBuild{ + mosb := &mcfgv1.MachineOSBuild{ ObjectMeta: metav1.ObjectMeta{ Name: mosbName, }, @@ -850,7 +822,7 @@ func waitForBuildToStartForPoolAndConfig(t *testing.T, cs *framework.ClientSet, } // Waits for a MachineOSBuild to start building. -func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, build *mcfgv1alpha1.MachineOSBuild) *mcfgv1alpha1.MachineOSBuild { +func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, build *mcfgv1.MachineOSBuild) *mcfgv1.MachineOSBuild { t.Helper() t.Logf("Waiting for MachineOSBuild %s to start", build.Name) @@ -865,18 +837,21 @@ func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, build *mcfgv1alp t.Logf("MachineOSBuild %s created after %s", build.Name, time.Since(start)) kubeassert.Eventually().MachineOSBuildIsRunning(build) t.Logf("MachineOSBuild %s running after %s", build.Name, time.Since(start)) - // The Job reports running before the pod is fully up and running, so the mosb ends up in building status - // however, since we are streaming container logs we might hit a race where the container has not started yet - // so add a check to ensure that the pod is up an running also - kubeassert.Eventually().JobExists(utils.GetBuildJobName(build)) - t.Logf("Build job %s created after %s", utils.GetBuildJobName(build), time.Since(start)) + + // Get the job for the MOSB created by comparing the job UID with the MOSB annotation + buildJobName, err := 
getJobForMOSB(ctx, cs, build) + require.NoError(t, err) + kubeassert.Eventually().JobExists(buildJobName) + t.Logf("Build job %s created after %s", buildJobName, time.Since(start)) // Get the pod created by the job - buildPod, err := getPodFromJob(context.TODO(), cs, utils.GetBuildJobName(build)) + buildPod, err := getPodFromJob(ctx, cs, buildJobName) require.NoError(t, err) kubeassert.Eventually().PodIsRunning(buildPod.Name) t.Logf("Build pod %s running after %s", buildPod.Name, time.Since(start)) + kubeassert.Eventually().PodHasOwnerSet(buildPod.Name) + t.Logf("Build pod %s has owner set after %s", buildPod.Name, time.Since(start)) - mosb, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(ctx, build.Name, metav1.GetOptions{}) + mosb, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(ctx, build.Name, metav1.GetOptions{}) require.NoError(t, err) assertBuildObjectsAreCreated(t, kubeassert.Eventually(), mosb) @@ -885,8 +860,41 @@ func waitForBuildToStart(t *testing.T, cs *framework.ClientSet, build *mcfgv1alp return mosb } +func waitForMOSBToBeDeleted(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + start := time.Now() + kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) + + // Get the MOSB from the API to get the UID + mosb, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(context.Background(), mosb.Name, metav1.GetOptions{}) + require.NoError(t, err) + mosbUID := mosb.UID + t.Logf("Waiting for MachineOSBuild with UID %s to be deleted", mosbUID) + + mosbs, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().List(ctx, metav1.ListOptions{}) + require.NoError(t, err) + + mosbWithUIDFound := false + for _, mosb := range mosbs.Items { + if mosb.UID == mosbUID { + kubeassert.Eventually().MachineOSBuildDoesNotExist(&mosb) + t.Logf("MachineOSBuild with UID %s deleted after %s", 
mosbUID, time.Since(start)) + mosbWithUIDFound = true + return + } + } + + if !mosbWithUIDFound { + t.Logf("MachineOSBuild with UID %s not found, must have already been deleted", mosbUID) + } +} + // Waits for a MachineOSBuild to be deleted. -func waitForBuildToBeDeleted(t *testing.T, cs *framework.ClientSet, build *mcfgv1alpha1.MachineOSBuild) { +func waitForBuildToBeDeleted(t *testing.T, cs *framework.ClientSet, build *mcfgv1.MachineOSBuild) { t.Helper() ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) @@ -905,7 +913,7 @@ func waitForBuildToBeDeleted(t *testing.T, cs *framework.ClientSet, build *mcfgv // Waits for the given MachineOSBuild to complete and returns the completed // MachineOSBuild object. -func waitForBuildToComplete(t *testing.T, cs *framework.ClientSet, startedBuild *mcfgv1alpha1.MachineOSBuild) *mcfgv1alpha1.MachineOSBuild { +func waitForBuildToComplete(t *testing.T, cs *framework.ClientSet, startedBuild *mcfgv1.MachineOSBuild) *mcfgv1.MachineOSBuild { t.Helper() t.Logf("Waiting for MachineOSBuild %s to complete", startedBuild.Name) @@ -916,12 +924,32 @@ func waitForBuildToComplete(t *testing.T, cs *framework.ClientSet, startedBuild start := time.Now() kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) - kubeassert.Eventually().MachineOSBuildIsSuccessful(startedBuild) //foo + kubeassert.Eventually().MachineOSBuildIsSuccessful(startedBuild) t.Logf("MachineOSBuild %s successful after %s", startedBuild.Name, time.Since(start)) assertBuildObjectsAreDeleted(t, kubeassert.Eventually(), startedBuild) t.Logf("Build objects deleted after %s", time.Since(start)) - mosb, err := cs.MachineconfigurationV1alpha1Interface.MachineOSBuilds().Get(ctx, startedBuild.Name, metav1.GetOptions{}) + mosb, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(ctx, startedBuild.Name, metav1.GetOptions{}) + require.NoError(t, err) + + return mosb +} + +func waitForBuildToBeInterrupted(t *testing.T, cs *framework.ClientSet, 
startedBuild *mcfgv1.MachineOSBuild) *mcfgv1.MachineOSBuild { + t.Helper() + + t.Logf("Waiting for MachineOSBuild %s to be interrupted", startedBuild.Name) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + start := time.Now() + + kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx) + kubeassert.Eventually().MachineOSBuildIsInterrupted(startedBuild) + t.Logf("MachineOSBuild %s interrupted after %s", startedBuild.Name, time.Since(start)) + + mosb, err := cs.MachineconfigurationV1Interface.MachineOSBuilds().Get(ctx, startedBuild.Name, metav1.GetOptions{}) require.NoError(t, err) return mosb @@ -931,7 +959,7 @@ func waitForBuildToComplete(t *testing.T, cs *framework.ClientSet, startedBuild // "correctly" means that it has the correct container images. Future // assertions could include things like ensuring that the proper volume mounts // are present, etc. -func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1alpha1.MachineOSBuild) { +func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcfgv1.MachineOSBuild) { t.Helper() osImageURLConfig, err := ctrlcommon.GetOSImageURLConfig(context.TODO(), cs.GetKubeclient()) @@ -940,7 +968,7 @@ func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcf mcoImages, err := ctrlcommon.GetImagesConfig(context.TODO(), cs.GetKubeclient()) require.NoError(t, err) - buildPod, err := getPodFromJob(context.TODO(), cs, mosb.Status.BuilderReference.PodImageBuilder.Name) + buildPod, err := getPodFromJob(context.TODO(), cs, mosb.Status.Builder.Job.Name) require.NoError(t, err) assertContainerIsUsingExpectedImage := func(c corev1.Container, containerName, expectedImage string) { @@ -969,11 +997,11 @@ func assertBuildJobIsAsExpected(t *testing.T, cs *framework.ClientSet, mosb *mcf // // Returns a MachineOSConfig object for the caller to create to begin the build // process. 
-func prepareForOnClusterLayeringTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterLayeringTestOpts) *mcfgv1alpha1.MachineOSConfig { - // If the test requires RHEL entitlements, clone them from - // "etc-pki-entitlement" in the "openshift-config-managed" namespace. - if testOpts.useEtcPkiEntitlement { - t.Cleanup(copyEntitlementCerts(t, cs)) +func prepareForOnClusterLayeringTest(t *testing.T, cs *framework.ClientSet, testOpts onClusterLayeringTestOpts) *mcfgv1.MachineOSConfig { + // If the test requires RHEL entitlements, ensure they are present + // in the test cluster. If not found, the test is skipped. + if testOpts.entitlementRequired { + skipIfEntitlementNotPresent(t, cs) } // If the test requires /etc/yum.repos.d and /etc/pki/rpm-gpg, pull a Centos @@ -981,90 +1009,48 @@ func prepareForOnClusterLayeringTest(t *testing.T, cs *framework.ClientSet, test // emulate the Red Hat Satellite enablement process, but does not actually // require any Red Hat Satellite creds to work. if testOpts.useYumRepos { - t.Cleanup(injectYumRepos(t, cs)) + injectYumRepos(t, cs) } // Register ephemeral object cleanup function. 
- t.Cleanup(func() { + makeIdempotentAndRegister(t, func() { cleanupEphemeralBuildObjects(t, cs) }) imagestreamObjMeta := metav1.ObjectMeta{ - Name: "os-image", - Namespace: strings.ToLower(t.Name()), + Name: "os-image", } - pushSecretName, finalPullspec, imagestreamCleanupFunc := setupImageStream(t, cs, imagestreamObjMeta) - t.Cleanup(imagestreamCleanupFunc) - - t.Cleanup(copyGlobalPullSecret(t, cs)) + pushSecretName, finalPullspec, _ := setupImageStream(t, cs, imagestreamObjMeta) if testOpts.targetNode != nil { - t.Cleanup(makeIdempotentAndRegister(t, helpers.CreatePoolWithNode(t, cs, testOpts.poolName, *testOpts.targetNode))) + makeIdempotentAndRegister(t, helpers.CreatePoolWithNode(t, cs, testOpts.poolName, *testOpts.targetNode)) } else { - t.Cleanup(makeIdempotentAndRegister(t, helpers.CreateMCP(t, cs, testOpts.poolName))) - } - - if testOpts.useExtensions { - extensionsMC := &mcfgv1.MachineConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "99-extensions", - Labels: helpers.MCLabelForRole(testOpts.poolName), - }, - Spec: mcfgv1.MachineConfigSpec{ - Config: runtime.RawExtension{ - Raw: helpers.MarshalOrDie(ctrlcommon.NewIgnConfig()), - }, - Extensions: []string{"usbguard"}, - }, - } - - helpers.SetMetadataOnObject(t, extensionsMC) - // Apply the extensions MC - mcCleanupFunc := helpers.ApplyMC(t, cs, extensionsMC) - t.Cleanup(func() { - mcCleanupFunc() - t.Logf("Deleted MachineConfig %s", extensionsMC.Name) - }) - t.Logf("Created new MachineConfig %q", extensionsMC.Name) - // Wait for rendered config to finish creating - renderedConfig, err := helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, extensionsMC.Name) - require.NoError(t, err) - t.Logf("Finished rendering config %s", renderedConfig) + makeIdempotentAndRegister(t, helpers.CreateMCP(t, cs, testOpts.poolName)) } _, err := helpers.WaitForRenderedConfig(t, cs, testOpts.poolName, "00-worker") require.NoError(t, err) - mosc := &mcfgv1alpha1.MachineOSConfig{ + mosc := &mcfgv1.MachineOSConfig{ ObjectMeta: 
metav1.ObjectMeta{ Name: testOpts.poolName, }, - Spec: mcfgv1alpha1.MachineOSConfigSpec{ - MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{ + Spec: mcfgv1.MachineOSConfigSpec{ + MachineConfigPool: mcfgv1.MachineConfigPoolReference{ Name: testOpts.poolName, }, - BuildInputs: mcfgv1alpha1.BuildInputs{ - BaseImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ - Name: globalPullSecretCloneName, - }, - RenderedImagePushSecret: mcfgv1alpha1.ImageSecretObjectReference{ - Name: pushSecretName, - }, - RenderedImagePushspec: finalPullspec, - ImageBuilder: &mcfgv1alpha1.MachineOSImageBuilder{ - ImageBuilderType: mcfgv1alpha1.PodBuilder, - }, - Containerfile: []mcfgv1alpha1.MachineOSContainerfile{ - { - ContainerfileArch: mcfgv1alpha1.NoArch, - Content: testOpts.customDockerfiles[testOpts.poolName], - }, - }, + RenderedImagePushSecret: mcfgv1.ImageSecretObjectReference{ + Name: pushSecretName, + }, + RenderedImagePushSpec: mcfgv1.ImageTagFormat(finalPullspec), + ImageBuilder: mcfgv1.MachineOSImageBuilder{ + ImageBuilderType: mcfgv1.JobBuilder, }, - BuildOutputs: mcfgv1alpha1.BuildOutputs{ - CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{ - Name: pushSecretName, + Containerfile: []mcfgv1.MachineOSContainerfile{ + { + ContainerfileArch: mcfgv1.NoArch, + Content: testOpts.customDockerfiles[testOpts.poolName], }, }, }, @@ -1121,9 +1107,7 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { helpers.SetMetadataOnObject(t, testConfig) // Create the MachineConfig and wait for the configuration to be applied - _, err := cs.MachineConfigs().Create(context.TODO(), testConfig, metav1.CreateOptions{}) - require.Nil(t, err, "failed to create MC") - t.Logf("Created %s", testConfig.Name) + mcCleanupFunc := applyMC(t, cs, testConfig) // wait for rendered config to finish creating renderedConfig, err := helpers.WaitForRenderedConfig(t, cs, layeredMCPName, testConfig.Name) @@ -1156,10 +1140,140 @@ func TestSSHKeyAndPasswordForOSBuilder(t *testing.T) { 
 	t.Cleanup(func() {
 		unlabelFunc()
-		if err := cs.MachineConfigs().Delete(context.TODO(), testConfig.Name, metav1.DeleteOptions{}); err != nil {
-			t.Error(err)
+		mcCleanupFunc()
+	})
+}
+
+// This test starts a build and then immediately scales down the
+// machine-os-builder deployment until the underlying build job has completed.
+// The rationale behind this test is so that if the machine-os-builder pod gets
+// rescheduled onto a different node while a build is occurring that the
+// MachineOSBuild object will eventually be reconciled, even if the build
+// completed during the rescheduling operation.
+func TestControllerEventuallyReconciles(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	cs := framework.NewClientSet("")
+
+	poolName := layeredMCPName
+
+	mosc := prepareForOnClusterLayeringTest(t, cs, onClusterLayeringTestOpts{
+		poolName: poolName,
+		customDockerfiles: map[string]string{
+			layeredMCPName: cowsayDockerfile,
+		},
+	})
+
+	mcp, err := cs.MachineconfigurationV1Interface.MachineConfigPools().Get(ctx, poolName, metav1.GetOptions{})
+	require.NoError(t, err)
+
+	createMachineOSConfig(t, cs, mosc)
+
+	mosb := buildrequest.NewMachineOSBuildFromAPIOrDie(ctx, cs.GetKubeclient(), mosc, mcp)
+
+	// Wait for the MachineOSBuild to exist.
+	kubeassert := helpers.AssertClientSet(t, cs).WithContext(ctx).Eventually()
+	kubeassert.MachineOSBuildExists(mosb)
+	jobName, err := getJobForMOSB(ctx, cs, mosb)
+	require.NoError(t, err)
+	kubeassert.JobExists(jobName)
+	assertBuildObjectsAreCreated(t, kubeassert, mosb)
+
+	t.Logf("MachineOSBuild %q exists, stopping machine-os-builder", mosb.Name)
+
+	// As soon as the MachineOSBuild exists, scale down the machine-os-builder
+	// deployment and any other deployments which may inadvertently cause its
+	// replica count to increase. This is done to simulate the machine-os-builder
+	// pod being scheduled onto a different node.
+ restoreDeployments := scaleDownDeployments(t, cs) + + // Wait for the job to start running. + waitForJobToReachMOSBCondition(ctx, t, cs, jobName, mcfgv1.MachineOSBuilding) + + t.Logf("Job %s has started running, starting machine-os-builder", jobName) + + // Restore the deployments. + restoreDeployments() + + // Ensure that the MachineOSBuild object eventually gets updated. + kubeassert.MachineOSBuildIsRunning(mosb) + + t.Logf("MachineOSBuild %s is now running, stopping machine-os-builder", mosb.Name) + + // Stop the deployments again. + restoreDeployments = scaleDownDeployments(t, cs) + + // Wait for the job to complete. + waitForJobToReachMOSBCondition(ctx, t, cs, jobName, mcfgv1.MachineOSBuildSucceeded) + + t.Logf("Job %q finished, starting machine-os-builder", jobName) + + // Restore the deployments again. + restoreDeployments() + + // At this point, the machine-os-builder is running, so we wait for the build + // itself to complete and be updated. + mosb = waitForBuildToComplete(t, cs, mosb) + + // Wait until the MachineOSConfig gets the digested pullspec from the MachineOSBuild. 
+	require.NoError(t, wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {
+		mosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		return mosc.Status.CurrentImagePullSpec != "" && mosc.Status.CurrentImagePullSpec == mosb.Status.DigestedImagePushSpec, nil
+	}))
+}
+
+func waitForMOSCToGetNewPullspec(ctx context.Context, t *testing.T, cs *framework.ClientSet, moscName, pullspec string) {
+	require.NoError(t, wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {
+		mosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(ctx, moscName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
 		}
-		// delete()
-		t.Logf("Deleted MachineConfig %s", testConfig.Name)
+
+		return mosc.Status.CurrentImagePullSpec != "" && string(mosc.Status.CurrentImagePullSpec) == pullspec, nil
+	}))
+}
+
+func waitForMOSCToUpdateCurrentMOSB(ctx context.Context, t *testing.T, cs *framework.ClientSet, moscName, mosbName string) string {
+	var currentMOSB string
+	require.NoError(t, wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {
+		mosc, err := cs.MachineconfigurationV1Interface.MachineOSConfigs().Get(ctx, moscName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		currentMOSB = mosc.GetAnnotations()[constants.CurrentMachineOSBuildAnnotationKey]
+		return currentMOSB != mosbName, nil
+
+	}))
+	return currentMOSB
+}
+
+// Waits for a job object to reach a given state.
+// TODO: Add this to the Asserts helper struct. 
+func waitForJobToReachCondition(ctx context.Context, t *testing.T, cs *framework.ClientSet, jobName string, condFunc func(*batchv1.Job) (bool, error)) { + require.NoError(t, wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) { + job, err := cs.BatchV1Interface.Jobs(ctrlcommon.MCONamespace).Get(ctx, jobName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return condFunc(job) + })) +} + +// Waits for a job object to be mapped to a given MachineOSBuild state. Will always fail the test if the job reaches a failed state unexpectedly. +func waitForJobToReachMOSBCondition(ctx context.Context, t *testing.T, cs *framework.ClientSet, jobName string, expectedCondition mcfgv1.BuildProgress) { + waitForJobToReachCondition(ctx, t, cs, jobName, func(job *batchv1.Job) (bool, error) { + buildprogress, _ := imagebuilder.MapJobStatusToBuildStatus(job) + if buildprogress == mcfgv1.MachineOSBuildFailed && expectedCondition != mcfgv1.MachineOSBuildFailed { + return false, fmt.Errorf("job %q failed unexpectedly", jobName) + } + + return expectedCondition == buildprogress, nil }) } diff --git a/test/e2e-techpreview/main_test.go b/test/e2e-techpreview/main_test.go index e6057d81f3..f7d89e49c3 100644 --- a/test/e2e-techpreview/main_test.go +++ b/test/e2e-techpreview/main_test.go @@ -3,15 +3,8 @@ package e2e_techpreview_test import ( "os" "testing" - - "github.com/openshift/machine-config-operator/test/helpers" ) func TestMain(m *testing.M) { - - // Ensure required feature gates are set. - // Add any new feature gates to the test here, and remove them as features are GAed. 
- helpers.MustHaveFeatureGatesEnabled("OnClusterBuild", "MachineConfigNodes") - os.Exit(m.Run()) } diff --git a/test/framework/clientset.go b/test/framework/clientset.go index 5dc2ea2d77..491fe89285 100644 --- a/test/framework/clientset.go +++ b/test/framework/clientset.go @@ -6,6 +6,7 @@ import ( clientbuildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + imagev1clientset "github.com/openshift/client-go/image/clientset/versioned" clientimagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" clientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1" @@ -17,6 +18,7 @@ import ( appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" @@ -32,11 +34,13 @@ type ClientSet struct { clientoperatorsv1alpha1.OperatorV1alpha1Interface clientbuildv1.BuildV1Interface clientimagev1.ImageV1Interface + rbacv1.RbacV1Interface clientmachineconfigv1alpha1.MachineconfigurationV1alpha1Interface - kubeconfig string - config *rest.Config - kubeclient clientset.Interface - mcfgclient mcfgclientset.Interface + kubeconfig string + config *rest.Config + kubeclient clientset.Interface + mcfgclient mcfgclientset.Interface + imageclient imagev1clientset.Interface } // Allows the instantiation of additional clients with the same config. 
@@ -60,6 +64,10 @@ func (cs *ClientSet) GetMcfgclient() mcfgclientset.Interface { return cs.mcfgclient } +func (cs *ClientSet) GetImageclient() imagev1clientset.Interface { + return cs.imageclient +} + // NewClientSet returns a *ClientBuilder with the given kubeconfig. func NewClientSet(kubeconfig string) *ClientSet { var config *rest.Config @@ -90,6 +98,7 @@ func NewClientSet(kubeconfig string) *ClientSet { func NewClientSetFromConfig(config *rest.Config) *ClientSet { kubeclient := kubernetes.NewForConfigOrDie(config) mcfgclient := mcfgclientset.NewForConfigOrDie(config) + imageclient := imagev1clientset.NewForConfigOrDie(config) return &ClientSet{ CoreV1Interface: kubeclient.CoreV1(), @@ -101,9 +110,11 @@ func NewClientSetFromConfig(config *rest.Config) *ClientSet { OperatorV1alpha1Interface: clientoperatorsv1alpha1.NewForConfigOrDie(config), BuildV1Interface: clientbuildv1.NewForConfigOrDie(config), ImageV1Interface: clientimagev1.NewForConfigOrDie(config), + RbacV1Interface: kubeclient.RbacV1(), MachineconfigurationV1alpha1Interface: mcfgclient.MachineconfigurationV1alpha1(), config: config, kubeclient: kubeclient, mcfgclient: mcfgclient, + imageclient: imageclient, } } diff --git a/test/helpers/assertions.go b/test/helpers/assertions.go index 80b583a74d..b193ed28d9 100644 --- a/test/helpers/assertions.go +++ b/test/helpers/assertions.go @@ -5,8 +5,9 @@ import ( "fmt" "time" + imagev1 "github.com/openshift/api/image/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + imagev1clientset "github.com/openshift/client-go/image/clientset/versioned" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" "github.com/openshift/machine-config-operator/pkg/apihelpers" "github.com/openshift/machine-config-operator/test/framework" @@ -40,6 +41,8 @@ type Assertions struct { kubeclient clientset.Interface // Mcfgclient; may be real or fake. 
mcfgclient mcfgclientset.Interface + // imageclient; may be real or fake. + imageclient imagev1clientset.Interface // The context to use for all API requests. ctx context.Context // Should we poll for results or use the first result we get. @@ -57,24 +60,27 @@ type TestingT interface { assert.TestingT Helper() FailNow() + Cleanup(func()) + Failed() bool } // Instantiates the Assertions struct using a ClientSet object. func AssertClientSet(t TestingT, cs *framework.ClientSet) *Assertions { - return Assert(t, cs.GetKubeclient(), cs.GetMcfgclient()) + return Assert(t, cs.GetKubeclient(), cs.GetMcfgclient(), cs.GetImageclient()) } // Instantiates the Assertions struct using the provided clients. -func Assert(t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface) *Assertions { - return newAssertions(t, kubeclient, mcfgclient) +func Assert(t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, imageclient imagev1clientset.Interface) *Assertions { + return newAssertions(t, kubeclient, mcfgclient, imageclient) } // Constructs an Assertions struct with initialized but zeroed values. -func newAssertions(t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface) *Assertions { +func newAssertions(t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, imageclient imagev1clientset.Interface) *Assertions { return &Assertions{ t: t, kubeclient: kubeclient, mcfgclient: mcfgclient, + imageclient: imageclient, poll: false, keepCount: false, pollCount: 0, @@ -84,15 +90,15 @@ func newAssertions(t TestingT, kubeclient clientset.Interface, mcfgclient mcfgcl } // Constructs an Assertions struct with initialized but zeroed values and a context. 
-func newAssertionsWithContext(ctx context.Context, t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface) *Assertions { - a := newAssertions(t, kubeclient, mcfgclient) +func newAssertionsWithContext(ctx context.Context, t TestingT, kubeclient clientset.Interface, mcfgclient mcfgclientset.Interface, imageclient imagev1clientset.Interface) *Assertions { + a := newAssertions(t, kubeclient, mcfgclient, imageclient) a.ctx = ctx return a } // Returns a deep copy of the Assertion struct with zeroed polling values. func (a *Assertions) deepcopy() *Assertions { - return newAssertionsWithContext(a.ctx, a.t, a.kubeclient, a.mcfgclient) + return newAssertionsWithContext(a.ctx, a.t, a.kubeclient, a.mcfgclient, a.imageclient) } // Returns a deep copy of the Assertion struct while preserving values related to polling. @@ -270,9 +276,9 @@ func (a *Assertions) JobDoesNotExist(jobName string, msgAndArgs ...interface{}) } // Asserts that a MachineOSConfig is created. -func (a *Assertions) MachineOSConfigExists(mosb *mcfgv1alpha1.MachineOSConfig, msgAndArgs ...interface{}) { +func (a *Assertions) MachineOSConfigExists(mosb *mcfgv1.MachineOSConfig, msgAndArgs ...interface{}) { a.t.Helper() - stateFunc := func(_ *mcfgv1alpha1.MachineOSConfig, err error) (bool, error) { + stateFunc := func(_ *mcfgv1.MachineOSConfig, err error) (bool, error) { return a.created(err) } @@ -280,9 +286,9 @@ func (a *Assertions) MachineOSConfigExists(mosb *mcfgv1alpha1.MachineOSConfig, m } // Asserts that a MachineOSConfig is deleted. 
-func (a *Assertions) MachineOSConfigDoesNotExist(mosb *mcfgv1alpha1.MachineOSConfig, msgAndArgs ...interface{}) { +func (a *Assertions) MachineOSConfigDoesNotExist(mosb *mcfgv1.MachineOSConfig, msgAndArgs ...interface{}) { a.t.Helper() - stateFunc := func(_ *mcfgv1alpha1.MachineOSConfig, err error) (bool, error) { + stateFunc := func(_ *mcfgv1.MachineOSConfig, err error) (bool, error) { return a.deleted(err) } @@ -290,29 +296,29 @@ func (a *Assertions) MachineOSConfigDoesNotExist(mosb *mcfgv1alpha1.MachineOSCon } // Asserts that a MachineOSBuild has failed. -func (a *Assertions) MachineOSBuildIsFailure(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { - a.machineOSBuildHasConditionTrue(mosb, mcfgv1alpha1.MachineOSBuildFailed, msgAndArgs...) +func (a *Assertions) MachineOSBuildIsFailure(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { + a.machineOSBuildHasConditionTrue(mosb, mcfgv1.MachineOSBuildFailed, msgAndArgs...) } // Asserts that a MachineOSBuild has succeeded. -func (a *Assertions) MachineOSBuildIsSuccessful(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { - a.machineOSBuildHasConditionTrue(mosb, mcfgv1alpha1.MachineOSBuildSucceeded, msgAndArgs...) +func (a *Assertions) MachineOSBuildIsSuccessful(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { + a.machineOSBuildHasConditionTrue(mosb, mcfgv1.MachineOSBuildSucceeded, msgAndArgs...) } // Asserts that a MachineOSBuild is running. -func (a *Assertions) MachineOSBuildIsRunning(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { - a.machineOSBuildHasConditionTrue(mosb, mcfgv1alpha1.MachineOSBuilding, msgAndArgs...) +func (a *Assertions) MachineOSBuildIsRunning(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { + a.machineOSBuildHasConditionTrue(mosb, mcfgv1.MachineOSBuilding, msgAndArgs...) } // Asserts that a MachineOSBuild is interrupted. 
-func (a *Assertions) MachineOSBuildIsInterrupted(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { - a.machineOSBuildHasConditionTrue(mosb, mcfgv1alpha1.MachineOSBuildInterrupted, msgAndArgs...) +func (a *Assertions) MachineOSBuildIsInterrupted(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { + a.machineOSBuildHasConditionTrue(mosb, mcfgv1.MachineOSBuildInterrupted, msgAndArgs...) } // Asserts that a MachineOSBuild is created. -func (a *Assertions) MachineOSBuildExists(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { +func (a *Assertions) MachineOSBuildExists(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { a.t.Helper() - stateFunc := func(_ *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + stateFunc := func(_ *mcfgv1.MachineOSBuild, err error) (bool, error) { return a.created(err) } @@ -320,15 +326,105 @@ func (a *Assertions) MachineOSBuildExists(mosb *mcfgv1alpha1.MachineOSBuild, msg } // Asserts that a MachineOSBuild is deleted. -func (a *Assertions) MachineOSBuildDoesNotExist(mosb *mcfgv1alpha1.MachineOSBuild, msgAndArgs ...interface{}) { +func (a *Assertions) MachineOSBuildDoesNotExist(mosb *mcfgv1.MachineOSBuild, msgAndArgs ...interface{}) { a.t.Helper() - stateFunc := func(_ *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + stateFunc := func(_ *mcfgv1.MachineOSBuild, err error) (bool, error) { return a.deleted(err) } a.machineOSBuildReachesState(mosb, stateFunc, msgAndArgs...) 
} +// Asserts that a pod has an owner set +func (a *Assertions) PodHasOwnerSet(podName string, msgAndArgs ...interface{}) { + a.t.Helper() + + ctx, cancel := a.getContextAndCancel() + defer cancel() + + err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { + pod, err := a.kubeclient.CoreV1().Pods(mcoNamespace).Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return len(pod.OwnerReferences) > 0, nil + }) + + msgAndArgs = prefixMsgAndArgs(fmt.Sprintf("Pod %s did not have owner set", podName), msgAndArgs) + require.NoError(a.t, err, msgAndArgs...) +} + +// Asserts that an ImageStreamTag is deleted. +func (a *Assertions) ImageDoesNotExist(imageName string, msgAndArgs ...interface{}) { + a.t.Helper() + stateFunc := func(_ *imagev1.ImageStreamTag, err error) (bool, error) { + return a.deleted(err) + } + + a.imageStreamTagReachesState(imageName, stateFunc, msgAndArgs...) +} + +// Asserts that a MachineConfig is created. +func (a *Assertions) MachineConfigExists(mc *mcfgv1.MachineConfig, msgAndArgs ...interface{}) { + a.t.Helper() + stateFunc := func(_ *mcfgv1.MachineConfig, err error) (bool, error) { + return a.created(err) + } + + a.machineConfigReachesState(mc, stateFunc, msgAndArgs...) +} + +// Asserts that a MachineConfig is deleted. +func (a *Assertions) MachineConfigDoesNotExist(mc *mcfgv1.MachineConfig, msgAndArgs ...interface{}) { + a.t.Helper() + stateFunc := func(_ *mcfgv1.MachineConfig, err error) (bool, error) { + return a.deleted(err) + } + + a.machineConfigReachesState(mc, stateFunc, msgAndArgs...) 
+} + +// Asserts that a secret has an owner set +func (a *Assertions) SecretHasOwnerSet(secretName string, msgAndArgs ...interface{}) { + a.t.Helper() + + ctx, cancel := a.getContextAndCancel() + defer cancel() + + err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { + secret, err := a.kubeclient.CoreV1().Secrets(mcoNamespace).Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return len(secret.OwnerReferences) > 0, nil + }) + + msgAndArgs = prefixMsgAndArgs(fmt.Sprintf("Secret %s did not have owner set", secretName), msgAndArgs) + require.NoError(a.t, err, msgAndArgs...) +} + +// Asserts that a configmap has an owner set +func (a *Assertions) ConfigMapHasOwnerSet(cmName string, msgAndArgs ...interface{}) { + a.t.Helper() + + ctx, cancel := a.getContextAndCancel() + defer cancel() + + err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { + cm, err := a.kubeclient.CoreV1().ConfigMaps(mcoNamespace).Get(ctx, cmName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + return len(cm.OwnerReferences) > 0, nil + }) + + msgAndArgs = prefixMsgAndArgs(fmt.Sprintf("ConfigMap %s did not have owner set", cmName), msgAndArgs) + require.NoError(a.t, err, msgAndArgs...) +} + // Asserts that a Secret reaches the desired state. func (a *Assertions) secretReachesState(name string, stateFunc func(*corev1.Secret, error) (bool, error), msgAndArgs ...interface{}) { a.t.Helper() @@ -394,14 +490,14 @@ func (a *Assertions) jobReachesState(jobName string, stateFunc func(*batchv1.Job } // Asserts that a MachineOSConfig reaches the desired state. 
-func (a *Assertions) machineOSConfigReachesState(mosc *mcfgv1alpha1.MachineOSConfig, stateFunc func(*mcfgv1alpha1.MachineOSConfig, error) (bool, error), msgAndArgs ...interface{}) { +func (a *Assertions) machineOSConfigReachesState(mosc *mcfgv1.MachineOSConfig, stateFunc func(*mcfgv1.MachineOSConfig, error) (bool, error), msgAndArgs ...interface{}) { a.t.Helper() ctx, cancel := a.getContextAndCancel() defer cancel() err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { - apiMosc, err := a.mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := a.mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) return a.handleStateFuncResult(stateFunc(apiMosc, err)) }) @@ -410,14 +506,14 @@ func (a *Assertions) machineOSConfigReachesState(mosc *mcfgv1alpha1.MachineOSCon } // Asserts that a MachineOSBuild reaches the desired state. -func (a *Assertions) machineOSBuildReachesState(mosc *mcfgv1alpha1.MachineOSBuild, stateFunc func(*mcfgv1alpha1.MachineOSBuild, error) (bool, error), msgAndArgs ...interface{}) { +func (a *Assertions) machineOSBuildReachesState(mosc *mcfgv1.MachineOSBuild, stateFunc func(*mcfgv1.MachineOSBuild, error) (bool, error), msgAndArgs ...interface{}) { a.t.Helper() ctx, cancel := a.getContextAndCancel() defer cancel() err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { - apiMosc, err := a.mcfgclient.MachineconfigurationV1alpha1().MachineOSBuilds().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := a.mcfgclient.MachineconfigurationV1().MachineOSBuilds().Get(ctx, mosc.Name, metav1.GetOptions{}) return a.handleStateFuncResult(stateFunc(apiMosc, err)) }) @@ -425,6 +521,38 @@ func (a *Assertions) machineOSBuildReachesState(mosc *mcfgv1alpha1.MachineOSBuil require.NoError(a.t, err, msgAndArgs...) 
} +// Asserts that a MachineConfig reaches the desired state. +func (a *Assertions) machineConfigReachesState(mc *mcfgv1.MachineConfig, stateFunc func(*mcfgv1.MachineConfig, error) (bool, error), msgAndArgs ...interface{}) { + a.t.Helper() + + ctx, cancel := a.getContextAndCancel() + defer cancel() + + err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { + apiMC, err := a.mcfgclient.MachineconfigurationV1().MachineConfigs().Get(ctx, mc.Name, metav1.GetOptions{}) + return a.handleStateFuncResult(stateFunc(apiMC, err)) + }) + + msgAndArgs = prefixMsgAndArgs(fmt.Sprintf("MachineConfig %s did not reach specified state", mc.Name), msgAndArgs) + require.NoError(a.t, err, msgAndArgs...) +} + +// Asserts that a ImageStreamTag reaches the desired state. +func (a *Assertions) imageStreamTagReachesState(imageName string, stateFunc func(*imagev1.ImageStreamTag, error) (bool, error), msgAndArgs ...interface{}) { + a.t.Helper() + + ctx, cancel := a.getContextAndCancel() + defer cancel() + + err := wait.PollUntilContextCancel(ctx, a.getPollInterval(), true, func(ctx context.Context) (bool, error) { + apiIST, err := a.imageclient.ImageV1().ImageStreamTags(mcoNamespace).Get(ctx, imageName, metav1.GetOptions{}) + return a.handleStateFuncResult(stateFunc(apiIST, err)) + }) + + msgAndArgs = prefixMsgAndArgs(fmt.Sprintf("ImageStreamTag %s did not reach specified state", imageName), msgAndArgs) + require.NoError(a.t, err, msgAndArgs...) +} + // Asserts that a MachineConfigPool reaches the desired state. func (a *Assertions) MachineConfigPoolReachesState(mcp *mcfgv1.MachineConfigPool, stateFunc func(*mcfgv1.MachineConfigPool, error) (bool, error), msgAndArgs ...interface{}) { a.t.Helper() @@ -510,10 +638,10 @@ func (a *Assertions) getContextAndCancel() (context.Context, func()) { } // Determines if the MachineOSBuild has reached the desired state. 
-func (a *Assertions) machineOSBuildHasConditionTrue(mosb *mcfgv1alpha1.MachineOSBuild, condition mcfgv1alpha1.BuildProgress, msgAndArgs ...interface{}) { +func (a *Assertions) machineOSBuildHasConditionTrue(mosb *mcfgv1.MachineOSBuild, condition mcfgv1.BuildProgress, msgAndArgs ...interface{}) { a.t.Helper() - stateFunc := func(apiMosb *mcfgv1alpha1.MachineOSBuild, err error) (bool, error) { + stateFunc := func(apiMosb *mcfgv1.MachineOSBuild, err error) (bool, error) { if err != nil { return false, err } diff --git a/test/helpers/assertions_test.go b/test/helpers/assertions_test.go index 956bc4a6cd..0e4c52290f 100644 --- a/test/helpers/assertions_test.go +++ b/test/helpers/assertions_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + fakeclientimagev1 "github.com/openshift/client-go/image/clientset/versioned/fake" mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" fakeclientmachineconfigv1 "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" "github.com/stretchr/testify/assert" @@ -13,9 +14,11 @@ import ( ) type mockTesting struct { + failed bool isFailNowCalled bool isHelperCalled bool isErrorfCalled bool + cleanupFunc func() errorfFormat string errorfArgs []interface{} } @@ -26,8 +29,10 @@ func (m *mockTesting) Errorf(format string, args ...interface{}) { m.errorfArgs = args } -func (m *mockTesting) Helper() { m.isHelperCalled = true } -func (m *mockTesting) FailNow() { m.isFailNowCalled = true } +func (m *mockTesting) Helper() { m.isHelperCalled = true } +func (m *mockTesting) FailNow() { m.isFailNowCalled = true } +func (m *mockTesting) Cleanup(f func()) { m.cleanupFunc = f } +func (m *mockTesting) Failed() bool { return m.failed } // For now, this test will primarily concern itself with whether the various // objects on the Assertion struct are set correctly. 
This is because failed @@ -302,7 +307,8 @@ func getAssertionsForTest() (*Assertions, *mockTesting, clientset.Interface, mcf mock := &mockTesting{} kubeclient := fakecorev1client.NewSimpleClientset() mcfgclient := fakeclientmachineconfigv1.NewSimpleClientset() - a := Assert(mock, kubeclient, mcfgclient) + imageclient := fakeclientimagev1.NewSimpleClientset() + a := Assert(mock, kubeclient, mcfgclient, imageclient) return a, mock, kubeclient, mcfgclient } @@ -310,5 +316,6 @@ func getAssertionsForTest() (*Assertions, *mockTesting, clientset.Interface, mcf func getAssertionsForTestWithRealT(t *testing.T) (*Assertions, clientset.Interface, mcfgclientset.Interface) { kubeclient := fakecorev1client.NewSimpleClientset() mcfgclient := fakeclientmachineconfigv1.NewSimpleClientset() - return Assert(t, kubeclient, mcfgclient), kubeclient, mcfgclient + imageclient := fakeclientimagev1.NewSimpleClientset() + return Assert(t, kubeclient, mcfgclient, imageclient), kubeclient, mcfgclient } diff --git a/test/helpers/helpers.go b/test/helpers/helpers.go index 671f2cac14..32488999a6 100644 --- a/test/helpers/helpers.go +++ b/test/helpers/helpers.go @@ -201,6 +201,43 @@ func NewOpaqueSecret(name, namespace, content string) *corev1.Secret { Type: corev1.SecretTypeOpaque, } } +func NewOpaqueSecretWithOwnerPool(name, namespace, content string, pool mcfgv1.MachineConfigPool) *corev1.Secret { + // Work around https://github.com/kubernetes/kubernetes/issues/3030 and https://github.com/kubernetes/kubernetes/issues/80609 + pool.APIVersion = mcfgv1.GroupVersion.String() + pool.Kind = "MachineConfigPool" + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: pool.APIVersion, + Kind: pool.Kind, + Name: pool.ObjectMeta.Name, + UID: pool.ObjectMeta.UID, + }, + }, + }, + Data: map[string][]byte{ + "entitlement-key.pem": []byte(content), + "entitlement.pem": []byte(content), + }, + Type: 
corev1.SecretTypeOpaque, + } +} + +func NewDockerCfgJSONSecret(name, namespace, content string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + ".dockerconfigjson": []byte(content), + }, + Type: corev1.SecretTypeDockerConfigJson, + } +} // CreateMachineConfigFromIgnitionWithMetadata returns a MachineConfig object from an Ignition config, name, and role label func CreateMachineConfigFromIgnitionWithMetadata(ignCfg interface{}, name, role string) *mcfgv1.MachineConfig { diff --git a/test/helpers/idempotent.go b/test/helpers/idempotent.go new file mode 100644 index 0000000000..a95ef1b3cb --- /dev/null +++ b/test/helpers/idempotent.go @@ -0,0 +1,112 @@ +package helpers + +import "os" + +// Ensures that a given cleanup function only runs once; even if called +// multiple times. +func MakeIdempotent(f func()) func() { + hasRun := false + + return func() { + if !hasRun { + f() + hasRun = true + } + } +} + +// Ensures that a given cleanup function only runs once; even if called multiple times. +// When the provided shouldRun function returns false, executing the cleanup will be +// skipped. The shouldRun will also only be executed once. +func MakeIdempotentSkippable(shouldRun func() bool, f func()) func() { + return MakeIdempotent(func() { + if shouldRun() { + f() + } + }) +} + +// Makes a given function idempotent and ensures that it is called at least +// once during the test run. +func MakeIdempotentAndRegister(t TestingT, f func()) func() { + out := MakeIdempotent(f) + t.Cleanup(out) + return out +} + +// Makes a given function idempotent and ensures that it is called at least +// once during the test run, but only if the provided shouldRun function +// returns true. The shouldRun function will also only be evaluated a single +// time. 
+func MakeIdempotentSkippableAndRegister(t TestingT, shouldRun func() bool, f func()) func() { + out := MakeIdempotentSkippable(shouldRun, f) + t.Cleanup(out) + return out +} + +// Provides simple configuration for potentially skipping a cleanup in the +// event of test failure. +type IdempotentConfig struct { + // Always skip running this function. + SkipAlways bool + // Only skip running this function when the test has failed. + SkipOnlyOnFailure bool + // Whether the test failed or not. Private because we set it from testing.T. + testFailed bool + // Whether we're running in CI. Private because we set it using the inCI() + // helper. + inCI bool +} + +func (i *IdempotentConfig) shouldRun() bool { + // If skipCleanupAlways is set, then we should not run cleanups regardless of + // outcome. This takes precedence over the inCI() check. + if i.SkipAlways { + return false + } + + // If the test failed and skipCleanupOnlyAfterFailure is set, we should skip + // cleanup. This takes precedence over the inCI() check. + if i.testFailed && i.SkipOnlyOnFailure { + return false + } + + // If the test failed, the skip cleanup after failures flag is not set, and + // we're in CI, skip running cleanups since the CI system will capture the + // current cluster state, which is useful for debugging the test failure. + if i.testFailed && i.inCI { + return false + } + + // At this point, we know the test passed or there is nothing precluding us + // from running the cleanups. + return true +} + +// Makes a function idempotent modulo the provided config struct which +// determines if the function should always be run, only run on failure, etc. +// In CI, we generally don't want to skip cleanups in the event of failure. 
+func MakeConfigurableIdempotentAndRegister(t TestingT, shouldRunCfg IdempotentConfig, f func()) func() { + shouldRunCfg.testFailed = t.Failed() + shouldRunCfg.inCI = inCI() + return MakeIdempotentSkippableAndRegister(t, shouldRunCfg.shouldRun, f) +} + +// Determines if we're running in a CI system based upon the presence (or lack +// thereof) of certain environment variables. +func inCI() bool { + items := []string{ + // Specific to OpenShift CI. + "OPENSHIFT_CI", + // Common to all CI systems. + "CI", + } + + for _, item := range items { + if _, ok := os.LookupEnv(item); ok { + return true + } + } + + return false +} diff --git a/test/helpers/idempotent_test.go b/test/helpers/idempotent_test.go new file mode 100644 index 0000000000..91d71875ec --- /dev/null +++ b/test/helpers/idempotent_test.go @@ -0,0 +1,221 @@ +package helpers + +import ( + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMakeIdempotent(t *testing.T) { + newShouldRunFunc := func(shouldRun *bool, toRun func()) func() bool { + return func() bool { + toRun() + return *shouldRun + } + } + + newRegisteredMakeIdempotent := func(shouldRun *bool, toRun func()) func() { + m := &mockTesting{} + + f := func() {} + + if shouldRun == nil { + f = MakeIdempotentAndRegister(m, toRun) + } else { + f = MakeIdempotentSkippableAndRegister(m, newShouldRunFunc(shouldRun, toRun), toRun) + } + + return func() { + f() + m.cleanupFunc() + } + } + + newUnregisteredMakeIdempotent := func(shouldRun *bool, toRun func()) func() { + if shouldRun == nil { + return MakeIdempotent(toRun) + } + + return MakeIdempotentSkippable(newShouldRunFunc(shouldRun, toRun), toRun) + } + + newMakeIdempotent := func(registered bool, shouldRun *bool, toRun func()) func() { + if registered { + return newRegisteredMakeIdempotent(shouldRun, toRun) + } + + return newUnregisteredMakeIdempotent(shouldRun, toRun) + } + + boolToPtr := func(val bool) *bool { + return &val + } + + testCases := []struct { + name string + 
registered bool + shouldRun *bool + expectedIncrementerValue uint64 + }{ + { + name: "Simple idempotent", + expectedIncrementerValue: 1, + registered: false, + }, + { + name: "Skippable idempotent should run", + shouldRun: boolToPtr(true), + expectedIncrementerValue: 2, + }, + { + name: "Skippable idempotent should not run", + shouldRun: boolToPtr(false), + expectedIncrementerValue: 1, + }, + { + name: "Registered idempotent", + registered: true, + expectedIncrementerValue: 1, + }, + { + name: "Skippable registered idempotent should run", + shouldRun: boolToPtr(true), + registered: true, + expectedIncrementerValue: 2, + }, + { + name: "Skippable registered idempotent should not run", + shouldRun: boolToPtr(false), + registered: true, + expectedIncrementerValue: 1, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + t.Run("Single Level", func(t *testing.T) { + t.Parallel() + + var counter atomic.Uint64 + counter.Store(0) + + f := newMakeIdempotent(testCase.registered, testCase.shouldRun, func() { + counter.Add(1) + }) + + for i := 0; i <= 100; i++ { + f() + } + + assert.Equal(t, testCase.expectedIncrementerValue, counter.Load()) + }) + + t.Run("Nested", func(t *testing.T) { + t.Parallel() + + var counter atomic.Uint64 + counter.Store(0) + + nested := newMakeIdempotent(testCase.registered, testCase.shouldRun, func() { + counter.Add(1) + }) + + for i := 0; i <= 100; i++ { + nested = newMakeIdempotent(testCase.registered, testCase.shouldRun, nested) + } + + for i := 0; i <= 100; i++ { + nested() + } + + assert.Equal(t, testCase.expectedIncrementerValue, counter.Load()) + }) + }) + } +} + +func TestIdempotentConfig(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + config IdempotentConfig + expected bool + }{ + { + name: "local failure, no flags set", + config: IdempotentConfig{ + testFailed: true, + }, + expected: true, + }, + { + name: "local pass, no flags set", 
+ config: IdempotentConfig{ + testFailed: false, + }, + expected: true, + }, + { + name: "local pass, skip always set", + config: IdempotentConfig{ + testFailed: false, + SkipAlways: true, + }, + expected: false, + }, + { + name: "local pass, skip on failure set", + config: IdempotentConfig{ + testFailed: false, + SkipOnlyOnFailure: true, + }, + expected: true, + }, + { + name: "CI failure, no flags set", + config: IdempotentConfig{ + inCI: true, + testFailed: true, + }, + expected: false, + }, + { + name: "CI pass, no flags set", + config: IdempotentConfig{ + inCI: true, + testFailed: false, + }, + expected: true, + }, + { + name: "CI pass, skip always set", + config: IdempotentConfig{ + inCI: true, + testFailed: false, + SkipAlways: true, + }, + expected: false, + }, + { + name: "CI pass, skip on failure set", + config: IdempotentConfig{ + inCI: true, + testFailed: false, + SkipOnlyOnFailure: true, + }, + expected: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + assert.Equal(t, testCase.expected, testCase.config.shouldRun()) + }) + } +} diff --git a/test/helpers/machineosbuildbuilder.go b/test/helpers/machineosbuildbuilder.go index 51982fbd32..4f3112358a 100644 --- a/test/helpers/machineosbuildbuilder.go +++ b/test/helpers/machineosbuildbuilder.go @@ -4,31 +4,28 @@ import ( "fmt" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type MachineOSBuildBuilder struct { - mosb *mcfgv1alpha1.MachineOSBuild + mosb *mcfgv1.MachineOSBuild } func NewMachineOSBuildBuilder(name string) *MachineOSBuildBuilder { return &MachineOSBuildBuilder{ - mosb: &mcfgv1alpha1.MachineOSBuild{ + mosb: &mcfgv1.MachineOSBuild{ TypeMeta: metav1.TypeMeta{ Kind: "MachineOSBuild", - APIVersion: "machineconfiguration.openshift.io/v1alpha1", + APIVersion: 
"machineconfiguration.openshift.io/v1", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{}, Annotations: map[string]string{}, }, - Spec: mcfgv1alpha1.MachineOSBuildSpec{ - Version: 1, - ConfigGeneration: 1, - DesiredConfig: mcfgv1alpha1.RenderedMachineConfigReference{}, - MachineOSConfig: mcfgv1alpha1.MachineOSConfigReference{}, + Spec: mcfgv1.MachineOSBuildSpec{ + MachineConfig: mcfgv1.MachineConfigReference{}, + MachineOSConfig: mcfgv1.MachineOSConfigReference{}, }, }, } @@ -36,7 +33,7 @@ func NewMachineOSBuildBuilder(name string) *MachineOSBuildBuilder { func NewMachineOSBuildBuilderFromMachineConfigPool(mcp *mcfgv1.MachineConfigPool) *MachineOSBuildBuilder { m := NewMachineOSBuildBuilder(fmt.Sprintf("%s-%s-builder", mcp.Name, mcp.Spec.Configuration.Name)) - m.mosb.Spec.DesiredConfig.Name = mcp.Spec.Configuration.Name + m.mosb.Spec.MachineConfig.Name = mcp.Spec.Configuration.Name return m } @@ -46,7 +43,7 @@ func (m *MachineOSBuildBuilder) WithName(name string) *MachineOSBuildBuilder { } func (m *MachineOSBuildBuilder) WithRenderedImagePushspec(pushspec string) *MachineOSBuildBuilder { - m.mosb.Spec.RenderedImagePushspec = pushspec + m.mosb.Spec.RenderedImagePushSpec = mcfgv1.ImageTagFormat(pushspec) return m } @@ -56,7 +53,7 @@ func (m *MachineOSBuildBuilder) WithMachineOSConfig(name string) *MachineOSBuild } func (m *MachineOSBuildBuilder) WithDesiredConfig(name string) *MachineOSBuildBuilder { - m.mosb.Spec.DesiredConfig.Name = name + m.mosb.Spec.MachineConfig.Name = name return m } @@ -76,6 +73,6 @@ func (m *MachineOSBuildBuilder) WithLabels(labels map[string]string) *MachineOSB return m } -func (m *MachineOSBuildBuilder) MachineOSBuild() *mcfgv1alpha1.MachineOSBuild { +func (m *MachineOSBuildBuilder) MachineOSBuild() *mcfgv1.MachineOSBuild { return m.mosb.DeepCopy() } diff --git a/test/helpers/machineosconfigbuilder.go b/test/helpers/machineosconfigbuilder.go index 74787df8af..3f624f5992 100644 --- 
a/test/helpers/machineosconfigbuilder.go +++ b/test/helpers/machineosconfigbuilder.go @@ -1,98 +1,74 @@ package helpers import ( - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type MachineOSConfigBuilder struct { - mosc *mcfgv1alpha1.MachineOSConfig + mosc *mcfgv1.MachineOSConfig } func NewMachineOSConfigBuilder(name string) *MachineOSConfigBuilder { return &MachineOSConfigBuilder{ - mosc: &mcfgv1alpha1.MachineOSConfig{ + mosc: &mcfgv1.MachineOSConfig{ TypeMeta: metav1.TypeMeta{ Kind: "MachineOSConfig", - APIVersion: "machineconfiguration.openshift.io/v1alpha1", + APIVersion: "machineconfiguration.openshift.io/v1", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{}, Annotations: map[string]string{}, }, - Spec: mcfgv1alpha1.MachineOSConfigSpec{ - MachineConfigPool: mcfgv1alpha1.MachineConfigPoolReference{}, - BuildInputs: mcfgv1alpha1.BuildInputs{ - Containerfile: []mcfgv1alpha1.MachineOSContainerfile{}, - ImageBuilder: &mcfgv1alpha1.MachineOSImageBuilder{ - ImageBuilderType: mcfgv1alpha1.MachineOSImageBuilderType("PodImageBuilder"), - }, - BaseImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{}, - RenderedImagePushSecret: mcfgv1alpha1.ImageSecretObjectReference{}, - }, - BuildOutputs: mcfgv1alpha1.BuildOutputs{ - CurrentImagePullSecret: mcfgv1alpha1.ImageSecretObjectReference{}, + Spec: mcfgv1.MachineOSConfigSpec{ + MachineConfigPool: mcfgv1.MachineConfigPoolReference{}, + + Containerfile: []mcfgv1.MachineOSContainerfile{}, + ImageBuilder: mcfgv1.MachineOSImageBuilder{ + ImageBuilderType: mcfgv1.JobBuilder, }, + BaseImagePullSecret: nil, + RenderedImagePushSecret: mcfgv1.ImageSecretObjectReference{}, }, }, } } -func (m *MachineOSConfigBuilder) WithReleaseVersion(version string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.ReleaseVersion = version - return m -} - -func (m *MachineOSConfigBuilder) 
WithBaseOSImagePullspec(pullspec string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.BaseOSImagePullspec = pullspec - return m -} - func (m *MachineOSConfigBuilder) WithMachineConfigPool(name string) *MachineOSConfigBuilder { m.mosc.Spec.MachineConfigPool.Name = name return m } func (m *MachineOSConfigBuilder) WithBaseImagePullSecret(name string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.BaseImagePullSecret.Name = name + m.mosc.Spec.BaseImagePullSecret.Name = name return m } func (m *MachineOSConfigBuilder) WithFinalImagePushSecret(name string) *MachineOSConfigBuilder { - return m.WithRenderedImagePushspec(name) + return m.WithRenderedImagePushSpec(name) } -func (m *MachineOSConfigBuilder) WithContainerfile(arch mcfgv1alpha1.ContainerfileArch, content string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.Containerfile = append(m.mosc.Spec.BuildInputs.Containerfile, mcfgv1alpha1.MachineOSContainerfile{ +func (m *MachineOSConfigBuilder) WithContainerfile(arch mcfgv1.ContainerfileArch, content string) *MachineOSConfigBuilder { + m.mosc.Spec.Containerfile = append(m.mosc.Spec.Containerfile, mcfgv1.MachineOSContainerfile{ ContainerfileArch: arch, Content: content, }) return m } -func (m *MachineOSConfigBuilder) WithRenderedImagePushspec(pushspec string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.RenderedImagePushspec = pushspec +func (m *MachineOSConfigBuilder) WithRenderedImagePushSpec(pushspec string) *MachineOSConfigBuilder { + m.mosc.Spec.RenderedImagePushSpec = mcfgv1.ImageTagFormat(pushspec) return m } func (m *MachineOSConfigBuilder) WithRenderedImagePushSecret(name string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.RenderedImagePushSecret.Name = name - return m -} - -func (m *MachineOSConfigBuilder) WithExtensionsImagePullspec(pullspec string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildInputs.BaseOSExtensionsImagePullspec = pullspec + m.mosc.Spec.RenderedImagePushSecret.Name = name return m } func (m 
*MachineOSConfigBuilder) WithCurrentImagePullspec(pullspec string) *MachineOSConfigBuilder { - m.mosc.Status.CurrentImagePullspec = pullspec - return m -} - -func (m *MachineOSConfigBuilder) WithCurrentImagePullSecret(name string) *MachineOSConfigBuilder { - m.mosc.Spec.BuildOutputs.CurrentImagePullSecret.Name = name + m.mosc.Status.CurrentImagePullSpec = mcfgv1.ImageDigestFormat(pullspec) return m } @@ -112,6 +88,6 @@ func (m *MachineOSConfigBuilder) WithLabels(labels map[string]string) *MachineOS return m } -func (m *MachineOSConfigBuilder) MachineOSConfig() *mcfgv1alpha1.MachineOSConfig { +func (m *MachineOSConfigBuilder) MachineOSConfig() *mcfgv1.MachineOSConfig { return m.mosc.DeepCopy() } diff --git a/test/helpers/utils.go b/test/helpers/utils.go index 7b15d3f203..14bf4cd4e5 100644 --- a/test/helpers/utils.go +++ b/test/helpers/utils.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "fmt" - "log" "math/rand" "os" "os/exec" @@ -25,14 +24,13 @@ import ( mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" "github.com/davecgh/go-spew/spew" - configv1 "github.com/openshift/api/config/v1" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" machineClientv1beta1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1" "github.com/openshift/machine-config-operator/pkg/apihelpers" + buildConstants "github.com/openshift/machine-config-operator/pkg/controller/build/constants" "github.com/openshift/machine-config-operator/pkg/daemon/constants" "github.com/openshift/machine-config-operator/pkg/daemon/osrelease" "github.com/openshift/machine-config-operator/test/framework" @@ -99,19 +97,6 @@ func ApplyMC(t *testing.T, cs *framework.ClientSet, mc *mcfgv1.MachineConfig) fu } } -// Ensures that a given cleanup 
function only runs once; even if called -// multiple times. -func MakeIdempotent(f func()) func() { - hasRun := false - - return func() { - if !hasRun { - f() - hasRun = true - } - } -} - // Applies a MachineConfig to a given MachineConfigPool, if a MachineConfig is // provided. If a MachineConfig is not provided (i.e., nil), it will skip the // apply process and wait for the MachineConfigPool to include the "00-worker" @@ -1470,46 +1455,6 @@ func setDeletionAnnotationOnMachineForNode(ctx context.Context, cs *framework.Cl return err } -// MustHaveFeatureGatesEnabled fatally exits the test if any feature gate in requiredFeatureGates is not enabled. -func MustHaveFeatureGatesEnabled(requiredFeatureGates ...configv1.FeatureGateName) { - cs := framework.NewClientSet("") - if err := validateFeatureGatesEnabled(cs, requiredFeatureGates...); err != nil { - log.Fatalln(err) - } - log.Printf("All required featuregates %v present!", requiredFeatureGates) -} - -// Validates if feature gates listed in requiredFeatureGates are enabled. -func validateFeatureGatesEnabled(cs *framework.ClientSet, requiredFeatureGates ...configv1.FeatureGateName) error { - currentFeatureGates, err := cs.ConfigV1Interface.FeatureGates().Get(context.TODO(), "cluster", metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to fetch feature gates: %w", err) - } - - // This uses the new Go generics to construct a typed set of - // FeatureGateNames. Under the hood, sets are map[T]struct{}{} where - // only the keys matter and one cannot have duplicate keys. Perfect for our use-case! - enabledFeatures := sets.New[configv1.FeatureGateName]() - - // Load all of the feature gate names into our set. Duplicates will be - // automatically be ignored. 
- for _, currentFeatureGateDetails := range currentFeatureGates.Status.FeatureGates { - for _, enabled := range currentFeatureGateDetails.Enabled { - enabledFeatures.Insert(enabled.Name) - } - } - - // If we have all of the required feature gates, we're done! - if enabledFeatures.HasAll(requiredFeatureGates...) { - return nil - } - - // If we don't, lets diff against what we have vs. what we want and return that information. - requiredFeatures := sets.New[configv1.FeatureGateName](requiredFeatureGates...) - disabledRequiredFeatures := requiredFeatures.Difference(enabledFeatures) - return fmt.Errorf("missing required FeatureGate(s): %v, have: %v", sets.List(disabledRequiredFeatures), sets.List(enabledFeatures)) -} - // Writes a file to a given node. Returns an idempotent cleanup function. func WriteFileToNode(t *testing.T, cs *framework.ClientSet, node corev1.Node, filename, contents string) func() { t.Helper() @@ -1792,21 +1737,33 @@ func nodeListToSet(nodeList *corev1.NodeList) sets.Set[string] { return nodes } -func SetContainerfileContentsOnMachineOSConfig(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1alpha1.MachineOSConfig, contents string) *mcfgv1alpha1.MachineOSConfig { +func SetContainerfileContentsOnMachineOSConfig(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig, contents string) *mcfgv1.MachineOSConfig { t.Helper() - apiMosc, err := mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) require.NoError(t, err) - apiMosc.Spec.BuildInputs.Containerfile = []mcfgv1alpha1.MachineOSContainerfile{ + apiMosc.Spec.Containerfile = []mcfgv1.MachineOSContainerfile{ { - ContainerfileArch: mcfgv1alpha1.NoArch, + ContainerfileArch: mcfgv1.NoArch, Content: contents, }, } - apiMosc, err = 
mcfgclient.MachineconfigurationV1alpha1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + apiMosc, err = mcfgclient.MachineconfigurationV1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) require.NoError(t, err) return apiMosc } + +func SetRebuildAnnotationOnMachineOSConfig(ctx context.Context, t *testing.T, mcfgclient mcfgclientset.Interface, mosc *mcfgv1.MachineOSConfig) { + t.Helper() + + apiMosc, err := mcfgclient.MachineconfigurationV1().MachineOSConfigs().Get(ctx, mosc.Name, metav1.GetOptions{}) + require.NoError(t, err) + + apiMosc.Annotations[buildConstants.RebuildMachineOSConfigAnnotationKey] = "" + + _, err = mcfgclient.MachineconfigurationV1().MachineOSConfigs().Update(ctx, apiMosc, metav1.UpdateOptions{}) + require.NoError(t, err) +} diff --git a/test/helpers/utils_test.go b/test/helpers/utils_test.go deleted file mode 100644 index 7921277ac5..0000000000 --- a/test/helpers/utils_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package helpers - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -// Tests helpers.MakeIdempotent() to ensure that any function that is wrapped -// by it is only called once. 
-func TestMakeIdempotent(t *testing.T) { - t.Parallel() - - count := 0 - - increment := func() { count++ } - - testCases := []struct { - name string - incrementer func() - expectedCount int - }{ - { - name: "Not idempotent", - incrementer: increment, - expectedCount: 10, - }, - { - name: "Is idempotent - Single-wrapped", - incrementer: MakeIdempotent(increment), - expectedCount: 1, - }, - { - name: "Is idempotent - Double-wrapped", - incrementer: MakeIdempotent(MakeIdempotent(increment)), - expectedCount: 1, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - count = 0 - - for i := 0; i < 10; i++ { - testCase.incrementer() - } - - assert.Equal(t, testCase.expectedCount, count) - }) - } -} diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml new file mode 100644 index 0000000000..4b8b1d7458 --- /dev/null +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -0,0 +1,22 @@ +linters-settings: + custom: + kal: + type: "module" + description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enable: + - "nobools" + lintersConfig: + conditions: + isFirstField: Warn + useProtobuf: Ignore +linters: + disable-all: true + enable: + - kal +issues: + # We have a lot of existing issues. + # Want to make sure that those adding new fields have an + # opportunity to fix them when running the linter locally. 
+ max-issues-per-linter: 1000 diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 5e6a6b1312..8c27449ba5 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -39,6 +39,28 @@ update-codegen-crds: update-scripts # ##################### +# When not otherwise set, diff/lint against the local master branch +PULL_BASE_SHA ?= master + +.PHONY: lint +lint: + hack/golangci-lint.sh run --new-from-rev=${PULL_BASE_SHA} + +# While https://github.com/golangci/golangci-lint/issues/1779 is not fixed, +# we need to run the fix separately from the lint command. +# GolangCI-Lint will not actually run the fixer for us. +# In the future we can remove this and have the linter auto-fix. +.PHONY: lint-fix +lint-fix: + hack/lint-fix.sh + +# Ignore the exit code of the fix lint, it will always error as there are unfixed issues +# that cannot be fixed from historic commits. +.PHONY: verify-lint-fix +verify-lint-fix: + make lint-fix 2>/dev/null || true + git diff --exit-code + .PHONY: verify-scripts verify-scripts: bash -x hack/verify-deepcopy.sh @@ -56,7 +78,7 @@ verify-scripts: hack/verify-promoted-features-pass-tests.sh .PHONY: verify -verify: verify-scripts verify-crd-schema verify-codegen-crds +verify: verify-scripts lint verify-crd-schema verify-codegen-crds .PHONY: verify-codegen-crds verify-codegen-crds: diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 2f503a88d2..934bcd3299 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -111,6 +111,25 @@ conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.m and then follow the instructions below to regenerate CRDs (if necessary) and submit a pull request with your new API definitions and generated files. +New APIs (new CRDs) must be added first as an unstable API (v1alpha1). 
+Once the feature is more developed, and ready to be promoted to stable, the API can be promoted to v1. + +### Why do we start with v1alpha1? + +By starting an API as a v1alpha1, we can iterate on the API with the ability to make breaking changes. +We can make changes to the schema, change validations, change entire types and even serialization without worry. + +When changes are made to an API, any existing client code will need to be updated to match. +If there are breaking changes (such as changing the serialization), then this requires a new version of the API. + +If we did not bump the API version for each breaking change, a client, generated prior to the breaking change, +would panic when it tried to deserialize the new serialization of the API. + +If, during development of a feature, we need to make a breaking change, we should move the feature to v1alpha2 (or v1alpha3, etc), +until we reach a version that we are happy to promote to v1. + +Do not make changes to the API when promoting the feature to v1. + ### Adding a new stable API (v1) When copying, it matters which `// +foo` markers are two comments blocks up and which are one comment block up. diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go index eb4918a661..645d796f77 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go +++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go @@ -35,7 +35,6 @@ type APIRequestCount struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec defines the characteristics of the resource. - // +kubebuilder:validation:Required // +required Spec APIRequestCountSpec `json:"spec"` @@ -126,7 +125,7 @@ type PerNodeAPIRequestLog struct { // PerUserAPIRequestCount contains logs of a user's requests. type PerUserAPIRequestCount struct { - // userName that made the request. 
+ // username that made the request. // +kubebuilder:validation:MaxLength=512 UserName string `json:"username"` diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go index 27d74b6c19..b3d6b615fc 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go @@ -74,7 +74,7 @@ func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string { var map_PerUserAPIRequestCount = map[string]string{ "": "PerUserAPIRequestCount contains logs of a user's requests.", - "username": "userName that made the request.", + "username": "username that made the request.", "userAgent": "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change.", "requestCount": "requestCount of requests by the user across all verbs.", "byVerb": "byVerb details by verb.", diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index 010f36b955..6f50fcaf95 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -15,39 +15,39 @@ option go_package = "github.com/openshift/api/apps/v1"; // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. message CustomDeploymentStrategyParams { - // Image specifies a container image which can carry out a deployment. + // image specifies a container image which can carry out a deployment. optional string image = 1; - // Environment holds the environment which will be given to the container for Image. 
+ // environment holds the environment which will be given to the container for Image. repeated .k8s.io.api.core.v1.EnvVar environment = 2; - // Command is optional and overrides CMD in the container Image. + // command is optional and overrides CMD in the container Image. repeated string command = 3; } // DeploymentCause captures information about a particular cause of a deployment. message DeploymentCause { - // Type of the trigger that resulted in the creation of a new deployment + // type of the trigger that resulted in the creation of a new deployment optional string type = 1; - // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + // imageTrigger contains the image trigger details, if this trigger was fired based on an image change optional DeploymentCauseImageTrigger imageTrigger = 2; } // DeploymentCauseImageTrigger represents details about the cause of a deployment originating // from an image change trigger message DeploymentCauseImageTrigger { - // From is a reference to the changed object which triggered a deployment. The field may have + // from is a reference to the changed object which triggered a deployment. The field may have // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. optional .k8s.io.api.core.v1.ObjectReference from = 1; } // DeploymentCondition describes the state of a deployment config at a certain point. message DeploymentCondition { - // Type of deployment condition. + // type of deployment condition. optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; // The last time this condition was updated. 
@@ -81,10 +81,10 @@ message DeploymentConfig { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec represents a desired deployment state and how to deploy to it. + // spec represents a desired deployment state and how to deploy to it. optional DeploymentConfigSpec spec = 2; - // Status represents the current deployment state. + // status represents the current deployment state. // +optional optional DeploymentConfigStatus status = 3; } @@ -98,7 +98,7 @@ message DeploymentConfigList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of deployment configs + // items is a list of deployment configs repeated DeploymentConfig items = 2; } @@ -107,108 +107,108 @@ message DeploymentConfigList { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 message DeploymentConfigRollback { - // Name of the deployment config that will be rolled back. + // name of the deployment config that will be rolled back. optional string name = 1; - // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + // updatedAnnotations is a set of new annotations that will be added in the deployment config. map updatedAnnotations = 2; - // Spec defines the options to rollback generation. + // spec defines the options to rollback generation. optional DeploymentConfigRollbackSpec spec = 3; } // DeploymentConfigRollbackSpec represents the options for rollback generation. message DeploymentConfigRollbackSpec { - // From points to a ReplicationController which is a deployment. + // from points to a ReplicationController which is a deployment. 
optional .k8s.io.api.core.v1.ObjectReference from = 1; - // Revision to rollback to. If set to 0, rollback to the last revision. + // revision to rollback to. If set to 0, rollback to the last revision. optional int64 revision = 2; - // IncludeTriggers specifies whether to include config Triggers. + // includeTriggers specifies whether to include config Triggers. optional bool includeTriggers = 3; - // IncludeTemplate specifies whether to include the PodTemplateSpec. + // includeTemplate specifies whether to include the PodTemplateSpec. optional bool includeTemplate = 4; - // IncludeReplicationMeta specifies whether to include the replica count and selector. + // includeReplicationMeta specifies whether to include the replica count and selector. optional bool includeReplicationMeta = 5; - // IncludeStrategy specifies whether to include the deployment Strategy. + // includeStrategy specifies whether to include the deployment Strategy. optional bool includeStrategy = 6; } // DeploymentConfigSpec represents the desired state of the deployment. message DeploymentConfigSpec { - // Strategy describes how a deployment is executed. + // strategy describes how a deployment is executed. // +optional optional DeploymentStrategy strategy = 1; - // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // minReadySeconds is the minimum number of seconds for which a newly created pod should // be ready without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) optional int32 minReadySeconds = 9; - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers // are defined, a new deployment can only occur as a result of an explicit client update to the // DeploymentConfig with a new LatestVersion. 
If null, defaults to having a config change trigger. // +optional optional DeploymentTriggerPolicies triggers = 2; - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // +optional optional int32 replicas = 3; - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. // This field is a pointer to allow for differentiation between an explicit zero and not specified. // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) optional int32 revisionHistoryLimit = 4; - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. // +optional optional bool test = 5; - // Paused indicates that the deployment config is paused resulting in no new deployments on template + // paused indicates that the deployment config is paused resulting in no new deployments on template // changes or changes in the template caused by other triggers. optional bool paused = 6; - // Selector is a label query over pods that should match the Replicas count. + // selector is a label query over pods that should match the Replicas count. map selector = 7; - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. 
optional .k8s.io.api.core.v1.PodTemplateSpec template = 8; } // DeploymentConfigStatus represents the current deployment state. message DeploymentConfigStatus { - // LatestVersion is used to determine whether the current deployment associated with a deployment + // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. optional int64 latestVersion = 1; - // ObservedGeneration is the most recent generation observed by the deployment config controller. + // observedGeneration is the most recent generation observed by the deployment config controller. optional int64 observedGeneration = 2; - // Replicas is the total number of pods targeted by this deployment config. + // replicas is the total number of pods targeted by this deployment config. optional int32 replicas = 3; - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. optional int32 updatedReplicas = 4; - // AvailableReplicas is the total number of available pods targeted by this deployment config. + // availableReplicas is the total number of available pods targeted by this deployment config. optional int32 availableReplicas = 5; - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. optional int32 unavailableReplicas = 6; - // Details are the reasons for the update to this deployment config. + // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger optional DeploymentDetails details = 7; - // Conditions represents the latest available observations of a deployment config's current state. 
+ // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge repeated DeploymentCondition conditions = 8; @@ -219,10 +219,10 @@ message DeploymentConfigStatus { // DeploymentDetails captures information about the causes of a deployment. message DeploymentDetails { - // Message is the user specified change message, if this deployment was triggered manually by the user + // message is the user specified change message, if this deployment was triggered manually by the user optional string message = 1; - // Causes are extended data associated with all the causes for creating a new deployment + // causes are extended data associated with all the causes for creating a new deployment repeated DeploymentCause causes = 2; } @@ -241,7 +241,7 @@ message DeploymentLogOptions { // The container for which to stream logs. Defaults to only container if there is one container in the pod. optional string container = 1; - // Follow if true indicates that the build log should be streamed until + // follow if true indicates that the build log should be streamed until // the build terminates. optional bool follow = 2; @@ -273,12 +273,12 @@ message DeploymentLogOptions { // slightly more or slightly less than the specified limit. optional int64 limitBytes = 8; - // NoWait if true causes the call to return immediately even if the deployment + // nowait if true causes the call to return immediately even if the deployment // is not available yet. Otherwise the server will wait until the deployment has started. // TODO: Fix the tag to 'noWait' in v2 optional bool nowait = 9; - // Version of the deployment for which to view logs. + // version of the deployment for which to view logs. optional int64 version = 10; } @@ -287,17 +287,17 @@ message DeploymentLogOptions { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 message DeploymentRequest { - // Name of the deployment config for requesting a new deployment. + // name of the deployment config for requesting a new deployment. optional string name = 1; - // Latest will update the deployment config with the latest state from all triggers. + // latest will update the deployment config with the latest state from all triggers. optional bool latest = 2; - // Force will try to force a new deployment to run. If the deployment config is paused, + // force will try to force a new deployment to run. If the deployment config is paused, // then setting this to true will return an Invalid error. optional bool force = 3; - // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // excludeTriggers instructs the instantiator to avoid processing the specified triggers. // This field overrides the triggers from latest and allows clients to control specific // logic. This field is ignored if not specified. repeated string excludeTriggers = 4; @@ -305,53 +305,53 @@ message DeploymentRequest { // DeploymentStrategy describes how to perform a deployment. message DeploymentStrategy { - // Type is the name of a deployment strategy. + // type is the name of a deployment strategy. // +optional optional string type = 1; - // CustomParams are the input to the Custom deployment strategy, and may also + // customParams are the input to the Custom deployment strategy, and may also // be specified for the Recreate and Rolling strategies to customize the execution // process that runs the deployment. optional CustomDeploymentStrategyParams customParams = 2; - // RecreateParams are the input to the Recreate deployment strategy. + // recreateParams are the input to the Recreate deployment strategy. optional RecreateDeploymentStrategyParams recreateParams = 3; - // RollingParams are the input to the Rolling deployment strategy. 
+ // rollingParams are the input to the Rolling deployment strategy. optional RollingDeploymentStrategyParams rollingParams = 4; - // Resources contains resource requirements to execute the deployment and any hooks. + // resources contains resource requirements to execute the deployment and any hooks. optional .k8s.io.api.core.v1.ResourceRequirements resources = 5; - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. map labels = 6; - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. map annotations = 7; - // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment // config may be active on a node before the system actively tries to terminate them. optional int64 activeDeadlineSeconds = 8; } // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. message DeploymentTriggerImageChangeParams { - // Automatic means that the detection of a new tag value should result in an image update + // automatic means that the detection of a new tag value should result in an image update // inside the pod template. optional bool automatic = 1; - // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // containerNames is used to restrict tag updates to the specified set of container names in a pod. // If multiple triggers point to the same containers, the resulting behavior is undefined. Future // API versions will make this a validation error. If ContainerNames does not point to a valid container, // the trigger will be ignored. 
Future API versions will make this a validation error. repeated string containerNames = 2; - // From is a reference to an image stream tag to watch for changes. From.Name is the only + // from is a reference to an image stream tag to watch for changes. From.Name is the only // required subfield - if From.Namespace is blank, the namespace of the current deployment // trigger will be used. optional .k8s.io.api.core.v1.ObjectReference from = 3; - // LastTriggeredImage is the last image to be triggered. + // lastTriggeredImage is the last image to be triggered. optional string lastTriggeredImage = 4; } @@ -366,10 +366,10 @@ message DeploymentTriggerPolicies { // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. message DeploymentTriggerPolicy { - // Type of the trigger + // type of the trigger optional string type = 1; - // ImageChangeParams represents the parameters for the ImageChange trigger. + // imageChangeParams represents the parameters for the ImageChange trigger. optional DeploymentTriggerImageChangeParams imageChangeParams = 2; } @@ -377,17 +377,17 @@ message DeploymentTriggerPolicy { // based on the specified container which is assumed to be part of the // deployment template. message ExecNewPodHook { - // Command is the action command and its arguments. + // command is the action command and its arguments. repeated string command = 1; - // Env is a set of environment variables to supply to the hook pod's container. + // env is a set of environment variables to supply to the hook pod's container. repeated .k8s.io.api.core.v1.EnvVar env = 2; - // ContainerName is the name of a container in the deployment pod template + // containerName is the name of a container in the deployment pod template // whose container image will be used for the hook pod's container. 
optional string containerName = 3; - // Volumes is a list of named volumes from the pod template which should be + // volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. // An empty list means no volumes will be copied. repeated string volumes = 4; @@ -395,32 +395,32 @@ message ExecNewPodHook { // LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. message LifecycleHook { - // FailurePolicy specifies what action to take if the hook fails. + // failurePolicy specifies what action to take if the hook fails. optional string failurePolicy = 1; - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + // execNewPod specifies the options for a lifecycle hook backed by a pod. optional ExecNewPodHook execNewPod = 2; - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. repeated TagImageHook tagImages = 3; } // RecreateDeploymentStrategyParams are the input to the Recreate deployment // strategy. message RecreateDeploymentStrategyParams { - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. optional int64 timeoutSeconds = 1; - // Pre is a lifecycle hook which is executed before the strategy manipulates + // pre is a lifecycle hook which is executed before the strategy manipulates // the deployment. All LifecycleHookFailurePolicy values are supported. 
optional LifecycleHook pre = 2; - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new // pod is created. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook mid = 3; - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook post = 4; } @@ -428,19 +428,19 @@ message RecreateDeploymentStrategyParams { // RollingDeploymentStrategyParams are the input to the Rolling deployment // strategy. message RollingDeploymentStrategyParams { - // UpdatePeriodSeconds is the time to wait between individual pod updates. + // updatePeriodSeconds is the time to wait between individual pod updates. // If the value is nil, a default will be used. optional int64 updatePeriodSeconds = 1; - // IntervalSeconds is the time to wait between polling deployment status + // intervalSeconds is the time to wait between polling deployment status // after update. If the value is nil, a default will be used. optional int64 intervalSeconds = 2; - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. optional int64 timeoutSeconds = 3; - // MaxUnavailable is the maximum number of pods that can be unavailable + // maxUnavailable is the maximum number of pods that can be unavailable // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. @@ -454,7 +454,7 @@ message RollingDeploymentStrategyParams { // all times during the update. 
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4; - // MaxSurge is the maximum number of pods that can be scheduled above the + // maxSurge is the maximum number of pods that can be scheduled above the // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. @@ -468,11 +468,11 @@ message RollingDeploymentStrategyParams { // pods. optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5; - // Pre is a lifecycle hook which is executed before the deployment process + // pre is a lifecycle hook which is executed before the deployment process // begins. All LifecycleHookFailurePolicy values are supported. optional LifecycleHook pre = 7; - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values // are supported. optional LifecycleHook post = 8; @@ -480,11 +480,11 @@ message RollingDeploymentStrategyParams { // TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. message TagImageHook { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single // container this value will be defaulted to the name of that container. optional string containerName = 1; - // To is the target ImageStreamTag to set the container's image onto. + // to is the target ImageStreamTag to set the container's image onto. 
optional .k8s.io.api.core.v1.ObjectReference to = 2; } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index 1465aea278..619c30e828 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -38,81 +38,81 @@ type DeploymentConfig struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec represents a desired deployment state and how to deploy to it. + // spec represents a desired deployment state and how to deploy to it. Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status represents the current deployment state. + // status represents the current deployment state. // +optional Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // DeploymentConfigSpec represents the desired state of the deployment. type DeploymentConfigSpec struct { - // Strategy describes how a deployment is executed. + // strategy describes how a deployment is executed. // +optional Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` - // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // minReadySeconds is the minimum number of seconds for which a newly created pod should // be ready without any of its container crashing, for it to be considered available. // Defaults to 0 (pod will be considered available as soon as it is ready) MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` - // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // triggers determine how updates to a DeploymentConfig result in new deployments. 
If no triggers // are defined, a new deployment can only occur as a result of an explicit client update to the // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. // +optional Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // +optional Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. // This field is a pointer to allow for differentiation between an explicit zero and not specified. // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` - // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. // +optional Test bool `json:"test" protobuf:"varint,5,opt,name=test"` - // Paused indicates that the deployment config is paused resulting in no new deployments on template + // paused indicates that the deployment config is paused resulting in no new deployments on template // changes or changes in the template caused by other triggers. 
Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` - // Selector is a label query over pods that should match the Replicas count. + // selector is a label query over pods that should match the Replicas count. Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` - // Template is the object that describes the pod that will be created if + // template is the object that describes the pod that will be created if // insufficient replicas are detected. Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` } // DeploymentStrategy describes how to perform a deployment. type DeploymentStrategy struct { - // Type is the name of a deployment strategy. + // type is the name of a deployment strategy. // +optional Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - // CustomParams are the input to the Custom deployment strategy, and may also + // customParams are the input to the Custom deployment strategy, and may also // be specified for the Recreate and Rolling strategies to customize the execution // process that runs the deployment. CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` - // RecreateParams are the input to the Recreate deployment strategy. + // recreateParams are the input to the Recreate deployment strategy. RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` - // RollingParams are the input to the Rolling deployment strategy. + // rollingParams are the input to the Rolling deployment strategy. RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` - // Resources contains resource requirements to execute the deployment and any hooks. 
+ // resources contains resource requirements to execute the deployment and any hooks. Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` - // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` - // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + // annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` - // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment // config may be active on a node before the system actively tries to terminate them. ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` } @@ -131,27 +131,27 @@ const ( // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. type CustomDeploymentStrategyParams struct { - // Image specifies a container image which can carry out a deployment. + // image specifies a container image which can carry out a deployment. Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` - // Environment holds the environment which will be given to the container for Image. + // environment holds the environment which will be given to the container for Image. Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` - // Command is optional and overrides CMD in the container Image. + // command is optional and overrides CMD in the container Image. 
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` } // RecreateDeploymentStrategyParams are the input to the Recreate deployment // strategy. type RecreateDeploymentStrategyParams struct { - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` - // Pre is a lifecycle hook which is executed before the strategy manipulates + // pre is a lifecycle hook which is executed before the strategy manipulates // the deployment. All LifecycleHookFailurePolicy values are supported. Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` - // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new // pod is created. All LifecycleHookFailurePolicy values are supported. Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` } @@ -159,16 +159,16 @@ type RecreateDeploymentStrategyParams struct { // RollingDeploymentStrategyParams are the input to the Rolling deployment // strategy. type RollingDeploymentStrategyParams struct { - // UpdatePeriodSeconds is the time to wait between individual pod updates. + // updatePeriodSeconds is the time to wait between individual pod updates. // If the value is nil, a default will be used. 
UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"` - // IntervalSeconds is the time to wait between polling deployment status + // intervalSeconds is the time to wait between polling deployment status // after update. If the value is nil, a default will be used. IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"` - // TimeoutSeconds is the time to wait for updates before giving up. If the + // timeoutSeconds is the time to wait for updates before giving up. If the // value is nil, a default will be used. TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` - // MaxUnavailable is the maximum number of pods that can be unavailable + // maxUnavailable is the maximum number of pods that can be unavailable // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. @@ -181,7 +181,7 @@ type RollingDeploymentStrategyParams struct { // ensuring that at least 70% of original number of pods are available at // all times during the update. MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"` - // MaxSurge is the maximum number of pods that can be scheduled above the + // maxSurge is the maximum number of pods that can be scheduled above the // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. @@ -194,10 +194,10 @@ type RollingDeploymentStrategyParams struct { // pods running at any time during the update is atmost 130% of original // pods. 
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` - // Pre is a lifecycle hook which is executed before the deployment process + // pre is a lifecycle hook which is executed before the deployment process // begins. All LifecycleHookFailurePolicy values are supported. Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` - // Post is a lifecycle hook which is executed after the strategy has + // post is a lifecycle hook which is executed after the strategy has // finished all deployment logic. All LifecycleHookFailurePolicy values // are supported. Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` @@ -205,13 +205,13 @@ type RollingDeploymentStrategyParams struct { // LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. type LifecycleHook struct { - // FailurePolicy specifies what action to take if the hook fails. + // failurePolicy specifies what action to take if the hook fails. FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` - // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + // execNewPod specifies the options for a lifecycle hook backed by a pod. ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` - // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + // tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` } @@ -231,14 +231,14 @@ const ( // based on the specified container which is assumed to be part of the // deployment template. type ExecNewPodHook struct { - // Command is the action command and its arguments. 
+ // command is the action command and its arguments. Command []string `json:"command" protobuf:"bytes,1,rep,name=command"` - // Env is a set of environment variables to supply to the hook pod's container. + // env is a set of environment variables to supply to the hook pod's container. Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // ContainerName is the name of a container in the deployment pod template + // containerName is the name of a container in the deployment pod template // whose container image will be used for the hook pod's container. ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` - // Volumes is a list of named volumes from the pod template which should be + // volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. // An empty list means no volumes will be copied. Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` @@ -246,10 +246,10 @@ type ExecNewPodHook struct { // TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. type TagImageHook struct { - // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single // container this value will be defaulted to the name of that container. ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` - // To is the target ImageStreamTag to set the container's image onto. + // to is the target ImageStreamTag to set the container's image onto. 
To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"` } @@ -264,9 +264,9 @@ func (t DeploymentTriggerPolicies) String() string { // DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. type DeploymentTriggerPolicy struct { - // Type of the trigger + // type of the trigger Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` - // ImageChangeParams represents the parameters for the ImageChange trigger. + // imageChangeParams represents the parameters for the ImageChange trigger. ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"` } @@ -284,42 +284,42 @@ const ( // DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. type DeploymentTriggerImageChangeParams struct { - // Automatic means that the detection of a new tag value should result in an image update + // automatic means that the detection of a new tag value should result in an image update // inside the pod template. Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` - // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // containerNames is used to restrict tag updates to the specified set of container names in a pod. // If multiple triggers point to the same containers, the resulting behavior is undefined. Future // API versions will make this a validation error. If ContainerNames does not point to a valid container, // the trigger will be ignored. Future API versions will make this a validation error. ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` - // From is a reference to an image stream tag to watch for changes. From.Name is the only + // from is a reference to an image stream tag to watch for changes. 
From.Name is the only // required subfield - if From.Namespace is blank, the namespace of the current deployment // trigger will be used. From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"` - // LastTriggeredImage is the last image to be triggered. + // lastTriggeredImage is the last image to be triggered. LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` } // DeploymentConfigStatus represents the current deployment state. type DeploymentConfigStatus struct { - // LatestVersion is used to determine whether the current deployment associated with a deployment + // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` - // ObservedGeneration is the most recent generation observed by the deployment config controller. + // observedGeneration is the most recent generation observed by the deployment config controller. ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` - // Replicas is the total number of pods targeted by this deployment config. + // replicas is the total number of pods targeted by this deployment config. Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` - // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` - // AvailableReplicas is the total number of available pods targeted by this deployment config. + // availableReplicas is the total number of available pods targeted by this deployment config. 
AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` - // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` - // Details are the reasons for the update to this deployment config. + // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` - // Conditions represents the latest available observations of a deployment config's current state. + // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` @@ -329,24 +329,24 @@ type DeploymentConfigStatus struct { // DeploymentDetails captures information about the causes of a deployment. type DeploymentDetails struct { - // Message is the user specified change message, if this deployment was triggered manually by the user + // message is the user specified change message, if this deployment was triggered manually by the user Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"` - // Causes are extended data associated with all the causes for creating a new deployment + // causes are extended data associated with all the causes for creating a new deployment Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"` } // DeploymentCause captures information about a particular cause of a deployment. 
type DeploymentCause struct { - // Type of the trigger that resulted in the creation of a new deployment + // type of the trigger that resulted in the creation of a new deployment Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"` - // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + // imageTrigger contains the image trigger details, if this trigger was fired based on an image change ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"` } // DeploymentCauseImageTrigger represents details about the cause of a deployment originating // from an image change trigger type DeploymentCauseImageTrigger struct { - // From is a reference to the changed object which triggered a deployment. The field may have + // from is a reference to the changed object which triggered a deployment. The field may have // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` } @@ -381,9 +381,9 @@ const ( // DeploymentCondition describes the state of a deployment config at a certain point. type DeploymentCondition struct { - // Type of deployment condition. + // type of deployment condition. Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time this condition was updated. 
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` @@ -411,7 +411,7 @@ type DeploymentConfigList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of deployment configs + // items is a list of deployment configs Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -426,27 +426,27 @@ type DeploymentConfigList struct { // +openshift:compatibility-gen:level=1 type DeploymentConfigRollback struct { metav1.TypeMeta `json:",inline"` - // Name of the deployment config that will be rolled back. + // name of the deployment config that will be rolled back. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + // updatedAnnotations is a set of new annotations that will be added in the deployment config. UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` - // Spec defines the options to rollback generation. + // spec defines the options to rollback generation. Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` } // DeploymentConfigRollbackSpec represents the options for rollback generation. type DeploymentConfigRollbackSpec struct { - // From points to a ReplicationController which is a deployment. + // from points to a ReplicationController which is a deployment. From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - // Revision to rollback to. If set to 0, rollback to the last revision. + // revision to rollback to. If set to 0, rollback to the last revision. Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` - // IncludeTriggers specifies whether to include config Triggers. 
+ // includeTriggers specifies whether to include config Triggers. IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` - // IncludeTemplate specifies whether to include the PodTemplateSpec. + // includeTemplate specifies whether to include the PodTemplateSpec. IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` - // IncludeReplicationMeta specifies whether to include the replica count and selector. + // includeReplicationMeta specifies whether to include the replica count and selector. IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"` - // IncludeStrategy specifies whether to include the deployment Strategy. + // includeStrategy specifies whether to include the deployment Strategy. IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"` } @@ -461,14 +461,14 @@ type DeploymentConfigRollbackSpec struct { // +openshift:compatibility-gen:level=1 type DeploymentRequest struct { metav1.TypeMeta `json:",inline"` - // Name of the deployment config for requesting a new deployment. + // name of the deployment config for requesting a new deployment. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Latest will update the deployment config with the latest state from all triggers. + // latest will update the deployment config with the latest state from all triggers. Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"` - // Force will try to force a new deployment to run. If the deployment config is paused, + // force will try to force a new deployment to run. If the deployment config is paused, // then setting this to true will return an Invalid error. Force bool `json:"force" protobuf:"varint,3,opt,name=force"` - // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. 
+ // excludeTriggers instructs the instantiator to avoid processing the specified triggers. // This field overrides the triggers from latest and allows clients to control specific // logic. This field is ignored if not specified. ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"` @@ -501,7 +501,7 @@ type DeploymentLogOptions struct { // The container for which to stream logs. Defaults to only container if there is one container in the pod. Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` - // Follow if true indicates that the build log should be streamed until + // follow if true indicates that the build log should be streamed until // the build terminates. Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` // Return previous deployment logs. Defaults to false. @@ -527,11 +527,11 @@ type DeploymentLogOptions struct { // slightly more or slightly less than the specified limit. LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` - // NoWait if true causes the call to return immediately even if the deployment + // nowait if true causes the call to return immediately even if the deployment // is not available yet. Otherwise the server will wait until the deployment has started. // TODO: Fix the tag to 'noWait' in v2 NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` - // Version of the deployment for which to view logs. + // version of the deployment for which to view logs. 
Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` } diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go index ab137d59be..55b53c5daf 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -13,9 +13,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_CustomDeploymentStrategyParams = map[string]string{ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", - "image": "Image specifies a container image which can carry out a deployment.", - "environment": "Environment holds the environment which will be given to the container for Image.", - "command": "Command is optional and overrides CMD in the container Image.", + "image": "image specifies a container image which can carry out a deployment.", + "environment": "environment holds the environment which will be given to the container for Image.", + "command": "command is optional and overrides CMD in the container Image.", } func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -24,8 +24,8 @@ func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_DeploymentCause = map[string]string{ "": "DeploymentCause captures information about a particular cause of a deployment.", - "type": "Type of the trigger that resulted in the creation of a new deployment", - "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change", + "type": "type of the trigger that resulted in the creation of a new deployment", + "imageTrigger": "imageTrigger contains the image trigger details, if this trigger was fired based on an image change", } func (DeploymentCause) SwaggerDoc() map[string]string { @@ -34,7 +34,7 @@ func (DeploymentCause) 
SwaggerDoc() map[string]string { var map_DeploymentCauseImageTrigger = map[string]string{ "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger", - "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", + "from": "from is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.", } func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { @@ -43,8 +43,8 @@ func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string { var map_DeploymentCondition = map[string]string{ "": "DeploymentCondition describes the state of a deployment config at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "type": "type of deployment condition.", + "status": "status of the condition, one of True, False, Unknown.", "lastUpdateTime": "The last time this condition was updated.", "lastTransitionTime": "The last time the condition transitioned from one status to another.", "reason": "The reason for the condition's last transition.", @@ -58,8 +58,8 @@ func (DeploymentCondition) SwaggerDoc() map[string]string { var map_DeploymentConfig = map[string]string{ "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. 
Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec represents a desired deployment state and how to deploy to it.", - "status": "Status represents the current deployment state.", + "spec": "spec represents a desired deployment state and how to deploy to it.", + "status": "status represents the current deployment state.", } func (DeploymentConfig) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (DeploymentConfig) SwaggerDoc() map[string]string { var map_DeploymentConfigList = map[string]string{ "": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of deployment configs", + "items": "items is a list of deployment configs", } func (DeploymentConfigList) SwaggerDoc() map[string]string { @@ -78,9 +78,9 @@ func (DeploymentConfigList) SwaggerDoc() map[string]string { var map_DeploymentConfigRollback = map[string]string{ "": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "name": "Name of the deployment config that will be rolled back.", - "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", - "spec": "Spec defines the options to rollback generation.", + "name": "name of the deployment config that will be rolled back.", + "updatedAnnotations": "updatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "spec defines the options to rollback generation.", } func (DeploymentConfigRollback) SwaggerDoc() map[string]string { @@ -89,12 +89,12 @@ func (DeploymentConfigRollback) SwaggerDoc() map[string]string { var map_DeploymentConfigRollbackSpec = map[string]string{ "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", - "from": "From points to a ReplicationController which is a deployment.", - "revision": "Revision to rollback to. 
If set to 0, rollback to the last revision.", - "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.", - "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.", - "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.", - "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.", + "from": "from points to a ReplicationController which is a deployment.", + "revision": "revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "includeTriggers specifies whether to include config Triggers.", + "includeTemplate": "includeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "includeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "includeStrategy specifies whether to include the deployment Strategy.", } func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { @@ -103,15 +103,15 @@ func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { var map_DeploymentConfigSpec = map[string]string{ "": "DeploymentConfigSpec represents the desired state of the deployment.", - "strategy": "Strategy describes how a deployment is executed.", - "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. 
If null, defaults to having a config change trigger.", - "replicas": "Replicas is the number of desired replicas.", - "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", - "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", - "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", - "selector": "Selector is a label query over pods that should match the Replicas count.", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", + "strategy": "strategy describes how a deployment is executed.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. 
If null, defaults to having a config change trigger.", + "replicas": "replicas is the number of desired replicas.", + "revisionHistoryLimit": "revisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "selector is a label query over pods that should match the Replicas count.", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected.", } func (DeploymentConfigSpec) SwaggerDoc() map[string]string { @@ -120,14 +120,14 @@ func (DeploymentConfigSpec) SwaggerDoc() map[string]string { var map_DeploymentConfigStatus = map[string]string{ "": "DeploymentConfigStatus represents the current deployment state.", - "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", - "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.", - "replicas": "Replicas is the total number of pods targeted by this deployment config.", - "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template 
spec.", - "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", - "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", - "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", - "conditions": "Conditions represents the latest available observations of a deployment config's current state.", + "latestVersion": "latestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "observedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "updatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "availableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "unavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "details are the reasons for the update to this deployment config. 
This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "conditions represents the latest available observations of a deployment config's current state.", "readyReplicas": "Total number of ready pods targeted by this deployment.", } @@ -137,8 +137,8 @@ func (DeploymentConfigStatus) SwaggerDoc() map[string]string { var map_DeploymentDetails = map[string]string{ "": "DeploymentDetails captures information about the causes of a deployment.", - "message": "Message is the user specified change message, if this deployment was triggered manually by the user", - "causes": "Causes are extended data associated with all the causes for creating a new deployment", + "message": "message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "causes are extended data associated with all the causes for creating a new deployment", } func (DeploymentDetails) SwaggerDoc() map[string]string { @@ -156,15 +156,15 @@ func (DeploymentLog) SwaggerDoc() map[string]string { var map_DeploymentLogOptions = map[string]string{ "": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow if true indicates that the build log should be streamed until the build terminates.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", "previous": "Return previous deployment logs. Defaults to false.", "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. 
Only one of sinceSeconds or sinceTime may be specified.", "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", - "version": "Version of the deployment for which to view logs.", + "nowait": "nowait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.", + "version": "version of the deployment for which to view logs.", } func (DeploymentLogOptions) SwaggerDoc() map[string]string { @@ -173,10 +173,10 @@ func (DeploymentLogOptions) SwaggerDoc() map[string]string { var map_DeploymentRequest = map[string]string{ "": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "name": "Name of the deployment config for requesting a new deployment.", - "latest": "Latest will update the deployment config with the latest state from all triggers.", - "force": "Force will try to force a new deployment to run. 
If the deployment config is paused, then setting this to true will return an Invalid error.", - "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", + "name": "name of the deployment config for requesting a new deployment.", + "latest": "latest will update the deployment config with the latest state from all triggers.", + "force": "force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.", + "excludeTriggers": "excludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. This field is ignored if not specified.", } func (DeploymentRequest) SwaggerDoc() map[string]string { @@ -185,14 +185,14 @@ func (DeploymentRequest) SwaggerDoc() map[string]string { var map_DeploymentStrategy = map[string]string{ "": "DeploymentStrategy describes how to perform a deployment.", - "type": "Type is the name of a deployment strategy.", - "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", - "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.", - "rollingParams": "RollingParams are the input to the Rolling deployment strategy.", - "resources": "Resources contains resource requirements to execute the deployment and any hooks.", - "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", - "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", - "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration 
in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", + "type": "type is the name of a deployment strategy.", + "customParams": "customParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.", + "recreateParams": "recreateParams are the input to the Recreate deployment strategy.", + "rollingParams": "rollingParams are the input to the Rolling deployment strategy.", + "resources": "resources contains resource requirements to execute the deployment and any hooks.", + "labels": "labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "annotations": "annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.", + "activeDeadlineSeconds": "activeDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.", } func (DeploymentStrategy) SwaggerDoc() map[string]string { @@ -201,10 +201,10 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_DeploymentTriggerImageChangeParams = map[string]string{ "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.", - "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.", - "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. 
Future API versions will make this a validation error.", - "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", - "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.", + "automatic": "automatic means that the detection of a new tag value should result in an image update inside the pod template.", + "containerNames": "containerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.", + "from": "from is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.", + "lastTriggeredImage": "lastTriggeredImage is the last image to be triggered.", } func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { @@ -213,8 +213,8 @@ func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string { var map_DeploymentTriggerPolicy = map[string]string{ "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.", - "type": "Type of the trigger", - "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.", + "type": "type of the trigger", + "imageChangeParams": "imageChangeParams represents the parameters for the ImageChange trigger.", } func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { @@ -223,10 +223,10 @@ func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string { var map_ExecNewPodHook = map[string]string{ "": "ExecNewPodHook is a hook 
implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", - "command": "Command is the action command and its arguments.", - "env": "Env is a set of environment variables to supply to the hook pod's container.", - "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", - "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", + "command": "command is the action command and its arguments.", + "env": "env is a set of environment variables to supply to the hook pod's container.", + "containerName": "containerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "volumes": "volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", } func (ExecNewPodHook) SwaggerDoc() map[string]string { @@ -235,9 +235,9 @@ func (ExecNewPodHook) SwaggerDoc() map[string]string { var map_LifecycleHook = map[string]string{ "": "LifecycleHook defines a specific deployment lifecycle action. 
Only one type of action may be specified at any time.", - "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.", - "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", - "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", + "failurePolicy": "failurePolicy specifies what action to take if the hook fails.", + "execNewPod": "execNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "tagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", } func (LifecycleHook) SwaggerDoc() map[string]string { @@ -246,10 +246,10 @@ func (LifecycleHook) SwaggerDoc() map[string]string { var map_RecreateDeploymentStrategyParams = map[string]string{ "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", - "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", - "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", - "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", - "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "pre is a lifecycle hook which is executed before the strategy manipulates the deployment. 
All LifecycleHookFailurePolicy values are supported.", + "mid": "mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", } func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -258,13 +258,13 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_RollingDeploymentStrategyParams = map[string]string{ "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", - "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", - "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", - "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", - "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", - "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. 
Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", - "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", - "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", + "updatePeriodSeconds": "updatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "intervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "timeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. 
Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "maxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods.", + "pre": "pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", } func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { @@ -273,8 +273,8 @@ func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { var map_TagImageHook = map[string]string{ "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", - "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", - "to": "To is the target ImageStreamTag to set the container's image onto.", + "containerName": "containerName is the name of a container in the deployment config whose image value will be used as the source of the tag. 
If there is only a single container this value will be defaulted to the name of that container.", + "to": "to is the target ImageStreamTag to set the container's image onto.", } func (TagImageHook) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index 28e4e8ce62..f7d7b772a7 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -16,10 +16,10 @@ option go_package = "github.com/openshift/api/authorization/v1"; // Action describes a request to the API server message Action { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces optional string namespace = 1; - // Verb is one of: get, list, watch, create, update, delete + // verb is one of: get, list, watch, create, update, delete optional string verb = 2; // Group is the API group of the resource @@ -30,19 +30,19 @@ message Action { // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined optional string resourceAPIVersion = 4; - // Resource is one of the existing resource types + // resource is one of the existing resource types optional string resource = 5; - // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" optional string resourceName = 6; - // Path is the path of a non resource URL + // path is the path of a non resource URL optional string path = 8; - // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + 
// isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) optional bool isNonResourceURL = 9; - // Content is the actual content of the request for create and update + // content is the actual content of the request for create and update // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension content = 7; } @@ -56,10 +56,10 @@ message ClusterRole { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Rules holds all the PolicyRules for this ClusterRole + // rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be // stomped by the controller. optional .k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3; @@ -76,28 +76,28 @@ message ClusterRoleBinding { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames userNames = 2; - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. 
// +k8s:conversion-gen=false // +optional optional OptionalNames groupNames = 3; - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; @@ -112,7 +112,7 @@ message ClusterRoleBindingList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of ClusterRoleBindings + // items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; } @@ -125,14 +125,14 @@ message ClusterRoleList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of ClusterRoles + // items is a list of ClusterRoles repeated ClusterRole items = 2; } // GroupRestriction matches a group either by a string match on the group name // or a label selector applied to group labels. 
message GroupRestriction { - // Groups is a list of groups used to match against an individual user's + // groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. // +nullable @@ -175,14 +175,14 @@ message LocalSubjectAccessReview { // Action describes the action being tested. The Namespace element is FORCED to the current namespace. optional Action Action = 1; - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. optional string user = 2; - // Groups is optional. Groups is the list of groups to which the User belongs. + // groups is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false repeated string groups = 3; - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. 
// +k8s:conversion-gen=false @@ -191,37 +191,37 @@ message LocalSubjectAccessReview { // NamedClusterRole relates a name with a cluster role message NamedClusterRole { - // Name is the name of the cluster role + // name is the name of the cluster role optional string name = 1; - // Role is the cluster role being named + // role is the cluster role being named optional ClusterRole role = 2; } // NamedClusterRoleBinding relates a name with a cluster role binding message NamedClusterRoleBinding { - // Name is the name of the cluster role binding + // name is the name of the cluster role binding optional string name = 1; - // RoleBinding is the cluster role binding being named + // roleBinding is the cluster role binding being named optional ClusterRoleBinding roleBinding = 2; } // NamedRole relates a Role with a name message NamedRole { - // Name is the name of the role + // name is the name of the role optional string name = 1; - // Role is the role being named + // role is the role being named optional Role role = 2; } // NamedRoleBinding relates a role binding with a name message NamedRoleBinding { - // Name is the name of the role binding + // name is the name of the role binding optional string name = 1; - // RoleBinding is the role binding being named + // roleBinding is the role binding being named optional RoleBinding roleBinding = 2; } @@ -246,25 +246,25 @@ message OptionalScopes { // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. 
repeated string verbs = 1; - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2; - // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request // will be allowed // +optional // +nullable repeated string apiGroups = 3; - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // resources is a list of resources this rule applies to. ResourceAll represents all resources. repeated string resources = 4; - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. repeated string resourceNames = 5; // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -291,7 +291,7 @@ message ResourceAccessReview { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 message ResourceAccessReviewResponse { - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review optional string namespace = 1; // UsersSlice is the list of users who can perform the action @@ -317,7 +317,7 @@ message Role { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Rules holds all the PolicyRules for this Role + // rules holds all the PolicyRules for this Role repeated PolicyRule rules = 2; } @@ -332,28 +332,28 @@ message RoleBinding { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames userNames = 2; - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional optional OptionalNames groupNames = 3; - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. 
// Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. repeated .k8s.io.api.core.v1.ObjectReference subjects = 4; - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. optional .k8s.io.api.core.v1.ObjectReference roleRef = 5; @@ -368,7 +368,7 @@ message RoleBindingList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of RoleBindings + // items is a list of RoleBindings repeated RoleBinding items = 2; } @@ -390,7 +390,7 @@ message RoleBindingRestriction { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the matcher. + // spec defines the matcher. optional RoleBindingRestrictionSpec spec = 2; } @@ -403,22 +403,22 @@ message RoleBindingRestrictionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of RoleBindingRestriction objects. + // items is a list of RoleBindingRestriction objects. repeated RoleBindingRestriction items = 2; } // RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one // field must be non-nil. message RoleBindingRestrictionSpec { - // UserRestriction matches against user subjects. + // userrestriction matches against user subjects. // +nullable optional UserRestriction userrestriction = 1; - // GroupRestriction matches against group subjects. 
+ // grouprestriction matches against group subjects. // +nullable optional GroupRestriction grouprestriction = 2; - // ServiceAccountRestriction matches against service-account subjects. + // serviceaccountrestriction matches against service-account subjects. // +nullable optional ServiceAccountRestriction serviceaccountrestriction = 3; } @@ -432,7 +432,7 @@ message RoleList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of Roles + // items is a list of Roles repeated Role items = 2; } @@ -445,16 +445,16 @@ message SelfSubjectRulesReview { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check optional SelfSubjectRulesReviewSpec spec = 1; - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have optional SubjectRulesReviewStatus status = 2; } // SelfSubjectRulesReviewSpec adds information about how to conduct the check message SelfSubjectRulesReviewSpec { - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil means "use the scopes on this request". // +k8s:conversion-gen=false optional OptionalScopes scopes = 1; @@ -463,10 +463,10 @@ message SelfSubjectRulesReviewSpec { // ServiceAccountReference specifies a service account and namespace by their // names. message ServiceAccountReference { - // Name is the name of the service account. + // name is the name of the service account. 
optional string name = 1; - // Namespace is the namespace of the service account. Service accounts from + // namespace is the namespace of the service account. Service accounts from // inside the whitelisted namespaces are allowed to be bound to roles. If // Namespace is empty, then the namespace of the RoleBindingRestriction in // which the ServiceAccountReference is embedded is used. @@ -477,10 +477,10 @@ message ServiceAccountReference { // either the service-account name or the name of the service account's // namespace. message ServiceAccountRestriction { - // ServiceAccounts specifies a list of literal service-account names. + // serviceaccounts specifies a list of literal service-account names. repeated ServiceAccountReference serviceaccounts = 1; - // Namespaces specifies a list of literal namespace names. + // namespaces specifies a list of literal namespace names. repeated string namespaces = 2; } @@ -496,14 +496,14 @@ message SubjectAccessReview { // Action describes the action being tested. optional Action Action = 1; - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. optional string user = 2; // GroupsSlice is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false repeated string groups = 3; - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -515,16 +515,16 @@ message SubjectAccessReview { // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 message SubjectAccessReviewResponse { - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review optional string namespace = 1; - // Allowed is required. True if the action would be allowed, false otherwise. + // allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 2; - // Reason is optional. It indicates why a request was allowed or denied. + // reason is optional. It indicates why a request was allowed or denied. optional string reason = 3; - // EvaluationError is an indication that some error occurred during the authorization check. + // evaluationError is an indication that some error occurred during the authorization check. // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. optional string evaluationError = 4; @@ -539,31 +539,31 @@ message SubjectRulesReview { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 3; - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check optional SubjectRulesReviewSpec spec = 1; - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have optional SubjectRulesReviewStatus status = 2; } // SubjectRulesReviewSpec adds information about how to conduct the check message SubjectRulesReviewSpec { - // User is optional. At least one of User and Groups must be specified. + // user is optional. At least one of User and Groups must be specified. optional string user = 1; - // Groups is optional. 
Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. repeated string groups = 2; - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". optional OptionalScopes scopes = 3; } // SubjectRulesReviewStatus is contains the result of a rules check message SubjectRulesReviewStatus { - // Rules is the list of rules (no particular sort) that are allowed for the subject + // rules is the list of rules (no particular sort) that are allowed for the subject repeated PolicyRule rules = 1; - // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. optional string evaluationError = 2; } @@ -572,10 +572,10 @@ message SubjectRulesReviewStatus { // a string match on the name of a group to which the user belongs, or a label // selector applied to the user labels. message UserRestriction { - // Users specifies a list of literal user names. + // users specifies a list of literal user names. repeated string users = 1; - // Groups specifies a list of literal group names. + // groups specifies a list of literal group names. 
// +nullable repeated string groups = 2; diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index e8dd0c29fa..bf4071867f 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -28,21 +28,21 @@ const ( // PolicyRule holds information that describes a policy rule, but does not contain information // about who the rule applies to or which namespace the rule applies to. type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + // verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. // +kubebuilder:pruning:PreserveUnknownFields AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` - // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. 
// That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request // will be allowed // +optional // +nullable APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // resources is a list of resources this rule applies to. ResourceAll represents all resources. Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. @@ -73,7 +73,7 @@ type Role struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Rules holds all the PolicyRules for this Role + // rules holds all the PolicyRules for this Role Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -102,26 +102,26 @@ type RoleBinding struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. 
// This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the RoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. 
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` @@ -129,17 +129,17 @@ type RoleBinding struct { // NamedRole relates a Role with a name type NamedRole struct { - // Name is the name of the role + // name is the name of the role Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Role is the role being named + // role is the role being named Role Role `json:"role" protobuf:"bytes,2,opt,name=role"` } // NamedRoleBinding relates a role binding with a name type NamedRoleBinding struct { - // Name is the name of the role binding + // name is the name of the role binding Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // RoleBinding is the role binding being named + // roleBinding is the role binding being named RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` } @@ -158,16 +158,16 @@ type SelfSubjectRulesReview struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // SelfSubjectRulesReviewSpec adds information about how to conduct the check type SelfSubjectRulesReviewSpec struct { - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil means "use the scopes on this request". 
// +k8s:conversion-gen=false Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"` @@ -188,28 +188,28 @@ type SubjectRulesReview struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,3,opt,name=metadata"` - // Spec adds information about how to conduct the check + // spec adds information about how to conduct the check Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` - // Status is completed by the server to tell which permissions you have + // status is completed by the server to tell which permissions you have Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // SubjectRulesReviewSpec adds information about how to conduct the check type SubjectRulesReviewSpec struct { - // User is optional. At least one of User and Groups must be specified. + // user is optional. At least one of User and Groups must be specified. User string `json:"user" protobuf:"bytes,1,opt,name=user"` - // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + // groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". 
Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"` } // SubjectRulesReviewStatus is contains the result of a rules check type SubjectRulesReviewStatus struct { - // Rules is the list of rules (no particular sort) that are allowed for the subject + // rules is the list of rules (no particular sort) that are allowed for the subject Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` - // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` } @@ -223,7 +223,7 @@ type SubjectRulesReviewStatus struct { type ResourceAccessReviewResponse struct { metav1.TypeMeta `json:",inline"` - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` // UsersSlice is the list of users who can perform the action // +k8s:conversion-gen=false @@ -269,13 +269,13 @@ type ResourceAccessReview struct { type SubjectAccessReviewResponse struct { metav1.TypeMeta `json:",inline"` - // Namespace is the namespace used for the access review + // namespace is the namespace used for the access review Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` - // Allowed is required. True if the action would be allowed, false otherwise. + // allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` - // Reason is optional. It indicates why a request was allowed or denied. + // reason is optional. It indicates why a request was allowed or denied. 
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // EvaluationError is an indication that some error occurred during the authorization check. + // evaluationError is an indication that some error occurred during the authorization check. // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` @@ -309,12 +309,12 @@ type SubjectAccessReview struct { // Action describes the action being tested. Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. User string `json:"user" protobuf:"bytes,2,opt,name=user"` // GroupsSlice is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -359,12 +359,12 @@ type LocalSubjectAccessReview struct { // Action describes the action being tested. The Namespace element is FORCED to the current namespace. Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` - // User is optional. If both User and Groups are empty, the current authenticated user is used. + // user is optional. If both User and Groups are empty, the current authenticated user is used. 
User string `json:"user" protobuf:"bytes,2,opt,name=user"` - // Groups is optional. Groups is the list of groups to which the User belongs. + // groups is optional. Groups is the list of groups to which the User belongs. // +k8s:conversion-gen=false GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` - // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". // Nil for a self-SAR, means "use the scopes on this request". // Nil for a regular SAR, means the same as empty. // +k8s:conversion-gen=false @@ -373,9 +373,9 @@ type LocalSubjectAccessReview struct { // Action describes a request to the API server type Action struct { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Verb is one of: get, list, watch, create, update, delete + // verb is one of: get, list, watch, create, update, delete Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"` // Group is the API group of the resource // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined @@ -383,15 +383,15 @@ type Action struct { // Version is the API version of the resource // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"` - // Resource is one of the existing resource types + // resource is one of the existing resource types Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"` - // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + // resourceName is the name of the resource being requested for a "get" or deleted for a "delete" ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"` - // Path is the path of a non resource URL + // path is the path of a non resource URL Path string `json:"path" protobuf:"bytes,8,opt,name=path"` - // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + // isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"` - // Content is the actual content of the request for create and update + // content is the actual content of the request for create and update // +kubebuilder:pruning:PreserveUnknownFields Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"` } @@ -409,7 +409,7 @@ type 
RoleBindingList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of RoleBindings + // items is a list of RoleBindings Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -426,7 +426,7 @@ type RoleList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of Roles + // items is a list of Roles Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -445,10 +445,10 @@ type ClusterRole struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Rules holds all the PolicyRules for this ClusterRole + // rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be // stomped by the controller. AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` @@ -471,26 +471,26 @@ type ClusterRoleBinding struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // UserNames holds all the usernames directly bound to the role. + // userNames holds all the usernames directly bound to the role. 
// This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` - // GroupNames holds all the groups directly bound to the role. + // groupNames holds all the groups directly bound to the role. // This field should only be specified when supporting legacy clients and servers. // See Subjects for further details. // +k8s:conversion-gen=false // +optional GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` - // Subjects hold object references to authorize with this rule. + // subjects hold object references to authorize with this rule. // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. // Thus newer clients that do not need to support backwards compatibility should send // only fully qualified Subjects and should omit the UserNames and GroupNames fields. // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` - // RoleRef can only reference the current namespace and the global namespace. + // roleRef can only reference the current namespace and the global namespace. // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. // Since Policy is a singleton, this is sufficient knowledge to locate a role. 
RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` @@ -498,17 +498,17 @@ type ClusterRoleBinding struct { // NamedClusterRole relates a name with a cluster role type NamedClusterRole struct { - // Name is the name of the cluster role + // name is the name of the cluster role Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Role is the cluster role being named + // role is the cluster role being named Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"` } // NamedClusterRoleBinding relates a name with a cluster role binding type NamedClusterRoleBinding struct { - // Name is the name of the cluster role binding + // name is the name of the cluster role binding Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // RoleBinding is the cluster role binding being named + // roleBinding is the cluster role binding being named RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` } @@ -525,7 +525,7 @@ type ClusterRoleBindingList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterRoleBindings + // items is a list of ClusterRoleBindings Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -542,7 +542,7 @@ type ClusterRoleList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterRoles + // items is a list of ClusterRoles Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -569,22 +569,22 @@ type RoleBindingRestriction struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" 
protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the matcher. + // spec defines the matcher. Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` } // RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one // field must be non-nil. type RoleBindingRestrictionSpec struct { - // UserRestriction matches against user subjects. + // userrestriction matches against user subjects. // +nullable UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` - // GroupRestriction matches against group subjects. + // grouprestriction matches against group subjects. // +nullable GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` - // ServiceAccountRestriction matches against service-account subjects. + // serviceaccountrestriction matches against service-account subjects. // +nullable ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` } @@ -602,7 +602,7 @@ type RoleBindingRestrictionList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of RoleBindingRestriction objects. + // items is a list of RoleBindingRestriction objects. Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -610,10 +610,10 @@ type RoleBindingRestrictionList struct { // a string match on the name of a group to which the user belongs, or a label // selector applied to the user labels. type UserRestriction struct { - // Users specifies a list of literal user names. + // users specifies a list of literal user names. Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` - // Groups specifies a list of literal group names. 
+ // groups specifies a list of literal group names. // +nullable Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` @@ -625,7 +625,7 @@ type UserRestriction struct { // GroupRestriction matches a group either by a string match on the group name // or a label selector applied to group labels. type GroupRestriction struct { - // Groups is a list of groups used to match against an individual user's + // groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. // +nullable @@ -640,20 +640,20 @@ type GroupRestriction struct { // either the service-account name or the name of the service account's // namespace. type ServiceAccountRestriction struct { - // ServiceAccounts specifies a list of literal service-account names. + // serviceaccounts specifies a list of literal service-account names. ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"` - // Namespaces specifies a list of literal namespace names. + // namespaces specifies a list of literal namespace names. Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` } // ServiceAccountReference specifies a service account and namespace by their // names. type ServiceAccountReference struct { - // Name is the name of the service account. + // name is the name of the service account. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Namespace is the namespace of the service account. Service accounts from + // namespace is the namespace of the service account. Service accounts from // inside the whitelisted namespaces are allowed to be bound to roles. If // Namespace is empty, then the namespace of the RoleBindingRestriction in // which the ServiceAccountReference is embedded is used. 
diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go index a8f9b374e2..a1c28a3ec1 100644 --- a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go @@ -13,15 +13,15 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_Action = map[string]string{ "": "Action describes a request to the API server", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", - "verb": "Verb is one of: get, list, watch, create, update, delete", + "namespace": "namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "verb": "verb is one of: get, list, watch, create, update, delete", "resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined", "resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined", - "resource": "Resource is one of the existing resource types", - "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", - "path": "Path is the path of a non resource URL", - "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", - "content": "Content is the actual content of the request for create and update", + "resource": "resource is one of the existing resource types", + "resourceName": "resourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "path": "path 
is the path of a non resource URL", + "isNonResourceURL": "isNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "content": "content is the actual content of the request for create and update", } func (Action) SwaggerDoc() map[string]string { @@ -31,8 +31,8 @@ func (Action) SwaggerDoc() map[string]string { var map_ClusterRole = map[string]string{ "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "rules": "Rules holds all the PolicyRules for this ClusterRole", - "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + "rules": "rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "aggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { @@ -42,10 +42,10 @@ func (ClusterRole) SwaggerDoc() map[string]string { var map_ClusterRoleBinding = map[string]string{ "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. 
ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", - "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. 
This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", } func (ClusterRoleBinding) SwaggerDoc() map[string]string { @@ -55,7 +55,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { var map_ClusterRoleBindingList = map[string]string{ "": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterRoleBindings", + "items": "items is a list of ClusterRoleBindings", } func (ClusterRoleBindingList) SwaggerDoc() map[string]string { @@ -65,7 +65,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { var map_ClusterRoleList = map[string]string{ "": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterRoles", + "items": "items is a list of ClusterRoles", } func (ClusterRoleList) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string { var map_GroupRestriction = map[string]string{ "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.", - "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "groups": "groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", "labels": "Selectors specifies a list of label selectors over group labels.", } @@ -102,9 +102,9 @@ func (LocalResourceAccessReview) SwaggerDoc() map[string]string { var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", - "groups": "Groups is optional. Groups is the list of groups to which the User belongs.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", } func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { @@ -113,8 +113,8 @@ func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { var map_NamedClusterRole = map[string]string{ "": "NamedClusterRole relates a name with a cluster role", - "name": "Name is the name of the cluster role", - "role": "Role is the cluster role being named", + "name": "name is the name of the cluster role", + "role": "role is the cluster role being named", } func (NamedClusterRole) SwaggerDoc() map[string]string { @@ -123,8 +123,8 @@ func (NamedClusterRole) SwaggerDoc() map[string]string { var map_NamedClusterRoleBinding = map[string]string{ "": "NamedClusterRoleBinding relates a name with a cluster role binding", - "name": "Name is the name of the cluster role binding", - "roleBinding": "RoleBinding is the cluster role binding being named", + "name": "name is the name of the cluster role binding", + "roleBinding": "roleBinding is the cluster role binding being named", } func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { @@ -133,8 +133,8 @@ func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { var map_NamedRole = map[string]string{ "": "NamedRole relates a Role with a name", - "name": "Name is the name of the role", - "role": "Role is the role being named", + "name": "name is the name of the role", + "role": "role is the role being named", } func (NamedRole) SwaggerDoc() map[string]string { @@ -143,8 +143,8 @@ func (NamedRole) SwaggerDoc() map[string]string { var 
map_NamedRoleBinding = map[string]string{ "": "NamedRoleBinding relates a role binding with a name", - "name": "Name is the name of the role binding", - "roleBinding": "RoleBinding is the role binding being named", + "name": "name is the name of the role binding", + "roleBinding": "roleBinding is the role binding being named", } func (NamedRoleBinding) SwaggerDoc() map[string]string { @@ -153,11 +153,11 @@ func (NamedRoleBinding) SwaggerDoc() map[string]string { var map_PolicyRule = map[string]string{ "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "verbs": "verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "attributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. 
If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "apiGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "resources": "resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "resourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.", } @@ -176,7 +176,7 @@ func (ResourceAccessReview) SwaggerDoc() map[string]string { var map_ResourceAccessReviewResponse = map[string]string{ "": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "namespace": "Namespace is the namespace used for the access review", + "namespace": "namespace is the namespace used for the access review", "users": "UsersSlice is the list of users who can perform the action", "groups": "GroupsSlice is the list of groups who can perform the action", "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", @@ -189,7 +189,7 @@ func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string { var map_Role = map[string]string{ "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "rules": "Rules holds all the PolicyRules for this Role", + "rules": "rules holds all the PolicyRules for this Role", } func (Role) SwaggerDoc() map[string]string { @@ -199,10 +199,10 @@ func (Role) SwaggerDoc() map[string]string { var map_RoleBinding = map[string]string{ "": "RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", - "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. 
See Subjects for further details.", - "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", - "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", + "userNames": "userNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "groupNames": "groupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.", + "subjects": "subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "roleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. 
Since Policy is a singleton, this is sufficient knowledge to locate a role.", } func (RoleBinding) SwaggerDoc() map[string]string { @@ -212,7 +212,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { var map_RoleBindingList = map[string]string{ "": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of RoleBindings", + "items": "items is a list of RoleBindings", } func (RoleBindingList) SwaggerDoc() map[string]string { @@ -222,7 +222,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { var map_RoleBindingRestriction = map[string]string{ "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the matcher.", + "spec": "spec defines the matcher.", } func (RoleBindingRestriction) SwaggerDoc() map[string]string { @@ -232,7 +232,7 @@ func (RoleBindingRestriction) SwaggerDoc() map[string]string { var map_RoleBindingRestrictionList = map[string]string{ "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of RoleBindingRestriction objects.", + "items": "items is a list of RoleBindingRestriction objects.", } func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { @@ -241,9 +241,9 @@ func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { var map_RoleBindingRestrictionSpec = map[string]string{ "": "RoleBindingRestrictionSpec defines a rolebinding restriction. 
Exactly one field must be non-nil.", - "userrestriction": "UserRestriction matches against user subjects.", - "grouprestriction": "GroupRestriction matches against group subjects.", - "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.", + "userrestriction": "userrestriction matches against user subjects.", + "grouprestriction": "grouprestriction matches against group subjects.", + "serviceaccountrestriction": "serviceaccountrestriction matches against service-account subjects.", } func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { @@ -253,7 +253,7 @@ func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { var map_RoleList = map[string]string{ "": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of Roles", + "items": "items is a list of Roles", } func (RoleList) SwaggerDoc() map[string]string { @@ -263,8 +263,8 @@ func (RoleList) SwaggerDoc() map[string]string { var map_SelfSubjectRulesReview = map[string]string{ "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec adds information about how to conduct the check", - "status": "Status is completed by the server to tell which permissions you have", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", } func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { @@ -273,7 +273,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { var map_SelfSubjectRulesReviewSpec = map[string]string{ "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", } func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { @@ -282,8 +282,8 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { var map_ServiceAccountReference = map[string]string{ "": "ServiceAccountReference specifies a service account and namespace by their names.", - "name": "Name is the name of the service account.", - "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", + "name": "name is the name of the service account.", + "namespace": "namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. 
If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", } func (ServiceAccountReference) SwaggerDoc() map[string]string { @@ -292,8 +292,8 @@ func (ServiceAccountReference) SwaggerDoc() map[string]string { var map_ServiceAccountRestriction = map[string]string{ "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.", - "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.", - "namespaces": "Namespaces specifies a list of literal namespace names.", + "serviceaccounts": "serviceaccounts specifies a list of literal service-account names.", + "namespaces": "namespaces specifies a list of literal namespace names.", } func (ServiceAccountRestriction) SwaggerDoc() map[string]string { @@ -303,9 +303,9 @@ func (ServiceAccountRestriction) SwaggerDoc() map[string]string { var map_SubjectAccessReview = map[string]string{ "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "user": "user is optional. If both User and Groups are empty, the current authenticated user is used.", "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", + "scopes": "scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", } func (SubjectAccessReview) SwaggerDoc() map[string]string { @@ -314,10 +314,10 @@ func (SubjectAccessReview) SwaggerDoc() map[string]string { var map_SubjectAccessReviewResponse = map[string]string{ "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "namespace": "Namespace is the namespace used for the access review", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", - "reason": "Reason is optional. It indicates why a request was allowed or denied.", - "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", + "namespace": "namespace is the namespace used for the access review", + "allowed": "allowed is required. True if the action would be allowed, false otherwise.", + "reason": "reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "evaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.", } func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { @@ -327,8 +327,8 @@ func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string { var map_SubjectRulesReview = map[string]string{ "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec adds information about how to conduct the check", - "status": "Status is completed by the server to tell which permissions you have", + "spec": "spec adds information about how to conduct the check", + "status": "status is completed by the server to tell which permissions you have", } func (SubjectRulesReview) SwaggerDoc() map[string]string { @@ -337,9 +337,9 @@ func (SubjectRulesReview) SwaggerDoc() map[string]string { var map_SubjectRulesReviewSpec = map[string]string{ "": "SubjectRulesReviewSpec adds information about how to conduct the check", - "user": "User is optional. At least one of User and Groups must be specified.", - "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", - "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".", + "user": "user is optional. At least one of User and Groups must be specified.", + "groups": "groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.", + "scopes": "scopes to use for the evaluation. 
Empty means \"use the unscoped (full) permissions of the user/groups\".", } func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { @@ -348,8 +348,8 @@ func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string { var map_SubjectRulesReviewStatus = map[string]string{ "": "SubjectRulesReviewStatus is contains the result of a rules check", - "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject", - "evaluationError": "EvaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", + "rules": "rules is the list of rules (no particular sort) that are allowed for the subject", + "evaluationError": "evaluationError can appear in combination with Rules. It means some error happened during evaluation that may have prevented additional rules from being populated.", } func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { @@ -358,8 +358,8 @@ func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { var map_UserRestriction = map[string]string{ "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.", - "users": "Users specifies a list of literal user names.", - "groups": "Groups specifies a list of literal group names.", + "users": "users specifies a list of literal user names.", + "groups": "groups specifies a list of literal group names.", "labels": "Selectors specifies a list of label selectors over user labels.", } diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index b71670f4ec..92ae73426c 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -81,10 +81,10 @@ message Build { // BuildCondition describes the state of a 
build at a certain point. message BuildCondition { - // Type of build condition. + // type of build condition. optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; // The last time this condition was updated. @@ -141,7 +141,7 @@ message BuildConfigSpec { // +optional repeated BuildTriggerPolicy triggers = 1; - // RunPolicy describes how the new build created from this build + // runPolicy describes how the new build created from this build // configuration will be scheduled for execution. // This is optional, if not specified we default to "Serial". optional string runPolicy = 2; @@ -165,7 +165,7 @@ message BuildConfigStatus { // lastVersion is used to inform about number of last triggered build. optional int64 lastVersion = 1; - // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. repeated ImageChangeTriggerStatus imageChangeTriggers = 2; @@ -231,7 +231,7 @@ message BuildLogOptions { // slightly more or slightly less than the specified limit. optional int64 limitBytes = 8; - // noWait if true causes the call to return immediately even if the build + // nowait if true causes the call to return immediately even if the build // is not available yet. Otherwise the server will wait until the build has started. // TODO: Fix the tag to 'noWait' in v2 optional bool nowait = 9; @@ -259,7 +259,7 @@ message BuildOutput { // the build unless Namespace is specified. 
optional .k8s.io.api.core.v1.ObjectReference to = 1; - // PushSecret is the name of a Secret that would be used for setting + // pushSecret is the name of a Secret that would be used for setting // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). optional .k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; @@ -392,10 +392,10 @@ message BuildRequest { // build configuration and contains information about those triggers. repeated BuildTriggerCause triggeredBy = 8; - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build optional DockerStrategyOptions dockerStrategyOptions = 9; - // SourceStrategyOptions contains additional source-strategy specific options for the build + // sourceStrategyOptions contains additional source-strategy specific options for the build optional SourceStrategyOptions sourceStrategyOptions = 10; } @@ -510,7 +510,7 @@ message BuildStatus { // logSnippet is the last few lines of the build log. This value is only set for builds that failed. optional string logSnippet = 12; - // Conditions represents the latest available observations of a build's current state. + // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge repeated BuildCondition conditions = 13; @@ -550,7 +550,7 @@ message BuildStrategy { // customStrategy holds the parameters to the Custom build strategy optional CustomBuildStrategy customStrategy = 4; - // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. 
// Deprecated: use OpenShift Pipelines optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; } @@ -567,7 +567,7 @@ message BuildTriggerCause { // genericWebHook holds data about a builds generic webhook trigger. optional GenericWebHookCause genericWebHook = 2; - // gitHubWebHook represents data for a GitHub webhook that fired a + // githubWebHook represents data for a GitHub webhook that fired a // specific build. optional GitHubWebHookCause githubWebHook = 3; @@ -575,11 +575,11 @@ message BuildTriggerCause { // that triggered a new build. optional ImageChangeCause imageChangeBuild = 4; - // GitLabWebHook represents data for a GitLab webhook that fired a specific + // gitlabWebHook represents data for a GitLab webhook that fired a specific // build. optional GitLabWebHookCause gitlabWebHook = 5; - // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // bitbucketWebHook represents data for a Bitbucket webhook that fired a // specific build. optional BitbucketWebHookCause bitbucketWebHook = 6; } @@ -743,10 +743,10 @@ message CommonSpec { // causes into struct so we can share it in the specific causes; it is too late for // GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. message CommonWebHookCause { - // Revision is the git source revision information of the trigger. + // revision is the git source revision information of the trigger. optional SourceRevision revision = 1; - // Secret is the obfuscated webhook secret that triggered a build. + // secret is the obfuscated webhook secret that triggered a build. optional string secret = 2; } @@ -884,7 +884,7 @@ message GenericWebHookEvent { // ValueFrom is not supported. 
repeated .k8s.io.api.core.v1.EnvVar env = 3; - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build optional DockerStrategyOptions dockerStrategyOptions = 4; } @@ -918,7 +918,7 @@ message GitInfo { optional GitSourceRevision gitSourceRevision = 2; - // Refs is a list of GitRefs for the provided repo - generally sent + // refs is a list of GitRefs for the provided repo - generally sent // when used from a post-receive hook. This field is optional and is // used when sending multiple refs repeated GitRefInfo refs = 3; @@ -1061,12 +1061,12 @@ message ImageStreamTagReference { // JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. // Deprecated: use OpenShift Pipelines message JenkinsPipelineBuildStrategy { - // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. optional string jenkinsfilePath = 1; - // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. optional string jenkinsfile = 2; // env contains additional environment variables you want to pass into a build pipeline. 
@@ -1115,7 +1115,7 @@ message SecretBuildSource { // SecretLocalReference contains information that points to the local secret being used message SecretLocalReference { - // Name is the name of the resource in the same namespace being referenced + // name is the name of the resource in the same namespace being referenced optional string name = 1; } @@ -1176,7 +1176,7 @@ message SourceRevision { // +k8s:conversion-gen=false optional string type = 1; - // Git contains information about git-based build source + // git contains information about git-based build source optional GitSourceRevision git = 2; } diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index ba836aad81..12bf67db1a 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -116,7 +116,7 @@ type BuildTriggerCause struct { // genericWebHook holds data about a builds generic webhook trigger. GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"` - // gitHubWebHook represents data for a GitHub webhook that fired a + // githubWebHook represents data for a GitHub webhook that fired a //specific build. GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"` @@ -124,11 +124,11 @@ type BuildTriggerCause struct { // that triggered a new build. ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"` - // GitLabWebHook represents data for a GitLab webhook that fired a specific + // gitlabWebHook represents data for a GitLab webhook that fired a specific // build. 
GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"` - // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // bitbucketWebHook represents data for a Bitbucket webhook that fired a // specific build. BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"` } @@ -158,10 +158,10 @@ type GitHubWebHookCause struct { // causes into struct so we can share it in the specific causes; it is too late for // GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. type CommonWebHookCause struct { - // Revision is the git source revision information of the trigger. + // revision is the git source revision information of the trigger. Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` - // Secret is the obfuscated webhook secret that triggered a build. + // secret is the obfuscated webhook secret that triggered a build. Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` } @@ -237,7 +237,7 @@ type BuildStatus struct { // logSnippet is the last few lines of the build log. This value is only set for builds that failed. LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` - // Conditions represents the latest available observations of a build's current state. + // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` @@ -358,9 +358,9 @@ type BuildConditionType string // BuildCondition describes the state of a build at a certain point. type BuildCondition struct { - // Type of build condition. + // type of build condition. 
Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` // The last time this condition was updated. LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` @@ -562,7 +562,7 @@ type SourceRevision struct { // +k8s:conversion-gen=false Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` - // Git contains information about git-based build source + // git contains information about git-based build source Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` } @@ -632,7 +632,7 @@ type BuildStrategy struct { // customStrategy holds the parameters to the Custom build strategy CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"` - // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. // Deprecated: use OpenShift Pipelines JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"` } @@ -801,12 +801,12 @@ type SourceBuildStrategy struct { // JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. // Deprecated: use OpenShift Pipelines type JenkinsPipelineBuildStrategy struct { - // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline + // jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline // relative to the root of the context (contextDir). 
If both JenkinsfilePath & Jenkinsfile are // both not specified, this defaults to Jenkinsfile in the root of the specified contextDir. JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"` - // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. + // jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build. Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"` // env contains additional environment variables you want to pass into a build pipeline. @@ -911,7 +911,7 @@ type BuildOutput struct { // the build unless Namespace is specified. To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"` - // PushSecret is the name of a Secret that would be used for setting + // pushSecret is the name of a Secret that would be used for setting // up the authentication for executing the Docker push to authentication // enabled Docker Registry (or Docker Hub). PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"` @@ -964,7 +964,7 @@ type BuildConfigSpec struct { // +optional Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"` - // RunPolicy describes how the new build created from this build + // runPolicy describes how the new build created from this build // configuration will be scheduled for execution. // This is optional, if not specified we default to "Serial". RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"` @@ -1007,7 +1007,7 @@ type BuildConfigStatus struct { // lastVersion is used to inform about number of last triggered build. 
LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"` - // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"` @@ -1015,7 +1015,7 @@ type BuildConfigStatus struct { // SecretLocalReference contains information that points to the local secret being used type SecretLocalReference struct { - // Name is the name of the resource in the same namespace being referenced + // name is the name of the resource in the same namespace being referenced Name string `json:"name" protobuf:"bytes,1,opt,name=name"` } @@ -1203,7 +1203,7 @@ type GenericWebHookEvent struct { // ValueFrom is not supported. Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"` - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"` } @@ -1212,7 +1212,7 @@ type GitInfo struct { GitBuildSource `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"` GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"` - // Refs is a list of GitRefs for the provided repo - generally sent + // refs is a list of GitRefs for the provided repo - generally sent // when used from a post-receive hook. 
This field is optional and is // used when sending multiple refs Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"` @@ -1287,10 +1287,10 @@ type BuildRequest struct { // build configuration and contains information about those triggers. TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"` - // DockerStrategyOptions contains additional docker-strategy specific options for the build + // dockerStrategyOptions contains additional docker-strategy specific options for the build DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"` - // SourceStrategyOptions contains additional source-strategy specific options for the build + // sourceStrategyOptions contains additional source-strategy specific options for the build SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"` } @@ -1368,7 +1368,7 @@ type BuildLogOptions struct { // slightly more or slightly less than the specified limit. LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` - // noWait if true causes the call to return immediately even if the build + // nowait if true causes the call to return immediately even if the build // is not available yet. Otherwise the server will wait until the build has started. 
// TODO: Fix the tag to 'noWait' in v2 NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go index 72ff507b7d..1da7843537 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -57,8 +57,8 @@ func (Build) SwaggerDoc() map[string]string { var map_BuildCondition = map[string]string{ "": "BuildCondition describes the state of a build at a certain point.", - "type": "Type of build condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "type": "type of build condition.", + "status": "status of the condition, one of True, False, Unknown.", "lastUpdateTime": "The last time this condition was updated.", "lastTransitionTime": "The last time the condition transitioned from one status to another.", "reason": "The reason for the condition's last transition.", @@ -93,7 +93,7 @@ func (BuildConfigList) SwaggerDoc() map[string]string { var map_BuildConfigSpec = map[string]string{ "": "BuildConfigSpec describes when and how builds are created", "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", - "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", + "runPolicy": "runPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. 
When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", } @@ -105,7 +105,7 @@ func (BuildConfigSpec) SwaggerDoc() map[string]string { var map_BuildConfigStatus = map[string]string{ "": "BuildConfigStatus contains current state of the build config object.", "lastVersion": "lastVersion is used to inform about number of last triggered build.", - "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", + "imageChangeTriggers": "imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", } func (BuildConfigStatus) SwaggerDoc() map[string]string { @@ -140,7 +140,7 @@ var map_BuildLogOptions = map[string]string{ "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. 
If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "nowait": "nowait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", "version": "version of the build for which to view logs.", "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", } @@ -152,7 +152,7 @@ func (BuildLogOptions) SwaggerDoc() map[string]string { var map_BuildOutput = map[string]string{ "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. 
In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", - "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "pushSecret": "pushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", } @@ -181,8 +181,8 @@ var map_BuildRequest = map[string]string{ "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", "env": "env contains additional environment variables you want to pass into a builder container.", "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", - "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", - "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "sourceStrategyOptions contains additional source-strategy specific options for the build", } func (BuildRequest) SwaggerDoc() map[string]string { @@ -229,7 +229,7 @@ var map_BuildStatus = map[string]string{ "output": "output describes the container image the build has produced.", "stages": "stages contains details about each stage that occurs during the build including start time, 
duration (in milliseconds), and the steps that occured within each stage.", "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", - "conditions": "Conditions represents the latest available observations of a build's current state.", + "conditions": "conditions represents the latest available observations of a build's current state.", } func (BuildStatus) SwaggerDoc() map[string]string { @@ -260,7 +260,7 @@ var map_BuildStrategy = map[string]string{ "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", "customStrategy": "customStrategy holds the parameters to the Custom build strategy", - "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", + "jenkinsPipelineStrategy": "jenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", } func (BuildStrategy) SwaggerDoc() map[string]string { @@ -271,10 +271,10 @@ var map_BuildTriggerCause = map[string]string{ "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", "message": "message is used to store a human readable message for why the build was triggered. 
E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", - "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "githubWebHook": "githubWebHook represents data for a GitHub webhook that fired a specific build.", "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", - "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", - "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", + "gitlabWebHook": "gitlabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "bitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", } func (BuildTriggerCause) SwaggerDoc() map[string]string { @@ -347,8 +347,8 @@ func (CommonSpec) SwaggerDoc() map[string]string { var map_CommonWebHookCause = map[string]string{ "": "CommonWebHookCause factors out the identical format of these webhook causes into struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", - "revision": "Revision is the git source revision information of the trigger.", - "secret": "Secret is the obfuscated webhook secret that triggered a build.", + "revision": "revision is the git source revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", } func (CommonWebHookCause) SwaggerDoc() map[string]string { @@ -422,7 +422,7 @@ var map_GenericWebHookEvent = map[string]string{ "type": "type is the type of source repository", "git": "git is the git information if the Type is BuildSourceGit", "env": "env contains additional environment variables you want to pass into a builder container. 
ValueFrom is not supported.", - "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "dockerStrategyOptions": "dockerStrategyOptions contains additional docker-strategy specific options for the build", } func (GenericWebHookEvent) SwaggerDoc() map[string]string { @@ -451,7 +451,7 @@ func (GitHubWebHookCause) SwaggerDoc() map[string]string { var map_GitInfo = map[string]string{ "": "GitInfo is the aggregated git information for a generic webhook post", - "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", + "refs": "refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", } func (GitInfo) SwaggerDoc() map[string]string { @@ -562,8 +562,8 @@ func (ImageStreamTagReference) SwaggerDoc() map[string]string { var map_JenkinsPipelineBuildStrategy = map[string]string{ "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", - "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", - "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "jenkinsfilePath": "jenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). 
If both JenkinsfilePath & Jenkinsfile are both not specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", "env": "env contains additional environment variables you want to pass into a build pipeline.", } @@ -594,7 +594,7 @@ func (SecretBuildSource) SwaggerDoc() map[string]string { var map_SecretLocalReference = map[string]string{ "": "SecretLocalReference contains information that points to the local secret being used", - "name": "Name is the name of the resource in the same namespace being referenced", + "name": "name is the name of the resource in the same namespace being referenced", } func (SecretLocalReference) SwaggerDoc() map[string]string { @@ -639,7 +639,7 @@ func (SourceControlUser) SwaggerDoc() map[string]string { var map_SourceRevision = map[string]string{ "": "SourceRevision is the revision or commit information from the source for the build", "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", - "git": "Git contains information about git-based build source", + "git": "git contains information about git-based build source", } func (SourceRevision) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto index 085b49b25e..328de7c5a2 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto @@ -40,12 +40,10 @@ message CloudPrivateIPConfig { optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the definition of the desired private IP request. - // +kubebuilder:validation:Required // +required optional CloudPrivateIPConfigSpec spec = 2; // status is the observed status of the desired private IP request. Read-only. 
- // +kubebuilder:validation:Optional // +optional optional CloudPrivateIPConfigStatus status = 3; } @@ -68,7 +66,6 @@ message CloudPrivateIPConfigList { // +k8s:openapi-gen=true message CloudPrivateIPConfigSpec { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional optional string node = 1; } @@ -77,12 +74,10 @@ message CloudPrivateIPConfigSpec { // +k8s:openapi-gen=true message CloudPrivateIPConfigStatus { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional optional string node = 1; // condition is the assignment condition of the private IP and its status - // +kubebuilder:validation:Required // +required repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go index 4c19e44c3f..de27f8eb60 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go @@ -33,11 +33,9 @@ type CloudPrivateIPConfig struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the definition of the desired private IP request. - // +kubebuilder:validation:Required // +required Spec CloudPrivateIPConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // status is the observed status of the desired private IP request. Read-only. 
- // +kubebuilder:validation:Optional // +optional Status CloudPrivateIPConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -46,7 +44,6 @@ type CloudPrivateIPConfig struct { // +k8s:openapi-gen=true type CloudPrivateIPConfigSpec struct { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional Node string `json:"node" protobuf:"bytes,1,opt,name=node"` } @@ -55,11 +52,9 @@ type CloudPrivateIPConfigSpec struct { // +k8s:openapi-gen=true type CloudPrivateIPConfigStatus struct { // node is the node name, as specified by the Kubernetes field: node.metadata.name - // +kubebuilder:validation:Optional // +optional Node string `json:"node" protobuf:"bytes,1,opt,name=node"` // condition is the assignment condition of the private IP and its status - // +kubebuilder:validation:Required // +required Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` } diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 61302592ea..c1024bf1ed 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -72,6 +72,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageDigestMirrorSetList{}, &ImageTagMirrorSet{}, &ImageTagMirrorSetList{}, + &ClusterMonitoring{}, + &ClusterMonitoringList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index d4d09e7fee..3e17ca0ccb 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -9,7 +9,7 @@ import ( // The namespace must be specified at the point of use. 
type ConfigMapFileReference struct { Name string `json:"name"` - // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + // key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. Key string `json:"key,omitempty"` } @@ -17,7 +17,6 @@ type ConfigMapFileReference struct { // The namespace must be specified at the point of use. type ConfigMapNameReference struct { // name is the metadata.name of the referenced config map - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -26,7 +25,6 @@ type ConfigMapNameReference struct { // The namespace must be specified at the point of use. type SecretNameReference struct { // name is the metadata.name of the referenced secret - // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -35,47 +33,47 @@ type SecretNameReference struct { type HTTPServingInfo struct { // ServingInfo is the HTTP serving information ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if // -1 there is no limit on requests. 
RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` } // ServingInfo holds information about serving web pages type ServingInfo struct { - // BindAddress is the ip:port to serve on + // bindAddress is the ip:port to serve on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` // CertInfo is the TLS cert info for serving secure traffic. // this is anonymous so that we can inline it for serialization CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates // +optional ClientCA string `json:"clientCA,omitempty"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + // namedCertificates is a list of certificates to use to secure requests to specific hostnames NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` - // MinTLSVersion is the minimum TLS version supported. + // minTLSVersion is the minimum TLS version supported. // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. + // cipherSuites contains an overridden list of ciphers for the server to support. 
// Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants CipherSuites []string `json:"cipherSuites,omitempty"` } // CertInfo relates a certificate with a private key type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile KeyFile string `json:"keyFile"` } // NamedCertificate specifies a certificate/key, and the names it should be served for type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure + // names is a list of DNS names this certificate should be used to secure // A name can be a normal DNS name, or can contain leading wildcard segments. Names []string `json:"names,omitempty"` // CertInfo is the TLS cert info for serving secure traffic @@ -121,24 +119,24 @@ type StringSource struct { // StringSourceSpec specifies a string value, or external location type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + // value specifies the cleartext value, or an encrypted value if keyFile is specified. Value string `json:"value"` - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. Env string `json:"env"` - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. File string `json:"file"` - // KeyFile references a file containing the key to use to decrypt the value. 
+ // keyFile references a file containing the key to use to decrypt the value. KeyFile string `json:"keyFile"` } // RemoteConnectionInfo holds information necessary for establishing a remote connection type RemoteConnectionInfo struct { - // URL is the remote URL to connect to + // url is the remote URL to connect to URL string `json:"url"` - // CA is the CA for verifying TLS connections + // ca is the CA for verifying TLS connections CA string `json:"ca"` // CertInfo is the TLS client cert information to present // this is anonymous so that we can inline it for serialization @@ -160,11 +158,11 @@ type AdmissionConfig struct { // AdmissionPluginConfig holds the necessary configuration options for admission plugins type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's + // location is the path to a configuration file that contains the plugin's // configuration Location string `json:"location"` - // Configuration is an embedded configuration object to be used as the plugin's + // configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. // +nullable // +kubebuilder:pruning:PreserveUnknownFields @@ -205,9 +203,9 @@ type AuditConfig struct { // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` - // PolicyFile is a path to the file that defines the audit policy configuration. + // policyFile is a path to the file that defines the audit policy configuration. PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used + // policyConfiguration is an embedded policy configuration object to be used // as the audit policy configuration. If present, it will be used instead of // the path to the policy file. 
// +nullable @@ -225,9 +223,9 @@ type AuditConfig struct { // EtcdConnectionInfo holds information necessary for connecting to an etcd server type EtcdConnectionInfo struct { - // URLs are the URLs for etcd + // urls are the URLs for etcd URLs []string `json:"urls,omitempty"` - // CA is a file containing trusted roots for the etcd server certificates + // ca is a file containing trusted roots for the etcd server certificates CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to etcd // this is anonymous so that we can inline it for serialization @@ -237,7 +235,7 @@ type EtcdConnectionInfo struct { type EtcdStorageConfig struct { EtcdConnectionInfo `json:",inline"` - // StoragePrefix is the path within etcd that the OpenShift resources will + // storagePrefix is the path within etcd that the OpenShift resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. StoragePrefix string `json:"storagePrefix"` @@ -287,7 +285,7 @@ type ClientConnectionOverrides struct { // GenericControllerConfig provides information to configure a controller type GenericControllerConfig struct { - // ServingInfo is the HTTP serving information for the controller's endpoints + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo HTTPServingInfo `json:"servingInfo"` // leaderElection provides information to elect a leader. Only override this if you have a specific need @@ -324,7 +322,6 @@ type RequiredHSTSPolicy struct { // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. 
// +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required DomainPatterns []string `json:"domainPatterns"` diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index d815556d28..75b647f745 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -27,7 +27,6 @@ type APIServer struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec APIServerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -129,7 +128,6 @@ type Audit struct { type AuditCustomRule struct { // group is a name of group a request user must be member of in order to this profile to apply. // - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Group string `json:"group"` @@ -146,7 +144,6 @@ type AuditCustomRule struct { // // If unset, the 'Default' profile is used as the default. // - // +kubebuilder:validation:Required // +required Profile AuditProfileType `json:"profile,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index f6f0c12a3b..65dffddb00 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -26,7 +26,6 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec AuthenticationSpec `json:"spec"` // status holds observed values from the cluster. 
They may not be overridden. @@ -81,7 +80,7 @@ type AuthenticationSpec struct { // +optional ServiceAccountIssuer string `json:"serviceAccountIssuer"` - // OIDCProviders are OIDC identity providers that can issue tokens + // oidcProviders are OIDC identity providers that can issue tokens // for this cluster // Can only be set if "Type" is set to "OIDC". // @@ -110,7 +109,7 @@ type AuthenticationStatus struct { // The namespace for this config map is openshift-config-managed. IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` - // OIDCClients is where participating operators place the current OIDC client status + // oidcClients is where participating operators place the current OIDC client status // for OIDC clients that can be customized by the cluster-admin. // // +listType=map @@ -181,7 +180,6 @@ type WebhookTokenAuthenticator struct { // The key "kubeConfig" is used to locate the data. // If the secret or expected key is not found, the webhook is not honored. // If the specified kube config data is not valid, the webhook is not honored. 
- // +kubebuilder:validation:Required // +required KubeConfig SecretNameReference `json:"kubeConfig"` } @@ -195,19 +193,17 @@ const ( ) type OIDCProvider struct { - // Name of the OIDC provider + // name of the OIDC provider // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Name string `json:"name"` - // Issuer describes atributes of the OIDC token issuer + // issuer describes atributes of the OIDC token issuer // - // +kubebuilder:validation:Required // +required Issuer TokenIssuer `json:"issuer"` - // OIDCClients contains configuration for the platform's clients that + // oidcClients contains configuration for the platform's clients that // need to request tokens from the issuer // // +listType=map @@ -216,11 +212,11 @@ type OIDCProvider struct { // +kubebuilder:validation:MaxItems=20 OIDCClients []OIDCClientConfig `json:"oidcClients"` - // ClaimMappings describes rules on how to transform information from an + // claimMappings describes rules on how to transform information from an // ID token into a cluster identity ClaimMappings TokenClaimMappings `json:"claimMappings"` - // ClaimValidationRules are rules that are applied to validate token claims to authenticate users. + // claimValidationRules are rules that are applied to validate token claims to authenticate users. // // +listType=atomic ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` @@ -234,17 +230,15 @@ type TokenIssuer struct { // Must use the https:// scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required // +required URL string `json:"issuerURL"` - // Audiences is an array of audiences that the token was issued for. + // audiences is an array of audiences that the token was issued for. // Valid tokens must include at least one of these values in their // "aud" claim. // Must be set to exactly one value. 
// // +listType=set - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=10 // +required @@ -258,94 +252,88 @@ type TokenIssuer struct { } type TokenClaimMappings struct { - // Username is a name of the claim that should be used to construct + // username is a name of the claim that should be used to construct // usernames for the cluster identity. // // Default value: "sub" Username UsernameClaimMapping `json:"username,omitempty"` - // Groups is a name of the claim that should be used to construct + // groups is a name of the claim that should be used to construct // groups for the cluster identity. // The referenced claim must use array of strings values. Groups PrefixedClaimMapping `json:"groups,omitempty"` } type TokenClaimMapping struct { - // Claim is a JWT token claim to be used in the mapping + // claim is a JWT token claim to be used in the mapping // - // +kubebuilder:validation:Required // +required Claim string `json:"claim"` } type OIDCClientConfig struct { - // ComponentName is the name of the component that is supposed to consume this + // componentName is the name of the component that is supposed to consume this // client configuration // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that is supposed to consume this + // componentNamespace is the namespace of the component that is supposed to consume this // client configuration // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is the identifier of the OIDC client from the OIDC provider // // +kubebuilder:validation:MinLength=1 - // 
+kubebuilder:validation:Required // +required ClientID string `json:"clientID"` - // ClientSecret refers to a secret in the `openshift-config` namespace that + // clientSecret refers to a secret in the `openshift-config` namespace that // contains the client secret in the `clientSecret` key of the `.data` field ClientSecret SecretNameReference `json:"clientSecret"` - // ExtraScopes is an optional set of scopes to request tokens with. + // extraScopes is an optional set of scopes to request tokens with. // // +listType=set ExtraScopes []string `json:"extraScopes"` } type OIDCClientStatus struct { - // ComponentName is the name of the component that will consume a client configuration. + // componentName is the name of the component that will consume a client configuration. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required ComponentName string `json:"componentName"` - // ComponentNamespace is the namespace of the component that will consume a client configuration. + // componentNamespace is the namespace of the component that will consume a client configuration. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required ComponentNamespace string `json:"componentNamespace"` - // CurrentOIDCClients is a list of clients that the component is currently using. + // currentOIDCClients is a list of clients that the component is currently using. // // +listType=map // +listMapKey=issuerURL // +listMapKey=clientID CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // ConsumingUsers is a slice of ServiceAccounts that need to have read + // consumingUsers is a slice of ServiceAccounts that need to have read // permission on the `clientSecret` secret. 
// // +kubebuilder:validation:MaxItems=5 // +listType=set ConsumingUsers []ConsumingUser `json:"consumingUsers"` - // Conditions are used to communicate the state of the `oidcClients` entry. + // conditions are used to communicate the state of the `oidcClients` entry. // // Supported conditions include Available, Degraded and Progressing. // @@ -362,7 +350,6 @@ type OIDCClientReference struct { // OIDCName refers to the `name` of the provider from `oidcProviders` // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required OIDCProviderName string `json:"oidcProviderName"` @@ -370,14 +357,12 @@ type OIDCClientReference struct { // Must use the https:// scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` - // +kubebuilder:validation:Required // +required IssuerURL string `json:"issuerURL"` - // ClientID is the identifier of the OIDC client from the OIDC provider + // clientID is the identifier of the OIDC client from the OIDC provider // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required ClientID string `json:"clientID"` } @@ -386,7 +371,7 @@ type OIDCClientReference struct { type UsernameClaimMapping struct { TokenClaimMapping `json:",inline"` - // PrefixPolicy specifies how a prefix should apply. + // prefixPolicy specifies how a prefix should apply. // // By default, claims other than `email` will be prefixed with the issuer URL to // prevent naming clashes with other plugins. @@ -427,7 +412,6 @@ var ( ) type UsernamePrefix struct { - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required PrefixString string `json:"prefixString"` @@ -436,7 +420,7 @@ type UsernamePrefix struct { type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` - // Prefix is a string to prefix the value from the token in the result of the + // prefix is a string to prefix the value from the token in the result of the // claim mapping. 
// // By default, no prefixing occurs. @@ -454,30 +438,28 @@ const ( ) type TokenClaimValidationRule struct { - // Type sets the type of the validation rule + // type sets the type of the validation rule // // +kubebuilder:validation:Enum={"RequiredClaim"} // +kubebuilder:default="RequiredClaim" Type TokenValidationRuleType `json:"type"` - // RequiredClaim allows configuring a required claim name and its expected + // requiredClaim allows configuring a required claim name and its expected // value RequiredClaim *TokenRequiredClaim `json:"requiredClaim"` } type TokenRequiredClaim struct { - // Claim is a name of a required claim. Only claims with string values are + // claim is a name of a required claim. Only claims with string values are // supported. // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required Claim string `json:"claim"` - // RequiredValue is the required value for the claim. + // requiredValue is the required value for the claim. // // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:Required // +required RequiredValue string `json:"requiredValue"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index dad47666db..dcde1fc5b8 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -29,14 +29,13 @@ type Build struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the build controller configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the build controller configuration // +required Spec BuildSpec `json:"spec"` } type BuildSpec struct { - // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // additionalTrustedCA is a 
reference to a ConfigMap containing additional CAs that // should be trusted for image pushes and pulls during builds. // The namespace for this config map is openshift-config. // @@ -45,16 +44,16 @@ type BuildSpec struct { // // +optional AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` - // BuildDefaults controls the default information for Builds + // buildDefaults controls the default information for Builds // +optional BuildDefaults BuildDefaults `json:"buildDefaults"` - // BuildOverrides controls override settings for builds + // buildOverrides controls override settings for builds // +optional BuildOverrides BuildOverrides `json:"buildOverrides"` } type BuildDefaults struct { - // DefaultProxy contains the default proxy settings for all build operations, including image pull/push + // defaultProxy contains the default proxy settings for all build operations, including image pull/push // and source download. // // Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables @@ -62,55 +61,55 @@ type BuildDefaults struct { // +optional DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` - // GitProxy contains the proxy settings for git operations only. If set, this will override + // gitProxy contains the proxy settings for git operations only. If set, this will override // any Proxy settings for all git commands, such as git clone. // // Values that are not set here will be inherited from DefaultProxy. // +optional GitProxy *ProxySpec `json:"gitProxy,omitempty"` - // Env is a set of default environment variables that will be applied to the + // env is a set of default environment variables that will be applied to the // build if the specified variables do not exist on the build // +optional Env []corev1.EnvVar `json:"env,omitempty"` - // ImageLabels is a list of docker labels that are applied to the resulting image. 
+ // imageLabels is a list of docker labels that are applied to the resulting image. // User can override a default label by providing a label with the same name in their // Build/BuildConfig. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // Resources defines resource requirements to execute the build. + // resources defines resource requirements to execute the build. // +optional Resources corev1.ResourceRequirements `json:"resources"` } type ImageLabel struct { - // Name defines the name of the label. It must have non-zero length. + // name defines the name of the label. It must have non-zero length. Name string `json:"name"` - // Value defines the literal value of the label. + // value defines the literal value of the label. // +optional Value string `json:"value,omitempty"` } type BuildOverrides struct { - // ImageLabels is a list of docker labels that are applied to the resulting image. + // imageLabels is a list of docker labels that are applied to the resulting image. // If user provided a label in their Build/BuildConfig with the same name as one in this // list, the user's label will be overwritten. // +optional ImageLabels []ImageLabel `json:"imageLabels,omitempty"` - // NodeSelector is a selector which must be true for the build pod to fit on a node + // nodeSelector is a selector which must be true for the build pod to fit on a node // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // Tolerations is a list of Tolerations that will override any existing + // tolerations is a list of Tolerations that will override any existing // tolerations set on a build pod. // +optional Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - // ForcePull overrides, if set, the equivalent value in the builds, + // forcePull overrides, if set, the equivalent value in the builds, // i.e. 
false disables force pull for all builds, // true enables force pull for all builds, // independently of what each build specifies itself diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1/types_cluster_monitoring.go new file mode 100644 index 0000000000..0770986581 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_monitoring.go @@ -0,0 +1,103 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:internal +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1929 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clustermonitoring,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations="description=Cluster Monitoring Operators configuration API" +// +openshift:enable:FeatureGate=ClusterMonitoringConfig +// ClusterMonitoring is the Schema for the Cluster Monitoring Operators API +type ClusterMonitoring struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user configuration for the Cluster Monitoring Operator + // +required + Spec ClusterMonitoringSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ClusterMonitoringStatus `json:"status,omitempty"` +} + +// ClusterMonitoringStatus defines the observed state of ClusterMonitoring +type ClusterMonitoringStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:internal +type ClusterMonitoringList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of ClusterMonitoring + // +optional + Items []ClusterMonitoring `json:"items"` +} + +// ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator +// +required +type ClusterMonitoringSpec struct { + // userDefined sets the deployment mode for user-defined monitoring in addition to the default platform monitoring. + // +required + UserDefined UserDefinedMonitoring `json:"userDefined"` +} + +// UserDefinedMonitoring config for user-defined projects. +// +required +type UserDefinedMonitoring struct { + // mode defines the different configurations of UserDefinedMonitoring + // Valid values are Disabled and NamespaceIsolated + // Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + // NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + // +kubebuilder:validation:Enum:="Disabled";"NamespaceIsolated" + // +required + Mode UserDefinedMode `json:"mode"` +} + +// UserDefinedMode specifies mode for UserDefined Monitoring +// +enum +type UserDefinedMode string + +const ( + // UserDefinedDisabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + UserDefinedDisabled UserDefinedMode = "Disabled" + // UserDefinedNamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. 
This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + UserDefinedNamespaceIsolated UserDefinedMode = "NamespaceIsolated" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index 7951762ccd..4a6823640d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -34,7 +34,6 @@ type ClusterOperator struct { metav1.ObjectMeta `json:"metadata"` // spec holds configuration that could apply to any operator. - // +kubebuilder:validation:Required // +required Spec ClusterOperatorSpec `json:"spec"` @@ -80,14 +79,12 @@ type ClusterOperatorStatus struct { type OperandVersion struct { // name is the name of the particular operand this version is for. It usually matches container images, not operators. - // +kubebuilder:validation:Required // +required Name string `json:"name"` // version indicates which version of a particular operand is currently being managed. It must always match the Available // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout // 1.1.0 - // +kubebuilder:validation:Required // +required Version string `json:"version"` } @@ -95,18 +92,15 @@ type OperandVersion struct { // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // group of the referent. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // resource of the referent. - // +kubebuilder:validation:Required // +required Resource string `json:"resource"` // namespace of the referent. // +optional Namespace string `json:"namespace,omitempty"` // name of the referent. 
- // +kubebuilder:validation:Required // +required Name string `json:"name"` } @@ -128,17 +122,14 @@ const ( // +k8s:deepcopy-gen=true type ClusterOperatorStatusCondition struct { // type specifies the aspect reported by this condition. - // +kubebuilder:validation:Required // +required Type ClusterStatusConditionType `json:"type"` // status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required // +required Status ConditionStatus `json:"status"` // lastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required // +required LastTransitionTime metav1.Time `json:"lastTransitionTime"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 61386a72e4..8994ca97cd 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -34,7 +34,6 @@ type ClusterVersion struct { // spec is the desired state of the cluster version - the operator will work // to ensure that the desired version is applied to the cluster. - // +kubebuilder:validation:Required // +required Spec ClusterVersionSpec `json:"spec"` // status contains information about the available updates and any in-progress @@ -51,7 +50,6 @@ type ClusterVersionSpec struct { // clusterID uniquely identifies this cluster. This is expected to be // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in // hexadecimal values). This is a required field. - // +kubebuilder:validation:Required // +required ClusterID ClusterID `json:"clusterID"` @@ -138,7 +136,6 @@ type ClusterVersionStatus struct { // desired is the version that the cluster is reconciling towards. // If the cluster is not yet fully initialized desired will be set // with the information available, which may be an image or a tag. 
- // +kubebuilder:validation:Required // +required Desired Release `json:"desired"` @@ -156,14 +153,12 @@ type ClusterVersionStatus struct { // observedGeneration reports which version of the spec is being synced. // If this value is not equal to metadata.generation, then the desired // and conditions fields may represent a previous version. - // +kubebuilder:validation:Required // +required ObservedGeneration int64 `json:"observedGeneration"` // versionHash is a fingerprint of the content that the cluster will be // updated with. It is used by the operator to avoid unnecessary work // and is for internal use only. - // +kubebuilder:validation:Required // +required VersionHash string `json:"versionHash"` @@ -190,7 +185,6 @@ type ClusterVersionStatus struct { // may be empty if no updates are recommended, if the update service // is unavailable, or if an invalid channel has been specified. // +nullable - // +kubebuilder:validation:Required // +listType=atomic // +required AvailableUpdates []Release `json:"availableUpdates"` @@ -226,12 +220,10 @@ type UpdateHistory struct { // indicates the update is not fully applied, while the Completed state // indicates the update was successfully rolled out at least once (all // parts of the update successfully applied). - // +kubebuilder:validation:Required // +required State UpdateState `json:"state"` // startedTime is the time at which the update was started. - // +kubebuilder:validation:Required // +required StartedTime metav1.Time `json:"startedTime"` @@ -239,7 +231,6 @@ type UpdateHistory struct { // that is currently being applied will have a null completion time. // Completion time will always be set for entries that are not the current // update (usually to the started time of the next update). - // +kubebuilder:validation:Required // +required // +nullable CompletionTime *metav1.Time `json:"completionTime"` @@ -253,7 +244,6 @@ type UpdateHistory struct { // image is a container image location that contains the update. 
This value // is always populated. - // +kubebuilder:validation:Required // +required Image string `json:"image"` @@ -261,7 +251,6 @@ type UpdateHistory struct { // before it was installed. If this is false the cluster may not be trusted. // Verified does not cover upgradeable checks that depend on the cluster // state at the time when the update target was accepted. - // +kubebuilder:validation:Required // +required Verified bool `json:"verified"` @@ -288,7 +277,7 @@ const ( ) // ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig;ImageRegistry;OperatorLifecycleManager;CloudCredential;Ingress;CloudControllerManager;OperatorLifecycleManagerV1 type ClusterVersionCapability string const ( @@ -379,10 +368,14 @@ const ( // allows to distribute Docker images ClusterVersionCapabilityImageRegistry ClusterVersionCapability = "ImageRegistry" - // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager + // ClusterVersionCapabilityOperatorLifecycleManager manages the Operator Lifecycle Manager (legacy) // which itself manages the lifecycle of operators ClusterVersionCapabilityOperatorLifecycleManager ClusterVersionCapability = "OperatorLifecycleManager" + // ClusterVersionCapabilityOperatorLifecycleManagerV1 manages the Operator Lifecycle Manager (v1) + // which itself manages the lifecycle of operators + ClusterVersionCapabilityOperatorLifecycleManagerV1 ClusterVersionCapability = "OperatorLifecycleManagerV1" + // ClusterVersionCapabilityCloudCredential manages credentials for cloud providers // in openshift cluster 
ClusterVersionCapabilityCloudCredential ClusterVersionCapability = "CloudCredential" @@ -422,6 +415,7 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -600,6 +594,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -618,6 +613,7 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityDeploymentConfig, ClusterVersionCapabilityImageRegistry, ClusterVersionCapabilityOperatorLifecycleManager, + ClusterVersionCapabilityOperatorLifecycleManagerV1, ClusterVersionCapabilityCloudCredential, ClusterVersionCapabilityIngress, ClusterVersionCapabilityCloudControllerManager, @@ -664,28 +660,23 @@ type ClusterVersionCapabilitiesStatus struct { // +k8s:deepcopy-gen=true type ComponentOverride struct { // kind indentifies which object to override. - // +kubebuilder:validation:Required // +required Kind string `json:"kind"` // group identifies the API group that the kind is in. - // +kubebuilder:validation:Required // +required Group string `json:"group"` // namespace is the component's namespace. If the resource is cluster // scoped, the namespace should be empty. - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` // name is the component's name. 
- // +kubebuilder:validation:Required // +required Name string `json:"name"` // unmanaged controls if cluster version operator should stop managing the // resources in this cluster. // Default: false - // +kubebuilder:validation:Required // +required Unmanaged bool `json:"unmanaged"` } @@ -694,8 +685,8 @@ type ComponentOverride struct { type URL string // Update represents an administrator update request. -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == '' || self.image == '') : true",message="cannot set both Architecture and Image" -// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != '' ? self.version != '' : true",message="Version must be set if Architecture is set" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && has(self.image) ? (self.architecture == \"\" || self.image == \"\") : true",message="cannot set both Architecture and Image" +// +kubebuilder:validation:XValidation:rule="has(self.architecture) && self.architecture != \"\" ? self.version != \"\" : true",message="Version must be set if Architecture is set" // +k8s:deepcopy-gen=true type Update struct { // architecture is an optional field that indicates the desired @@ -739,6 +730,16 @@ type Update struct { // Release represents an OpenShift release image and associated metadata. // +k8s:deepcopy-gen=true type Release struct { + // architecture is an optional field that indicates the + // value of the cluster architecture. In this context cluster + // architecture means either a single architecture or a multi + // architecture. + // Valid values are 'Multi' and empty. + // + // +openshift:enable:FeatureGate=ImageStreamImportMode + // +optional + Architecture ClusterVersionArchitecture `json:"architecture,omitempty"` + // version is a semantic version identifying the update version. When this // field is part of spec, version is optional if image is specified. 
// +required @@ -776,7 +777,6 @@ const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" // may not be recommended for the current cluster. type ConditionalUpdate struct { // release is the target of the update. - // +kubebuilder:validation:Required // +required Release Release `json:"release"` @@ -785,7 +785,6 @@ type ConditionalUpdate struct { // operator will evaluate all entries, and only recommend the // update if there is at least one entry and all entries // recommend the update. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +patchMergeKey=name // +patchStrategy=merge @@ -809,7 +808,6 @@ type ConditionalUpdate struct { // +k8s:deepcopy-gen=true type ConditionalUpdateRisk struct { // url contains information about this risk. - // +kubebuilder:validation:Required // +kubebuilder:validation:Format=uri // +kubebuilder:validation:MinLength=1 // +required @@ -818,7 +816,6 @@ type ConditionalUpdateRisk struct { // name is the CamelCase reason for not recommending a // conditional update, in the event that matchingRules match the // cluster state. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Name string `json:"name"` @@ -828,7 +825,6 @@ type ConditionalUpdateRisk struct { // state. This is only to be consumed by humans. It may // contain Line Feed characters (U+000A), which should be // rendered as new lines. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Message string `json:"message"` @@ -839,7 +835,6 @@ type ConditionalUpdateRisk struct { // operator will walk the slice in order, and stop after the // first it can successfully evaluate. If no condition can be // successfully evaluated, the update will not be recommended. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +listType=atomic // +required @@ -854,24 +849,22 @@ type ConditionalUpdateRisk struct { type ClusterCondition struct { // type represents the cluster-condition type. This defines // the members and semantics of any additional properties. - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum={"Always","PromQL"} // +required Type string `json:"type"` - // promQL represents a cluster condition based on PromQL. + // promql represents a cluster condition based on PromQL. // +optional PromQL *PromQLClusterCondition `json:"promql,omitempty"` } // PromQLClusterCondition represents a cluster condition based on PromQL. type PromQLClusterCondition struct { - // PromQL is a PromQL query classifying clusters. This query + // promql is a PromQL query classifying clusters. This query // query should return a 1 in the match case and a 0 in the // does-not-match case. Queries which return no time // series, or which return values besides 0 or 1, are // evaluation failures. - // +kubebuilder:validation:Required // +required PromQL string `json:"promql"` } @@ -900,7 +893,7 @@ type SignatureStore struct { // // +kubebuilder:validation:Type=string // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" - // +kubebuilder:validation:Required + // +required URL string `json:"url"` // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index e8f197b344..0ccc4a8f85 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -28,7 +28,6 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ConsoleSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 5daa5d78d2..06eb75ccf7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -24,7 +24,6 @@ type DNS struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec DNSSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -121,7 +120,7 @@ type DNSPlatformSpec struct { // and must handle unrecognized platforms with best-effort defaults. 
// // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" Type PlatformType `json:"type"` diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 88d94ac527..81bc14f2c7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -26,7 +26,6 @@ type FeatureGate struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required // +kubebuilder:validation:XValidation:rule="has(oldSelf.featureSet) ? has(self.featureSet) : true",message=".spec.featureSet cannot be removed" Spec FeatureGateSpec `json:"spec"` @@ -117,7 +116,6 @@ type FeatureGateStatus struct { type FeatureGateDetails struct { // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. - // +kubebuilder:validation:Required // +required Version string `json:"version"` // enabled is a list of all feature gates that are enabled in the cluster for the named version. @@ -130,7 +128,7 @@ type FeatureGateDetails struct { type FeatureGateAttributes struct { // name is the name of the FeatureGate. - // +kubebuilder:validation:Required + // +required Name FeatureGateName `json:"name"` // possible (probable?) 
future additions include diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index d3c694a56f..3db935c7fe 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -29,7 +29,6 @@ type Image struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index 74df4027f9..0bd0d77705 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -25,7 +25,6 @@ type ImageContentPolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageContentPolicySpec `json:"spec"` } @@ -76,7 +75,6 @@ type ImageContentPolicyList struct { type RepositoryDigestMirrors struct { // source is the repository that users refer to, e.g. in image pull specifications. // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` Source string `json:"source"` // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index 43d748c0c3..df2258d12f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -25,7 +25,6 @@ type ImageDigestMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageDigestMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. @@ -110,7 +109,6 @@ type ImageDigestMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index ca8d35515e..b7e1a6a873 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -25,7 +25,6 @@ type ImageTagMirrorSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageTagMirrorSetSpec `json:"spec"` // status contains the observed state of the resource. @@ -95,7 +94,6 @@ type ImageTagMirrors struct { // for more information about the format, see the document about the location field: // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\*(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$|^((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?)(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$` Source string `json:"source"` // mirrors is zero or more locations that may also contain the same images. No mirror will be configured if not specified. diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 0daa62d309..0293603d78 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -27,7 +27,6 @@ type Infrastructure struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec InfrastructureSpec `json:"spec"` // status holds observed values from the cluster. 
They may not be overridden. @@ -99,7 +98,8 @@ type InfrastructureStatus struct { // The 'External' mode indicates that the control plane is hosted externally to the cluster and that // its components are not visible within the cluster. // +kubebuilder:default=HighlyAvailable - // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -136,6 +136,9 @@ const ( // "HighlyAvailable" is for operators to configure high-availability as much as possible. HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + // "HighlyAvailableArbiter" is for operators to configure for an arbiter HA deployment. + HighlyAvailableArbiterMode TopologyMode = "HighlyAvailableArbiter" + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. SingleReplicaTopologyMode TopologyMode = "SingleReplica" @@ -257,7 +260,7 @@ const ( // ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. type ExternalPlatformSpec struct { - // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + // platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. // This field is solely for informational and reporting purposes and is not expected to be used for decision-making. 
// +kubebuilder:default:="Unknown" // +default="Unknown" @@ -283,55 +286,55 @@ type PlatformSpec struct { // +unionDiscriminator Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. // +optional AWS *AWSPlatformSpec `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. // +optional Azure *AzurePlatformSpec `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformSpec `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. // +optional BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. // +optional Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. // +optional VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. 
// +optional IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. // +optional AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformSpec `json:"nutanix,omitempty"` @@ -401,59 +404,59 @@ type PlatformStatus struct { // Currently this value cannot be changed once set. Type PlatformType `json:"type"` - // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // aws contains settings specific to the Amazon Web Services infrastructure provider. // +optional AWS *AWSPlatformStatus `json:"aws,omitempty"` - // Azure contains settings specific to the Azure infrastructure provider. + // azure contains settings specific to the Azure infrastructure provider. 
// +optional Azure *AzurePlatformStatus `json:"azure,omitempty"` - // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // gcp contains settings specific to the Google Cloud Platform infrastructure provider. // +optional GCP *GCPPlatformStatus `json:"gcp,omitempty"` - // BareMetal contains settings specific to the BareMetal platform. + // baremetal contains settings specific to the BareMetal platform. // +optional BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` - // OpenStack contains settings specific to the OpenStack infrastructure provider. + // openstack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` - // Ovirt contains settings specific to the oVirt infrastructure provider. + // ovirt contains settings specific to the oVirt infrastructure provider. // +optional Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` - // VSphere contains settings specific to the VSphere infrastructure provider. + // vsphere contains settings specific to the VSphere infrastructure provider. // +optional VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` - // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // ibmcloud contains settings specific to the IBMCloud infrastructure provider. // +optional IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` - // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // kubevirt contains settings specific to the kubevirt infrastructure provider. // +optional Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` - // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // equinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
// +optional EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` - // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. // +optional PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` - // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. // +optional AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` - // Nutanix contains settings specific to the Nutanix infrastructure provider. + // nutanix contains settings specific to the Nutanix infrastructure provider. // +optional Nutanix *NutanixPlatformStatus `json:"nutanix,omitempty"` - // External contains settings specific to the generic External infrastructure provider. + // external contains settings specific to the generic External infrastructure provider. // +optional External *ExternalPlatformStatus `json:"external,omitempty"` } @@ -492,7 +495,7 @@ type AWSPlatformStatus struct { // region holds the default AWS region for new AWS resources created by the cluster. Region string `json:"region"` - // ServiceEndpoints list contains custom endpoints which will override default + // serviceEndpoints list contains custom endpoints which will override default // service endpoint of AWS Services. // There must be only one ServiceEndpoint for a service. // +listType=atomic @@ -526,7 +529,6 @@ type AWSPlatformStatus struct { // AWSResourceTag is a tag to apply to AWS resources created for the cluster. 
type AWSResourceTag struct { // key is the key of the tag - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` @@ -535,7 +537,6 @@ type AWSResourceTag struct { // value is the value of the tag. // Some AWS service do not support empty values. Since tags are added to resources in many services, the // length of the tag value must meet the requirements of all services. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` @@ -584,14 +585,14 @@ type AzureResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key // must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric // characters and the following special characters `_ . -`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$` Key string `json:"key"` // value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value // must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$` @@ -683,7 +684,7 @@ type GCPResourceLabel struct { // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` // and `openshift-io`. 
// +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]{0,62}$` @@ -691,7 +692,7 @@ type GCPResourceLabel struct { // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[0-9a-z_-]{1,63}$` @@ -707,7 +708,7 @@ type GCPResourceTag struct { // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, // and hyphens, and must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` @@ -716,7 +717,7 @@ type GCPResourceTag struct { // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `._-`. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` @@ -725,7 +726,7 @@ type GCPResourceTag struct { // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` @@ -1162,12 +1163,34 @@ type VSpherePlatformLoadBalancer struct { Type PlatformLoadBalancerType `json:"type,omitempty"` } -// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and -// the vCenter topology of that failure domain. +// The VSphereFailureDomainZoneType is a string representation of a failure domain +// zone type. There are two supportable types HostGroup and ComputeCluster +// +enum +type VSphereFailureDomainZoneType string + +// The VSphereFailureDomainRegionType is a string representation of a failure domain +// region type. There are two supportable types ComputeCluster and Datacenter +// +enum +type VSphereFailureDomainRegionType string + +const ( + // HostGroupFailureDomainZone is a failure domain zone for a vCenter vm-host group. + HostGroupFailureDomainZone VSphereFailureDomainZoneType = "HostGroup" + // ComputeClusterFailureDomainZone is a failure domain zone for a vCenter compute cluster. + ComputeClusterFailureDomainZone VSphereFailureDomainZoneType = "ComputeCluster" + // DatacenterFailureDomainRegion is a failure domain region for a vCenter datacenter. 
+ DatacenterFailureDomainRegion VSphereFailureDomainRegionType = "Datacenter" + // ComputeClusterFailureDomainRegion is a failure domain region for a vCenter compute cluster. + ComputeClusterFailureDomainRegion VSphereFailureDomainRegionType = "ComputeCluster" +) + +// VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'HostGroup' ? has(self.regionAffinity) && self.regionAffinity.type == 'ComputeCluster' : true",message="when zoneAffinity type is HostGroup, regionAffinity type must be ComputeCluster" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=VSphereHostVMGroupZonal,rule="has(self.zoneAffinity) && self.zoneAffinity.type == 'ComputeCluster' ? has(self.regionAffinity) && self.regionAffinity.type == 'Datacenter' : true",message="when zoneAffinity type is ComputeCluster, regionAffinity type must be Datacenter" type VSpherePlatformFailureDomainSpec struct { // name defines the arbitrary but unique name // of a failure domain. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1177,7 +1200,7 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-region. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Region string `json:"region"` // zone defines the name of a zone tag that will @@ -1185,19 +1208,34 @@ type VSpherePlatformFailureDomainSpec struct { // category in vCenter must be named openshift-zone. 
// +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=80 - // +kubebuilder:validation:Required + // +required Zone string `json:"zone"` + // regionAffinity holds the type of region, Datacenter or ComputeCluster. + // When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + // When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + RegionAffinity *VSphereFailureDomainRegionAffinity `json:"regionAffinity,omitempty"` + + // zoneAffinity holds the type of the zone and the hostGroup which + // vmGroup and the hostGroup names in vCenter corresponds to + // a vm-host group of type Virtual Machine and Host respectively. Is also + // contains the vmHostRule which is an affinity vm-host rule in vCenter. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + ZoneAffinity *VSphereFailureDomainZoneAffinity `json:"zoneAffinity,omitempty"` + // server is the fully-qualified domain name or the IP address of the vCenter server. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname Server string `json:"server"` - // Topology describes a given failure domain using vSphere constructs - // +kubebuilder:validation:Required + // topology describes a given failure domain using vSphere constructs + // +required Topology VSpherePlatformTopology `json:"topology"` } @@ -1206,7 +1244,7 @@ type VSpherePlatformFailureDomainSpec struct { type VSpherePlatformTopology struct { // datacenter is the name of vCenter datacenter in which virtual machines will be located. // The maximum length of the datacenter name is 80 characters. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=80 Datacenter string `json:"datacenter"` @@ -1214,7 +1252,7 @@ type VSpherePlatformTopology struct { // in which virtual machine will be located. // The absolute path is of the form //host/. // The maximum length of the path is 2048 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/host/.*?` ComputeCluster string `json:"computeCluster"` @@ -1227,7 +1265,7 @@ type VSpherePlatformTopology struct { // `govc ls 'network/*'` // Networks should be in the form of an absolute path: // //network/. - // +kubebuilder:validation:Required + // +required // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiNetworks,maxItems=10 // +kubebuilder:validation:MinItems=1 @@ -1238,7 +1276,7 @@ type VSpherePlatformTopology struct { // virtual machine is located. // The absolute path is of the form //datastore/ // The maximum length of the path is 2048 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/datastore/.*?` Datastore string `json:"datastore"` @@ -1277,12 +1315,80 @@ type VSpherePlatformTopology struct { Template string `json:"template,omitempty"` } +// VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) +// and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. +// This configuration within vCenter creates the required association between a failure domain, virtual machines +// and ESXi hosts to create a vm-host based zone. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'HostGroup' ? 
has(self.hostGroup) : !has(self.hostGroup)",message="hostGroup is required when type is HostGroup, and forbidden otherwise" +// +union +type VSphereFailureDomainZoneAffinity struct { + // type determines the vSphere object type for a zone within this failure domain. + // Available types are ComputeCluster and HostGroup. + // When set to ComputeCluster, this means the vCenter cluster defined is the zone. + // When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + // this means the zone is defined by the grouping of those fields. + // +kubebuilder:validation:Enum:=HostGroup;ComputeCluster + // +required + // +unionDiscriminator + Type VSphereFailureDomainZoneType `json:"type"` + + // hostGroup holds the vmGroup and the hostGroup names in vCenter + // corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + // contains the vmHostRule which is an affinity vm-host rule in vCenter. + // +unionMember + // +optional + HostGroup *VSphereFailureDomainHostGroup `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the +// VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster. +// +union +type VSphereFailureDomainRegionAffinity struct { + // type determines the vSphere object type for a region within this failure domain. + // Available types are Datacenter and ComputeCluster. + // When set to Datacenter, this means the vCenter Datacenter defined is the region. + // When set to ComputeCluster, this means the vCenter cluster defined is the region. + // +kubebuilder:validation:Enum:=ComputeCluster;Datacenter + // +required + // +unionDiscriminator + Type VSphereFailureDomainRegionType `json:"type"` +} + +// VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter +// corresponds to a vm-host group of type Virtual Machine and Host respectively. 
Is also +// contains the vmHostRule which is an affinity vm-host rule in vCenter. +type VSphereFailureDomainHostGroup struct { + // vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + // vmGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMGroup string `json:"vmGroup"` + + // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + // hostGroup is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + HostGroup string `json:"hostGroup"` + + // vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + // vmHostRule is limited to 80 characters. + // This field is required when the VSphereFailureDomain ZoneType is HostGroup + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + // +required + VMHostRule string `json:"vmHostRule"` +} + // VSpherePlatformVCenterSpec stores the vCenter connection fields. // This is used by the vSphere CCM. type VSpherePlatformVCenterSpec struct { // server is the fully-qualified domain name or the IP address of the vCenter server. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=255 // --- // + Validation is applied via a patch, we validate the format as either ipv4, ipv6 or hostname @@ -1303,7 +1409,7 @@ type VSpherePlatformVCenterSpec struct { // be used by the Cloud Controller Manager. // Each datacenter listed here should be used within // a topology. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +listType=set Datacenters []string `json:"datacenters"` @@ -1504,14 +1610,14 @@ type IBMCloudServiceEndpoint struct { // Whereas the IBM Cloud Private VPC service for US South (Dallas) could be configured // with the service `name` of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com` // - // +kubebuilder:validation:Required + // +required Name IBMCloudServiceName `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. // This must be provided and cannot be empty. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:XValidation:rule="isURL(self)",message="url must be a valid absolute URL" URL string `json:"url"` @@ -1523,20 +1629,20 @@ type IBMCloudPlatformSpec struct{} // IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. type IBMCloudPlatformStatus struct { - // Location is where the cluster has been deployed + // location is where the cluster has been deployed Location string `json:"location,omitempty"` - // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + // resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
ResourceGroupName string `json:"resourceGroupName,omitempty"` - // ProviderType indicates the type of cluster that was created + // providerType indicates the type of cluster that was created ProviderType IBMCloudProviderType `json:"providerType,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` @@ -1592,15 +1698,15 @@ type PowerVSServiceEndpoint struct { // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud // - // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + // +required + // +kubebuilder:validation:Enum=CIS;COS;COSConfig;DNSServices;GlobalCatalog;GlobalSearch;GlobalTagging;HyperProtect;IAM;KeyProtect;Power;ResourceController;ResourceManager;VPC Name string `json:"name"` // url is fully qualified URI with scheme https, that overrides the default generated // endpoint for a client. // This must be provided and cannot be empty. 
// - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=uri // +kubebuilder:validation:Pattern=`^https://` @@ -1647,11 +1753,11 @@ type PowerVSPlatformStatus struct { // +optional ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` - // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // cisInstanceCRN is the CRN of the Cloud Internet Services instance managing // the DNS zone for the cluster's base domain CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` - // DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + // dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone // for the cluster's base domain DNSInstanceCRN string `json:"dnsInstanceCRN,omitempty"` } @@ -1663,7 +1769,6 @@ type AlibabaCloudPlatformSpec struct{} // AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. type AlibabaCloudPlatformStatus struct { // region specifies the region for Alibaba Cloud resources created for the cluster. - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` // +required Region string `json:"region"` @@ -1682,13 +1787,11 @@ type AlibabaCloudPlatformStatus struct { // AlibabaCloudResourceTag is the set of tags to add to apply to resources. type AlibabaCloudResourceTag struct { // key is the key of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required Key string `json:"key"` // value is the value of the tag. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required @@ -1723,7 +1826,7 @@ type NutanixPlatformSpec struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. 
// Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. - // +kubebuilder:validation:Required + // +required PrismCentral NutanixPrismEndpoint `json:"prismCentral"` // prismElements holds one or more endpoint address and port data to access the Nutanix @@ -1731,7 +1834,7 @@ type NutanixPlatformSpec struct { // Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) // used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) // spread over multiple Prism Elements (clusters) of the Prism Central. - // +kubebuilder:validation:Required + // +required // +listType=map // +listMapKey=name PrismElements []NutanixPrismElementEndpoint `json:"prismElements"` @@ -1739,6 +1842,7 @@ type NutanixPlatformSpec struct { // failureDomains configures failure domains information for the Nutanix platform. // When set, the failure domains defined here may be used to spread Machines across // prism element clusters to improve fault tolerance of the cluster. + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 // +listType=map // +listMapKey=name // +optional @@ -1752,7 +1856,7 @@ type NutanixFailureDomain struct { // It must consist of only lower case alphanumeric characters and hyphens (-). // It must start and end with an alphanumeric character. // This value is arbitrary and is used to identify the failure domain within the platform. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` @@ -1761,17 +1865,19 @@ type NutanixFailureDomain struct { // cluster is to identify the cluster (the Prism Element under management of the Prism Central), // in which the Machine's VM will be created. 
The cluster identifier (uuid or name) can be obtained // from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required Cluster NutanixResourceIdentifier `json:"cluster"` // subnets holds a list of identifiers (one or more) of the cluster's network subnets + // If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be // obtained from the Prism Central console or using the prism_central API. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:MaxItems=1 - // +listType=map - // +listMapKey=type + // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 + // +openshift:validation:FeatureGateAwareMaxItems:featureGate=NutanixMultiSubnets,maxItems=32 + // +openshift:validation:FeatureGateAwareXValidation:featureGate=NutanixMultiSubnets,rule="self.all(x, self.exists_one(y, x == y))",message="each subnet must be unique" + // +listType=atomic Subnets []NutanixResourceIdentifier `json:"subnets"` } @@ -1794,7 +1900,7 @@ const ( type NutanixResourceIdentifier struct { // type is the identifier type to use for this resource. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NutanixIdentifierType `json:"type"` // uuid is the UUID of the resource in the PC. It cannot be empty if the type is UUID. 
@@ -1809,12 +1915,12 @@ type NutanixResourceIdentifier struct { // NutanixPrismEndpoint holds the endpoint address and port to access the Nutanix Prism Central or Element (cluster) type NutanixPrismEndpoint struct { // address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Address string `json:"address"` // port is the port number to access the Nutanix Prism Central or Element (cluster) - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 Port int32 `json:"port"` @@ -1824,7 +1930,7 @@ type NutanixPrismEndpoint struct { type NutanixPrismElementEndpoint struct { // name is the name of the Prism Element (cluster). This value will correspond with // the cluster field configured on other resources (eg Machines, PVCs, etc). - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 Name string `json:"name"` @@ -1832,7 +1938,7 @@ type NutanixPrismElementEndpoint struct { // When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. // Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the // proxy spec.noProxy list. 
- // +kubebuilder:validation:Required + // +required Endpoint NutanixPrismEndpoint `json:"endpoint"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 302913a16f..9492e08a72 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -27,7 +27,6 @@ type Ingress struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec IngressSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -150,7 +149,7 @@ type AWSIngressSpec struct { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb // +unionDiscriminator // +kubebuilder:validation:Enum:=NLB;Classic - // +kubebuilder:validation:Required + // +required Type AWSLBType `json:"type,omitempty"` } @@ -223,7 +222,6 @@ type ComponentRouteSpec struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -233,12 +231,10 @@ type ComponentRouteSpec struct { // entry in the list of status.componentRoutes if the route is to be customized. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // hostname is the hostname that should be used by the route. 
- // +kubebuilder:validation:Required // +required Hostname Hostname `json:"hostname"` @@ -260,7 +256,6 @@ type ComponentRouteStatus struct { // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 - // +kubebuilder:validation:Required // +required Namespace string `json:"namespace"` @@ -271,12 +266,10 @@ type ComponentRouteStatus struct { // entry in the list of spec.componentRoutes if the route is to be customized. // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required // +required Name string `json:"name"` // defaultHostname is the hostname of this route prior to customization. - // +kubebuilder:validation:Required // +required DefaultHostname Hostname `json:"defaultHostname"` @@ -310,7 +303,6 @@ type ComponentRouteStatus struct { // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. // +kubebuilder:validation:MinItems=1 - // +kubebuilder:validation:Required // +required RelatedObjects []ObjectReference `json:"relatedObjects"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 1eeae69dac..95e55a7ffc 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -30,7 +30,6 @@ type Network struct { // As a general rule, this SHOULD NOT be read directly. Instead, you should // consume the NetworkStatus, as it indicates the currently deployed configuration. // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. - // +kubebuilder:validation:Required // +required Spec NetworkSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
@@ -55,7 +54,7 @@ type NetworkSpec struct { // +listType=atomic ServiceNetwork []string `json:"serviceNetwork"` - // NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). + // networkType is the plugin that is to be deployed (e.g. OVNKubernetes). // This should match a value that the cluster-network-operator understands, // or else no networking will be installed. // Currently supported values are: @@ -101,13 +100,13 @@ type NetworkStatus struct { // +listType=atomic ServiceNetwork []string `json:"serviceNetwork,omitempty"` - // NetworkType is the plugin that is deployed (e.g. OVNKubernetes). + // networkType is the plugin that is deployed (e.g. OVNKubernetes). NetworkType string `json:"networkType,omitempty"` - // ClusterNetworkMTU is the MTU for inter-pod networking. + // clusterNetworkMTU is the MTU for inter-pod networking. ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` - // Migration contains the cluster network migration configuration. + // migration contains the cluster network migration configuration. Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. @@ -185,35 +184,35 @@ type NetworkList struct { // NetworkMigration represents the network migration status. type NetworkMigration struct { - // NetworkType is the target plugin that is being deployed. + // networkType is the target plugin that is being deployed. // DEPRECATED: network type migration is no longer supported, // so this should always be unset. // +optional NetworkType string `json:"networkType,omitempty"` - // MTU is the MTU configuration that is being deployed. + // mtu is the MTU configuration that is being deployed. // +optional MTU *MTUMigration `json:"mtu,omitempty"` } // MTUMigration contains infomation about MTU migration. type MTUMigration struct { - // Network contains MTU migration configuration for the default network. 
+ // network contains MTU migration configuration for the default network. // +optional Network *MTUMigrationValues `json:"network,omitempty"` - // Machine contains MTU migration configuration for the machine's uplink. + // machine contains MTU migration configuration for the machine's uplink. // +optional Machine *MTUMigrationValues `json:"machine,omitempty"` } // MTUMigrationValues contains the values for a MTU migration. type MTUMigrationValues struct { - // To is the MTU to migrate to. + // to is the MTU to migrate to. // +kubebuilder:validation:Minimum=0 To *uint32 `json:"to"` - // From is the MTU to migrate from. + // from is the MTU to migrate from. // +kubebuilder:validation:Minimum=0 // +optional From *uint32 `json:"from,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index a50328c91f..3fc7bc0c39 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -28,7 +28,6 @@ type Node struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec NodeSpec `json:"spec"` @@ -38,11 +37,11 @@ type Node struct { } type NodeSpec struct { - // CgroupMode determines the cgroups version on the node + // cgroupMode determines the cgroups version on the node // +optional CgroupMode CgroupMode `json:"cgroupMode,omitempty"` - // WorkerLatencyProfile determins the how fast the kubelet is updating + // workerLatencyProfile determins the how fast the kubelet is updating // the status and corresponding reaction of the cluster // +optional WorkerLatencyProfile WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index dce08a17f5..20845e4dbe 100644 --- 
a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -27,7 +27,6 @@ type OAuth struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec OAuthSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index 78fd3f41a0..3d219862be 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -24,7 +24,6 @@ type Project struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ProjectSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 2dfc66b1cd..ed40176ce3 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -25,8 +25,7 @@ type Proxy struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // Spec holds user-settable values for the proxy configuration - // +kubebuilder:validation:Required + // spec holds user-settable values for the proxy configuration // +required Spec ProxySpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index 2749f4f70d..c90d5633f6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -25,7 +25,6 @@ type Scheduler struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec SchedulerSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. @@ -74,7 +73,7 @@ type SchedulerSpec struct { // would not be applied. // +optional DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` - // MastersSchedulable allows masters nodes to be schedulable. When this flag is + // mastersSchedulable allows masters nodes to be schedulable. When this flag is // turned on, all the master nodes in the cluster will be made schedulable, // so that workload pods can run on them. The default value for this field is false, // meaning none of the master nodes are schedulable. diff --git a/vendor/github.com/openshift/api/config/v1/types_testreporting.go b/vendor/github.com/openshift/api/config/v1/types_testreporting.go index 4d642e060b..00953957f4 100644 --- a/vendor/github.com/openshift/api/config/v1/types_testreporting.go +++ b/vendor/github.com/openshift/api/config/v1/types_testreporting.go @@ -15,7 +15,6 @@ type TestReporting struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec TestReportingSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
@@ -24,20 +23,20 @@ type TestReporting struct { } type TestReportingSpec struct { - // TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. + // testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing. TestsForFeatureGates []FeatureGateTests `json:"testsForFeatureGates"` } type FeatureGateTests struct { - // FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. + // featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance. FeatureGate string `json:"featureGate"` - // Tests contains an item for every TestName + // tests contains an item for every TestName Tests []TestDetails `json:"tests"` } type TestDetails struct { - // TestName is the name of the test as it appears in junit XMLs. + // testName is the name of the test as it appears in junit XMLs. // It does not include the suite name since the same test can be executed in many suites. TestName string `json:"testName"` } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml index 8f6562b78d..5061a69c6f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-CustomNoUpgrade.crd.yaml @@ -94,6 +94,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -193,10 +194,10 @@ spec: x-kubernetes-validations: - message: cannot set both Architecture and Image rule: 'has(self.architecture) && has(self.image) ? 
(self.architecture - == '''' || self.image == '''') : true' + == "" || self.image == "") : true' - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' + rule: 'has(self.architecture) && self.architecture != "" ? self.version + != "" : true' overrides: description: |- overrides is list of overides for components that are managed by @@ -317,6 +318,17 @@ spec: description: Release represents an OpenShift release image and associated metadata. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -378,6 +390,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -404,6 +417,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -490,6 +504,17 @@ spec: release: description: release is the target of the update. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -549,12 +574,12 @@ spec: fail to evaluate. properties: promql: - description: promQL represents a cluster condition + description: promql represents a cluster condition based on PromQL. 
properties: promql: description: |- - PromQL is a PromQL query classifying clusters. This query + promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are @@ -665,6 +690,17 @@ spec: If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml index 1cc057283e..caeeb9064a 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-Default.crd.yaml @@ -94,6 +94,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -193,10 +194,10 @@ spec: x-kubernetes-validations: - message: cannot set both Architecture and Image rule: 'has(self.architecture) && has(self.image) ? 
(self.architecture - == '''' || self.image == '''') : true' + == "" || self.image == "") : true' - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' + rule: 'has(self.architecture) && self.architecture != "" ? self.version + != "" : true' overrides: description: |- overrides is list of overides for components that are managed by @@ -327,6 +328,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -353,6 +355,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -498,12 +501,12 @@ spec: fail to evaluate. properties: promql: - description: promQL represents a cluster condition + description: promql represents a cluster condition based on PromQL. properties: promql: description: |- - PromQL is a PromQL query classifying clusters. This query + promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. 
Queries which return no time series, or which return values besides 0 or 1, are diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml index 9792d578b5..d7319febe0 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml @@ -94,6 +94,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -193,10 +194,10 @@ spec: x-kubernetes-validations: - message: cannot set both Architecture and Image rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' + == "" || self.image == "") : true' - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' + rule: 'has(self.architecture) && self.architecture != "" ? self.version + != "" : true' overrides: description: |- overrides is list of overides for components that are managed by @@ -317,6 +318,17 @@ spec: description: Release represents an OpenShift release image and associated metadata. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. 
+ enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -378,6 +390,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -404,6 +417,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -490,6 +504,17 @@ spec: release: description: release is the target of the update. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -549,12 +574,12 @@ spec: fail to evaluate. properties: promql: - description: promQL represents a cluster condition + description: promql represents a cluster condition based on PromQL. properties: promql: description: |- - PromQL is a PromQL query classifying clusters. This query + promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are @@ -665,6 +690,17 @@ spec: If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. 
+ enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml index 6af36d6562..4380248875 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-TechPreviewNoUpgrade.crd.yaml @@ -94,6 +94,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -193,10 +194,10 @@ spec: x-kubernetes-validations: - message: cannot set both Architecture and Image rule: 'has(self.architecture) && has(self.image) ? (self.architecture - == '''' || self.image == '''') : true' + == "" || self.image == "") : true' - message: Version must be set if Architecture is set - rule: 'has(self.architecture) && self.architecture != '''' ? self.version - != '''' : true' + rule: 'has(self.architecture) && self.architecture != "" ? self.version + != "" : true' overrides: description: |- overrides is list of overides for components that are managed by @@ -317,6 +318,17 @@ spec: description: Release represents an OpenShift release image and associated metadata. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. 
+ enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -378,6 +390,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -404,6 +417,7 @@ spec: - CloudCredential - Ingress - CloudControllerManager + - OperatorLifecycleManagerV1 type: string type: array x-kubernetes-list-type: atomic @@ -490,6 +504,17 @@ spec: release: description: release is the target of the update. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release @@ -549,12 +574,12 @@ spec: fail to evaluate. properties: promql: - description: promQL represents a cluster condition + description: promql represents a cluster condition based on PromQL. properties: promql: description: |- - PromQL is a PromQL query classifying clusters. This query + promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are @@ -665,6 +690,17 @@ spec: If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag. properties: + architecture: + description: |- + architecture is an optional field that indicates the + value of the cluster architecture. In this context cluster + architecture means either a single architecture or a multi + architecture. + Valid values are 'Multi' and empty. 
+ enum: + - Multi + - "" + type: string channels: description: |- channels is the set of Cincinnati channels to which the release diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_03_config-operator_01_proxies.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_03_config-operator_01_proxies.crd.yaml index b774262cd0..99ea9d49cc 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_03_config-operator_01_proxies.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_03_config-operator_01_proxies.crd.yaml @@ -43,7 +43,7 @@ spec: metadata: type: object spec: - description: Spec holds user-settable values for the proxy configuration + description: spec holds user-settable values for the proxy configuration properties: httpProxy: description: httpProxy is the URL of the proxy for HTTP requests. Empty diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift.crd.yaml index 8f7dc33442..93b2b00626 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Hypershift.crd.yaml @@ -69,7 +69,7 @@ spec: type: object oidcProviders: description: |- - OIDCProviders are OIDC identity providers that can issue tokens + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". 
@@ -78,22 +78,22 @@ spec: properties: claimMappings: description: |- - ClaimMappings describes rules on how to transform information from an + claimMappings describes rules on how to transform information from an ID token into a cluster identity properties: groups: description: |- - Groups is a name of the claim that should be used to construct + groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: description: |- - Prefix is a string to prefix the value from the token in the result of the + prefix is a string to prefix the value from the token in the result of the claim mapping. By default, no prefixing occurs. @@ -107,13 +107,13 @@ spec: type: object username: description: |- - Username is a name of the claim that should be used to construct + username is a name of the claim that should be used to construct usernames for the cluster identity. Default value: "sub" properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: @@ -126,7 +126,7 @@ spec: type: object prefixPolicy: description: |- - PrefixPolicy specifies how a prefix should apply. + prefixPolicy specifies how a prefix should apply. By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. @@ -161,23 +161,23 @@ spec: > 0) : !has(self.prefix)' type: object claimValidationRules: - description: ClaimValidationRules are rules that are applied + description: claimValidationRules are rules that are applied to validate token claims to authenticate users. 
items: properties: requiredClaim: description: |- - RequiredClaim allows configuring a required claim name and its expected + requiredClaim allows configuring a required claim name and its expected value properties: claim: description: |- - Claim is a name of a required claim. Only claims with string values are + claim is a name of a required claim. Only claims with string values are supported. minLength: 1 type: string requiredValue: - description: RequiredValue is the required value for + description: requiredValue is the required value for the claim. minLength: 1 type: string @@ -187,7 +187,7 @@ spec: type: object type: default: RequiredClaim - description: Type sets the type of the validation rule + description: type sets the type of the validation rule enum: - RequiredClaim type: string @@ -195,11 +195,11 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: Issuer describes atributes of the OIDC token issuer + description: issuer describes atributes of the OIDC token issuer properties: audiences: description: |- - Audiences is an array of audiences that the token was issued for. + audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. 
@@ -235,23 +235,23 @@ spec: - issuerURL type: object name: - description: Name of the OIDC provider + description: name of the OIDC provider minLength: 1 type: string oidcClients: description: |- - OIDCClients contains configuration for the platform's clients that + oidcClients contains configuration for the platform's clients that need to request tokens from the issuer items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string clientSecret: description: |- - ClientSecret refers to a secret in the `openshift-config` namespace that + clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field properties: name: @@ -263,20 +263,20 @@ spec: type: object componentName: description: |- - ComponentName is the name of the component that is supposed to consume this + componentName is the name of the component that is supposed to consume this client configuration maxLength: 256 minLength: 1 type: string componentNamespace: description: |- - ComponentNamespace is the namespace of the component that is supposed to consume this + componentNamespace is the namespace of the component that is supposed to consume this client configuration maxLength: 63 minLength: 1 type: string extraScopes: - description: ExtraScopes is an optional set of scopes + description: extraScopes is an optional set of scopes to request tokens with. items: type: string @@ -414,25 +414,25 @@ spec: type: object oidcClients: description: |- - OIDCClients is where participating operators place the current OIDC client status + oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. 
items: properties: componentName: - description: ComponentName is the name of the component that + description: componentName is the name of the component that will consume a client configuration. maxLength: 256 minLength: 1 type: string componentNamespace: - description: ComponentNamespace is the namespace of the component + description: componentNamespace is the namespace of the component that will consume a client configuration. maxLength: 63 minLength: 1 type: string conditions: description: |- - Conditions are used to communicate the state of the `oidcClients` entry. + conditions are used to communicate the state of the `oidcClients` entry. Supported conditions include Available, Degraded and Progressing. @@ -500,7 +500,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - ConsumingUsers is a slice of ServiceAccounts that need to have read + consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. items: description: ConsumingUser is an alias for string which we @@ -513,12 +513,12 @@ spec: type: array x-kubernetes-list-type: set currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the + description: currentOIDCClients is a list of clients that the component is currently using. 
items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml index 9d7058b463..07c2f49d23 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-CustomNoUpgrade.crd.yaml @@ -70,7 +70,7 @@ spec: type: object oidcProviders: description: |- - OIDCProviders are OIDC identity providers that can issue tokens + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". @@ -79,22 +79,22 @@ spec: properties: claimMappings: description: |- - ClaimMappings describes rules on how to transform information from an + claimMappings describes rules on how to transform information from an ID token into a cluster identity properties: groups: description: |- - Groups is a name of the claim that should be used to construct + groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: description: |- - Prefix is a string to prefix the value from the token in the result of the + prefix is a string to prefix the value from the token in the result of the claim mapping. 
By default, no prefixing occurs. @@ -108,13 +108,13 @@ spec: type: object username: description: |- - Username is a name of the claim that should be used to construct + username is a name of the claim that should be used to construct usernames for the cluster identity. Default value: "sub" properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: @@ -127,7 +127,7 @@ spec: type: object prefixPolicy: description: |- - PrefixPolicy specifies how a prefix should apply. + prefixPolicy specifies how a prefix should apply. By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. @@ -162,23 +162,23 @@ spec: > 0) : !has(self.prefix)' type: object claimValidationRules: - description: ClaimValidationRules are rules that are applied + description: claimValidationRules are rules that are applied to validate token claims to authenticate users. items: properties: requiredClaim: description: |- - RequiredClaim allows configuring a required claim name and its expected + requiredClaim allows configuring a required claim name and its expected value properties: claim: description: |- - Claim is a name of a required claim. Only claims with string values are + claim is a name of a required claim. Only claims with string values are supported. minLength: 1 type: string requiredValue: - description: RequiredValue is the required value for + description: requiredValue is the required value for the claim. 
minLength: 1 type: string @@ -188,7 +188,7 @@ spec: type: object type: default: RequiredClaim - description: Type sets the type of the validation rule + description: type sets the type of the validation rule enum: - RequiredClaim type: string @@ -196,11 +196,11 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: Issuer describes atributes of the OIDC token issuer + description: issuer describes atributes of the OIDC token issuer properties: audiences: description: |- - Audiences is an array of audiences that the token was issued for. + audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. @@ -236,23 +236,23 @@ spec: - issuerURL type: object name: - description: Name of the OIDC provider + description: name of the OIDC provider minLength: 1 type: string oidcClients: description: |- - OIDCClients contains configuration for the platform's clients that + oidcClients contains configuration for the platform's clients that need to request tokens from the issuer items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string clientSecret: description: |- - ClientSecret refers to a secret in the `openshift-config` namespace that + clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field properties: name: @@ -264,20 +264,20 @@ spec: type: object componentName: description: |- - ComponentName is the name of the component that is supposed to consume this + componentName is the name of the component that is supposed to consume this client configuration maxLength: 256 minLength: 1 type: string componentNamespace: description: |- - ComponentNamespace is the namespace of the component that is supposed to consume 
this + componentNamespace is the namespace of the component that is supposed to consume this client configuration maxLength: 63 minLength: 1 type: string extraScopes: - description: ExtraScopes is an optional set of scopes + description: extraScopes is an optional set of scopes to request tokens with. items: type: string @@ -415,25 +415,25 @@ spec: type: object oidcClients: description: |- - OIDCClients is where participating operators place the current OIDC client status + oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. items: properties: componentName: - description: ComponentName is the name of the component that + description: componentName is the name of the component that will consume a client configuration. maxLength: 256 minLength: 1 type: string componentNamespace: - description: ComponentNamespace is the namespace of the component + description: componentNamespace is the namespace of the component that will consume a client configuration. maxLength: 63 minLength: 1 type: string conditions: description: |- - Conditions are used to communicate the state of the `oidcClients` entry. + conditions are used to communicate the state of the `oidcClients` entry. Supported conditions include Available, Degraded and Progressing. @@ -501,7 +501,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - ConsumingUsers is a slice of ServiceAccounts that need to have read + consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. items: description: ConsumingUser is an alias for string which we @@ -514,12 +514,12 @@ spec: type: array x-kubernetes-list-type: set currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the + description: currentOIDCClients is a list of clients that the component is currently using. 
items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml index b2968832e3..718aeff5d1 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml @@ -70,7 +70,7 @@ spec: type: object oidcProviders: description: |- - OIDCProviders are OIDC identity providers that can issue tokens + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". @@ -79,22 +79,22 @@ spec: properties: claimMappings: description: |- - ClaimMappings describes rules on how to transform information from an + claimMappings describes rules on how to transform information from an ID token into a cluster identity properties: groups: description: |- - Groups is a name of the claim that should be used to construct + groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: description: |- - Prefix is a string to prefix the value from the token in the result of the + prefix is a string to prefix the value from the token in the result of the claim mapping. 
By default, no prefixing occurs. @@ -108,13 +108,13 @@ spec: type: object username: description: |- - Username is a name of the claim that should be used to construct + username is a name of the claim that should be used to construct usernames for the cluster identity. Default value: "sub" properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: @@ -127,7 +127,7 @@ spec: type: object prefixPolicy: description: |- - PrefixPolicy specifies how a prefix should apply. + prefixPolicy specifies how a prefix should apply. By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. @@ -162,23 +162,23 @@ spec: > 0) : !has(self.prefix)' type: object claimValidationRules: - description: ClaimValidationRules are rules that are applied + description: claimValidationRules are rules that are applied to validate token claims to authenticate users. items: properties: requiredClaim: description: |- - RequiredClaim allows configuring a required claim name and its expected + requiredClaim allows configuring a required claim name and its expected value properties: claim: description: |- - Claim is a name of a required claim. Only claims with string values are + claim is a name of a required claim. Only claims with string values are supported. minLength: 1 type: string requiredValue: - description: RequiredValue is the required value for + description: requiredValue is the required value for the claim. 
minLength: 1 type: string @@ -188,7 +188,7 @@ spec: type: object type: default: RequiredClaim - description: Type sets the type of the validation rule + description: type sets the type of the validation rule enum: - RequiredClaim type: string @@ -196,11 +196,11 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: Issuer describes atributes of the OIDC token issuer + description: issuer describes atributes of the OIDC token issuer properties: audiences: description: |- - Audiences is an array of audiences that the token was issued for. + audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. @@ -236,23 +236,23 @@ spec: - issuerURL type: object name: - description: Name of the OIDC provider + description: name of the OIDC provider minLength: 1 type: string oidcClients: description: |- - OIDCClients contains configuration for the platform's clients that + oidcClients contains configuration for the platform's clients that need to request tokens from the issuer items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string clientSecret: description: |- - ClientSecret refers to a secret in the `openshift-config` namespace that + clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field properties: name: @@ -264,20 +264,20 @@ spec: type: object componentName: description: |- - ComponentName is the name of the component that is supposed to consume this + componentName is the name of the component that is supposed to consume this client configuration maxLength: 256 minLength: 1 type: string componentNamespace: description: |- - ComponentNamespace is the namespace of the component that is supposed to consume 
this + componentNamespace is the namespace of the component that is supposed to consume this client configuration maxLength: 63 minLength: 1 type: string extraScopes: - description: ExtraScopes is an optional set of scopes + description: extraScopes is an optional set of scopes to request tokens with. items: type: string @@ -415,25 +415,25 @@ spec: type: object oidcClients: description: |- - OIDCClients is where participating operators place the current OIDC client status + oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. items: properties: componentName: - description: ComponentName is the name of the component that + description: componentName is the name of the component that will consume a client configuration. maxLength: 256 minLength: 1 type: string componentNamespace: - description: ComponentNamespace is the namespace of the component + description: componentNamespace is the namespace of the component that will consume a client configuration. maxLength: 63 minLength: 1 type: string conditions: description: |- - Conditions are used to communicate the state of the `oidcClients` entry. + conditions are used to communicate the state of the `oidcClients` entry. Supported conditions include Available, Degraded and Progressing. @@ -501,7 +501,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - ConsumingUsers is a slice of ServiceAccounts that need to have read + consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. items: description: ConsumingUser is an alias for string which we @@ -514,12 +514,12 @@ spec: type: array x-kubernetes-list-type: set currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the + description: currentOIDCClients is a list of clients that the component is currently using. 
items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml index bd450f3a7d..875290968b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-TechPreviewNoUpgrade.crd.yaml @@ -70,7 +70,7 @@ spec: type: object oidcProviders: description: |- - OIDCProviders are OIDC identity providers that can issue tokens + oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if "Type" is set to "OIDC". @@ -79,22 +79,22 @@ spec: properties: claimMappings: description: |- - ClaimMappings describes rules on how to transform information from an + claimMappings describes rules on how to transform information from an ID token into a cluster identity properties: groups: description: |- - Groups is a name of the claim that should be used to construct + groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values. properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: description: |- - Prefix is a string to prefix the value from the token in the result of the + prefix is a string to prefix the value from the token in the result of the claim mapping. 
By default, no prefixing occurs. @@ -108,13 +108,13 @@ spec: type: object username: description: |- - Username is a name of the claim that should be used to construct + username is a name of the claim that should be used to construct usernames for the cluster identity. Default value: "sub" properties: claim: - description: Claim is a JWT token claim to be used in + description: claim is a JWT token claim to be used in the mapping type: string prefix: @@ -127,7 +127,7 @@ spec: type: object prefixPolicy: description: |- - PrefixPolicy specifies how a prefix should apply. + prefixPolicy specifies how a prefix should apply. By default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. @@ -162,23 +162,23 @@ spec: > 0) : !has(self.prefix)' type: object claimValidationRules: - description: ClaimValidationRules are rules that are applied + description: claimValidationRules are rules that are applied to validate token claims to authenticate users. items: properties: requiredClaim: description: |- - RequiredClaim allows configuring a required claim name and its expected + requiredClaim allows configuring a required claim name and its expected value properties: claim: description: |- - Claim is a name of a required claim. Only claims with string values are + claim is a name of a required claim. Only claims with string values are supported. minLength: 1 type: string requiredValue: - description: RequiredValue is the required value for + description: requiredValue is the required value for the claim. 
minLength: 1 type: string @@ -188,7 +188,7 @@ spec: type: object type: default: RequiredClaim - description: Type sets the type of the validation rule + description: type sets the type of the validation rule enum: - RequiredClaim type: string @@ -196,11 +196,11 @@ spec: type: array x-kubernetes-list-type: atomic issuer: - description: Issuer describes atributes of the OIDC token issuer + description: issuer describes atributes of the OIDC token issuer properties: audiences: description: |- - Audiences is an array of audiences that the token was issued for. + audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their "aud" claim. Must be set to exactly one value. @@ -236,23 +236,23 @@ spec: - issuerURL type: object name: - description: Name of the OIDC provider + description: name of the OIDC provider minLength: 1 type: string oidcClients: description: |- - OIDCClients contains configuration for the platform's clients that + oidcClients contains configuration for the platform's clients that need to request tokens from the issuer items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string clientSecret: description: |- - ClientSecret refers to a secret in the `openshift-config` namespace that + clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field properties: name: @@ -264,20 +264,20 @@ spec: type: object componentName: description: |- - ComponentName is the name of the component that is supposed to consume this + componentName is the name of the component that is supposed to consume this client configuration maxLength: 256 minLength: 1 type: string componentNamespace: description: |- - ComponentNamespace is the namespace of the component that is supposed to consume 
this + componentNamespace is the namespace of the component that is supposed to consume this client configuration maxLength: 63 minLength: 1 type: string extraScopes: - description: ExtraScopes is an optional set of scopes + description: extraScopes is an optional set of scopes to request tokens with. items: type: string @@ -415,25 +415,25 @@ spec: type: object oidcClients: description: |- - OIDCClients is where participating operators place the current OIDC client status + oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin. items: properties: componentName: - description: ComponentName is the name of the component that + description: componentName is the name of the component that will consume a client configuration. maxLength: 256 minLength: 1 type: string componentNamespace: - description: ComponentNamespace is the namespace of the component + description: componentNamespace is the namespace of the component that will consume a client configuration. maxLength: 63 minLength: 1 type: string conditions: description: |- - Conditions are used to communicate the state of the `oidcClients` entry. + conditions are used to communicate the state of the `oidcClients` entry. Supported conditions include Available, Degraded and Progressing. @@ -501,7 +501,7 @@ spec: x-kubernetes-list-type: map consumingUsers: description: |- - ConsumingUsers is a slice of ServiceAccounts that need to have read + consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret. items: description: ConsumingUser is an alias for string which we @@ -514,12 +514,12 @@ spec: type: array x-kubernetes-list-type: set currentOIDCClients: - description: CurrentOIDCClients is a list of clients that the + description: currentOIDCClients is a list of clients that the component is currently using. 
items: properties: clientID: - description: ClientID is the identifier of the OIDC client + description: clientID is the identifier of the OIDC client from the OIDC provider minLength: 1 type: string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..e30d0805b4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-CustomNoUpgrade.crd.yaml @@ -0,0 +1,81 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1929 + api.openshift.io/merged-by-featuregates: "true" + description: Cluster Monitoring Operators configuration API + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: clustermonitoring.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterMonitoring + listKind: ClusterMonitoringList + plural: clustermonitoring + singular: clustermonitoring + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+ ClusterMonitoring is the Schema for the Cluster Monitoring Operators API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user configuration for the Cluster Monitoring + Operator + properties: + userDefined: + description: userDefined set the deployment mode for user-defined + monitoring in addition to the default platform monitoring. + properties: + mode: + description: |- + mode defines the different configurations of UserDefinedMonitoring + Valid values are Disabled and NamespaceIsolated + Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + enum: + - Disabled + - NamespaceIsolated + type: string + required: + - mode + type: object + required: + - userDefined + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..aa5b31c9d3 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,81 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1929 + api.openshift.io/merged-by-featuregates: "true" + description: Cluster Monitoring Operators configuration API + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: clustermonitoring.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterMonitoring + listKind: ClusterMonitoringList + plural: clustermonitoring + singular: clustermonitoring + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + ClusterMonitoring is the Schema for the Cluster Monitoring Operators API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user configuration for the Cluster Monitoring + Operator + properties: + userDefined: + description: userDefined set the deployment mode for user-defined + monitoring in addition to the default platform monitoring. + properties: + mode: + description: |- + mode defines the different configurations of UserDefinedMonitoring + Valid values are Disabled and NamespaceIsolated + Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + enum: + - Disabled + - NamespaceIsolated + type: string + required: + - mode + type: object + required: + - userDefined + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..e9534ebe19 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_clustermonitoring-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,81 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1929 + api.openshift.io/merged-by-featuregates: "true" + description: Cluster Monitoring Operators configuration API + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: clustermonitoring.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterMonitoring + listKind: ClusterMonitoringList + plural: clustermonitoring + singular: clustermonitoring + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + ClusterMonitoring is the Schema for the Cluster Monitoring Operators API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user configuration for the Cluster Monitoring + Operator + properties: + userDefined: + description: userDefined set the deployment mode for user-defined + monitoring in addition to the default platform monitoring. + properties: + mode: + description: |- + mode defines the different configurations of UserDefinedMonitoring + Valid values are Disabled and NamespaceIsolated + Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. + NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level. + enum: + - Disabled + - NamespaceIsolated + type: string + required: + - mode + type: object + required: + - userDefined + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 6b66f8b9b2..299a6d52a1 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -62,7 +62,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value inside + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string name: @@ -74,11 +74,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -110,11 +110,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. 
properties: apiServerInternalIPs: @@ -198,7 +198,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -209,7 +209,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -217,19 +217,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -288,6 +288,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -323,17 +324,19 @@ spec: is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -416,7 +419,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIPs: @@ -500,11 +503,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the IBM Power + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. 
properties: serviceEndpoints: @@ -523,7 +526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -572,7 +589,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -608,9 +625,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. properties: name: description: |- @@ -627,6 +644,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. 
+ enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: anyOf: - format: ipv4 @@ -638,7 +674,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure domain + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -727,6 +763,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. 
+ This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is HostGroup, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -734,6 +831,17 @@ spec: - topology - zone type: object + x-kubernetes-validations: + - message: when zoneAffinity type is HostGroup, regionAffinity + type must be ComputeCluster + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''HostGroup'' ? has(self.regionAffinity) && self.regionAffinity.type + == ''ComputeCluster'' : true' + - message: when zoneAffinity type is ComputeCluster, regionAffinity + type must be Datacenter + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''ComputeCluster'' ? has(self.regionAffinity) && + self.regionAffinity.type == ''Datacenter'' : true' type: array x-kubernetes-list-map-keys: - name @@ -958,6 +1066,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1031,7 +1140,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
properties: region: @@ -1074,7 +1183,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1219,7 +1328,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1246,7 +1355,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1319,7 +1428,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1429,7 +1538,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1446,7 +1555,7 @@ spec: type: string type: object external: - description: External contains settings specific to the generic + description: external contains settings specific to the generic External infrastructure provider. 
properties: cloudControllerManager: @@ -1485,7 +1594,7 @@ spec: once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1708,28 +1817,28 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been deployed + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster that + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group for new + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -1784,7 +1893,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. 
properties: apiServerInternalIP: @@ -1801,7 +1910,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -1885,7 +1994,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2000,7 +2109,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2088,17 +2197,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the Power Systems + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2134,7 +2243,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2194,7 +2317,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIP: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml index f2f1d4fb83..55ec17de53 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml @@ -62,7 +62,7 @@ spec: All the clients are expected to use the generated ConfigMap only. 
properties: key: - description: Key allows pointing to a specific key/value inside + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string name: @@ -74,11 +74,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -110,11 +110,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIPs: @@ -198,7 +198,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -209,7 +209,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. 
type: string x-kubernetes-validations: @@ -217,19 +217,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -288,6 +288,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -326,9 +327,7 @@ spec: maxItems: 1 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic required: - cluster - name @@ -416,7 +415,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. 
properties: apiServerInternalIPs: @@ -500,11 +499,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the IBM Power + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -523,7 +522,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -572,7 +585,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -608,9 +621,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. 
properties: name: description: |- @@ -627,6 +640,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: anyOf: - format: ipv4 @@ -638,7 +670,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure domain + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -727,6 +759,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. 
+ This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is HostGroup, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -907,7 +1000,7 @@ spec: - datacenters - server type: object - maxItems: 1 + maxItems: 3 minItems: 0 type: array x-kubernetes-list-type: atomic @@ -1031,7 +1124,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
properties: region: @@ -1074,7 +1167,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: region: @@ -1115,7 +1208,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1142,7 +1235,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1215,7 +1308,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1325,7 +1418,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1342,7 +1435,7 @@ spec: type: string type: object external: - description: External contains settings specific to the generic + description: external contains settings specific to the generic External infrastructure provider. 
properties: cloudControllerManager: @@ -1381,7 +1474,7 @@ spec: once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: projectID: @@ -1500,28 +1593,28 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been deployed + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster that + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group for new + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -1576,7 +1669,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. 
properties: apiServerInternalIP: @@ -1593,7 +1686,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -1677,7 +1770,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -1792,7 +1885,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -1880,17 +1973,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the Power Systems + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -1926,7 +2019,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -1986,7 +2093,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIP: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml index 851694b04d..9b6a6716a5 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -62,7 +62,7 @@ spec: All the clients are expected to use the generated ConfigMap only. 
properties: key: - description: Key allows pointing to a specific key/value inside + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string name: @@ -74,11 +74,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -110,11 +110,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIPs: @@ -198,7 +198,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -209,7 +209,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. 
type: string x-kubernetes-validations: @@ -217,19 +217,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -288,6 +288,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -323,17 +324,19 @@ spec: is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? 
has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -416,7 +419,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIPs: @@ -500,11 +503,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the IBM Power + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -523,7 +526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -572,7 +589,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. 
properties: apiServerInternalIPs: @@ -608,9 +625,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. properties: name: description: |- @@ -627,6 +644,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: anyOf: - format: ipv4 @@ -638,7 +674,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure domain + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -727,6 +763,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. 
+ properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is HostGroup, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? 
has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -734,6 +831,17 @@ spec: - topology - zone type: object + x-kubernetes-validations: + - message: when zoneAffinity type is HostGroup, regionAffinity + type must be ComputeCluster + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''HostGroup'' ? has(self.regionAffinity) && self.regionAffinity.type + == ''ComputeCluster'' : true' + - message: when zoneAffinity type is ComputeCluster, regionAffinity + type must be Datacenter + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''ComputeCluster'' ? has(self.regionAffinity) && + self.regionAffinity.type == ''Datacenter'' : true' type: array x-kubernetes-list-map-keys: - name @@ -958,6 +1066,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1031,7 +1140,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. properties: region: @@ -1074,7 +1183,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1219,7 +1328,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. 
items: @@ -1246,7 +1355,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1319,7 +1428,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1429,7 +1538,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1446,7 +1555,7 @@ spec: type: string type: object external: - description: External contains settings specific to the generic + description: external contains settings specific to the generic External infrastructure provider. properties: cloudControllerManager: @@ -1485,7 +1594,7 @@ spec: once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1708,28 +1817,28 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been deployed + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster that + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group for new + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -1784,7 +1893,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. properties: apiServerInternalIP: @@ -1801,7 +1910,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -1885,7 +1994,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. 
properties: apiServerInternalIP: @@ -2000,7 +2109,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2088,17 +2197,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the Power Systems + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2134,7 +2243,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2194,7 +2317,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. 
properties: apiServerInternalIP: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index b82d69e723..b0c12e2eb2 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -62,7 +62,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value inside + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string name: @@ -74,11 +74,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -110,11 +110,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. 
properties: apiServerInternalIPs: @@ -198,7 +198,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -209,7 +209,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -217,19 +217,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -288,6 +288,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -323,17 +324,19 @@ spec: is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -416,7 +419,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIPs: @@ -500,11 +503,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the IBM Power + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. 
properties: serviceEndpoints: @@ -523,7 +526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -572,7 +589,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -608,9 +625,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. properties: name: description: |- @@ -627,6 +644,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. 
+ enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: anyOf: - format: ipv4 @@ -638,7 +674,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure domain + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -727,6 +763,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. 
+ This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is HostGroup, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -734,6 +831,17 @@ spec: - topology - zone type: object + x-kubernetes-validations: + - message: when zoneAffinity type is HostGroup, regionAffinity + type must be ComputeCluster + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''HostGroup'' ? has(self.regionAffinity) && self.regionAffinity.type + == ''ComputeCluster'' : true' + - message: when zoneAffinity type is ComputeCluster, regionAffinity + type must be Datacenter + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''ComputeCluster'' ? has(self.regionAffinity) && + self.regionAffinity.type == ''Datacenter'' : true' type: array x-kubernetes-list-map-keys: - name @@ -958,6 +1066,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1031,7 +1140,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to the Alibaba + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
properties: region: @@ -1074,7 +1183,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon Web + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1219,7 +1328,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1246,7 +1355,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure infrastructure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1319,7 +1428,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the BareMetal + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1429,7 +1538,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to the Equinix + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1446,7 +1555,7 @@ spec: type: string type: object external: - description: External contains settings specific to the generic + description: external contains settings specific to the generic External infrastructure provider. 
properties: cloudControllerManager: @@ -1485,7 +1594,7 @@ spec: once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google Cloud + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1708,28 +1817,28 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the IBMCloud + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been deployed + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster that + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group for new + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -1784,7 +1893,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the kubevirt + description: kubevirt contains settings specific to the kubevirt infrastructure provider. 
properties: apiServerInternalIP: @@ -1801,7 +1910,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the Nutanix + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -1885,7 +1994,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the OpenStack + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2000,7 +2109,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt infrastructure + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2088,17 +2197,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the Power Systems + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2134,7 +2243,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2194,7 +2317,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the VSphere + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIP: diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks.crd.yaml index 49db650d7a..fd9546e536 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks.crd.yaml @@ -260,7 +260,7 @@ spec: type: object networkType: description: |- - NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). + networkType is the plugin that is to be deployed (e.g. OVNKubernetes). 
This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: @@ -318,7 +318,7 @@ spec: type: array x-kubernetes-list-type: atomic clusterNetworkMTU: - description: ClusterNetworkMTU is the MTU for inter-pod networking. + description: clusterNetworkMTU is the MTU for inter-pod networking. type: integer conditions: description: |- @@ -383,37 +383,37 @@ spec: - type x-kubernetes-list-type: map migration: - description: Migration contains the cluster network migration configuration. + description: migration contains the cluster network migration configuration. properties: mtu: - description: MTU is the MTU configuration that is being deployed. + description: mtu is the MTU configuration that is being deployed. properties: machine: - description: Machine contains MTU migration configuration + description: machine contains MTU migration configuration for the machine's uplink. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer type: object network: - description: Network contains MTU migration configuration + description: network contains MTU migration configuration for the default network. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer @@ -421,13 +421,13 @@ spec: type: object networkType: description: |- - NetworkType is the target plugin that is being deployed. + networkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset. 
type: string type: object networkType: - description: NetworkType is the plugin that is deployed (e.g. OVNKubernetes). + description: networkType is the plugin that is deployed (e.g. OVNKubernetes). type: string serviceNetwork: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-CustomNoUpgrade.crd.yaml index 469400577a..776be2eeb6 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-CustomNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: description: spec holds user settable values for configuration properties: cgroupMode: - description: CgroupMode determines the cgroups version on the node + description: cgroupMode determines the cgroups version on the node enum: - v1 - v2 @@ -76,7 +76,7 @@ spec: rule: self == "" || self.matches('^[0-9]*.[0-9]*.[0-9]*$') workerLatencyProfile: description: |- - WorkerLatencyProfile determins the how fast the kubelet is updating + workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster enum: - Default diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-Default.crd.yaml index b79a394c6e..a4d69857db 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-Default.crd.yaml @@ -47,7 +47,7 @@ spec: description: spec holds 
user settable values for configuration properties: cgroupMode: - description: CgroupMode determines the cgroups version on the node + description: cgroupMode determines the cgroups version on the node enum: - v1 - v2 @@ -55,7 +55,7 @@ spec: type: string workerLatencyProfile: description: |- - WorkerLatencyProfile determins the how fast the kubelet is updating + workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster enum: - Default diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-DevPreviewNoUpgrade.crd.yaml index 99b124d572..00413af2e2 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-DevPreviewNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: description: spec holds user settable values for configuration properties: cgroupMode: - description: CgroupMode determines the cgroups version on the node + description: cgroupMode determines the cgroups version on the node enum: - v1 - v2 @@ -76,7 +76,7 @@ spec: rule: self == "" || self.matches('^[0-9]*.[0-9]*.[0-9]*$') workerLatencyProfile: description: |- - WorkerLatencyProfile determins the how fast the kubelet is updating + workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster enum: - Default diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-TechPreviewNoUpgrade.crd.yaml index 8db838df77..8504be1e39 100644 
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_nodes-TechPreviewNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: description: spec holds user settable values for configuration properties: cgroupMode: - description: CgroupMode determines the cgroups version on the node + description: cgroupMode determines the cgroups version on the node enum: - v1 - v2 @@ -76,7 +76,7 @@ spec: rule: self == "" || self.matches('^[0-9]*.[0-9]*.[0-9]*$') workerLatencyProfile: description: |- - WorkerLatencyProfile determins the how fast the kubelet is updating + workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster enum: - Default diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-CustomNoUpgrade.crd.yaml index f557085f8f..d058dd7dcc 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-CustomNoUpgrade.crd.yaml @@ -72,7 +72,7 @@ spec: type: string mastersSchedulable: description: |- - MastersSchedulable allows masters nodes to be schedulable. When this flag is + mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. 
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-Default.crd.yaml index cbfff13ab7..37abbe9a53 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-Default.crd.yaml @@ -72,7 +72,7 @@ spec: type: string mastersSchedulable: description: |- - MastersSchedulable allows masters nodes to be schedulable. When this flag is + mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml index 39aae9eced..a62eb09733 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml @@ -72,7 +72,7 @@ spec: type: string mastersSchedulable: description: |- - MastersSchedulable allows masters nodes to be schedulable. When this flag is + mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. 
The default value for this field is false, meaning none of the master nodes are schedulable. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-TechPreviewNoUpgrade.crd.yaml index 34b79b3f1f..24fbbb3021 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-TechPreviewNoUpgrade.crd.yaml @@ -72,7 +72,7 @@ spec: type: string mastersSchedulable: description: |- - MastersSchedulable allows masters nodes to be schedulable. When this flag is + mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. 
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml index 34ca783627..21eee52c73 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_openshift-controller-manager_01_builds.crd.yaml @@ -46,12 +46,12 @@ spec: metadata: type: object spec: - description: Spec holds user-settable values for the build controller + description: spec holds user-settable values for the build controller configuration properties: additionalTrustedCA: description: |- - AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config. @@ -66,11 +66,11 @@ spec: - name type: object buildDefaults: - description: BuildDefaults controls the default information for Builds + description: buildDefaults controls the default information for Builds properties: defaultProxy: description: |- - DefaultProxy contains the default proxy settings for all build operations, including image pull/push + defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download. 
Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables @@ -132,7 +132,7 @@ spec: type: object env: description: |- - Env is a set of default environment variables that will be applied to the + env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build items: description: EnvVar represents an environment variable present @@ -254,7 +254,7 @@ spec: type: array gitProxy: description: |- - GitProxy contains the proxy settings for git operations only. If set, this will override + gitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone. Values that are not set here will be inherited from DefaultProxy. @@ -315,22 +315,22 @@ spec: type: object imageLabels: description: |- - ImageLabels is a list of docker labels that are applied to the resulting image. + imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig. items: properties: name: - description: Name defines the name of the label. It must + description: name defines the name of the label. It must have non-zero length. type: string value: - description: Value defines the literal value of the label. + description: value defines the literal value of the label. type: string type: object type: array resources: - description: Resources defines resource requirements to execute + description: resources defines resource requirements to execute the build. 
properties: claims: @@ -391,40 +391,40 @@ spec: type: object type: object buildOverrides: - description: BuildOverrides controls override settings for builds + description: buildOverrides controls override settings for builds properties: forcePull: description: |- - ForcePull overrides, if set, the equivalent value in the builds, + forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself type: boolean imageLabels: description: |- - ImageLabels is a list of docker labels that are applied to the resulting image. + imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten. items: properties: name: - description: Name defines the name of the label. It must + description: name defines the name of the label. It must have non-zero length. type: string value: - description: Value defines the literal value of the label. + description: value defines the literal value of the label. type: string type: object type: array nodeSelector: additionalProperties: type: string - description: NodeSelector is a selector which must be true for + description: nodeSelector is a selector which must be true for the build pod to fit on a node type: object tolerations: description: |- - Tolerations is a list of Tolerations that will override any existing + tolerations is a list of Tolerations that will override any existing tolerations set on a build pod. 
items: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 1b7fa44aad..1a77c6af5b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1003,6 +1003,100 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoring) DeepCopyInto(out *ClusterMonitoring) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoring. +func (in *ClusterMonitoring) DeepCopy() *ClusterMonitoring { + if in == nil { + return nil + } + out := new(ClusterMonitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMonitoring) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringList) DeepCopyInto(out *ClusterMonitoringList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterMonitoring, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringList. 
+func (in *ClusterMonitoringList) DeepCopy() *ClusterMonitoringList { + if in == nil { + return nil + } + out := new(ClusterMonitoringList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { + *out = *in + out.UserDefined = in.UserDefined + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringSpec. +func (in *ClusterMonitoringSpec) DeepCopy() *ClusterMonitoringSpec { + if in == nil { + return nil + } + out := new(ClusterMonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMonitoringStatus) DeepCopyInto(out *ClusterMonitoringStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMonitoringStatus. +func (in *ClusterMonitoringStatus) DeepCopy() *ClusterMonitoringStatus { + if in == nil { + return nil + } + out := new(ClusterMonitoringStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -5789,6 +5883,22 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserDefinedMonitoring) DeepCopyInto(out *UserDefinedMonitoring) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserDefinedMonitoring. +func (in *UserDefinedMonitoring) DeepCopy() *UserDefinedMonitoring { + if in == nil { + return nil + } + out := new(UserDefinedMonitoring) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { *out = *in @@ -5827,9 +5937,72 @@ func (in *UsernamePrefix) DeepCopy() *UsernamePrefix { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainHostGroup) DeepCopyInto(out *VSphereFailureDomainHostGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainHostGroup. +func (in *VSphereFailureDomainHostGroup) DeepCopy() *VSphereFailureDomainHostGroup { + if in == nil { + return nil + } + out := new(VSphereFailureDomainHostGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereFailureDomainRegionAffinity) DeepCopyInto(out *VSphereFailureDomainRegionAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainRegionAffinity. +func (in *VSphereFailureDomainRegionAffinity) DeepCopy() *VSphereFailureDomainRegionAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainRegionAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSphereFailureDomainZoneAffinity) DeepCopyInto(out *VSphereFailureDomainZoneAffinity) { + *out = *in + if in.HostGroup != nil { + in, out := &in.HostGroup, &out.HostGroup + *out = new(VSphereFailureDomainHostGroup) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereFailureDomainZoneAffinity. +func (in *VSphereFailureDomainZoneAffinity) DeepCopy() *VSphereFailureDomainZoneAffinity { + if in == nil { + return nil + } + out := new(VSphereFailureDomainZoneAffinity) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSpherePlatformFailureDomainSpec) DeepCopyInto(out *VSpherePlatformFailureDomainSpec) { *out = *in + if in.RegionAffinity != nil { + in, out := &in.RegionAffinity, &out.RegionAffinity + *out = new(VSphereFailureDomainRegionAffinity) + **out = **in + } + if in.ZoneAffinity != nil { + in, out := &in.ZoneAffinity, &out.ZoneAffinity + *out = new(VSphereFailureDomainZoneAffinity) + (*in).DeepCopyInto(*out) + } in.Topology.DeepCopyInto(&out.Topology) return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index abfea5eaf0..bad62b529f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -64,6 +64,30 @@ builds.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +clustermonitoring.config.openshift.io: + Annotations: + description: Cluster Monitoring Operators configuration API + ApprovedPRNumber: https://github.com/openshift/api/pull/1929 + CRDName: clustermonitoring.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterMonitoringConfig + 
FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterMonitoring + Labels: {} + PluralName: clustermonitoring + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ClusterMonitoringConfig + Version: v1 + clusteroperators.config.openshift.io: Annotations: include.release.openshift.io/self-managed-high-availability: "true" @@ -115,6 +139,7 @@ clusterversions.config.openshift.io: Capability: "" Category: "" FeatureGates: + - ImageStreamImportMode - SignatureStores FilenameOperatorName: cluster-version-operator FilenameOperatorOrdering: "01" @@ -314,7 +339,10 @@ infrastructures.config.openshift.io: - BareMetalLoadBalancer - GCPClusterHostedDNS - GCPLabelsTags + - HighlyAvailableArbiter + - NutanixMultiSubnets - VSphereControlPlaneMachineSet + - VSphereHostVMGroupZonal - VSphereMultiNetworks - VSphereMultiVCenters FilenameOperatorName: config-operator diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 145a7e4c04..c45b58eef9 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -22,8 +22,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string { var map_AdmissionPluginConfig = map[string]string{ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": "Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", } func (AdmissionPluginConfig) SwaggerDoc() map[string]string { @@ -37,8 +37,8 @@ var map_AuditConfig = map[string]string{ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", "maximumRetainedFiles": "Maximum number of old log files to retain.", "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. 
If present, it will be used instead of the path to the policy file.", "logFormat": "Format of saved audits (legacy or json).", "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", "webHookMode": "Strategy for sending audit events (block or batch).", @@ -50,8 +50,8 @@ func (AuditConfig) SwaggerDoc() map[string]string { var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", } func (CertInfo) SwaggerDoc() map[string]string { @@ -71,7 +71,7 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string { var map_ConfigMapFileReference = map[string]string{ "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", - "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", + "key": "key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", } func (ConfigMapFileReference) SwaggerDoc() map[string]string { @@ -107,8 +107,8 @@ func (DelegatedAuthorization) SwaggerDoc() map[string]string { var map_EtcdConnectionInfo = map[string]string{ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", } func (EtcdConnectionInfo) SwaggerDoc() map[string]string { @@ -116,7 +116,7 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { } var map_EtcdStorageConfig = map[string]string{ - "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", + "storagePrefix": "storagePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", } func (EtcdStorageConfig) SwaggerDoc() map[string]string { @@ -138,7 +138,7 @@ func (GenericAPIServerConfig) SwaggerDoc() map[string]string { var map_GenericControllerConfig = map[string]string{ "": "GenericControllerConfig provides information to configure a controller", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. 
Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", "authorization": "authorization allows configuration of authentication for the endpoints", @@ -150,8 +150,8 @@ func (GenericControllerConfig) SwaggerDoc() map[string]string { var map_HTTPServingInfo = map[string]string{ "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", } func (HTTPServingInfo) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (MaxAgePolicy) SwaggerDoc() map[string]string { var map_NamedCertificate = map[string]string{ "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", } func (NamedCertificate) SwaggerDoc() map[string]string { @@ -202,8 +202,8 @@ func (NamedCertificate) SwaggerDoc() map[string]string { var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", + "url": 
"url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", } func (RemoteConnectionInfo) SwaggerDoc() map[string]string { @@ -233,12 +233,12 @@ func (SecretNameReference) SwaggerDoc() map[string]string { var map_ServingInfo = map[string]string{ "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", } func (ServingInfo) SwaggerDoc() map[string]string { @@ -255,10 +255,10 @@ func (StringSource) SwaggerDoc() map[string]string { var map_StringSourceSpec = map[string]string{ "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile references a file containing the key to use to decrypt the value.", } func (StringSourceSpec) SwaggerDoc() map[string]string { @@ -369,7 +369,7 @@ var map_AuthenticationSpec = map[string]string{ "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.\n\nCan only be set if \"Type\" is set to \"None\".", "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. 
The default is https://kubernetes.default.svc WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the previous issuer value. Instead, the tokens issued by previous service account issuer will continue to be trusted for a time period chosen by the platform (currently set to 24h). This time period is subject to change over time. This allows internal components to transition to use new service account issuer without service distruption.", - "oidcProviders": "OIDCProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", + "oidcProviders": "oidcProviders are OIDC identity providers that can issue tokens for this cluster Can only be set if \"Type\" is set to \"OIDC\".\n\nAt most one provider can be configured.", } func (AuthenticationSpec) SwaggerDoc() map[string]string { @@ -378,7 +378,7 @@ func (AuthenticationSpec) SwaggerDoc() map[string]string { var map_AuthenticationStatus = map[string]string{ "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.", - "oidcClients": "OIDCClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", + "oidcClients": "oidcClients is where participating operators place the current OIDC client status for OIDC clients that can be customized by the cluster-admin.", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -395,11 +395,11 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { } var map_OIDCClientConfig = map[string]string{ - "componentName": "ComponentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "ComponentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "ClientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "ExtraScopes is an optional set of scopes to request tokens with.", + "componentName": "componentName is the name of the component that is supposed to consume this client configuration", + "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", + "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", + "extraScopes": "extraScopes is an optional set of scopes to request tokens with.", } func (OIDCClientConfig) SwaggerDoc() map[string]string { @@ -409,7 +409,7 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string { var map_OIDCClientReference = map[string]string{ "oidcProviderName": 
"OIDCName refers to the `name` of the provider from `oidcProviders`", "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "clientID": "ClientID is the identifier of the OIDC client from the OIDC provider", + "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", } func (OIDCClientReference) SwaggerDoc() map[string]string { @@ -417,11 +417,11 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { } var map_OIDCClientStatus = map[string]string{ - "componentName": "ComponentName is the name of the component that will consume a client configuration.", - "componentNamespace": "ComponentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "CurrentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "ConsumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", - "conditions": "Conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", + "componentName": "componentName is the name of the component that will consume a client configuration.", + "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.", + "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } func (OIDCClientStatus) SwaggerDoc() map[string]string { @@ -429,11 +429,11 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string { } var map_OIDCProvider = map[string]string{ - "name": "Name of the OIDC provider", - "issuer": "Issuer describes atributes of the OIDC token issuer", - "oidcClients": "OIDCClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "ClaimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "ClaimValidationRules are rules that are applied to validate token claims to authenticate users.", + "name": "name of the OIDC provider", + "issuer": "issuer describes atributes of the OIDC token issuer", + "oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer", + "claimMappings": "claimMappings describes rules on how to transform 
information from an ID token into a cluster identity", + "claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -441,7 +441,7 @@ func (OIDCProvider) SwaggerDoc() map[string]string { } var map_PrefixedClaimMapping = map[string]string{ - "prefix": "Prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -449,7 +449,7 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMapping = map[string]string{ - "claim": "Claim is a JWT token claim to be used in the mapping", + "claim": "claim is a JWT token claim to be used in the mapping", } func (TokenClaimMapping) SwaggerDoc() map[string]string { @@ -457,8 +457,8 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "Username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "Groups is a name of the claim that should be used to construct groups for the cluster identity. 
The referenced claim must use array of strings values.", + "username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", + "groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { @@ -466,8 +466,8 @@ func (TokenClaimMappings) SwaggerDoc() map[string]string { } var map_TokenClaimValidationRule = map[string]string{ - "type": "Type sets the type of the validation rule", - "requiredClaim": "RequiredClaim allows configuring a required claim name and its expected value", + "type": "type sets the type of the validation rule", + "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -476,7 +476,7 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { var map_TokenIssuer = map[string]string{ "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "audiences": "Audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", + "audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", } @@ -485,8 +485,8 @@ func (TokenIssuer) SwaggerDoc() map[string]string { } var map_TokenRequiredClaim = map[string]string{ - "claim": "Claim is a name of a required claim. 
Only claims with string values are supported.", - "requiredValue": "RequiredValue is the required value for the claim.", + "claim": "claim is a name of a required claim. Only claims with string values are supported.", + "requiredValue": "requiredValue is the required value for the claim.", } func (TokenRequiredClaim) SwaggerDoc() map[string]string { @@ -494,7 +494,7 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { } var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "PrefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". 
If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", } func (UsernameClaimMapping) SwaggerDoc() map[string]string { @@ -513,7 +513,7 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { var map_Build = map[string]string{ "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the build controller configuration", + "spec": "spec holds user-settable values for the build controller configuration", } func (Build) SwaggerDoc() map[string]string { @@ -521,11 +521,11 @@ func (Build) SwaggerDoc() map[string]string { } var map_BuildDefaults = map[string]string{ - "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", - "gitProxy": "GitProxy contains the proxy settings for git operations only. 
If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", - "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", - "resources": "Resources defines resource requirements to execute the build.", + "defaultProxy": "defaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "gitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "resources defines resource requirements to execute the build.", } func (BuildDefaults) SwaggerDoc() map[string]string { @@ -542,10 +542,10 @@ func (BuildList) SwaggerDoc() map[string]string { } var map_BuildOverrides = map[string]string{ - "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. 
If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", - "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", - "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", - "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", + "imageLabels": "imageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", } func (BuildOverrides) SwaggerDoc() map[string]string { @@ -553,9 +553,9 @@ func (BuildOverrides) SwaggerDoc() map[string]string { } var map_BuildSpec = map[string]string{ - "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. 
The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", - "buildDefaults": "BuildDefaults controls the default information for Builds", - "buildOverrides": "BuildOverrides controls override settings for builds", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "buildDefaults controls the default information for Builds", + "buildOverrides": "buildOverrides controls override settings for builds", } func (BuildSpec) SwaggerDoc() map[string]string { @@ -563,14 +563,61 @@ func (BuildSpec) SwaggerDoc() map[string]string { } var map_ImageLabel = map[string]string{ - "name": "Name defines the name of the label. It must have non-zero length.", - "value": "Value defines the literal value of the label.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", } func (ImageLabel) SwaggerDoc() map[string]string { return map_ImageLabel } +var map_ClusterMonitoring = map[string]string{ + "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", + "metadata": "metadata is the standard object metadata.", + "spec": "spec holds user configuration for the Cluster Monitoring Operator", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterMonitoring) SwaggerDoc() map[string]string { + return map_ClusterMonitoring +} + +var map_ClusterMonitoringList = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list metadata.", + "items": "items is a list of ClusterMonitoring", +} + +func (ClusterMonitoringList) SwaggerDoc() map[string]string { + return map_ClusterMonitoringList +} + +var map_ClusterMonitoringSpec = map[string]string{ + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.", +} + +func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { + return map_ClusterMonitoringSpec +} + +var map_ClusterMonitoringStatus = map[string]string{ + "": "MonitoringOperatorStatus defines the observed state of MonitoringOperator", +} + +func (ClusterMonitoringStatus) SwaggerDoc() map[string]string { + return map_ClusterMonitoringStatus +} + +var map_UserDefinedMonitoring = map[string]string{ + "": "UserDefinedMonitoring config for user-defined projects.", + "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. 
This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.", +} + +func (UserDefinedMonitoring) SwaggerDoc() map[string]string { + return map_UserDefinedMonitoring +} + var map_ClusterOperator = map[string]string{ "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -648,7 +695,7 @@ func (OperandVersion) SwaggerDoc() map[string]string { var map_ClusterCondition = map[string]string{ "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", "type": "type represents the cluster-condition type. 
This defines the members and semantics of any additional properties.", - "promql": "promQL represents a cluster condition based on PromQL.", + "promql": "promql represents a cluster condition based on PromQL.", } func (ClusterCondition) SwaggerDoc() map[string]string { @@ -764,7 +811,7 @@ func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { var map_PromQLClusterCondition = map[string]string{ "": "PromQLClusterCondition represents a cluster condition based on PromQL.", - "promql": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", + "promql": "promql is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", } func (PromQLClusterCondition) SwaggerDoc() map[string]string { @@ -772,11 +819,12 @@ func (PromQLClusterCondition) SwaggerDoc() map[string]string { } var map_Release = map[string]string{ - "": "Release represents an OpenShift release image and associated metadata.", - "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", - "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", - "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. 
The URL field may not be set for test or nightly releases.", - "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", + "": "Release represents an OpenShift release image and associated metadata.", + "architecture": "architecture is an optional field that indicates the value of the cluster architecture. In this context cluster architecture means either a single architecture or a multi architecture. Valid values are 'Multi' and empty.", + "version": "version is a semantic version identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", } func (Release) SwaggerDoc() map[string]string { @@ -1186,7 +1234,7 @@ func (AWSPlatformSpec) SwaggerDoc() map[string]string { var map_AWSPlatformStatus = map[string]string{ "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", "region": "region holds the default AWS region for new AWS resources created by the cluster.", - "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. 
There must be only one ServiceEndpoint for a service.", "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", "cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.", } @@ -1360,7 +1408,7 @@ func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { var map_ExternalPlatformSpec = map[string]string{ "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", - "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", + "platformName": "platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. 
This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", } func (ExternalPlatformSpec) SwaggerDoc() map[string]string { @@ -1428,11 +1476,11 @@ func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { var map_IBMCloudPlatformStatus = map[string]string{ "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", - "location": "Location is where the cluster has been deployed", - "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", - "providerType": "ProviderType indicates the type of cluster that was created", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "location": "location is where the cluster has been deployed", + "resourceGroupName": "resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "providerType indicates the type of cluster that was created", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM Cloud service. These endpoints are consumed by components within the cluster to reach the respective IBM Cloud Services.", } @@ -1519,7 +1567,7 @@ var map_NutanixFailureDomain = map[string]string{ "": "NutanixFailureDomain configures failure domain information for the Nutanix platform.", "name": "name defines the unique name of a failure domain. 
Name is required and must be at most 64 characters in length. It must consist of only lower case alphanumeric characters and hyphens (-). It must start and end with an alphanumeric character. This value is arbitrary and is used to identify the failure domain within the platform.", "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", - "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", } func (NutanixFailureDomain) SwaggerDoc() map[string]string { @@ -1660,19 +1708,19 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string { var map_PlatformSpec = map[string]string{ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. 
Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": "openstack contains settings specific 
to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", "external": "ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately.", } @@ -1683,20 +1731,20 @@ func (PlatformSpec) SwaggerDoc() map[string]string { var map_PlatformStatus = map[string]string{ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", - "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", - "azure": "Azure contains settings specific to the Azure infrastructure provider.", - "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", - "baremetal": "BareMetal contains settings specific to the BareMetal platform.", - "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", - "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", - "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", - "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", - "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", - "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", - "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", - "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", - "nutanix": "Nutanix contains settings specific to the Nutanix infrastructure provider.", - "external": "External contains settings specific to the generic External infrastructure provider.", + "aws": "aws contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "azure contains settings specific to the Azure infrastructure provider.", + "gcp": "gcp contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "baremetal contains settings specific to the BareMetal platform.", + "openstack": 
"openstack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "vsphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "ibmcloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "equinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", + "nutanix": "nutanix contains settings specific to the Nutanix infrastructure provider.", + "external": "external contains settings specific to the generic External infrastructure provider.", } func (PlatformStatus) SwaggerDoc() map[string]string { @@ -1718,8 +1766,8 @@ var map_PowerVSPlatformStatus = map[string]string{ "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. 
When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", - "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", - "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", + "cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", + "dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", } func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { @@ -1736,13 +1784,45 @@ func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { return map_PowerVSServiceEndpoint } +var map_VSphereFailureDomainHostGroup = map[string]string{ + "": "VSphereFailureDomainHostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "vmGroup": "vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. vmGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "hostGroup": "hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. hostGroup is limited to 80 characters. This field is required when the VSphereFailureDomain ZoneType is HostGroup", + "vmHostRule": "vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. vmHostRule is limited to 80 characters. 
This field is required when the VSphereFailureDomain ZoneType is HostGroup", +} + +func (VSphereFailureDomainHostGroup) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainHostGroup +} + +var map_VSphereFailureDomainRegionAffinity = map[string]string{ + "": "VSphereFailureDomainRegionAffinity contains the region type which is the string representation of the VSphereFailureDomainRegionType with available options of Datacenter and ComputeCluster.", + "type": "type determines the vSphere object type for a region within this failure domain. Available types are Datacenter and ComputeCluster. When set to Datacenter, this means the vCenter Datacenter defined is the region. When set to ComputeCluster, this means the vCenter cluster defined is the region.", +} + +func (VSphereFailureDomainRegionAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainRegionAffinity +} + +var map_VSphereFailureDomainZoneAffinity = map[string]string{ + "": "VSphereFailureDomainZoneAffinity contains the vCenter cluster vm-host group (virtual machine and host types) and the vm-host affinity rule that together creates an affinity configuration for vm-host based zonal. This configuration within vCenter creates the required association between a failure domain, virtual machines and ESXi hosts to create a vm-host based zone.", + "type": "type determines the vSphere object type for a zone within this failure domain. Available types are ComputeCluster and HostGroup. When set to ComputeCluster, this means the vCenter cluster defined is the zone. When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and this means the zone is defined by the grouping of those fields.", + "hostGroup": "hostGroup holds the vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. 
Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", +} + +func (VSphereFailureDomainZoneAffinity) SwaggerDoc() map[string]string { + return map_VSphereFailureDomainZoneAffinity +} + var map_VSpherePlatformFailureDomainSpec = map[string]string{ - "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", - "name": "name defines the arbitrary but unique name of a failure domain.", - "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", - "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", - "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", - "topology": "Topology describes a given failure domain using vSphere constructs", + "": "VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain.", + "name": "name defines the arbitrary but unique name of a failure domain.", + "region": "region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region.", + "zone": "zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.", + "regionAffinity": "regionAffinity holds the type of region, Datacenter or ComputeCluster. When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology.", + "zoneAffinity": "zoneAffinity holds the type of the zone and the hostGroup which vmGroup and the hostGroup names in vCenter corresponds to a vm-host group of type Virtual Machine and Host respectively. 
Is also contains the vmHostRule which is an affinity vm-host rule in vCenter.", + "server": "server is the fully-qualified domain name or the IP address of the vCenter server.", + "topology": "topology describes a given failure domain using vSphere constructs", } func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { @@ -1961,8 +2041,8 @@ func (ExternalIPPolicy) SwaggerDoc() map[string]string { var map_MTUMigration = map[string]string{ "": "MTUMigration contains infomation about MTU migration.", - "network": "Network contains MTU migration configuration for the default network.", - "machine": "Machine contains MTU migration configuration for the machine's uplink.", + "network": "network contains MTU migration configuration for the default network.", + "machine": "machine contains MTU migration configuration for the machine's uplink.", } func (MTUMigration) SwaggerDoc() map[string]string { @@ -1971,8 +2051,8 @@ func (MTUMigration) SwaggerDoc() map[string]string { var map_MTUMigrationValues = map[string]string{ "": "MTUMigrationValues contains the values for a MTU migration.", - "to": "To is the MTU to migrate to.", - "from": "From is the MTU to migrate from.", + "to": "to is the MTU to migrate to.", + "from": "from is the MTU to migrate from.", } func (MTUMigrationValues) SwaggerDoc() map[string]string { @@ -2031,8 +2111,8 @@ func (NetworkList) SwaggerDoc() map[string]string { var map_NetworkMigration = map[string]string{ "": "NetworkMigration represents the network migration status.", - "networkType": "NetworkType is the target plugin that is being deployed. DEPRECATED: network type migration is no longer supported, so this should always be unset.", - "mtu": "MTU is the MTU configuration that is being deployed.", + "networkType": "networkType is the target plugin that is being deployed. 
DEPRECATED: network type migration is no longer supported, so this should always be unset.", + "mtu": "mtu is the MTU configuration that is being deployed.", } func (NetworkMigration) SwaggerDoc() map[string]string { @@ -2043,7 +2123,7 @@ var map_NetworkSpec = map[string]string{ "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", - "networkType": "NetworkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", + "networkType": "networkType is the plugin that is to be deployed (e.g. OVNKubernetes). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OVNKubernetes This field is immutable after installation.", "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. 
This parameter can be updated after the cluster is installed.", "networkDiagnostics": "networkDiagnostics defines network diagnostics configuration.\n\nTakes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics flag in network.operator.openshift.io is set to true, the network diagnostics feature will be disabled.", @@ -2057,9 +2137,9 @@ var map_NetworkStatus = map[string]string{ "": "NetworkStatus is the current network configuration.", "clusterNetwork": "IP address pool to use for pod IPs.", "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", - "networkType": "NetworkType is the plugin that is deployed (e.g. OVNKubernetes).", - "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", - "migration": "Migration contains the cluster network migration configuration.", + "networkType": "networkType is the plugin that is deployed (e.g. OVNKubernetes).", + "clusterNetworkMTU": "clusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "migration contains the cluster network migration configuration.", "conditions": "conditions represents the observations of a network.config current state. 
Known .status.conditions.type are: \"NetworkDiagnosticsAvailable\"", } @@ -2088,8 +2168,8 @@ func (NodeList) SwaggerDoc() map[string]string { } var map_NodeSpec = map[string]string{ - "cgroupMode": "CgroupMode determines the cgroups version on the node", - "workerLatencyProfile": "WorkerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", + "cgroupMode": "cgroupMode determines the cgroups version on the node", + "workerLatencyProfile": "workerLatencyProfile determins the how fast the kubelet is updating the status and corresponding reaction of the cluster", "minimumKubeletVersion": "minimumKubeletVersion is the lowest version of a kubelet that can join the cluster. Specifically, the apiserver will deny most authorization requests of kubelets that are older than the specified version, only allowing the kubelet to get and update its node object, and perform subjectaccessreviews. This means any kubelet that attempts to join the cluster will not be able to run any assigned workloads, and will eventually be marked as not ready. Its max length is 8, so maximum version allowed is either \"9.999.99\" or \"99.99.99\". Since the kubelet reports the version of the kubernetes release, not Openshift, this field references the underlying kubernetes version this version of Openshift is based off of. In other words: if an admin wishes to ensure no nodes run an older version than Openshift 4.17, then they should set the minimumKubeletVersion to 1.30.0. When comparing versions, the kubelet's version is stripped of any contents outside of major.minor.patch version. Thus, a kubelet with version \"1.0.0-ec.0\" will be compatible with minimumKubeletVersion \"1.0.0\" or earlier.", } @@ -2437,7 +2517,7 @@ func (TemplateReference) SwaggerDoc() map[string]string { var map_Proxy = map[string]string{ "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. 
The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec holds user-settable values for the proxy configuration", + "spec": "spec holds user-settable values for the proxy configuration", "status": "status holds observed values from the cluster. They may not be overridden.", } @@ -2512,7 +2592,7 @@ var map_SchedulerSpec = map[string]string{ "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", "profileCustomizations": "profileCustomizations contains configuration for modifying the default behavior of existing scheduler profiles.", "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. 
For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", - "mastersSchedulable": "MastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", + "mastersSchedulable": "mastersSchedulable allows masters nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. 
Please turn on this field after doing due diligence.", } func (SchedulerSpec) SwaggerDoc() map[string]string { @@ -2520,8 +2600,8 @@ func (SchedulerSpec) SwaggerDoc() map[string]string { } var map_FeatureGateTests = map[string]string{ - "featureGate": "FeatureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", - "tests": "Tests contains an item for every TestName", + "featureGate": "featureGate is the name of the FeatureGate as it appears in The FeatureGate CR instance.", + "tests": "tests contains an item for every TestName", } func (FeatureGateTests) SwaggerDoc() map[string]string { @@ -2529,7 +2609,7 @@ func (FeatureGateTests) SwaggerDoc() map[string]string { } var map_TestDetails = map[string]string{ - "testName": "TestName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", + "testName": "testName is the name of the test as it appears in junit XMLs. It does not include the suite name since the same test can be executed in many suites.", } func (TestDetails) SwaggerDoc() map[string]string { @@ -2547,7 +2627,7 @@ func (TestReporting) SwaggerDoc() map[string]string { } var map_TestReportingSpec = map[string]string{ - "testsForFeatureGates": "TestsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", + "testsForFeatureGates": "testsForFeatureGates is a list, indexed by FeatureGate and includes information about testing.", } func (TestReportingSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go index 65eb5c1f75..e52a2e5c53 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -24,18 +24,16 @@ type Backup struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable 
values for configuration - // +kubebuilder:validation:Required // +required Spec BackupSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. - // +kubebuilder:validation:Optional // +optional Status BackupStatus `json:"status"` } type BackupSpec struct { // etcd specifies the configuration for periodic backups of the etcd cluster - // +kubebuilder:validation:Required + // +required EtcdBackupSpec EtcdBackupSpec `json:"etcd"` } @@ -45,12 +43,11 @@ type BackupStatus struct { // EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator type EtcdBackupSpec struct { - // Schedule defines the recurring backup schedule in Cron format + // schedule defines the recurring backup schedule in Cron format // every 2 hours: 0 */2 * * * // every day at 3am: 0 3 * * * // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. // The current default is "no backups", but will change in the future. 
- // +kubebuilder:validation:Optional // +optional // +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$` Schedule string `json:"schedule"` @@ -73,7 +70,6 @@ type EtcdBackupSpec struct { // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. // If not specified, this will default to the time zone of the kube-controller-manager process. // See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones - // +kubebuilder:validation:Optional // +optional // +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$` TimeZone string `json:"timeZone"` @@ -84,17 +80,15 @@ type EtcdBackupSpec struct { // [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires // (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. 
It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset) - // RetentionPolicy defines the retention policy for retaining and deleting existing backups. - // +kubebuilder:validation:Optional + // retentionPolicy defines the retention policy for retaining and deleting existing backups. // +optional RetentionPolicy RetentionPolicy `json:"retentionPolicy"` - // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the // etcd backup files would be saved // The PVC itself must always be created in the "openshift-etcd" namespace // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. // In the future this would be backups saved across the control-plane master nodes. - // +kubebuilder:validation:Optional // +optional PVCName string `json:"pvcName"` } @@ -115,45 +109,40 @@ const ( // This struct is a discriminated union that allows users to select the type of retention policy from the supported types. // +union type RetentionPolicy struct { - // RetentionType sets the type of retention policy. + // retentionType sets the type of retention policy. // Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. // The current default is RetentionNumber with 15 backups kept. 
// +unionDiscriminator // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize" RetentionType RetentionType `json:"retentionType"` - // RetentionNumber configures the retention policy based on the number of backups - // +kubebuilder:validation:Optional + // retentionNumber configures the retention policy based on the number of backups // +optional RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"` - // RetentionSize configures the retention policy based on the size of backups - // +kubebuilder:validation:Optional + // retentionSize configures the retention policy based on the size of backups // +optional RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"` } // RetentionNumberConfig specifies the configuration of the retention policy on the number of backups type RetentionNumberConfig struct { - // MaxNumberOfBackups defines the maximum number of backups to retain. + // maxNumberOfBackups defines the maximum number of backups to retain. // If the existing number of backups saved is equal to MaxNumberOfBackups then // the oldest backup will be removed before a new backup is initiated. // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Required // +required MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"` } // RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups type RetentionSizeConfig struct { - // MaxSizeOfBackupsGb defines the total size in GB of backups to retain. + // maxSizeOfBackupsGb defines the total size in GB of backups to retain. // If the current total size backups exceeds MaxSizeOfBackupsGb then // the oldest backup will be removed before a new backup is initiated. 
// +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Required // +required MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go index 14650fd48f..5eaeeea736 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_image_policy.go @@ -24,7 +24,7 @@ type ClusterImagePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec contains the configuration for the cluster image policy. - // +kubebuilder:validation:Required + // +required Spec ClusterImagePolicySpec `json:"spec"` // status contains the observed state of the resource. // +optional @@ -44,13 +44,13 @@ type ClusterImagePolicySpec struct { // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxItems=256 // +listType=set Scopes []ImageScope `json:"scopes"` // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. 
- // +kubebuilder:validation:Required + // +required Policy Policy `json:"policy"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go index a177ddb0d6..7f57d88f91 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go @@ -23,7 +23,7 @@ type ImagePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec ImagePolicySpec `json:"spec"` // status contains the observed state of the resource. // +optional @@ -43,13 +43,13 @@ type ImagePolicySpec struct { // If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. // For additional details about the format, please refer to the document explaining the docker transport field, // which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxItems=256 // +listType=set Scopes []ImageScope `json:"scopes"` // policy contains configuration to allow scopes to be verified, and defines how // images not matching the verification policy will be treated. - // +kubebuilder:validation:Required + // +required Policy Policy `json:"policy"` } @@ -62,7 +62,7 @@ type ImageScope string // Policy defines the verification policy for the items in the scopes list. type Policy struct { // rootOfTrust specifies the root of trust for the policy. - // +kubebuilder:validation:Required + // +required RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"` // signedIdentity specifies what image identity the signature claims about the image. 
The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity, the default matchPolicy is "MatchRepoDigestOrExact". // +optional @@ -78,7 +78,7 @@ type PolicyRootOfTrust struct { // "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. // "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required PolicyType PolicyType `json:"policyType"` // publicKey defines the root of trust based on a sigstore public key. // +optional @@ -102,7 +102,7 @@ const ( type PublicKey struct { // keyData contains inline base64-encoded data for the PEM format public key. // KeyData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 KeyData []byte `json:"keyData"` // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. @@ -116,16 +116,16 @@ type PublicKey struct { type FulcioCAWithRekor struct { // fulcioCAData contains inline base64-encoded data for the PEM format fulcio CA. // fulcioCAData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 FulcioCAData []byte `json:"fulcioCAData"` // rekorKeyData contains inline base64-encoded data for the PEM format from the Rekor public key. // rekorKeyData must be at most 8192 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=8192 RekorKeyData []byte `json:"rekorKeyData"` // fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration. 
- // +kubebuilder:validation:Required + // +required FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"` } @@ -133,12 +133,12 @@ type FulcioCAWithRekor struct { type PolicyFulcioSubject struct { // oidcIssuer contains the expected OIDC issuer. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on an URL inside the client-provided ID token. // Example: "https://expected.OIDC.issuer/" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL" OIDCIssuer string `json:"oidcIssuer"` // signedEmail holds the email address the the Fulcio certificate is issued for. // Example: "expected-signing-user@example.com" - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" SignedEmail string `json:"signedEmail"` } @@ -157,7 +157,7 @@ type PolicyIdentity struct { // "ExactRepository" means that the identity in the signature must be in the same repository as a specific identity specified by "repository". // "RemapIdentity" means that the signature must be in the same as the remapped image identity. Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the the image identity matches the specified remapPrefix. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required MatchPolicy IdentityMatchPolicy `json:"matchPolicy"` // exactRepository is required if matchPolicy is set to "ExactRepository". // +optional @@ -175,7 +175,7 @@ type IdentityRepositoryPrefix string type PolicyMatchExactRepository struct { // repository is the reference of the image identity to be matched. 
// The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox - // +kubebuilder:validation:Required + // +required Repository IdentityRepositoryPrefix `json:"repository"` } @@ -186,12 +186,12 @@ type PolicyMatchRemapIdentity struct { // The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. - // +kubebuilder:validation:Required + // +required Prefix IdentityRepositoryPrefix `json:"prefix"` // signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces, // or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. // For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox. 
- // +kubebuilder:validation:Required + // +required SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"` } diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go index 171e96d5b8..3ae4de157c 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -24,7 +24,7 @@ type InsightsDataGather struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec InsightsDataGatherSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. // +optional diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-CustomNoUpgrade.crd.yaml index 48a6aa7dcb..aba325d883 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-CustomNoUpgrade.crd.yaml @@ -51,23 +51,23 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes. 
type: string retentionPolicy: - description: RetentionPolicy defines the retention policy for + description: retentionPolicy defines the retention policy for retaining and deleting existing backups. properties: retentionNumber: - description: RetentionNumber configures the retention policy + description: retentionNumber configures the retention policy based on the number of backups properties: maxNumberOfBackups: description: |- - MaxNumberOfBackups defines the maximum number of backups to retain. + maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -76,12 +76,12 @@ spec: - maxNumberOfBackups type: object retentionSize: - description: RetentionSize configures the retention policy + description: retentionSize configures the retention policy based on the size of backups properties: maxSizeOfBackupsGb: description: |- - MaxSizeOfBackupsGb defines the total size in GB of backups to retain. + maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -99,7 +99,7 @@ spec: - RetentionNumber - RetentionSize description: |- - RetentionType sets the type of retention policy. + retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept. 
@@ -109,7 +109,7 @@ spec: type: object schedule: description: |- - Schedule defines the recurring backup schedule in Cron format + schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml index 4fa66b80aa..62339318c4 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml @@ -51,23 +51,23 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes. type: string retentionPolicy: - description: RetentionPolicy defines the retention policy for + description: retentionPolicy defines the retention policy for retaining and deleting existing backups. 
properties: retentionNumber: - description: RetentionNumber configures the retention policy + description: retentionNumber configures the retention policy based on the number of backups properties: maxNumberOfBackups: description: |- - MaxNumberOfBackups defines the maximum number of backups to retain. + maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -76,12 +76,12 @@ spec: - maxNumberOfBackups type: object retentionSize: - description: RetentionSize configures the retention policy + description: retentionSize configures the retention policy based on the size of backups properties: maxSizeOfBackupsGb: description: |- - MaxSizeOfBackupsGb defines the total size in GB of backups to retain. + maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -99,7 +99,7 @@ spec: - RetentionNumber - RetentionSize description: |- - RetentionType sets the type of retention policy. + retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept. 
@@ -109,7 +109,7 @@ spec: type: object schedule: description: |- - Schedule defines the recurring backup schedule in Cron format + schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-TechPreviewNoUpgrade.crd.yaml index 41d03aa7b5..7ce67bdaf3 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-TechPreviewNoUpgrade.crd.yaml @@ -51,23 +51,23 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes. type: string retentionPolicy: - description: RetentionPolicy defines the retention policy for + description: retentionPolicy defines the retention policy for retaining and deleting existing backups. 
properties: retentionNumber: - description: RetentionNumber configures the retention policy + description: retentionNumber configures the retention policy based on the number of backups properties: maxNumberOfBackups: description: |- - MaxNumberOfBackups defines the maximum number of backups to retain. + maxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -76,12 +76,12 @@ spec: - maxNumberOfBackups type: object retentionSize: - description: RetentionSize configures the retention policy + description: retentionSize configures the retention policy based on the size of backups properties: maxSizeOfBackupsGb: description: |- - MaxSizeOfBackupsGb defines the total size in GB of backups to retain. + maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated. minimum: 1 @@ -99,7 +99,7 @@ spec: - RetentionNumber - RetentionSize description: |- - RetentionType sets the type of retention policy. + retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept. 
@@ -109,7 +109,7 @@ spec: type: object schedule: description: |- - Schedule defines the recurring backup schedule in Cron format + schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 55468f38da..e6accce0d7 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -41,10 +41,10 @@ func (BackupSpec) SwaggerDoc() map[string]string { var map_EtcdBackupSpec = map[string]string{ "": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator", - "schedule": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", + "schedule": "schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. 
See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", - "retentionPolicy": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.", - "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "retentionPolicy": "retentionPolicy defines the retention policy for retaining and deleting existing backups.", + "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", } func (EtcdBackupSpec) SwaggerDoc() map[string]string { @@ -53,7 +53,7 @@ func (EtcdBackupSpec) SwaggerDoc() map[string]string { var map_RetentionNumberConfig = map[string]string{ "": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups", - "maxNumberOfBackups": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", + "maxNumberOfBackups": "maxNumberOfBackups defines the maximum number of backups to retain. 
If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", } func (RetentionNumberConfig) SwaggerDoc() map[string]string { @@ -62,9 +62,9 @@ func (RetentionNumberConfig) SwaggerDoc() map[string]string { var map_RetentionPolicy = map[string]string{ "": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.", - "retentionType": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.", - "retentionNumber": "RetentionNumber configures the retention policy based on the number of backups", - "retentionSize": "RetentionSize configures the retention policy based on the size of backups", + "retentionType": "retentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. 
The current default is RetentionNumber with 15 backups kept.", + "retentionNumber": "retentionNumber configures the retention policy based on the number of backups", + "retentionSize": "retentionSize configures the retention policy based on the size of backups", } func (RetentionPolicy) SwaggerDoc() map[string]string { @@ -73,7 +73,7 @@ func (RetentionPolicy) SwaggerDoc() map[string]string { var map_RetentionSizeConfig = map[string]string{ "": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups", - "maxSizeOfBackupsGb": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", + "maxSizeOfBackupsGb": "maxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", } func (RetentionSizeConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go index 24a5dbadcd..977fcbda97 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_link.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go @@ -56,7 +56,7 @@ type ApplicationMenuSpec struct { // This can be any text that will appear as a subheading in the application menu dropdown. // A new section will be created if the text does not match text of an existing section. Section string `json:"section"` - // imageUrl is the URL for the icon used in front of the link in the application menu. + // imageURL is the URL for the icon used in front of the link in the application menu. // The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels. 
// +optional ImageURL string `json:"imageURL,omitempty"` diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index 24954687d5..632e13c722 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -26,7 +26,8 @@ type ConsolePlugin struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required + // spec contains the desired configuration for the console plugin. + // +required Spec ConsolePluginSpec `json:"spec"` } @@ -34,20 +35,146 @@ type ConsolePlugin struct { type ConsolePluginSpec struct { // displayName is the display name of the plugin. // The dispalyName should be between 1 and 128 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 DisplayName string `json:"displayName"` // backend holds the configuration of backend which is serving console's plugin . - // +kubebuilder:validation:Required + // +required Backend ConsolePluginBackend `json:"backend"` // proxy is a list of proxies that describe various service type // to which the plugin needs to connect to. + // +listType=atomic // +optional Proxy []ConsolePluginProxy `json:"proxy,omitempty"` // i18n is the configuration of plugin's localization resources. // +optional I18n ConsolePluginI18n `json:"i18n"` + // contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. + // Each directive specifies a list of values, appropriate for the given directive type, + // for example a list of remote endpoints for fetch directives such as ScriptSrc. 
+ // Console web application uses CSP to detect and mitigate certain types of attacks, + // such as cross-site scripting (XSS) and data injection attacks. + // Dynamic plugins should specify this field if need to load assets from outside + // the cluster or if violation reports are observed. Dynamic plugins should always prefer + // loading their assets from within the cluster, either by vendoring them, or fetching + // from a cluster service. + // CSP violation reports can be viewed in the browser's console logs during development and + // testing of the plugin in the OpenShift web console. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. + // Each of the available directives may be defined only once in the list. + // The value 'self' is automatically included in all fetch directives by the OpenShift web + // console's backend. + // For more information about the CSP directives, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy + // + // The OpenShift web console server aggregates the CSP directives and values across + // its own default values and all enabled ConsolePlugin CRs, merging them into a single + // policy string that is sent to the browser via `Content-Security-Policy` HTTP response header. 
+ // + // Example: + // ConsolePlugin A directives: + // script-src: https://script1.com/, https://script2.com/ + // font-src: https://font1.com/ + // + // ConsolePlugin B directives: + // script-src: https://script2.com/, https://script3.com/ + // font-src: https://font2.com/ + // img-src: https://img1.com/ + // + // Unified set of CSP directives, passed to the OpenShift web console server: + // script-src: https://script1.com/, https://script2.com/, https://script3.com/ + // font-src: https://font1.com/, https://font2.com/ + // img-src: https://img1.com/ + // + // OpenShift web console server CSP response header: + // Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none' + // + // +openshift:enable:FeatureGate=ConsolePluginContentSecurityPolicy + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:XValidation:rule="self.map(x, x.values.map(y, y.size()).sum()).sum() < 8192",message="the total combined size of values of all directives must not exceed 8192 (8kb)" + // +listType=map + // +listMapKey=directive + // +optional + ContentSecurityPolicy []ConsolePluginCSP `json:"contentSecurityPolicy"` +} + +// DirectiveType is an enumeration of OpenShift web console supported CSP directives. +// LoadType is an enumeration of i18n loading types. +// +kubebuilder:validation:Enum:="DefaultSrc";"ScriptSrc";"StyleSrc";"ImgSrc";"FontSrc" +// +enum +type DirectiveType string + +const ( + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + DefaultSrc DirectiveType = "DefaultSrc" + // ScriptSrc directive specifies valid sources for JavaScript. 
+ // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + ScriptSrc DirectiveType = "ScriptSrc" + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + StyleSrc DirectiveType = "StyleSrc" + // ImgSrc directive specifies a valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + ImgSrc DirectiveType = "ImgSrc" + // FontSrc directive specifies valid sources for fonts loaded using @font-face. + // For more information about the FontSrcdirective, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + FontSrc DirectiveType = "FontSrc" +) + +// CSPDirectiveValue is single value for a Content-Security-Policy directive. +// Each directive value must have a maximum length of 1024 characters and must not contain +// whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. 
+// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=1024 +// +kubebuilder:validation:XValidation:rule="!self.contains(\"'\")",message="CSP directive value cannot contain a quote" +// +kubebuilder:validation:XValidation:rule="!self.matches('\\\\s')",message="CSP directive value cannot contain a whitespace" +// +kubebuilder:validation:XValidation:rule="!self.contains(',')",message="CSP directive value cannot contain a comma" +// +kubebuilder:validation:XValidation:rule="!self.contains(';')",message="CSP directive value cannot contain a semi-colon" +// +kubebuilder:validation:XValidation:rule="self != '*'",message="CSP directive value cannot be a wildcard" +type CSPDirectiveValue string + +// ConsolePluginCSP holds configuration for a specific CSP directive +type ConsolePluginCSP struct { + // directive specifies which Content-Security-Policy directive to configure. + // Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. + // DefaultSrc directive serves as a fallback for the other CSP fetch directives. + // For more information about the DefaultSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src + // ScriptSrc directive specifies valid sources for JavaScript. + // For more information about the ScriptSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src + // StyleSrc directive specifies valid sources for stylesheets. + // For more information about the StyleSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src + // ImgSrc directive specifies a valid sources of images and favicons. + // For more information about the ImgSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src + // FontSrc directive specifies valid sources for fonts loaded using @font-face. 
+ // For more information about the FontSrc directive, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src + // +required + Directive DirectiveType `json:"directive"` + // values defines an array of values to append to the console defaults for this directive. + // Each ConsolePlugin may define their own directives with their values. These will be set + // by the OpenShift web console's backend, as part of its Content-Security-Policy header. + // The array can contain at most 16 values. Each directive value must have a maximum length + // of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single + // quotes ('). The value '*' is not permitted. + // Each value in the array must be unique. + // + // +required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="each CSP directive value must be unique" + // +listType=atomic + Values []CSPDirectiveValue `json:"values"` } // LoadType is an enumeration of i18n loading types @@ -75,7 +202,7 @@ type ConsolePluginI18n struct { // When set to Preload, all localization resources are fetched when the plugin is loaded. // When set to Lazy, localization resources are lazily loaded as and when they are required by the console. // When omitted or set to the empty string, the behaviour is equivalent to Lazy type. - // +kubebuilder:validation:Required + // +required LoadType LoadType `json:"loadType"` } @@ -83,7 +210,7 @@ type ConsolePluginI18n struct { // to which console's backend will proxy the plugin's requests. type ConsolePluginProxy struct { // endpoint provides information about endpoint to which the request is proxied to. - // +kubebuilder:validation:Required + // +required Endpoint ConsolePluginProxyEndpoint `json:"endpoint"` // alias is a proxy name that identifies the plugin's proxy. 
An alias name // should be unique per plugin. The console backend exposes following @@ -95,7 +222,7 @@ type ConsolePluginProxy struct { // // /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` @@ -122,7 +249,7 @@ type ConsolePluginProxyEndpoint struct { // --- // + When handling unknown values, consumers should report an error and stop processing the plugin. // - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type ConsolePluginProxyType `json:"type"` // service is an in-cluster Service that the plugin will connect to. @@ -162,18 +289,18 @@ const ( // console's backend will proxy the plugin's requests. type ConsolePluginProxyServiceConfig struct { // name of Service that the plugin needs to connect to. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Name string `json:"name"` // namespace of Service that the plugin needs to connect to - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Namespace string `json:"namespace"` // port on which the Service that the plugin needs to connect to // is listening on. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port"` @@ -197,7 +324,7 @@ type ConsolePluginBackend struct { // --- // + When handling unknown values, consumers should report an error and stop processing the plugin. 
// - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type ConsolePluginBackendType `json:"type"` // service is a Kubernetes Service that exposes the plugin using a @@ -212,17 +339,17 @@ type ConsolePluginBackend struct { // console dynamic plugin assets. type ConsolePluginService struct { // name of Service that is serving the plugin assets. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Name string `json:"name"` // namespace of Service that is serving the plugin assets. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 Namespace string `json:"namespace"` // port on which the Service that is serving the plugin is listening to. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port"` diff --git a/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go index bb62fb8fc8..1eef701e8b 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go @@ -28,7 +28,6 @@ type ConsoleQuickStart struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ConsoleQuickStartSpec `json:"spec"` } @@ -36,7 +35,6 @@ type ConsoleQuickStart struct { // ConsoleQuickStartSpec is the desired quick start configuration. type ConsoleQuickStartSpec struct { // displayName is the display name of the Quick Start. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required DisplayName string `json:"displayName"` @@ -48,12 +46,10 @@ type ConsoleQuickStartSpec struct { // +optional Tags []string `json:"tags,omitempty"` // durationMinutes describes approximately how many minutes it will take to complete the Quick Start. - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +required DurationMinutes int `json:"durationMinutes"` // description is the description of the Quick Start. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required @@ -62,12 +58,10 @@ type ConsoleQuickStartSpec struct { // +optional Prerequisites []string `json:"prerequisites,omitempty"` // introduction describes the purpose of the Quick Start. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Introduction string `json:"introduction"` // tasks is the list of steps the user has to perform to complete the Quick Start. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinItems=1 // +required Tasks []ConsoleQuickStartTask `json:"tasks"` @@ -87,12 +81,10 @@ type ConsoleQuickStartSpec struct { // ConsoleQuickStartTask is a single step in a Quick Start. type ConsoleQuickStartTask struct { // title describes the task and is displayed as a step heading. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Title string `json:"title"` // description describes the steps needed to complete the task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Description string `json:"description"` @@ -109,12 +101,10 @@ type ConsoleQuickStartTask struct { type ConsoleQuickStartTaskReview struct { // instructions contains steps that user needs to take in order // to validate his work after going through a task. 
(includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Instructions string `json:"instructions"` // failedTaskHelp contains suggestions for a failed task review and is shown at the end of task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required FailedTaskHelp string `json:"failedTaskHelp"` @@ -123,12 +113,10 @@ type ConsoleQuickStartTaskReview struct { // ConsoleQuickStartTaskSummary contains information about a passed step. type ConsoleQuickStartTaskSummary struct { // success describes the succesfully passed task. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +required Success string `json:"success"` // failed briefly describes the unsuccessfully passed task. (includes markdown) - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=128 // +required diff --git a/vendor/github.com/openshift/api/console/v1/types_console_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_sample.go index c0175bf9ba..bd0f656969 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_sample.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_sample.go @@ -25,7 +25,7 @@ type ConsoleSample struct { metav1.ObjectMeta `json:"metadata"` // spec contains configuration for a console sample. - // +kubebuilder:validation:Required + // +required Spec ConsoleSampleSpec `json:"spec"` } @@ -35,7 +35,7 @@ type ConsoleSampleSpec struct { // title is the display name of the sample. // // It is required and must be no more than 50 characters in length. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=50 Title string `json:"title"` @@ -46,7 +46,7 @@ type ConsoleSampleSpec struct { // // The abstract is shown on the sample card tile below the title and provider // and is limited to three lines of content. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=100 Abstract string `json:"abstract"` @@ -56,7 +56,7 @@ type ConsoleSampleSpec struct { // // It is a README.md-like content for additional information, links, pre-conditions, and other instructions. // It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=4096 Description string `json:"description"` @@ -119,7 +119,7 @@ type ConsoleSampleSpec struct { // source defines where to deploy the sample service from. // The sample may be sourced from an external git repository or container image. - // +kubebuilder:validation:Required + // +required Source ConsoleSampleSource `json:"source"` } @@ -143,7 +143,7 @@ const ( type ConsoleSampleSource struct { // type of the sample, currently supported: "GitImport";"ContainerImport" // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:="GitImport";"ContainerImport" Type ConsoleSampleSourceType `json:"type"` @@ -161,7 +161,7 @@ type ConsoleSampleSource struct { // ConsoleSampleGitImportSource let the user import code from a public Git repository. type ConsoleSampleGitImportSource struct { // repository contains the reference to the actual Git repository. - // +kubebuilder:validation:Required + // +required Repository ConsoleSampleGitImportSourceRepository `json:"repository"` // service contains configuration for the Service resource created for this sample. 
// +optional @@ -183,7 +183,7 @@ type ConsoleSampleGitImportSourceRepository struct { // - https://bitbucket.org// // // The url must have a maximum length of 256 characters. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:Pattern=`^https:\/\/(github.com|gitlab.com|bitbucket.org)\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+(.git)?$` @@ -232,7 +232,7 @@ type ConsoleSampleContainerImportSource struct { // - quay.io// // - quay.io//@sha256: // - quay.io//: - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 Image string `json:"image"` diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index b7cd66da0c..d4fefaa37c 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -416,6 +416,27 @@ func (in *ConsolePluginBackend) DeepCopy() *ConsolePluginBackend { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginCSP) DeepCopyInto(out *ConsolePluginCSP) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]CSPDirectiveValue, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginCSP. +func (in *ConsolePluginCSP) DeepCopy() *ConsolePluginCSP { + if in == nil { + return nil + } + out := new(ConsolePluginCSP) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConsolePluginI18n) DeepCopyInto(out *ConsolePluginI18n) { *out = *in @@ -547,6 +568,13 @@ func (in *ConsolePluginSpec) DeepCopyInto(out *ConsolePluginSpec) { } } out.I18n = in.I18n + if in.ContentSecurityPolicy != nil { + in, out := &in.ContentSecurityPolicy, &out.ContentSecurityPolicy + *out = make([]ConsolePluginCSP, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml index 98abc7147b..250f873a09 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml @@ -137,7 +137,8 @@ consoleplugins.console.openshift.io: CRDName: consoleplugins.console.openshift.io Capability: Console Category: "" - FeatureGates: [] + FeatureGates: + - ConsolePluginContentSecurityPolicy FilenameOperatorName: "" FilenameOperatorOrdering: "90" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index c6f2070fa4..9207500fbd 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -91,7 +91,7 @@ func (ConsoleExternalLogLinkSpec) SwaggerDoc() map[string]string { var map_ApplicationMenuSpec = map[string]string{ "": "ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.", "section": "section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. 
A new section will be created if the text does not match text of an existing section.", - "imageURL": "imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", + "imageURL": "imageURL is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", } func (ApplicationMenuSpec) SwaggerDoc() map[string]string { @@ -171,6 +171,7 @@ func (ConsoleNotificationSpec) SwaggerDoc() map[string]string { var map_ConsolePlugin = map[string]string{ "": "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the desired configuration for the console plugin.", } func (ConsolePlugin) SwaggerDoc() map[string]string { @@ -187,6 +188,16 @@ func (ConsolePluginBackend) SwaggerDoc() map[string]string { return map_ConsolePluginBackend } +var map_ConsolePluginCSP = map[string]string{ + "": "ConsolePluginCSP holds configuration for a specific CSP directive", + "directive": "directive specifies which Content-Security-Policy directive to configure. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. DefaultSrc directive serves as a fallback for the other CSP fetch directives. For more information about the DefaultSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/default-src ScriptSrc directive specifies valid sources for JavaScript. 
For more information about the ScriptSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src StyleSrc directive specifies valid sources for stylesheets. For more information about the StyleSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/style-src ImgSrc directive specifies a valid sources of images and favicons. For more information about the ImgSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/img-src FontSrc directive specifies valid sources for fonts loaded using @font-face. For more information about the FontSrc directive, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/font-src", + "values": "values defines an array of values to append to the console defaults for this directive. Each ConsolePlugin may define their own directives with their values. These will be set by the OpenShift web console's backend, as part of its Content-Security-Policy header. The array can contain at most 16 values. Each directive value must have a maximum length of 1024 characters and must not contain whitespace, commas (,), semicolons (;) or single quotes ('). The value '*' is not permitted. Each value in the array must be unique.", +} + +func (ConsolePluginCSP) SwaggerDoc() map[string]string { + return map_ConsolePluginCSP +} + var map_ConsolePluginI18n = map[string]string{ "": "ConsolePluginI18n holds information on localization resources that are served by the dynamic plugin.", "loadType": "loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. 
When omitted or set to the empty string, the behaviour is equivalent to Lazy type.", @@ -251,11 +262,12 @@ func (ConsolePluginService) SwaggerDoc() map[string]string { } var map_ConsolePluginSpec = map[string]string{ - "": "ConsolePluginSpec is the desired plugin configuration.", - "displayName": "displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters.", - "backend": "backend holds the configuration of backend which is serving console's plugin .", - "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", - "i18n": "i18n is the configuration of plugin's localization resources.", + "": "ConsolePluginSpec is the desired plugin configuration.", + "displayName": "displayName is the display name of the plugin. The dispalyName should be between 1 and 128 characters.", + "backend": "backend holds the configuration of backend which is serving console's plugin .", + "proxy": "proxy is a list of proxies that describe various service type to which the plugin needs to connect to.", + "i18n": "i18n is the configuration of plugin's localization resources.", + "contentSecurityPolicy": "contentSecurityPolicy is a list of Content-Security-Policy (CSP) directives for the plugin. Each directive specifies a list of values, appropriate for the given directive type, for example a list of remote endpoints for fetch directives such as ScriptSrc. Console web application uses CSP to detect and mitigate certain types of attacks, such as cross-site scripting (XSS) and data injection attacks. Dynamic plugins should specify this field if need to load assets from outside the cluster or if violation reports are observed. Dynamic plugins should always prefer loading their assets from within the cluster, either by vendoring them, or fetching from a cluster service. 
CSP violation reports can be viewed in the browser's console logs during development and testing of the plugin in the OpenShift web console. Available directive types are DefaultSrc, ScriptSrc, StyleSrc, ImgSrc and FontSrc. Each of the available directives may be defined only once in the list. The value 'self' is automatically included in all fetch directives by the OpenShift web console's backend. For more information about the CSP directives, see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy\n\nThe OpenShift web console server aggregates the CSP directives and values across its own default values and all enabled ConsolePlugin CRs, merging them into a single policy string that is sent to the browser via `Content-Security-Policy` HTTP response header.\n\nExample:\n ConsolePlugin A directives:\n script-src: https://script1.com/, https://script2.com/\n font-src: https://font1.com/\n\n ConsolePlugin B directives:\n script-src: https://script2.com/, https://script3.com/\n font-src: https://font2.com/\n img-src: https://img1.com/\n\n Unified set of CSP directives, passed to the OpenShift web console server:\n script-src: https://script1.com/, https://script2.com/, https://script3.com/\n font-src: https://font1.com/, https://font2.com/\n img-src: https://img1.com/\n\n OpenShift web console server CSP response header:\n Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none'", } func (ConsolePluginSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index 5651bbcc9d..a0e3f0ebdf 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml 
@@ -25,3 +25,16 @@ releases: envtest-v1.31.1-linux-arm64.tar.gz: hash: 86fa42c6a3d92e438e35d6066587d0e4f36b910885e10520868959ece2fe740d99abc735f69d6ebe8920291f70d3819b169ad5ddd2db805f8f56a3b83eee3893 selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.1-linux-arm64.tar.gz + v1.31.2: + envtest-v1.31.2-darwin-amd64.tar.gz: + hash: 4356c4495be7adc311868569bd69c5c17bfdabc243db3c656ac598be87698647e59d030a5f3c659b5ee0084bb0a9d33ea1faa2f5abfe0d762ec3368877cfd17f + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-darwin-amd64.tar.gz + envtest-v1.31.2-darwin-arm64.tar.gz: + hash: e1a759927343dfbbdff2909b7ea0046eb5c6840aea763b8d5d8229931fa35dcdcd5659fdace7a4eab1e41bc0b04c683aa96508f26aa38b3b5d3945799cb02324 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-darwin-arm64.tar.gz + envtest-v1.31.2-linux-amd64.tar.gz: + hash: c9efa849326afc471aff9ee17109491fe3e4d6d76b6d24e6ee8787ef44776abdc57ce6e96f013abf86c91d4ee94660e617a1623d9a71dd95238b6b6bd800aef7 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-linux-amd64.tar.gz + envtest-v1.31.2-linux-arm64.tar.gz: + hash: f6ad42b701537ddfd6873e9700f8e73927763878eaf36a5437d71fb62bffda91ce7f502e13f9ef4b508d37973ccddd3d847eba0d7150f7acb5495fd82558fbad + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.31.2-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 9911208250..b48c03a50c 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -6,24 +6,28 @@ | MachineAPIMigration| | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | MultiArchInstallAzure| | | | | | | +| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | | GatewayAPI| | | Enabled | Enabled | | | +| NewOLM| | Enabled | | Enabled | | Enabled | | 
AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | -| AdditionalRoutingCapabilities| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled | +| CPMSMachineNamePrefix| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | +| ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| HighlyAvailableArbiter| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | | InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | +| KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | | MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | | MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | @@ -31,8 +35,8 @@ | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | | NetworkSegmentation| | | Enabled | Enabled | Enabled | Enabled | -| NewOLM| | | Enabled | Enabled | Enabled | Enabled | | NodeSwap| | | Enabled | Enabled | 
Enabled | Enabled | +| NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | Enabled | Enabled | | OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled | | PersistentIPsForVirtualization| | | Enabled | Enabled | Enabled | Enabled | @@ -48,12 +52,13 @@ | UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled | | UserNamespacesPodSecurityStandards| | | Enabled | Enabled | Enabled | Enabled | | UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled | +| VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled | -| VSphereMultiVCenters| | | Enabled | Enabled | Enabled | Enabled | | VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | | AWSEFSDriverVolumeMetrics| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -78,5 +83,6 @@ | SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereDriverConfiguration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereMultiVCenters| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go 
b/vendor/github.com/openshift/api/features/features.go index e55ec593d7..d79959e8cf 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -36,6 +36,14 @@ func AllFeatureSets() map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGat var ( allFeatureGates = map[ClusterProfileName]map[configv1.FeatureSet]*FeatureGateEnabledDisabled{} + FeatureGateConsolePluginCSP = newFeatureGate("ConsolePluginContentSecurityPolicy"). + reportProblemsToJiraComponent("Management Console"). + contactPerson("jhadvig"). + productScope(ocpSpecific). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enhancementPR("https://github.com/openshift/enhancements/pull/1706"). + mustRegister() + FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). reportProblemsToJiraComponent("apiserver-auth"). contactPerson("stlaz"). @@ -76,14 +84,6 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() - FeatureGateCSIDriverSharedResource = newFeatureGate("CSIDriverSharedResource"). - reportProblemsToJiraComponent("builds"). - contactPerson("adkaplan"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1056"). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). - mustRegister() - FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). reportProblemsToJiraComponent("builds"). contactPerson("adkaplan"). @@ -195,12 +195,20 @@ var ( enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal"). + reportProblemsToJiraComponent("splat"). + contactPerson("jcpowermac"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1677"). 
+ enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateVSphereMultiVCenters = newFeatureGate("VSphereMultiVCenters"). reportProblemsToJiraComponent("splat"). contactPerson("vr4manta"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). @@ -219,6 +227,14 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() + FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix"). + reportProblemsToJiraComponent("Cloud Compute / ControlPlaneMachineSet"). + contactPerson("chiragkyal"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1714"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). @@ -240,7 +256,7 @@ var ( contactPerson("jcaamano"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). mustRegister() FeatureGateRouteAdvertisements = newFeatureGate("RouteAdvertisements"). @@ -487,7 +503,7 @@ var ( contactPerson("joe"). productScope(ocpSpecific). enhancementPR(legacyFeatureGateWithoutEnhancement). - enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default). 
mustRegister() FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). @@ -670,4 +686,36 @@ var ( enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). enhancementPR("https://github.com/openshift/enhancements/pull/1697"). mustRegister() + + FeatureGateNutanixMultiSubnets = newFeatureGate("NutanixMultiSubnets"). + reportProblemsToJiraComponent("Cloud Compute / Nutanix Provider"). + contactPerson("yanhli"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1711"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateKMSEncryptionProvider = newFeatureGate("KMSEncryptionProvider"). + reportProblemsToJiraComponent("kube-apiserver"). + contactPerson("swghosh"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1682"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateHighlyAvailableArbiter = newFeatureGate("HighlyAvailableArbiter"). + reportProblemsToJiraComponent("TwoNode / Arbiter"). + contactPerson("eggfoobar"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1674"). + enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade). + mustRegister() + + FeatureGateCVOConfiguration = newFeatureGate("ClusterVersionOperatorConfiguration"). + reportProblemsToJiraComponent("Cluster Version Operator"). + contactPerson("dhurta"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1492"). + enableIn(configv1.DevPreviewNoUpgrade). 
+ mustRegister() ) diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go index 91f25fb673..4700f91140 100644 --- a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go @@ -27,7 +27,6 @@ type HelmChartRepository struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec HelmChartRepositorySpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go index 37ff581c14..8049c4fe5e 100644 --- a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go @@ -26,7 +26,6 @@ type ProjectHelmChartRepository struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ProjectHelmChartRepositorySpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto index 6b5f24cb21..dabdc6d84a 100644 --- a/vendor/github.com/openshift/api/image/v1/generated.proto +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -47,39 +47,39 @@ message Image { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // DockerImageReference is the string that can be used to pull this image. + // dockerImageReference is the string that can be used to pull this image. 
optional string dockerImageReference = 2; - // DockerImageMetadata contains metadata about this image + // dockerImageMetadata contains metadata about this image // +patchStrategy=replace // +kubebuilder:pruning:PreserveUnknownFields optional .k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; - // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" optional string dockerImageMetadataVersion = 4; - // DockerImageManifest is the raw JSON of the manifest + // dockerImageManifest is the raw JSON of the manifest optional string dockerImageManifest = 5; - // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. repeated ImageLayer dockerImageLayers = 6; - // Signatures holds all signatures of the image. + // signatures holds all signatures of the image. // +patchMergeKey=name // +patchStrategy=merge repeated ImageSignature signatures = 7; - // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. repeated bytes dockerImageSignatures = 8; - // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. optional string dockerImageManifestMediaType = 9; - // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. 
This is a part of manifest schema v2. // Will not be set when the image represents a manifest list. optional string dockerImageConfig = 10; - // DockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. // When this field is present, no DockerImageLayers should be specified. repeated ImageManifest dockerImageManifests = 11; } @@ -114,56 +114,56 @@ message ImageBlobReferences { // ImageImportSpec describes a request to import a specific image. message ImageImportSpec { - // From is the source of an image to import; only kind DockerImage is allowed + // from is the source of an image to import; only kind DockerImage is allowed optional .k8s.io.api.core.v1.ObjectReference from = 1; - // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used + // to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used optional .k8s.io.api.core.v1.LocalObjectReference to = 2; - // ImportPolicy is the policy controlling how the image is imported + // importPolicy is the policy controlling how the image is imported optional TagImportPolicy importPolicy = 3; - // ReferencePolicy defines how other components should consume the image + // referencePolicy defines how other components should consume the image optional TagReferencePolicy referencePolicy = 5; - // IncludeManifest determines if the manifest for each image is returned in the response + // includeManifest determines if the manifest for each image is returned in the response optional bool includeManifest = 4; } // ImageImportStatus describes the result of an image import. 
message ImageImportStatus { - // Status is the status of the image import, including errors encountered while retrieving the image + // status is the status of the image import, including errors encountered while retrieving the image optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; - // Image is the metadata of that image, if the image was located + // image is the metadata of that image, if the image was located optional Image image = 2; - // Tag is the tag this image was located under, if any + // tag is the tag this image was located under, if any optional string tag = 3; - // Manifests holds sub-manifests metadata when importing a manifest list + // manifests holds sub-manifests metadata when importing a manifest list repeated Image manifests = 4; } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. message ImageLayer { - // Name of the layer as defined by the underlying store. + // name of the layer as defined by the underlying store. optional string name = 1; - // Size of the layer in bytes as defined by the underlying store. + // size of the layer in bytes as defined by the underlying store. optional int64 size = 2; - // MediaType of the referenced object. + // mediaType of the referenced object. optional string mediaType = 3; } // ImageLayerData contains metadata about an image layer. message ImageLayerData { - // Size of the layer in bytes as defined by the underlying store. This field is + // size of the layer in bytes as defined by the underlying store. This field is // optional if the necessary information about size is not available. optional int64 size = 1; - // MediaType of the referenced object. + // mediaType of the referenced object. 
optional string mediaType = 2; } @@ -176,7 +176,7 @@ message ImageList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of images + // items is a list of images repeated Image items = 2; } @@ -194,23 +194,23 @@ message ImageLookupPolicy { // ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular // Image object. message ImageManifest { - // Digest is the unique identifier for the manifest. It refers to an Image object. + // digest is the unique identifier for the manifest. It refers to an Image object. optional string digest = 1; - // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. optional string mediaType = 2; - // ManifestSize represents the size of the raw object contents, in bytes. + // manifestSize represents the size of the raw object contents, in bytes. optional int64 manifestSize = 3; - // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. optional string architecture = 4; - // OS specifies the operating system, for example `linux`. + // os specifies the operating system, for example `linux`. optional string os = 5; - // Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU + // variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU // variant of the ARM CPU. 
optional string variant = 6; } @@ -234,7 +234,7 @@ message ImageSignature { // Required: An opaque binary string which is an image's signature. optional bytes content = 3; - // Conditions represent the latest available observations of a signature's current state. + // conditions represent the latest available observations of a signature's current state. // +patchMergeKey=type // +patchStrategy=merge repeated SignatureCondition conditions = 4; @@ -280,11 +280,11 @@ message ImageStream { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec describes the desired state of this stream + // spec describes the desired state of this stream // +optional optional ImageStreamSpec spec = 2; - // Status describes the current state of this stream + // status describes the current state of this stream // +optional optional ImageStreamStatus status = 3; } @@ -309,7 +309,7 @@ message ImageStreamImage { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Image associated with the ImageStream and image name. + // image associated with the ImageStream and image name. optional Image image = 2; } @@ -329,36 +329,36 @@ message ImageStreamImport { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec is a description of the images that the user wishes to import + // spec is a description of the images that the user wishes to import optional ImageStreamImportSpec spec = 2; - // Status is the result of importing the image + // status is the result of importing the image optional ImageStreamImportStatus status = 3; } // ImageStreamImportSpec defines what images should be imported. 
message ImageStreamImportSpec { - // Import indicates whether to perform an import - if so, the specified tags are set on the spec + // import indicates whether to perform an import - if so, the specified tags are set on the spec // and status of the image stream defined by the type meta. optional bool import = 1; - // Repository is an optional import of an entire container image repository. A maximum limit on the + // repository is an optional import of an entire container image repository. A maximum limit on the // number of tags imported this way is imposed by the server. optional RepositoryImportSpec repository = 2; - // Images are a list of individual images to import. + // images are a list of individual images to import. repeated ImageImportSpec images = 3; } // ImageStreamImportStatus contains information about the status of an image stream import. message ImageStreamImportStatus { - // Import is the image stream that was successfully updated or created when 'to' was set. + // import is the image stream that was successfully updated or created when 'to' was set. 
optional ImageStream import = 1; - // Repository is set if spec.repository was set to the outcome of the import + // repository is set if spec.repository was set to the outcome of the import optional RepositoryImportStatus repository = 2; - // Images is set with the result of importing spec.images + // images is set with the result of importing spec.images repeated ImageImportStatus images = 3; } @@ -389,7 +389,7 @@ message ImageStreamList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of imageStreams + // items is a list of imageStreams repeated ImageStream items = 2; } @@ -409,10 +409,10 @@ message ImageStreamMapping { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Image is a container image. + // image is a container image. optional Image image = 2; - // Tag is a string value this image can be located with inside the stream. + // tag is a string value this image can be located with inside the stream. optional string tag = 3; } @@ -434,16 +434,16 @@ message ImageStreamSpec { // ImageStreamStatus contains information about the state of this image stream. message ImageStreamStatus { - // DockerImageRepository represents the effective location this stream may be accessed at. + // dockerImageRepository represents the effective location this stream may be accessed at. // May be empty until the server determines where the repository is located optional string dockerImageRepository = 1; - // PublicDockerImageRepository represents the public location from where the image can + // publicDockerImageRepository represents the public location from where the image can // be pulled outside the cluster. 
This field may be empty if the administrator // has not exposed the integrated registry externally. optional string publicDockerImageRepository = 3; - // Tags are a historical record of images associated with each tag. The first entry in the + // tags are a historical record of images associated with each tag. The first entry in the // TagEvent array is the currently tagged image. // +patchMergeKey=tag // +patchStrategy=merge @@ -496,7 +496,7 @@ message ImageStreamTagList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of image stream tags + // items is the list of image stream tags repeated ImageStreamTag items = 2; } @@ -543,46 +543,46 @@ message ImageTagList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of image stream tags + // items is the list of image stream tags repeated ImageTag items = 2; } // NamedTagEventList relates a tag to its image history. message NamedTagEventList { - // Tag is the tag for which the history is recorded + // tag is the tag for which the history is recorded optional string tag = 1; // Standard object's metadata. repeated TagEvent items = 2; - // Conditions is an array of conditions that apply to the tag event list. + // conditions is an array of conditions that apply to the tag event list. repeated TagEventCondition conditions = 3; } // RepositoryImportSpec describes a request to import images from a container image repository. 
message RepositoryImportSpec { - // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed optional .k8s.io.api.core.v1.ObjectReference from = 1; - // ImportPolicy is the policy controlling how the image is imported + // importPolicy is the policy controlling how the image is imported optional TagImportPolicy importPolicy = 2; - // ReferencePolicy defines how other components should consume the image + // referencePolicy defines how other components should consume the image optional TagReferencePolicy referencePolicy = 4; - // IncludeManifest determines if the manifest for each image is returned in the response + // includeManifest determines if the manifest for each image is returned in the response optional bool includeManifest = 3; } // RepositoryImportStatus describes the result of an image repository import message RepositoryImportStatus { - // Status reflects whether any failure occurred during import + // status reflects whether any failure occurred during import optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; - // Images is a list of images successfully retrieved by the import of the repository. + // images is a list of images successfully retrieved by the import of the repository. repeated ImageImportStatus images = 2; - // AdditionalTags are tags that exist in the repository but were not imported because + // additionalTags are tags that exist in the repository but were not imported because // a maximum limit of automatic imports was applied. repeated string additionalTags = 3; } @@ -602,10 +602,10 @@ message SecretList { // SignatureCondition describes an image signature condition of particular kind at particular probe time. message SignatureCondition { - // Type of signature condition, Complete or Failed. 
+ // type of signature condition, Complete or Failed. optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; // Last time the condition was checked. @@ -624,7 +624,7 @@ message SignatureCondition { // SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject // of signing certificate or key. message SignatureGenericEntity { - // Organization name. + // organization name. optional string organization = 1; // Common name (e.g. openshift-signing-service). @@ -648,55 +648,55 @@ message SignatureSubject { // TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. message TagEvent { - // Created holds the time the TagEvent was created + // created holds the time the TagEvent was created optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1; - // DockerImageReference is the string that can be used to pull this image + // dockerImageReference is the string that can be used to pull this image optional string dockerImageReference = 2; - // Image is the image + // image is the image optional string image = 3; - // Generation is the spec tag generation that resulted in this tag being updated + // generation is the spec tag generation that resulted in this tag being updated optional int64 generation = 4; } // TagEventCondition contains condition information for a tag event. message TagEventCondition { - // Type of tag event condition, currently only ImportSuccess + // type of tag event condition, currently only ImportSuccess optional string type = 1; - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. optional string status = 2; - // LastTransitionTIme is the time the condition transitioned from one status to another. 
+ // lastTransitionTime is the time the condition transitioned from one status to another. optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Reason is a brief machine readable explanation for the condition's last transition. + // reason is a brief machine readable explanation for the condition's last transition. optional string reason = 4; - // Message is a human readable description of the details about last transition, complementing reason. + // message is a human readable description of the details about last transition, complementing reason. optional string message = 5; - // Generation is the spec tag generation that this status corresponds to + // generation is the spec tag generation that this status corresponds to optional int64 generation = 6; } // TagImportPolicy controls how images related to this tag will be imported. message TagImportPolicy { - // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. optional bool insecure = 1; - // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported optional bool scheduled = 2; - // ImportMode describes how to import an image manifest. + // importMode describes how to import an image manifest. optional string importMode = 3; } // TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. message TagReference { - // Name of the tag + // name of the tag optional string name = 1; // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. 
@@ -708,11 +708,11 @@ message TagReference { // can only reference a tag within this same ImageStream. optional .k8s.io.api.core.v1.ObjectReference from = 3; - // Reference states if the tag will be imported. Default value is false, which means the tag will + // reference states if the tag will be imported. Default value is false, which means the tag will // be imported. optional bool reference = 4; - // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference // is changed the generation is set to match the current stream generation (which is incremented every // time spec is changed). Other processes in the system like the image importer observe that the // generation of spec tag is newer than the generation recorded in the status and use that as a trigger @@ -722,10 +722,10 @@ message TagReference { // +optional optional int64 generation = 5; - // ImportPolicy is information that controls how images may be imported by the server. + // importPolicy is information that controls how images may be imported by the server. optional TagImportPolicy importPolicy = 6; - // ReferencePolicy defines how other components should consume the image. + // referencePolicy defines how other components should consume the image. optional TagReferencePolicy referencePolicy = 7; } @@ -733,7 +733,7 @@ message TagReference { // image change triggers in deployment configs or builds are resolved. This allows the image stream // author to control how images are accessed. message TagReferencePolicy { - // Type determines how the image pull spec should be transformed when the image stream tag is used in + // type determines how the image pull spec should be transformed when the image stream tag is used in // deployment config triggers or new builds. 
The default value is `Source`, indicating the original // location of the image should be used (if imported). The user may also specify `Local`, indicating // that the pull spec should point to the integrated container image registry and leverage the registry's diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go index 9919c0fe76..d4ee4bff69 100644 --- a/vendor/github.com/openshift/api/image/v1/types.go +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -19,7 +19,7 @@ type ImageList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of images + // items is a list of images Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -45,30 +45,30 @@ type Image struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // DockerImageReference is the string that can be used to pull this image. + // dockerImageReference is the string that can be used to pull this image. 
DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` - // DockerImageMetadata contains metadata about this image + // dockerImageMetadata contains metadata about this image // +patchStrategy=replace // +kubebuilder:pruning:PreserveUnknownFields DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` - // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + // dockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` - // DockerImageManifest is the raw JSON of the manifest + // dockerImageManifest is the raw JSON of the manifest DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` - // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + // dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"` - // Signatures holds all signatures of the image. + // signatures holds all signatures of the image. // +patchMergeKey=name // +patchStrategy=merge Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` - // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + // dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. 
DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` - // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + // dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` - // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. // Will not be set when the image represents a manifest list. DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` - // DockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // dockerImageManifests holds information about sub-manifests when the image represents a manifest list. // When this field is present, no DockerImageLayers should be specified. DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"` } @@ -76,29 +76,29 @@ type Image struct { // ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular // Image object. type ImageManifest struct { - // Digest is the unique identifier for the manifest. It refers to an Image object. + // digest is the unique identifier for the manifest. It refers to an Image object. 
Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"` - // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` - // ManifestSize represents the size of the raw object contents, in bytes. + // manifestSize represents the size of the raw object contents, in bytes. ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"` - // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + // architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"` - // OS specifies the operating system, for example `linux`. + // os specifies the operating system, for example `linux`. OS string `json:"os" protobuf:"bytes,5,opt,name=os"` - // Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU + // variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU // variant of the ARM CPU. Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"` } // ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. type ImageLayer struct { - // Name of the layer as defined by the underlying store. + // name of the layer as defined by the underlying store. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Size of the layer in bytes as defined by the underlying store. + // size of the layer in bytes as defined by the underlying store. 
LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"` - // MediaType of the referenced object. + // mediaType of the referenced object. MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"` } @@ -126,7 +126,7 @@ type ImageSignature struct { Type string `json:"type" protobuf:"bytes,2,opt,name=type"` // Required: An opaque binary string which is an image's signature. Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"` - // Conditions represent the latest available observations of a signature's current state. + // conditions represent the latest available observations of a signature's current state. // +patchMergeKey=type // +patchStrategy=merge Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` @@ -154,9 +154,9 @@ type SignatureConditionType string // SignatureCondition describes an image signature condition of particular kind at particular probe time. type SignatureCondition struct { - // Type of signature condition, Complete or Failed. + // type of signature condition, Complete or Failed. Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // Last time the condition was checked. LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` @@ -171,7 +171,7 @@ type SignatureCondition struct { // SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject // of signing certificate or key. type SignatureGenericEntity struct { - // Organization name. + // organization name. 
Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"` // Common name (e.g. openshift-signing-service). CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"` @@ -204,7 +204,7 @@ type ImageStreamList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of imageStreams + // items is a list of imageStreams Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -237,10 +237,10 @@ type ImageStream struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec describes the desired state of this stream + // spec describes the desired state of this stream // +optional Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current state of this stream + // status describes the current state of this stream // +optional Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -272,7 +272,7 @@ type ImageLookupPolicy struct { // TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. type TagReference struct { - // Name of the tag + // name of the tag Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. // +optional @@ -281,10 +281,10 @@ type TagReference struct { // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references // can only reference a tag within this same ImageStream. 
From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"` - // Reference states if the tag will be imported. Default value is false, which means the tag will + // reference states if the tag will be imported. Default value is false, which means the tag will // be imported. Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"` - // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference // is changed the generation is set to match the current stream generation (which is incremented every // time spec is changed). Other processes in the system like the image importer observe that the // generation of spec tag is newer than the generation recorded in the status and use that as a trigger @@ -293,19 +293,19 @@ type TagReference struct { // nil which will be merged with the current tag generation. // +optional Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"` - // ImportPolicy is information that controls how images may be imported by the server. + // importPolicy is information that controls how images may be imported by the server. ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image. + // referencePolicy defines how other components should consume the image. ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"` } // TagImportPolicy controls how images related to this tag will be imported. type TagImportPolicy struct { - // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + // insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. 
Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"` - // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + // scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"` - // ImportMode describes how to import an image manifest. + // importMode describes how to import an image manifest. ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"` } @@ -342,7 +342,7 @@ const ( // image change triggers in deployment configs or builds are resolved. This allows the image stream // author to control how images are accessed. type TagReferencePolicy struct { - // Type determines how the image pull spec should be transformed when the image stream tag is used in + // type determines how the image pull spec should be transformed when the image stream tag is used in // deployment config triggers or new builds. The default value is `Source`, indicating the original // location of the image should be used (if imported). The user may also specify `Local`, indicating // that the pull spec should point to the integrated container image registry and leverage the registry's @@ -355,14 +355,14 @@ type TagReferencePolicy struct { // ImageStreamStatus contains information about the state of this image stream. type ImageStreamStatus struct { - // DockerImageRepository represents the effective location this stream may be accessed at. + // dockerImageRepository represents the effective location this stream may be accessed at. 
// May be empty until the server determines where the repository is located DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"` - // PublicDockerImageRepository represents the public location from where the image can + // publicDockerImageRepository represents the public location from where the image can // be pulled outside the cluster. This field may be empty if the administrator // has not exposed the integrated registry externally. PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"` - // Tags are a historical record of images associated with each tag. The first entry in the + // tags are a historical record of images associated with each tag. The first entry in the // TagEvent array is the currently tagged image. // +patchMergeKey=tag // +patchStrategy=merge @@ -371,23 +371,23 @@ type ImageStreamStatus struct { // NamedTagEventList relates a tag to its image history. type NamedTagEventList struct { - // Tag is the tag for which the history is recorded + // tag is the tag for which the history is recorded Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"` // Standard object's metadata. Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"` - // Conditions is an array of conditions that apply to the tag event list. + // conditions is an array of conditions that apply to the tag event list. Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` } // TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. 
type TagEvent struct { - // Created holds the time the TagEvent was created + // created holds the time the TagEvent was created Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"` - // DockerImageReference is the string that can be used to pull this image + // dockerImageReference is the string that can be used to pull this image DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"` - // Image is the image + // image is the image Image string `json:"image" protobuf:"bytes,3,opt,name=image"` - // Generation is the spec tag generation that resulted in this tag being updated + // generation is the spec tag generation that resulted in this tag being updated Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"` } @@ -401,17 +401,17 @@ const ( // TagEventCondition contains condition information for a tag event. type TagEventCondition struct { - // Type of tag event condition, currently only ImportSuccess + // type of tag event condition, currently only ImportSuccess Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // LastTransitionTIme is the time the condition transitioned from one status to another. + // lastTransitionTime is the time the condition transitioned from one status to another. LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is a brief machine readable explanation for the condition's last transition. + // reason is a brief machine readable explanation for the condition's last transition. 
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Message is a human readable description of the details about last transition, complementing reason. + // message is a human readable description of the details about last transition, complementing reason. Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` - // Generation is the spec tag generation that this status corresponds to + // generation is the spec tag generation that this status corresponds to Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"` } @@ -438,9 +438,9 @@ type ImageStreamMapping struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Image is a container image. + // image is a container image. Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` - // Tag is a string value this image can be located with inside the stream. + // tag is a string value this image can be located with inside the stream. 
Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"` } @@ -500,7 +500,7 @@ type ImageStreamTagList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of image stream tags + // items is the list of image stream tags Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -555,7 +555,7 @@ type ImageTagList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of image stream tags + // items is the list of image stream tags Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -585,7 +585,7 @@ type ImageStreamImage struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Image associated with the ImageStream and image name. + // image associated with the ImageStream and image name. Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` } @@ -651,10 +651,10 @@ type ImageBlobReferences struct { // ImageLayerData contains metadata about an image layer. type ImageLayerData struct { - // Size of the layer in bytes as defined by the underlying store. This field is + // size of the layer in bytes as defined by the underlying store. This field is // optional if the necessary information about size is not available. LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"` - // MediaType of the referenced object. + // mediaType of the referenced object. 
MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` } @@ -680,82 +680,82 @@ type ImageStreamImport struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec is a description of the images that the user wishes to import + // spec is a description of the images that the user wishes to import Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status is the result of importing the image + // status is the result of importing the image Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"` } // ImageStreamImportSpec defines what images should be imported. type ImageStreamImportSpec struct { - // Import indicates whether to perform an import - if so, the specified tags are set on the spec + // import indicates whether to perform an import - if so, the specified tags are set on the spec // and status of the image stream defined by the type meta. Import bool `json:"import" protobuf:"varint,1,opt,name=import"` - // Repository is an optional import of an entire container image repository. A maximum limit on the + // repository is an optional import of an entire container image repository. A maximum limit on the // number of tags imported this way is imposed by the server. Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` - // Images are a list of individual images to import. + // images are a list of individual images to import. Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` } // ImageStreamImportStatus contains information about the status of an image stream import. type ImageStreamImportStatus struct { - // Import is the image stream that was successfully updated or created when 'to' was set. 
+ // import is the image stream that was successfully updated or created when 'to' was set. Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` - // Repository is set if spec.repository was set to the outcome of the import + // repository is set if spec.repository was set to the outcome of the import Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` - // Images is set with the result of importing spec.images + // images is set with the result of importing spec.images Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` } // RepositoryImportSpec describes a request to import images from a container image repository. type RepositoryImportSpec struct { - // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + // from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - // ImportPolicy is the policy controlling how the image is imported + // importPolicy is the policy controlling how the image is imported ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image + // referencePolicy defines how other components should consume the image ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"` - // IncludeManifest determines if the manifest for each image is returned in the response + // includeManifest determines if the manifest for each image is returned in the response IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"` } // RepositoryImportStatus describes the result of an image repository import type 
RepositoryImportStatus struct { - // Status reflects whether any failure occurred during import + // status reflects whether any failure occurred during import Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // Images is a list of images successfully retrieved by the import of the repository. + // images is a list of images successfully retrieved by the import of the repository. Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"` - // AdditionalTags are tags that exist in the repository but were not imported because + // additionalTags are tags that exist in the repository but were not imported because // a maximum limit of automatic imports was applied. AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"` } // ImageImportSpec describes a request to import a specific image. type ImageImportSpec struct { - // From is the source of an image to import; only kind DockerImage is allowed + // from is the source of an image to import; only kind DockerImage is allowed From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used + // to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"` - // ImportPolicy is the policy controlling how the image is imported + // importPolicy is the policy controlling how the image is imported ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image + // referencePolicy defines how other components should consume the image ReferencePolicy TagReferencePolicy 
`json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"` - // IncludeManifest determines if the manifest for each image is returned in the response + // includeManifest determines if the manifest for each image is returned in the response IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"` } // ImageImportStatus describes the result of an image import. type ImageImportStatus struct { - // Status is the status of the image import, including errors encountered while retrieving the image + // status is the status of the image import, including errors encountered while retrieving the image Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"` - // Image is the metadata of that image, if the image was located + // image is the metadata of that image, if the image was located Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Tag is the tag this image was located under, if any + // tag is the tag this image was located under, if any Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"` - // Manifests holds sub-manifests metadata when importing a manifest list + // manifests holds sub-manifests metadata when importing a manifest list Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"` } diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go index ec7fc2b457..e0720bec77 100644 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -27,16 +27,16 @@ func (DockerImageReference) SwaggerDoc() map[string]string { var map_Image = map[string]string{ "": "Image is an immutable representation of a container image and metadata at a point in time. 
Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", - "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", - "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", - "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", - "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", - "signatures": "Signatures holds all signatures of the image.", - "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", - "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", - "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. 
Will not be set when the image represents a manifest list.", - "dockerImageManifests": "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "dockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "dockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "dockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "dockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", + "signatures": "signatures holds all signatures of the image.", + "dockerImageSignatures": "dockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "dockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", + "dockerImageConfig": "dockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", + "dockerImageManifests": "dockerImageManifests holds information about sub-manifests when the image represents a manifest list. 
When this field is present, no DockerImageLayers should be specified.", } func (Image) SwaggerDoc() map[string]string { @@ -57,11 +57,11 @@ func (ImageBlobReferences) SwaggerDoc() map[string]string { var map_ImageImportSpec = map[string]string{ "": "ImageImportSpec describes a request to import a specific image.", - "from": "From is the source of an image to import; only kind DockerImage is allowed", - "to": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", - "importPolicy": "ImportPolicy is the policy controlling how the image is imported", - "referencePolicy": "ReferencePolicy defines how other components should consume the image", - "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", + "from": "from is the source of an image to import; only kind DockerImage is allowed", + "to": "to is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", + "importPolicy": "importPolicy is the policy controlling how the image is imported", + "referencePolicy": "referencePolicy defines how other components should consume the image", + "includeManifest": "includeManifest determines if the manifest for each image is returned in the response", } func (ImageImportSpec) SwaggerDoc() map[string]string { @@ -70,10 +70,10 @@ func (ImageImportSpec) SwaggerDoc() map[string]string { var map_ImageImportStatus = map[string]string{ "": "ImageImportStatus describes the result of an image import.", - "status": "Status is the status of the image import, including errors encountered while retrieving the image", - "image": "Image is the metadata of that image, if the image was located", - "tag": "Tag is the tag this image was located under, if any", - "manifests": "Manifests holds sub-manifests metadata when importing a manifest list", + "status": "status is the status 
of the image import, including errors encountered while retrieving the image", + "image": "image is the metadata of that image, if the image was located", + "tag": "tag is the tag this image was located under, if any", + "manifests": "manifests holds sub-manifests metadata when importing a manifest list", } func (ImageImportStatus) SwaggerDoc() map[string]string { @@ -82,9 +82,9 @@ func (ImageImportStatus) SwaggerDoc() map[string]string { var map_ImageLayer = map[string]string{ "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", - "name": "Name of the layer as defined by the underlying store.", - "size": "Size of the layer in bytes as defined by the underlying store.", - "mediaType": "MediaType of the referenced object.", + "name": "name of the layer as defined by the underlying store.", + "size": "size of the layer in bytes as defined by the underlying store.", + "mediaType": "mediaType of the referenced object.", } func (ImageLayer) SwaggerDoc() map[string]string { @@ -93,8 +93,8 @@ func (ImageLayer) SwaggerDoc() map[string]string { var map_ImageLayerData = map[string]string{ "": "ImageLayerData contains metadata about an image layer.", - "size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", - "mediaType": "MediaType of the referenced object.", + "size": "size of the layer in bytes as defined by the underlying store. 
This field is optional if the necessary information about size is not available.", + "mediaType": "mediaType of the referenced object.", } func (ImageLayerData) SwaggerDoc() map[string]string { @@ -104,7 +104,7 @@ func (ImageLayerData) SwaggerDoc() map[string]string { var map_ImageList = map[string]string{ "": "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of images", + "items": "items is a list of images", } func (ImageList) SwaggerDoc() map[string]string { @@ -122,12 +122,12 @@ func (ImageLookupPolicy) SwaggerDoc() map[string]string { var map_ImageManifest = map[string]string{ "": "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.", - "digest": "Digest is the unique identifier for the manifest. It refers to an Image object.", - "mediaType": "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.", - "manifestSize": "ManifestSize represents the size of the raw object contents, in bytes.", - "architecture": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.", - "os": "OS specifies the operating system, for example `linux`.", - "variant": "Variant is an optional field repreenting a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.", + "digest": "digest is the unique identifier for the manifest. 
It refers to an Image object.", + "mediaType": "mediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.", + "manifestSize": "manifestSize represents the size of the raw object contents, in bytes.", + "architecture": "architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.", + "os": "os specifies the operating system, for example `linux`.", + "variant": "variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.", } func (ImageManifest) SwaggerDoc() map[string]string { @@ -139,7 +139,7 @@ var map_ImageSignature = map[string]string{ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "type": "Required: Describes a type of stored blob.", "content": "Required: An opaque binary string which is an image's signature.", - "conditions": "Conditions represent the latest available observations of a signature's current state.", + "conditions": "conditions represent the latest available observations of a signature's current state.", "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", "signedClaims": "Contains claims from the signature.", "created": "If specified, it is the time of signature's creation.", @@ -154,8 +154,8 @@ func (ImageSignature) SwaggerDoc() map[string]string { var map_ImageStream = map[string]string{ "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. 
Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec describes the desired state of this stream", - "status": "Status describes the current state of this stream", + "spec": "spec describes the desired state of this stream", + "status": "status describes the current state of this stream", } func (ImageStream) SwaggerDoc() map[string]string { @@ -165,7 +165,7 @@ func (ImageStream) SwaggerDoc() map[string]string { var map_ImageStreamImage = map[string]string{ "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. 
A not found error will be returned if no such image is referenced by a tag within the ImageStream. Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"@\", where the digest is the content addressible identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operations supported on the imagestreamimage endpoint are retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "image": "Image associated with the ImageStream and image name.", + "image": "image associated with the ImageStream and image name.", } func (ImageStreamImage) SwaggerDoc() map[string]string { @@ -175,8 +175,8 @@ func (ImageStreamImage) SwaggerDoc() map[string]string { var map_ImageStreamImport = map[string]string{ "": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). 
Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is a description of the images that the user wishes to import", - "status": "Status is the result of importing the image", + "spec": "spec is a description of the images that the user wishes to import", + "status": "status is the result of importing the image", } func (ImageStreamImport) SwaggerDoc() map[string]string { @@ -185,9 +185,9 @@ func (ImageStreamImport) SwaggerDoc() map[string]string { var map_ImageStreamImportSpec = map[string]string{ "": "ImageStreamImportSpec defines what images should be imported.", - "import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", - "repository": "Repository is an optional import of an entire container image repository. A maximum limit on the number of tags imported this way is imposed by the server.", - "images": "Images are a list of individual images to import.", + "import": "import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", + "repository": "repository is an optional import of an entire container image repository. 
A maximum limit on the number of tags imported this way is imposed by the server.", + "images": "images are a list of individual images to import.", } func (ImageStreamImportSpec) SwaggerDoc() map[string]string { @@ -196,9 +196,9 @@ func (ImageStreamImportSpec) SwaggerDoc() map[string]string { var map_ImageStreamImportStatus = map[string]string{ "": "ImageStreamImportStatus contains information about the status of an image stream import.", - "import": "Import is the image stream that was successfully updated or created when 'to' was set.", - "repository": "Repository is set if spec.repository was set to the outcome of the import", - "images": "Images is set with the result of importing spec.images", + "import": "import is the image stream that was successfully updated or created when 'to' was set.", + "repository": "repository is set if spec.repository was set to the outcome of the import", + "images": "images is set with the result of importing spec.images", } func (ImageStreamImportStatus) SwaggerDoc() map[string]string { @@ -219,7 +219,7 @@ func (ImageStreamLayers) SwaggerDoc() map[string]string { var map_ImageStreamList = map[string]string{ "": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of imageStreams", + "items": "items is a list of imageStreams", } func (ImageStreamList) SwaggerDoc() map[string]string { @@ -229,8 +229,8 @@ func (ImageStreamList) SwaggerDoc() map[string]string { var map_ImageStreamMapping = map[string]string{ "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. 
This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "image": "Image is a container image.", - "tag": "Tag is a string value this image can be located with inside the stream.", + "image": "image is a container image.", + "tag": "tag is a string value this image can be located with inside the stream.", } func (ImageStreamMapping) SwaggerDoc() map[string]string { @@ -250,9 +250,9 @@ func (ImageStreamSpec) SwaggerDoc() map[string]string { var map_ImageStreamStatus = map[string]string{ "": "ImageStreamStatus contains information about the state of this image stream.", - "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", - "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", - "tags": "Tags are a historical record of images associated with each tag. 
The first entry in the TagEvent array is the currently tagged image.", + "dockerImageRepository": "dockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", + "publicDockerImageRepository": "publicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "tags": "tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", } func (ImageStreamStatus) SwaggerDoc() map[string]string { @@ -276,7 +276,7 @@ func (ImageStreamTag) SwaggerDoc() map[string]string { var map_ImageStreamTagList = map[string]string{ "": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of image stream tags", + "items": "items is the list of image stream tags", } func (ImageStreamTagList) SwaggerDoc() map[string]string { @@ -298,7 +298,7 @@ func (ImageTag) SwaggerDoc() map[string]string { var map_ImageTagList = map[string]string{ "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of image stream tags", + "items": "items is the list of image stream tags", } func (ImageTagList) SwaggerDoc() map[string]string { @@ -307,9 +307,9 @@ func (ImageTagList) SwaggerDoc() map[string]string { var map_NamedTagEventList = map[string]string{ "": "NamedTagEventList relates a tag to its image history.", - "tag": "Tag is the tag for which the history is recorded", + "tag": "tag is the tag for which the history is recorded", "items": "Standard object's metadata.", - "conditions": "Conditions is an array of conditions that apply to the tag event list.", + "conditions": "conditions is an array of conditions that apply to the tag event list.", } func (NamedTagEventList) SwaggerDoc() map[string]string { @@ -318,10 +318,10 @@ func (NamedTagEventList) SwaggerDoc() map[string]string { var map_RepositoryImportSpec = map[string]string{ "": "RepositoryImportSpec describes a request to import images from a container image repository.", - "from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", - "importPolicy": "ImportPolicy is the policy controlling how the image is imported", - "referencePolicy": "ReferencePolicy defines how other components should consume the image", - "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", + "from": "from is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", + "importPolicy": "importPolicy is the policy controlling how the image is imported", + "referencePolicy": "referencePolicy defines how other components should consume the image", + "includeManifest": "includeManifest determines if the manifest for each image is returned in the response", } func (RepositoryImportSpec) SwaggerDoc() 
map[string]string { @@ -330,9 +330,9 @@ func (RepositoryImportSpec) SwaggerDoc() map[string]string { var map_RepositoryImportStatus = map[string]string{ "": "RepositoryImportStatus describes the result of an image repository import", - "status": "Status reflects whether any failure occurred during import", - "images": "Images is a list of images successfully retrieved by the import of the repository.", - "additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", + "status": "status reflects whether any failure occurred during import", + "images": "images is a list of images successfully retrieved by the import of the repository.", + "additionalTags": "additionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", } func (RepositoryImportStatus) SwaggerDoc() map[string]string { @@ -341,8 +341,8 @@ func (RepositoryImportStatus) SwaggerDoc() map[string]string { var map_SignatureCondition = map[string]string{ "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.", - "type": "Type of signature condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", + "type": "type of signature condition, Complete or Failed.", + "status": "status of the condition, one of True, False, Unknown.", "lastProbeTime": "Last time the condition was checked.", "lastTransitionTime": "Last time the condition transit from one status to another.", "reason": "(brief) reason for the condition's last transition.", @@ -355,7 +355,7 @@ func (SignatureCondition) SwaggerDoc() map[string]string { var map_SignatureGenericEntity = map[string]string{ "": "SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject of signing certificate or key.", - "organization": "Organization name.", + "organization": 
"organization name.", "commonName": "Common name (e.g. openshift-signing-service).", } @@ -382,10 +382,10 @@ func (SignatureSubject) SwaggerDoc() map[string]string { var map_TagEvent = map[string]string{ "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", - "created": "Created holds the time the TagEvent was created", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image", - "image": "Image is the image", - "generation": "Generation is the spec tag generation that resulted in this tag being updated", + "created": "created holds the time the TagEvent was created", + "dockerImageReference": "dockerImageReference is the string that can be used to pull this image", + "image": "image is the image", + "generation": "generation is the spec tag generation that resulted in this tag being updated", } func (TagEvent) SwaggerDoc() map[string]string { @@ -394,12 +394,12 @@ func (TagEvent) SwaggerDoc() map[string]string { var map_TagEventCondition = map[string]string{ "": "TagEventCondition contains condition information for a tag event.", - "type": "Type of tag event condition, currently only ImportSuccess", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "LastTransitionTIme is the time the condition transitioned from one status to another.", - "reason": "Reason is a brief machine readable explanation for the condition's last transition.", - "message": "Message is a human readable description of the details about last transition, complementing reason.", - "generation": "Generation is the spec tag generation that this status corresponds to", + "type": "type of tag event condition, currently only ImportSuccess", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "reason is a brief machine readable 
explanation for the condition's last transition.", + "message": "message is a human readable description of the details about last transition, complementing reason.", + "generation": "generation is the spec tag generation that this status corresponds to", } func (TagEventCondition) SwaggerDoc() map[string]string { @@ -408,9 +408,9 @@ func (TagEventCondition) SwaggerDoc() map[string]string { var map_TagImportPolicy = map[string]string{ "": "TagImportPolicy controls how images related to this tag will be imported.", - "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", - "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", - "importMode": "ImportMode describes how to import an image manifest.", + "insecure": "insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", + "importMode": "importMode describes how to import an image manifest.", } func (TagImportPolicy) SwaggerDoc() map[string]string { @@ -419,13 +419,13 @@ func (TagImportPolicy) SwaggerDoc() map[string]string { var map_TagReference = map[string]string{ "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", - "name": "Name of the tag", + "name": "name of the tag", "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. 
ImageStreamTag references can only reference a tag within this same ImageStream.", - "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", - "generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", - "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.", - "referencePolicy": "ReferencePolicy defines how other components should consume the image.", + "reference": "reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. 
Legacy clients will send this value as nil which will be merged with the current tag generation.", + "importPolicy": "importPolicy is information that controls how images may be imported by the server.", + "referencePolicy": "referencePolicy defines how other components should consume the image.", } func (TagReference) SwaggerDoc() map[string]string { @@ -434,7 +434,7 @@ func (TagReference) SwaggerDoc() map[string]string { var map_TagReferencePolicy = map[string]string{ "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", - "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", + "type": "type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. 
`Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", } func (TagReferencePolicy) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go index 9b3cc21a4d..3dde9d4c3c 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/types.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go @@ -477,7 +477,7 @@ const ( // EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, // different pointers may be used type EncryptionAlibaba struct { - // Method defines the different encrytion modes available + // method defines the different encryption modes available // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `AES256`. // +kubebuilder:validation:Enum="KMS";"AES256" @@ -485,14 +485,14 @@ type EncryptionAlibaba struct { // +optional Method AlibabaEncryptionMethod `json:"method"` - // KMS (key management service) is an encryption type that holds the struct for KMS KeyID + // kms (key management service) is an encryption type that holds the struct for KMS KeyID // +optional KMS *KMSEncryptionAlibaba `json:"kms,omitempty"` } type KMSEncryptionAlibaba struct { - // KeyID holds the KMS encryption key ID - // +kubebuilder:validation:Required + // keyID holds the KMS encryption key ID + // +required // +kubebuilder:validation:MinLength=1 KeyID string `json:"keyID"` } @@ -501,7 +501,7 @@ type KMSEncryptionAlibaba struct { // Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. 
// More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm) type ImageRegistryConfigStorageAlibabaOSS struct { - // Bucket is the bucket name in which you want to store the registry's data. + // bucket is the bucket name in which you want to store the registry's data. // About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default will be autogenerated in the form of -image-registry-- @@ -510,20 +510,20 @@ type ImageRegistryConfigStorageAlibabaOSS struct { // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$` // +optional Bucket string `json:"bucket,omitempty"` - // Region is the Alibaba Cloud Region in which your bucket exists. + // region is the Alibaba Cloud Region in which your bucket exists. // For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default will be based on the installed Alibaba Cloud Region. // +optional Region string `json:"region,omitempty"` - // EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint + // endpointAccessibility specifies whether the registry use the OSS VPC internal endpoint // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `Internal`. // +kubebuilder:validation:Enum="Internal";"Public";"" // +kubebuilder:default="Internal" // +optional EndpointAccessibility EndpointAccessibility `json:"endpointAccessibility,omitempty"` - // Encryption specifies whether you would like your data encrypted on the server side. 
+ // encryption specifies whether you would like your data encrypted on the server side. // More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) // +optional Encryption *EncryptionAlibaba `json:"encryption,omitempty"` @@ -556,7 +556,7 @@ type ImageRegistryConfigStorage struct { // ibmcos represents configuration that uses IBM Cloud Object Storage. // +optional IBMCOS *ImageRegistryConfigStorageIBMCOS `json:"ibmcos,omitempty"` - // Oss represents configuration that uses Alibaba Cloud Object Storage Service. + // oss represents configuration that uses Alibaba Cloud Object Storage Service. // +optional OSS *ImageRegistryConfigStorageAlibabaOSS `json:"oss,omitempty"` // managementState indicates if the operator manages the underlying diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go index ec999f309b..f8b421ae86 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -52,8 +52,8 @@ func (ConfigList) SwaggerDoc() map[string]string { var map_EncryptionAlibaba = map[string]string{ "": "EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, different pointers may be used", - "method": "Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`.", - "kms": "KMS (key management service) is an encryption type that holds the struct for KMS KeyID", + "method": "method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. 
Currently the default is `AES256`.", + "kms": "kms (key management service) is an encryption type that holds the struct for KMS KeyID", } func (EncryptionAlibaba) SwaggerDoc() map[string]string { @@ -112,7 +112,7 @@ var map_ImageRegistryConfigStorage = map[string]string{ "pvc": "pvc represents configuration that uses a PersistentVolumeClaim.", "azure": "azure represents configuration that uses Azure Blob Storage.", "ibmcos": "ibmcos represents configuration that uses IBM Cloud Object Storage.", - "oss": "Oss represents configuration that uses Alibaba Cloud Object Storage Service.", + "oss": "oss represents configuration that uses Alibaba Cloud Object Storage Service.", "managementState": "managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed.", } @@ -122,10 +122,10 @@ func (ImageRegistryConfigStorage) SwaggerDoc() map[string]string { var map_ImageRegistryConfigStorageAlibabaOSS = map[string]string{ "": "ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration. Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)", - "bucket": "Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--", - "region": "Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). 
Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.", - "endpointAccessibility": "EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.", - "encryption": "Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)", + "bucket": "bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--", + "region": "region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.", + "endpointAccessibility": "endpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.", + "encryption": "encryption specifies whether you would like your data encrypted on the server side. 
More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)", } func (ImageRegistryConfigStorageAlibabaOSS) SwaggerDoc() map[string]string { @@ -266,7 +266,7 @@ func (ImageRegistryStatus) SwaggerDoc() map[string]string { } var map_KMSEncryptionAlibaba = map[string]string{ - "keyID": "KeyID holds the KMS encryption key ID", + "keyID": "keyID holds the KMS encryption key ID", } func (KMSEncryptionAlibaba) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go index 6d29f42e3f..cd1ba7ec56 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -38,7 +38,7 @@ type KubeAPIServerConfig struct { // DEPRECATED: consolePublicURL has been deprecated and setting it has no effect. ConsolePublicURL string `json:"consolePublicURL"` - // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! // TODO I think we should just drop this feature. UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` @@ -153,7 +153,7 @@ type UserAgentMatchRule struct { type UserAgentDenyRule struct { UserAgentMatchRule `json:",inline"` - // RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. + // rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. 
RejectionMessage string `json:"rejectionMessage"` } @@ -231,6 +231,6 @@ type KubeControllerManagerProjectConfig struct { // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. type ServiceServingCert struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` } diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go index 5ecdd05839..7b5bef143c 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -28,7 +28,7 @@ var map_KubeAPIServerConfig = map[string]string{ "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", "consolePublicURL": "DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.", - "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "userAgentMatchingConfig": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", "projectConfig": "projectConfig feeds an admission plugin", "serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. 
(If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", @@ -112,7 +112,7 @@ func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "certFile": "CertFile is a file containing a PEM-encoded certificate", + "certFile": "certFile is a file containing a PEM-encoded certificate", } func (ServiceServingCert) SwaggerDoc() map[string]string { @@ -121,7 +121,7 @@ func (ServiceServingCert) SwaggerDoc() map[string]string { var map_UserAgentDenyRule = map[string]string{ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", - "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "rejectionMessage": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", } func (UserAgentDenyRule) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go index eaf40b6ee4..c0e03c233a 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/types.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -21,7 +21,7 @@ type ExtendedArguments map[string][]string type NodeConfig struct { metav1.TypeMeta `json:",inline"` - // NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. 
+ // nodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. // If you're describing a set of static nodes to the master, this value must match one of the values in the list NodeName string `json:"nodeName"` @@ -29,40 +29,40 @@ type NodeConfig struct { // If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used NodeIP string `json:"nodeIP"` - // ServingInfo describes how to start serving + // servingInfo describes how to start serving ServingInfo ServingInfo `json:"servingInfo"` - // MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master + // masterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master MasterKubeConfig string `json:"masterKubeConfig"` - // MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master. + // masterClientConnectionOverrides provides overrides to the client connection used to connect to the master. MasterClientConnectionOverrides *ClientConnectionOverrides `json:"masterClientConnectionOverrides"` - // DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to + // dnsDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to // 'cluster.local'. DNSDomain string `json:"dnsDomain"` - // DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes + // dnsIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes // master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured // to resolve names from any other port). 
When running more complex local DNS configurations, this is often set // to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see // dnsBindAddress) or the master DNS. DNSIP string `json:"dnsIP"` - // DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. + // dnsBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. // Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need // a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured // on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other // queries to the host environments nameservers. DNSBindAddress string `json:"dnsBindAddress"` - // DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running + // dnsNameservers is a list of ip:port values of recursive nameservers to forward queries to when running // a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to // the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the // system, this value should be set to the upstream nameservers dnsmasq resolves with. DNSNameservers []string `json:"dnsNameservers"` - // DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. + // dnsRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. // Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra // nameservers to DNSNameservers if set. 
DNSRecursiveResolvConf string `json:"dnsRecursiveResolvConf"` @@ -70,57 +70,57 @@ type NodeConfig struct { // Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead DeprecatedNetworkPluginName string `json:"networkPluginName,omitempty"` - // NetworkConfig provides network options for the node + // networkConfig provides network options for the node NetworkConfig NodeNetworkConfig `json:"networkConfig"` - // VolumeDirectory is the directory that volumes will be stored under + // volumeDirectory is the directory that volumes will be stored under VolumeDirectory string `json:"volumeDirectory"` - // ImageConfig holds options that describe how to build image names for system components + // imageConfig holds options that describe how to build image names for system components ImageConfig ImageConfig `json:"imageConfig"` - // AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. + // allowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. AllowDisabledDocker bool `json:"allowDisabledDocker"` - // PodManifestConfig holds the configuration for enabling the Kubelet to + // podManifestConfig holds the configuration for enabling the Kubelet to // create pods based from a manifest file(s) placed locally on the node PodManifestConfig *PodManifestConfig `json:"podManifestConfig"` - // AuthConfig holds authn/authz configuration options + // authConfig holds authn/authz configuration options AuthConfig NodeAuthConfig `json:"authConfig"` - // DockerConfig holds Docker related configuration options. + // dockerConfig holds Docker related configuration options. 
DockerConfig DockerConfig `json:"dockerConfig"` - // KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's + // kubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's // command line arguments. These are not migrated or validated, so if you use them they may become invalid. // These values override other settings in NodeConfig which may cause invalid configurations. KubeletArguments ExtendedArguments `json:"kubeletArguments,omitempty"` - // ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's + // proxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's // command line arguments. These are not migrated or validated, so if you use them they may become invalid. // These values override other settings in NodeConfig which may cause invalid configurations. ProxyArguments ExtendedArguments `json:"proxyArguments,omitempty"` - // IPTablesSyncPeriod is how often iptable rules are refreshed + // iptablesSyncPeriod is how often iptable rules are refreshed IPTablesSyncPeriod string `json:"iptablesSyncPeriod"` - // EnableUnidling controls whether or not the hybrid unidling proxy will be set up + // enableUnidling controls whether or not the hybrid unidling proxy will be set up EnableUnidling *bool `json:"enableUnidling"` - // VolumeConfig contains options for configuring volumes on the node. + // volumeConfig contains options for configuring volumes on the node. VolumeConfig NodeVolumeConfig `json:"volumeConfig"` } // NodeVolumeConfig contains options for configuring volumes on the node. type NodeVolumeConfig struct { - // LocalQuota contains options for controlling local volume quota on the node. + // localQuota contains options for controlling local volume quota on the node. 
LocalQuota LocalQuota `json:"localQuota"` } // MasterVolumeConfig contains options for configuring volume plugins in the master node. type MasterVolumeConfig struct { - // DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true + // dynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true DynamicProvisioningEnabled *bool `json:"dynamicProvisioningEnabled"` } @@ -134,24 +134,24 @@ type LocalQuota struct { // NodeAuthConfig holds authn/authz configuration options type NodeAuthConfig struct { - // AuthenticationCacheTTL indicates how long an authentication result should be cached. + // authenticationCacheTTL indicates how long an authentication result should be cached. // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled AuthenticationCacheTTL string `json:"authenticationCacheTTL"` - // AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. + // authenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. AuthenticationCacheSize int `json:"authenticationCacheSize"` - // AuthorizationCacheTTL indicates how long an authorization result should be cached. + // authorizationCacheTTL indicates how long an authorization result should be cached. // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled AuthorizationCacheTTL string `json:"authorizationCacheTTL"` - // AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. + // authorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. 
AuthorizationCacheSize int `json:"authorizationCacheSize"` } // NodeNetworkConfig provides network options for the node type NodeNetworkConfig struct { - // NetworkPluginName is a string specifying the networking plugin + // networkPluginName is a string specifying the networking plugin NetworkPluginName string `json:"networkPluginName"` // Maximum transmission unit for the network packets MTU uint32 `json:"mtu"` @@ -159,14 +159,14 @@ type NodeNetworkConfig struct { // DockerConfig holds Docker related configuration options. type DockerConfig struct { - // ExecHandlerName is the name of the handler to use for executing + // execHandlerName is the name of the handler to use for executing // commands in containers. ExecHandlerName DockerExecHandlerType `json:"execHandlerName"` - // DockerShimSocket is the location of the dockershim socket the kubelet uses. + // dockerShimSocket is the location of the dockershim socket the kubelet uses. // Currently unix socket is supported on Linux, and tcp is supported on windows. // Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735' DockerShimSocket string `json:"dockerShimSocket"` - // DockershimRootDirectory is the dockershim root directory. + // dockerShimRootDirectory is the dockershim root directory. DockershimRootDirectory string `json:"dockerShimRootDirectory"` } @@ -197,45 +197,45 @@ type FeatureList []string type MasterConfig struct { metav1.TypeMeta `json:",inline"` - // ServingInfo describes how to start serving + // servingInfo describes how to start serving ServingInfo HTTPServingInfo `json:"servingInfo"` - // AuthConfig configures authentication options in addition to the standard + // authConfig configures authentication options in addition to the standard // oauth token and client certificate authenticators AuthConfig MasterAuthConfig `json:"authConfig"` - // AggregatorConfig has options for configuring the aggregator component of the API server. 
+ // aggregatorConfig has options for configuring the aggregator component of the API server. AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` // CORSAllowedOrigins CORSAllowedOrigins []string `json:"corsAllowedOrigins"` - // APILevels is a list of API levels that should be enabled on startup: v1 as examples + // apiLevels is a list of API levels that should be enabled on startup: v1 as examples APILevels []string `json:"apiLevels"` - // MasterPublicURL is how clients can access the OpenShift API server + // masterPublicURL is how clients can access the OpenShift API server MasterPublicURL string `json:"masterPublicURL"` - // Controllers is a list of the controllers that should be started. If set to "none", no controllers + // controllers is a list of the controllers that should be started. If set to "none", no controllers // will start automatically. The default value is "*" which will start all controllers. When // using "*", you may exclude controllers by prepending a "-" in front of their name. No other // values are recognized at this time. Controllers string `json:"controllers"` - // AdmissionConfig contains admission control plugin configuration. + // admissionConfig contains admission control plugin configuration. AdmissionConfig AdmissionConfig `json:"admissionConfig"` - // ControllerConfig holds configuration values for controllers + // controllerConfig holds configuration values for controllers ControllerConfig ControllerConfig `json:"controllerConfig"` - // EtcdStorageConfig contains information about how API resources are + // etcdStorageConfig contains information about how API resources are // stored in Etcd. These values are only relevant when etcd is the // backing store for the cluster. 
EtcdStorageConfig EtcdStorageConfig `json:"etcdStorageConfig"` - // EtcdClientInfo contains information about how to connect to etcd + // etcdClientInfo contains information about how to connect to etcd EtcdClientInfo EtcdConnectionInfo `json:"etcdClientInfo"` - // KubeletClientInfo contains information about how to connect to kubelets + // kubeletClientInfo contains information about how to connect to kubelets KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` // KubernetesMasterConfig, if present start the kubernetes master in this process @@ -248,38 +248,38 @@ type MasterConfig struct { // DNSConfig, if present start the DNS server in this process DNSConfig *DNSConfig `json:"dnsConfig"` - // ServiceAccountConfig holds options related to service accounts + // serviceAccountConfig holds options related to service accounts ServiceAccountConfig ServiceAccountConfig `json:"serviceAccountConfig"` - // MasterClients holds all the client connection information for controllers and other system components + // masterClients holds all the client connection information for controllers and other system components MasterClients MasterClients `json:"masterClients"` - // ImageConfig holds options that describe how to build image names for system components + // imageConfig holds options that describe how to build image names for system components ImageConfig ImageConfig `json:"imageConfig"` - // ImagePolicyConfig controls limits and behavior for importing images + // imagePolicyConfig controls limits and behavior for importing images ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` - // PolicyConfig holds information about where to locate critical pieces of bootstrapping policy + // policyConfig holds information about where to locate critical pieces of bootstrapping policy PolicyConfig PolicyConfig `json:"policyConfig"` - // ProjectConfig holds information about project creation and defaults + // projectConfig holds information about project 
creation and defaults ProjectConfig ProjectConfig `json:"projectConfig"` - // RoutingConfig holds information about routing and route generation + // routingConfig holds information about routing and route generation RoutingConfig RoutingConfig `json:"routingConfig"` - // NetworkConfig to be passed to the compiled in network plugin + // networkConfig to be passed to the compiled in network plugin NetworkConfig MasterNetworkConfig `json:"networkConfig"` // MasterVolumeConfig contains options for configuring volume plugins in the master node. VolumeConfig MasterVolumeConfig `json:"volumeConfig"` - // JenkinsPipelineConfig holds information about the default Jenkins template + // jenkinsPipelineConfig holds information about the default Jenkins template // used for JenkinsPipeline build strategy. JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` - // AuditConfig holds information related to auditing capabilities. + // auditConfig holds information related to auditing capabilities. AuditConfig AuditConfig `json:"auditConfig"` // DisableOpenAPI avoids starting the openapi endpoint because it is very expensive. @@ -290,11 +290,11 @@ type MasterConfig struct { // MasterAuthConfig configures authentication options in addition to the standard // oauth token and client certificate authenticators type MasterAuthConfig struct { - // RequestHeader holds options for setting up a front proxy against the API. It is optional. + // requestHeader holds options for setting up a front proxy against the API. It is optional. 
RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"` // WebhookTokenAuthnConfig, if present configures remote token reviewers WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"` - // OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization + // oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization // Server Metadata for an external OAuth server. // See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 // This option is mutually exclusive with OAuthConfig @@ -304,22 +304,22 @@ type MasterAuthConfig struct { // RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire // API instead of against the /oauth endpoint. type RequestHeaderAuthenticationOptions struct { - // ClientCA is a file with the trusted signer certs. It is required. + // clientCA is a file with the trusted signer certs. It is required. ClientCA string `json:"clientCA"` - // ClientCommonNames is a required list of common names to require a match from. + // clientCommonNames is a required list of common names to require a match from. ClientCommonNames []string `json:"clientCommonNames"` - // UsernameHeaders is the list of headers to check for user information. First hit wins. + // usernameHeaders is the list of headers to check for user information. First hit wins. UsernameHeaders []string `json:"usernameHeaders"` // GroupNameHeader is the set of headers to check for group information. All are unioned. GroupHeaders []string `json:"groupHeaders"` - // ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + // extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. 
ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` } // AggregatorConfig holds information required to make the aggregator function. type AggregatorConfig struct { - // ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + // proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers ProxyClientInfo CertInfo `json:"proxyClientInfo"` } @@ -357,9 +357,9 @@ type AuditConfig struct { // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. MaximumFileSizeMegabytes int `json:"maximumFileSizeMegabytes"` - // PolicyFile is a path to the file that defines the audit policy configuration. + // policyFile is a path to the file that defines the audit policy configuration. PolicyFile string `json:"policyFile"` - // PolicyConfiguration is an embedded policy configuration object to be used + // policyConfiguration is an embedded policy configuration object to be used // as the audit policy configuration. If present, it will be used instead of // the path to the policy file. PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` @@ -375,52 +375,52 @@ type AuditConfig struct { // JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy type JenkinsPipelineConfig struct { - // AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided + // autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided // template when the first build config in the project with type JenkinsPipeline // is created. When not specified this option defaults to true. 
AutoProvisionEnabled *bool `json:"autoProvisionEnabled"` - // TemplateNamespace contains the namespace name where the Jenkins template is stored + // templateNamespace contains the namespace name where the Jenkins template is stored TemplateNamespace string `json:"templateNamespace"` - // TemplateName is the name of the default Jenkins template + // templateName is the name of the default Jenkins template TemplateName string `json:"templateName"` - // ServiceName is the name of the Jenkins service OpenShift uses to detect + // serviceName is the name of the Jenkins service OpenShift uses to detect // whether a Jenkins pipeline handler has already been installed in a project. // This value *must* match a service name in the provided template. ServiceName string `json:"serviceName"` - // Parameters specifies a set of optional parameters to the Jenkins template. + // parameters specifies a set of optional parameters to the Jenkins template. Parameters map[string]string `json:"parameters"` } // ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images type ImagePolicyConfig struct { - // MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // maxImagesBulkImportedPerRepository controls the number of images that are imported when a user // does a bulk import of a container repository. This number defaults to 50 to prevent users from // importing large numbers of images accidentally. Set -1 for no limit. MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` - // DisableScheduledImport allows scheduled background import of images to be disabled. + // disableScheduledImport allows scheduled background import of images to be disabled. 
DisableScheduledImport bool `json:"disableScheduledImport"` - // ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams + // scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams // scheduled for background import are checked against the upstream repository. The default value is 15 minutes. ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"` - // MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the + // maxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the // background per minute. The default value is 60. Set to -1 for unlimited. MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"` - // AllowedRegistriesForImport limits the container image registries that normal users may import + // allowedRegistriesForImport limits the container image registries that normal users may import // images from. Set this list to the registries that you trust to contain valid Docker // images and that you want applications to be able to import from. Users with // permission to create Images or ImageStreamMappings via the API are not affected by // this policy - typically only administrators or system integrations will have those // permissions. AllowedRegistriesForImport *AllowedRegistries `json:"allowedRegistriesForImport,omitempty"` - // InternalRegistryHostname sets the hostname for the default internal image + // internalRegistryHostname sets the hostname for the default internal image // registry. The value must be in "hostname[:port]" format. 
InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` - // ExternalRegistryHostname sets the hostname for the default external image + // externalRegistryHostname sets the hostname for the default external image // registry. The external hostname should be set only when the image registry // is exposed externally. The value is used in 'publicDockerImageRepository' // field in ImageStreams. The value must be in "hostname[:port]" format. ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` - // AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that + // additionalTrustedCA is a path to a pem bundle file containing additional CAs that // should be trusted during imagestream import. AdditionalTrustedCA string `json:"additionalTrustedCA,omitempty"` } @@ -431,40 +431,40 @@ type AllowedRegistries []RegistryLocation // RegistryLocation contains a location of the registry specified by the registry domain // name. The domain name might include wildcards, like '*' or '??'. type RegistryLocation struct { - // DomainName specifies a domain name for the registry + // domainName specifies a domain name for the registry // In case the registry use non-standard (80 or 443) port, the port should be included // in the domain name as well. DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) + // insecure indicates whether the registry is secure (https) or insecure (http) // By default (if not specified) the registry is assumed as secure. 
Insecure bool `json:"insecure,omitempty"` } // holds the necessary configuration options for type ProjectConfig struct { - // DefaultNodeSelector holds default project node label selector + // defaultNodeSelector holds default project node label selector DefaultNodeSelector string `json:"defaultNodeSelector"` - // ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint ProjectRequestMessage string `json:"projectRequestMessage"` - // ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. // It is in the format namespace/template and it is optional. // If it is not specified, a default template is used. ProjectRequestTemplate string `json:"projectRequestTemplate"` - // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. + // securityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. SecurityAllocator *SecurityAllocator `json:"securityAllocator"` } // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. type SecurityAllocator struct { - // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks // before running out of space. 
The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the // ranges container images will use once user namespaces are started). UIDAllocatorRange string `json:"uidAllocatorRange"` - // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is // "/[,]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default @@ -475,14 +475,14 @@ type SecurityAllocator struct { // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 // MCSAllocatorRange string `json:"mcsAllocatorRange"` - // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS // ranges (100k namespaces, 535k/5 labels). MCSLabelsPerProject int `json:"mcsLabelsPerProject"` } // holds the necessary configuration options for type PolicyConfig struct { - // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + // userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! 
UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` } @@ -494,7 +494,7 @@ type UserAgentMatchingConfig struct { // If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes DeniedClients []UserAgentDenyRule `json:"deniedClients"` - // DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given. + // defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given. DefaultRejectionMessage string `json:"defaultRejectionMessage"` } @@ -512,7 +512,7 @@ type UserAgentMatchRule struct { // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f Regex string `json:"regex"` - // HTTPVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". + // httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". HTTPVerbs []string `json:"httpVerbs"` } @@ -520,13 +520,13 @@ type UserAgentMatchRule struct { type UserAgentDenyRule struct { UserAgentMatchRule `json:",inline"` - // RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. + // rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used. RejectionMessage string `json:"rejectionMessage"` } // RoutingConfig holds the necessary configuration options for routing to subdomains type RoutingConfig struct { - // Subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // subdomain is the suffix appended to $service.$namespace. to form the default route hostname // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the // "default" route. 
Subdomain string `json:"subdomain"` @@ -534,51 +534,51 @@ type RoutingConfig struct { // MasterNetworkConfig to be passed to the compiled in network plugin type MasterNetworkConfig struct { - // NetworkPluginName is the name of the network plugin to use + // networkPluginName is the name of the network plugin to use NetworkPluginName string `json:"networkPluginName"` - // ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + // clusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. DeprecatedClusterNetworkCIDR string `json:"clusterNetworkCIDR,omitempty"` - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` - // HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + // hostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. 
DeprecatedHostSubnetLength uint32 `json:"hostSubnetLength,omitempty"` // ServiceNetwork is the CIDR string to specify the service networks ServiceNetworkCIDR string `json:"serviceNetworkCIDR"` - // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // externalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` - // IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare + // ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, // nodes, pods, or services. IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"` - // VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value + // vxlanPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value VXLANPort uint32 `json:"vxlanPort,omitempty"` } // ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. type ClusterNetworkEntry struct { - // CIDR defines the total range of a cluster networks address space. 
+ // cidr defines the total range of a cluster networks address space. CIDR string `json:"cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. HostSubnetLength uint32 `json:"hostSubnetLength"` } // ImageConfig holds the necessary configuration options for building image names for system components type ImageConfig struct { - // Format is the format of the name to be built for the system component + // format is the format of the name to be built for the system component Format string `json:"format"` - // Latest determines if the latest tag will be pulled from the registry + // latest determines if the latest tag will be pulled from the registry Latest bool `json:"latest"` } // RemoteConnectionInfo holds information necessary for establishing a remote connection type RemoteConnectionInfo struct { - // URL is the remote URL to connect to + // url is the remote URL to connect to URL string `json:"url"` - // CA is the CA for verifying TLS connections + // ca is the CA for verifying TLS connections CA string `json:"ca"` // CertInfo is the TLS client cert information to present // this is anonymous so that we can inline it for serialization @@ -587,9 +587,9 @@ type RemoteConnectionInfo struct { // KubeletConnectionInfo holds information necessary for connecting to a kubelet type KubeletConnectionInfo struct { - // Port is the port to connect to kubelets on + // port is the port to connect to kubelets on Port uint `json:"port"` - // CA is the CA for verifying TLS connections to kubelets + // ca is the CA for verifying TLS connections to kubelets CA string `json:"ca"` // CertInfo is the TLS client cert information for securing 
communication to kubelets // this is anonymous so that we can inline it for serialization @@ -598,9 +598,9 @@ type KubeletConnectionInfo struct { // EtcdConnectionInfo holds information necessary for connecting to an etcd server type EtcdConnectionInfo struct { - // URLs are the URLs for etcd + // urls are the URLs for etcd URLs []string `json:"urls"` - // CA is a file containing trusted roots for the etcd server certificates + // ca is a file containing trusted roots for the etcd server certificates CA string `json:"ca"` // CertInfo is the TLS client cert information for securing communication to etcd // this is anonymous so that we can inline it for serialization @@ -609,19 +609,19 @@ type EtcdConnectionInfo struct { // EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes type EtcdStorageConfig struct { - // KubernetesStorageVersion is the API version that Kube resources in etcd should be + // kubernetesStorageVersion is the API version that Kube resources in etcd should be // serialized to. This value should *not* be advanced until all clients in the // cluster that read from etcd have code that allows them to read the new version. KubernetesStorageVersion string `json:"kubernetesStorageVersion"` - // KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will + // kubernetesStoragePrefix is the path within etcd that the Kubernetes resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. The default value is 'kubernetes.io'. KubernetesStoragePrefix string `json:"kubernetesStoragePrefix"` - // OpenShiftStorageVersion is the API version that OS resources in etcd should be + // openShiftStorageVersion is the API version that OS resources in etcd should be // serialized to. This value should *not* be advanced until all clients in the // cluster that read from etcd have code that allows them to read the new version. 
OpenShiftStorageVersion string `json:"openShiftStorageVersion"` - // OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will + // openShiftStoragePrefix is the path within etcd that the OpenShift resources will // be rooted under. This value, if changed, will mean existing objects in etcd will // no longer be located. The default value is 'openshift.io'. OpenShiftStoragePrefix string `json:"openShiftStoragePrefix"` @@ -629,29 +629,29 @@ type EtcdStorageConfig struct { // ServingInfo holds information about serving web pages type ServingInfo struct { - // BindAddress is the ip:port to serve on + // bindAddress is the ip:port to serve on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` // CertInfo is the TLS cert info for serving secure traffic. // this is anonymous so that we can inline it for serialization CertInfo `json:",inline"` - // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates ClientCA string `json:"clientCA"` - // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + // namedCertificates is a list of certificates to use to secure requests to specific hostnames NamedCertificates []NamedCertificate `json:"namedCertificates"` - // MinTLSVersion is the minimum TLS version supported. + // minTLSVersion is the minimum TLS version supported. // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants MinTLSVersion string `json:"minTLSVersion,omitempty"` - // CipherSuites contains an overridden list of ciphers for the server to support. 
+ // cipherSuites contains an overridden list of ciphers for the server to support. // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants CipherSuites []string `json:"cipherSuites,omitempty"` } // NamedCertificate specifies a certificate/key, and the names it should be served for type NamedCertificate struct { - // Names is a list of DNS names this certificate should be used to secure + // names is a list of DNS names this certificate should be used to secure // A name can be a normal DNS name, or can contain leading wildcard segments. Names []string `json:"names"` // CertInfo is the TLS cert info for serving secure traffic @@ -662,45 +662,45 @@ type NamedCertificate struct { type HTTPServingInfo struct { // ServingInfo is the HTTP serving information ServingInfo `json:",inline"` - // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + // maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. MaxRequestsInFlight int `json:"maxRequestsInFlight"` - // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if // -1 there is no limit on requests. RequestTimeoutSeconds int `json:"requestTimeoutSeconds"` } // MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes type MasterClients struct { - // OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master + // openshiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master OpenShiftLoopbackKubeConfig string `json:"openshiftLoopbackKubeConfig"` - // OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. 
+ // openshiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. OpenShiftLoopbackClientConnectionOverrides *ClientConnectionOverrides `json:"openshiftLoopbackClientConnectionOverrides"` } // ClientConnectionOverrides are a set of overrides to the default client connection settings. type ClientConnectionOverrides struct { - // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the // default value of 'application/json'. This field will control all connections to the server used by a particular // client. AcceptContentTypes string `json:"acceptContentTypes"` - // ContentType is the content type used when sending data to the server from this client. + // contentType is the content type used when sending data to the server from this client. ContentType string `json:"contentType"` - // QPS controls the number of queries per second allowed for this connection. + // qps controls the number of queries per second allowed for this connection. QPS float32 `json:"qps"` - // Burst allows extra queries to accumulate when a client is exceeding its rate. + // burst allows extra queries to accumulate when a client is exceeding its rate. Burst int32 `json:"burst"` } // DNSConfig holds the necessary configuration options for DNS type DNSConfig struct { - // BindAddress is the ip:port to serve DNS on + // bindAddress is the ip:port to serve DNS on BindAddress string `json:"bindAddress"` - // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // bindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", // "tcp4", and "tcp6" BindNetwork string `json:"bindNetwork"` - // AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. 
Note that open + // allowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open // resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible // to public networks. AllowRecursiveQueries bool `json:"allowRecursiveQueries"` @@ -709,9 +709,9 @@ type DNSConfig struct { // WebhookTokenAuthenticators holds the necessary configuation options for // external token authenticators type WebhookTokenAuthenticator struct { - // ConfigFile is a path to a Kubeconfig file with the webhook configuration + // configFile is a path to a Kubeconfig file with the webhook configuration ConfigFile string `json:"configFile"` - // CacheTTL indicates how long an authentication result should be cached. + // cacheTTL indicates how long an authentication result should be cached. // It takes a valid time duration string (e.g. "5m"). // If empty, you get a default timeout of 2 minutes. // If zero (e.g. "0m"), caching is disabled @@ -720,85 +720,85 @@ type WebhookTokenAuthenticator struct { // OAuthConfig holds the necessary configuration options for OAuth authentication type OAuthConfig struct { - // MasterCA is the CA for verifying the TLS connection back to the MasterURL. + // masterCA is the CA for verifying the TLS connection back to the MasterURL. 
MasterCA *string `json:"masterCA"` - // MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens + // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens MasterURL string `json:"masterURL"` - // MasterPublicURL is used for building valid client redirect URLs for internal and external access + // masterPublicURL is used for building valid client redirect URLs for internal and external access MasterPublicURL string `json:"masterPublicURL"` - // AssetPublicURL is used for building valid client redirect URLs for external access + // assetPublicURL is used for building valid client redirect URLs for external access AssetPublicURL string `json:"assetPublicURL"` - // AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` - // IdentityProviders is an ordered list of ways for a user to identify themselves + // identityProviders is an ordered list of ways for a user to identify themselves IdentityProviders []IdentityProvider `json:"identityProviders"` - // GrantConfig describes how to handle grants + // grantConfig describes how to handle grants GrantConfig GrantConfig `json:"grantConfig"` - // SessionConfig hold information about configuring sessions. + // sessionConfig hold information about configuring sessions. SessionConfig *SessionConfig `json:"sessionConfig"` - // TokenConfig contains options for authorization and access tokens + // tokenConfig contains options for authorization and access tokens TokenConfig TokenConfig `json:"tokenConfig"` - // Templates allow you to customize pages like the login page. + // templates allow you to customize pages like the login page. 
Templates *OAuthTemplates `json:"templates"` } // OAuthTemplates allow for customization of pages like the login page type OAuthTemplates struct { - // Login is a path to a file containing a go template used to render the login page. + // login is a path to a file containing a go template used to render the login page. // If unspecified, the default login page is used. Login string `json:"login"` - // ProviderSelection is a path to a file containing a go template used to render the provider selection page. + // providerSelection is a path to a file containing a go template used to render the provider selection page. // If unspecified, the default provider selection page is used. ProviderSelection string `json:"providerSelection"` - // Error is a path to a file containing a go template used to render error pages during the authentication or grant flow + // error is a path to a file containing a go template used to render error pages during the authentication or grant flow // If unspecified, the default error page is used. Error string `json:"error"` } // ServiceAccountConfig holds the necessary configuration options for a service account type ServiceAccountConfig struct { - // ManagedNames is a list of service account names that will be auto-created in every namespace. + // managedNames is a list of service account names that will be auto-created in every namespace. // If no names are specified, the ServiceAccountsController will not be started. ManagedNames []string `json:"managedNames"` - // LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace + // limitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace // without explicitly referencing them LimitSecretReferences bool `json:"limitSecretReferences"` - // PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. 
+ // privateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. // If no private key is specified, the service account TokensController will not be started. PrivateKeyFile string `json:"privateKeyFile"` - // PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. + // publicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. // (If any file contains a private key, the public portion of the key is used) // The list of public keys is used to verify presented service account tokens. // Each key is tried in order until the list is exhausted or verification succeeds. // If no keys are specified, no service account authentication will be available. PublicKeyFiles []string `json:"publicKeyFiles"` - // MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically + // masterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically // inject the contents of this file into pods so they can verify connections to the master. MasterCA string `json:"masterCA"` } // TokenConfig holds the necessary configuration options for authorization and access tokens type TokenConfig struct { - // AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + // authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"` - // AccessTokenMaxAgeSeconds defines the maximum age of access tokens + // accessTokenMaxAgeSeconds defines the maximum age of access tokens AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"` - // AccessTokenInactivityTimeoutSeconds defined the default token + // accessTokenInactivityTimeoutSeconds defined the default token // inactivity timeout for tokens granted by any client. 
// Setting it to nil means the feature is completely disabled (default) // The default setting can be overriden on OAuthClient basis. @@ -815,12 +815,12 @@ type TokenConfig struct { // SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession type SessionConfig struct { - // SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object // If no file is specified, a random signing and encryption key are generated at each server start SessionSecretsFile string `json:"sessionSecretsFile"` - // SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession + // sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` - // SessionName is the cookie name used to store the session + // sessionName is the cookie name used to store the session SessionName string `json:"sessionName"` } @@ -834,7 +834,7 @@ type SessionConfig struct { type SessionSecrets struct { metav1.TypeMeta `json:",inline"` - // Secrets is a list of secrets + // secrets is a list of secrets // New sessions are signed and encrypted using the first secret. // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. Secrets []SessionSecret `json:"secrets"` @@ -842,23 +842,23 @@ type SessionSecrets struct { // SessionSecret is a secret used to authenticate/decrypt cookie-based sessions type SessionSecret struct { - // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + // authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. Authentication string `json:"authentication"` - // Encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES- + // encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES- Encryption string `json:"encryption"` } // IdentityProvider provides identities for users authenticating using credentials type IdentityProvider struct { - // Name is used to qualify the identities returned by this provider + // name is used to qualify the identities returned by this provider Name string `json:"name"` // UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider UseAsChallenger bool `json:"challenge"` // UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against UseAsLogin bool `json:"login"` - // MappingMethod determines how identities from this provider are mapped to users + // mappingMethod determines how identities from this provider are mapped to users MappingMethod string `json:"mappingMethod"` - // Provider contains the information about how to set up a specific identity provider + // provider contains the information about how to set up a specific identity provider Provider runtime.RawExtension `json:"provider"` } @@ -908,7 +908,7 @@ type DenyAllPasswordIdentityProvider struct { type HTPasswdPasswordIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // File is a reference to your htpasswd file + // file is a reference to your htpasswd file File string `json:"file"` } @@ -921,38 +921,38 @@ type HTPasswdPasswordIdentityProvider struct { // +openshift:compatibility-gen:internal type LDAPPasswordIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. 
The syntax of the URL is // ldap://host:port/basedn?attribute?scope?filter URL string `json:"url"` - // BindDN is an optional DN to bind with during the search phase. + // bindDN is an optional DN to bind with during the search phase. BindDN string `json:"bindDN"` - // BindPassword is an optional password to bind with during the search phase. + // bindPassword is an optional password to bind with during the search phase. BindPassword StringSource `json:"bindPassword"` // Insecure, if true, indicates the connection should not use TLS. // Cannot be set to true with a URL scheme of "ldaps://" // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 Insecure bool `json:"insecure"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // Attributes maps LDAP attributes to identities + // attributes maps LDAP attributes to identities Attributes LDAPAttributeMapping `json:"attributes"` } // LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields type LDAPAttributeMapping struct { - // ID is the list of attributes whose values should be used as the user ID. Required. + // id is the list of attributes whose values should be used as the user ID. Required. // LDAP standard identity attribute is "dn" ID []string `json:"id"` - // PreferredUsername is the list of attributes whose values should be used as the preferred username. + // preferredUsername is the list of attributes whose values should be used as the preferred username. // LDAP standard login attribute is "uid" PreferredUsername []string `json:"preferredUsername"` - // Name is the list of attributes whose values should be used as the display name. Optional. 
+ // name is the list of attributes whose values should be used as the display name. Optional. // If unspecified, no display name is set for the identity // LDAP standard display name attribute is "cn" Name []string `json:"name"` - // Email is the list of attributes whose values should be used as the email address. Optional. + // email is the list of attributes whose values should be used as the email address. Optional. // If unspecified, no email is set for the identity Email []string `json:"email"` } @@ -970,7 +970,7 @@ type KeystonePasswordIdentityProvider struct { RemoteConnectionInfo `json:",inline"` // Domain Name is required for keystone v3 DomainName string `json:"domainName"` - // UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username + // useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username UseKeystoneIdentity bool `json:"useKeystoneIdentity"` } @@ -984,7 +984,7 @@ type KeystonePasswordIdentityProvider struct { type RequestHeaderIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // LoginURL is a URL to redirect unauthenticated /authorize requests to + // loginURL is a URL to redirect unauthenticated /authorize requests to // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here // ${url} is replaced with the current URL, escaped to be safe in a query parameter // https://www.example.com/sso-login?then=${url} @@ -992,7 +992,7 @@ type RequestHeaderIdentityProvider struct { // https://www.example.com/auth-proxy/oauth/authorize?${query} LoginURL string `json:"loginURL"` - // ChallengeURL is a URL to redirect unauthenticated /authorize requests to + // challengeURL is a URL to redirect unauthenticated /authorize requests to // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here // ${url} is replaced with the current URL, escaped to be safe in a query 
parameter // https://www.example.com/sso-login?then=${url} @@ -1000,18 +1000,18 @@ type RequestHeaderIdentityProvider struct { // https://www.example.com/auth-proxy/oauth/authorize?${query} ChallengeURL string `json:"challengeURL"` - // ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + // clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. ClientCA string `json:"clientCA"` - // ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. 
ClientCommonNames []string `json:"clientCommonNames"` - // Headers is the set of headers to check for identity information + // headers is the set of headers to check for identity information Headers []string `json:"headers"` - // PreferredUsernameHeaders is the set of headers to check for the preferred username + // preferredUsernameHeaders is the set of headers to check for the preferred username PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` - // NameHeaders is the set of headers to check for the display name + // nameHeaders is the set of headers to check for the display name NameHeaders []string `json:"nameHeaders"` - // EmailHeaders is the set of headers to check for the email address + // emailHeaders is the set of headers to check for the email address EmailHeaders []string `json:"emailHeaders"` } @@ -1025,18 +1025,18 @@ type RequestHeaderIdentityProvider struct { type GitHubIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // Organizations optionally restricts which organizations are allowed to log in + // organizations optionally restricts which organizations are allowed to log in Organizations []string `json:"organizations"` - // Teams optionally restricts which teams are allowed to log in. Format is /. + // teams optionally restricts which teams are allowed to log in. Format is /. Teams []string `json:"teams"` - // Hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. 
Hostname string `json:"hostname"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server. + // ca is the optional trusted certificate authority bundle to use when making requests to the server. // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. CA string `json:"ca"` } @@ -1051,16 +1051,16 @@ type GitHubIdentityProvider struct { type GitLabIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // URL is the oauth server base URL + // url is the oauth server base URL URL string `json:"url"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // Legacy determines if OAuth2 or OIDC should be used + // legacy determines if OAuth2 or OIDC should be used // If true, OAuth2 is used // If false, OIDC is used // If nil and the URL's host is gitlab.com, OIDC is used @@ -1080,12 +1080,12 @@ type GitLabIdentityProvider struct { type GoogleIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // HostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // hostedDomain is the optional Google App domain (e.g. 
"mycompany.com") to restrict logins to HostedDomain string `json:"hostedDomain"` } @@ -1099,35 +1099,35 @@ type GoogleIdentityProvider struct { type OpenIDIdentityProvider struct { metav1.TypeMeta `json:",inline"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` - // ClientID is the oauth client ID + // clientID is the oauth client ID ClientID string `json:"clientID"` - // ClientSecret is the oauth client secret + // clientSecret is the oauth client secret ClientSecret StringSource `json:"clientSecret"` - // ExtraScopes are any scopes to request in addition to the standard "openid" scope. + // extraScopes are any scopes to request in addition to the standard "openid" scope. ExtraScopes []string `json:"extraScopes"` - // ExtraAuthorizeParameters are any custom parameters to add to the authorize request. + // extraAuthorizeParameters are any custom parameters to add to the authorize request. ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` - // URLs to use to authenticate + // urls to use to authenticate URLs OpenIDURLs `json:"urls"` - // Claims mappings + // claims mappings Claims OpenIDClaims `json:"claims"` } // OpenIDURLs are URLs to use when authenticating with an OpenID identity provider type OpenIDURLs struct { - // Authorize is the oauth authorization URL + // authorize is the oauth authorization URL Authorize string `json:"authorize"` - // Token is the oauth token granting URL + // token is the oauth token granting URL Token string `json:"token"` - // UserInfo is the optional userinfo URL. + // userInfo is the optional userinfo URL. 
// If present, a granted access_token is used to request claims // If empty, a granted id_token is parsed for claims UserInfo string `json:"userInfo"` @@ -1135,23 +1135,23 @@ type OpenIDURLs struct { // OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider type OpenIDClaims struct { - // ID is the list of claims whose values should be used as the user ID. Required. + // id is the list of claims whose values should be used as the user ID. Required. // OpenID standard identity claim is "sub" ID []string `json:"id"` - // PreferredUsername is the list of claims whose values should be used as the preferred username. + // preferredUsername is the list of claims whose values should be used as the preferred username. // If unspecified, the preferred username is determined from the value of the id claim PreferredUsername []string `json:"preferredUsername"` - // Name is the list of claims whose values should be used as the display name. Optional. + // name is the list of claims whose values should be used as the display name. Optional. // If unspecified, no display name is set for the identity Name []string `json:"name"` - // Email is the list of claims whose values should be used as the email address. Optional. + // email is the list of claims whose values should be used as the email address. Optional. // If unspecified, no email is set for the identity Email []string `json:"email"` } // GrantConfig holds the necessary configuration options for grant handlers type GrantConfig struct { - // Method determines the default strategy to use when an OAuth client requests a grant. + // method determines the default strategy to use when an OAuth client requests a grant. // This method will be used only if the specific OAuth client doesn't provide a strategy // of their own. 
Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients @@ -1159,7 +1159,7 @@ type GrantConfig struct { // - deny: always denies grant requests, useful for black-listed clients Method GrantHandlerType `json:"method"` - // ServiceAccountMethod is used for determining client authorization for service account oauth client. + // serviceAccountMethod is used for determining client authorization for service account oauth client. // It must be either: deny, prompt ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` } @@ -1177,13 +1177,13 @@ const ( // EtcdConfig holds the necessary configuration options for connecting with an etcd database type EtcdConfig struct { - // ServingInfo describes how to start serving the etcd master + // servingInfo describes how to start serving the etcd master ServingInfo ServingInfo `json:"servingInfo"` - // Address is the advertised host:port for client connections to etcd + // address is the advertised host:port for client connections to etcd Address string `json:"address"` - // PeerServingInfo describes how to start serving the etcd peer + // peerServingInfo describes how to start serving the etcd peer PeerServingInfo ServingInfo `json:"peerServingInfo"` - // PeerAddress is the advertised host:port for peer connections to etcd + // peerAddress is the advertised host:port for peer connections to etcd PeerAddress string `json:"peerAddress"` // StorageDir is the path to the etcd storage directory @@ -1192,42 +1192,42 @@ type EtcdConfig struct { // KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master type KubernetesMasterConfig struct { - // APILevels is a list of API levels that should be enabled on startup: v1 as examples + // apiLevels is a list of API levels that should be enabled on startup: v1 as examples APILevels []string `json:"apiLevels"` - // DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be 
disabled. + // disabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled. DisabledAPIGroupVersions map[string][]string `json:"disabledAPIGroupVersions"` - // MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used. + // masterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used. MasterIP string `json:"masterIP"` - // MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked + // masterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked // at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to // reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and // out of the kubernetes service record. It is not recommended to set this value below 15s. MasterEndpointReconcileTTL int `json:"masterEndpointReconcileTTL"` - // ServicesSubnet is the subnet to use for assigning service IPs + // servicesSubnet is the subnet to use for assigning service IPs ServicesSubnet string `json:"servicesSubnet"` - // ServicesNodePortRange is the range to use for assigning service public ports on a host. + // servicesNodePortRange is the range to use for assigning service public ports on a host. ServicesNodePortRange string `json:"servicesNodePortRange"` - // SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. + // schedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. 
SchedulerConfigFile string `json:"schedulerConfigFile"` - // PodEvictionTimeout controls grace period for deleting pods on failed nodes. + // podEvictionTimeout controls grace period for deleting pods on failed nodes. // It takes valid time duration string. If empty, you get the default pod eviction timeout. PodEvictionTimeout string `json:"podEvictionTimeout"` - // ProxyClientInfo specifies the client cert/key to use when proxying to pods + // proxyClientInfo specifies the client cert/key to use when proxying to pods ProxyClientInfo CertInfo `json:"proxyClientInfo"` - // APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's + // apiServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. APIServerArguments ExtendedArguments `json:"apiServerArguments"` - // ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the + // controllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the // controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist // the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid // configurations. ControllerArguments ExtendedArguments `json:"controllerArguments"` - // SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's + // schedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's // command line arguments. 
These are not migrated, but if you reference a value that does not exist the server will not // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. SchedulerArguments ExtendedArguments `json:"schedulerArguments"` @@ -1235,19 +1235,19 @@ type KubernetesMasterConfig struct { // CertInfo relates a certificate with a private key type CertInfo struct { - // CertFile is a file containing a PEM-encoded certificate + // certFile is a file containing a PEM-encoded certificate CertFile string `json:"certFile"` - // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + // keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile KeyFile string `json:"keyFile"` } // PodManifestConfig holds the necessary configuration options for using pod manifests type PodManifestConfig struct { - // Path specifies the path for the pod manifest file or directory + // path specifies the path for the pod manifest file or directory // If its a directory, its expected to contain on or more manifest files // This is used by the Kubelet to create pods on the node Path string `json:"path"` - // FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data + // fileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data // The interval needs to be a positive value FileCheckIntervalSeconds int64 `json:"fileCheckIntervalSeconds"` } @@ -1261,16 +1261,16 @@ type StringSource struct { // StringSourceSpec specifies a string value, or external location type StringSourceSpec struct { - // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + // value specifies the cleartext value, or an encrypted value if keyFile is specified. 
Value string `json:"value"` - // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + // env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. Env string `json:"env"` - // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + // file references a file containing the cleartext value, or an encrypted value if a keyFile is specified. File string `json:"file"` - // KeyFile references a file containing the key to use to decrypt the value. + // keyFile references a file containing the key to use to decrypt the value. KeyFile string `json:"keyFile"` } @@ -1286,16 +1286,16 @@ type LDAPSyncConfig struct { // Host is the scheme, host and port of the LDAP server to connect to: // scheme://host:port URL string `json:"url"` - // BindDN is an optional DN to bind to the LDAP server with + // bindDN is an optional DN to bind to the LDAP server with BindDN string `json:"bindDN"` - // BindPassword is an optional password to bind with during the search phase. + // bindPassword is an optional password to bind with during the search phase. BindPassword StringSource `json:"bindPassword"` // Insecure, if true, indicates the connection should not use TLS. 
// Cannot be set to true with a URL scheme of "ldaps://" // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 Insecure bool `json:"insecure"` - // CA is the optional trusted certificate authority bundle to use when making requests to the server + // ca is the optional trusted certificate authority bundle to use when making requests to the server // If empty, the default system roots are used CA string `json:"ca"` @@ -1329,33 +1329,33 @@ type RFC2307Config struct { // (ldapGroupUID) GroupUIDAttribute string `json:"groupUIDAttribute"` - // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for // an OpenShift group GroupNameAttributes []string `json:"groupNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. + // groupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. // The values contained in those attributes must be queryable by your UserUIDAttribute GroupMembershipAttributes []string `json:"groupMembershipAttributes"` // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. + // userUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. // It must correspond to values that will be found from the GroupMembershipAttributes UserUIDAttribute string `json:"userUIDAttribute"` - // UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. 
+ // userNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. // The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider UserNameAttributes []string `json:"userNameAttributes"` - // TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are + // tolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are // encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only // and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find // any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause // group membership to be removed, so it is recommended to use this flag with caution. TolerateMemberNotFoundErrors bool `json:"tolerateMemberNotFoundErrors"` - // TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries + // tolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries // are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all // user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail // if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP @@ -1370,10 +1370,10 @@ type ActiveDirectoryConfig struct { // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + // userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. 
UserNameAttributes []string `json:"userNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted // as the groups it is a member of GroupMembershipAttributes []string `json:"groupMembershipAttributes"` } @@ -1384,10 +1384,10 @@ type AugmentedActiveDirectoryConfig struct { // AllUsersQuery holds the template for an LDAP query that returns user entries. AllUsersQuery LDAPQuery `json:"usersQuery"` - // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + // userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. UserNameAttributes []string `json:"userNameAttributes"` - // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted // as the groups it is a member of GroupMembershipAttributes []string `json:"groupMembershipAttributes"` @@ -1398,7 +1398,7 @@ type AugmentedActiveDirectoryConfig struct { // (ldapGroupUID) GroupUIDAttribute string `json:"groupUIDAttribute"` - // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for // an OpenShift group GroupNameAttributes []string `json:"groupNameAttributes"` } @@ -1427,45 +1427,45 @@ type LDAPQuery struct { // before the wait for a response is given up. 
If this is 0, no client-side limit is imposed TimeLimit int `json:"timeout"` - // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + // filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN Filter string `json:"filter"` - // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + // pageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. PageSize int `json:"pageSize"` } // AdmissionPluginConfig holds the necessary configuration options for admission plugins type AdmissionPluginConfig struct { - // Location is the path to a configuration file that contains the plugin's + // location is the path to a configuration file that contains the plugin's // configuration Location string `json:"location"` - // Configuration is an embedded configuration object to be used as the plugin's + // configuration is an embedded configuration object to be used as the plugin's // configuration. If present, it will be used instead of the path to the configuration file. Configuration runtime.RawExtension `json:"configuration"` } // AdmissionConfig holds the necessary configuration options for admission type AdmissionConfig struct { - // PluginConfig allows specifying a configuration file per admission control plugin + // pluginConfig allows specifying a configuration file per admission control plugin PluginConfig map[string]*AdmissionPluginConfig `json:"pluginConfig"` - // PluginOrderOverride is a list of admission control plugin names that will be installed + // pluginOrderOverride is a list of admission control plugin names that will be installed // on the master. Order is significant. If empty, a default list of plugins is used. 
PluginOrderOverride []string `json:"pluginOrderOverride,omitempty"` } // ControllerConfig holds configuration values for controllers type ControllerConfig struct { - // Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+ + // controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+ // named 'foo', '-foo' disables the controller named 'foo'. // Defaults to "*". Controllers []string `json:"controllers"` - // Election defines the configuration for electing a controller instance to make changes to + // election defines the configuration for electing a controller instance to make changes to // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the // legacy direct etcd election code will be used. Election *ControllerElectionConfig `json:"election"` - // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for + // serviceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. ServiceServingCert ServiceServingCert `json:"serviceServingCert"` } @@ -1473,29 +1473,29 @@ type ControllerConfig struct { // ControllerElectionConfig contains configuration values for deciding how a controller // will be elected to act as leader. type ControllerElectionConfig struct { - // LockName is the resource name used to act as the lock for determining which controller + // lockName is the resource name used to act as the lock for determining which controller // instance should lead. LockName string `json:"lockName"` - // LockNamespace is the resource namespace used to act as the lock for determining which + // lockNamespace is the resource namespace used to act as the lock for determining which // controller instance should lead. 
It defaults to "kube-system" LockNamespace string `json:"lockNamespace"` - // LockResource is the group and resource name to use to coordinate for the controller lock. + // lockResource is the group and resource name to use to coordinate for the controller lock. // If unset, defaults to "configmaps". LockResource GroupResource `json:"lockResource"` } // GroupResource points to a resource by its name and API group. type GroupResource struct { - // Group is the name of an API group + // group is the name of an API group Group string `json:"group"` - // Resource is the name of a resource. + // resource is the name of a resource. Resource string `json:"resource"` } // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. type ServiceServingCert struct { - // Signer holds the signing information used to automatically sign serving certificates. + // signer holds the signing information used to automatically sign serving certificates. // If this value is nil, then certs are not signed automatically. Signer *CertInfo `json:"signer"` } @@ -1512,7 +1512,7 @@ type ServiceServingCert struct { type DefaultAdmissionConfig struct { metav1.TypeMeta `json:",inline"` - // Disable turns off an admission plugin that is enabled by default. + // disable turns off an admission plugin that is enabled by default. 
Disable bool `json:"disable"` } diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go index daa0868b6b..42444e8aee 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -14,8 +14,8 @@ package v1 var map_ActiveDirectoryConfig = map[string]string{ "": "ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the Active Directory schema", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", } func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { @@ -24,8 +24,8 @@ func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { var map_AdmissionConfig = map[string]string{ "": "AdmissionConfig holds the necessary configuration options for admission", - "pluginConfig": "PluginConfig allows specifying a configuration file per admission control plugin", - "pluginOrderOverride": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. 
If empty, a default list of plugins is used.", + "pluginConfig": "pluginConfig allows specifying a configuration file per admission control plugin", + "pluginOrderOverride": "pluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", } func (AdmissionConfig) SwaggerDoc() map[string]string { @@ -34,8 +34,8 @@ func (AdmissionConfig) SwaggerDoc() map[string]string { var map_AdmissionPluginConfig = map[string]string{ "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", - "location": "Location is the path to a configuration file that contains the plugin's configuration", - "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", + "location": "location is the path to a configuration file that contains the plugin's configuration", + "configuration": "configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", } func (AdmissionPluginConfig) SwaggerDoc() map[string]string { @@ -44,7 +44,7 @@ func (AdmissionPluginConfig) SwaggerDoc() map[string]string { var map_AggregatorConfig = map[string]string{ "": "AggregatorConfig holds information required to make the aggregator function.", - "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", } func (AggregatorConfig) SwaggerDoc() map[string]string { @@ -66,8 +66,8 @@ var map_AuditConfig = map[string]string{ "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", "maximumRetainedFiles": "Maximum number of old log files to retain.", "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", - "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", - "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "policyFile": "policyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "policyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. 
If present, it will be used instead of the path to the policy file.", "logFormat": "Format of saved audits (legacy or json).", "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", "webHookMode": "Strategy for sending audit events (block or batch).", @@ -80,11 +80,11 @@ func (AuditConfig) SwaggerDoc() map[string]string { var map_AugmentedActiveDirectoryConfig = map[string]string{ "": "AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the augmented Active Directory schema", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. 
(ldapGroupUID)", - "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupNameAttributes": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", } func (AugmentedActiveDirectoryConfig) SwaggerDoc() map[string]string { @@ -131,8 +131,8 @@ func (BuildOverridesConfig) SwaggerDoc() map[string]string { var map_CertInfo = map[string]string{ "": "CertInfo relates a certificate with a private key", - "certFile": "CertFile is a file containing a PEM-encoded certificate", - "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", + "certFile": "certFile is a file containing a PEM-encoded certificate", + "keyFile": "keyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", } func (CertInfo) SwaggerDoc() map[string]string { @@ -141,10 +141,10 @@ func (CertInfo) SwaggerDoc() map[string]string { var map_ClientConnectionOverrides = map[string]string{ "": "ClientConnectionOverrides are a set of overrides to the default client connection settings.", - "acceptContentTypes": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", - "contentType": "ContentType is the content type used when sending data to the server from this client.", - "qps": "QPS controls the number of queries per second allowed for this connection.", - "burst": "Burst allows extra queries to accumulate when a client is exceeding its rate.", + "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. 
This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", } func (ClientConnectionOverrides) SwaggerDoc() map[string]string { @@ -153,8 +153,8 @@ func (ClientConnectionOverrides) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", - "cidr": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "cidr": "cidr defines the total range of a cluster networks address space.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -163,9 +163,9 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_ControllerConfig = map[string]string{ "": "ControllerConfig holds configuration values for controllers", - "controllers": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", - "election": "Election defines the configuration for electing a controller instance to make changes to the cluster. 
If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", - "serviceServingCert": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "controllers": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", + "election": "election defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "serviceServingCert": "serviceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", } func (ControllerConfig) SwaggerDoc() map[string]string { @@ -174,9 +174,9 @@ func (ControllerConfig) SwaggerDoc() map[string]string { var map_ControllerElectionConfig = map[string]string{ "": "ControllerElectionConfig contains configuration values for deciding how a controller will be elected to act as leader.", - "lockName": "LockName is the resource name used to act as the lock for determining which controller instance should lead.", - "lockNamespace": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", - "lockResource": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", + "lockName": "lockName is the resource name used to act as the lock for determining which controller instance should lead.", + "lockNamespace": "lockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. 
It defaults to \"kube-system\"", + "lockResource": "lockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", } func (ControllerElectionConfig) SwaggerDoc() map[string]string { @@ -185,9 +185,9 @@ func (ControllerElectionConfig) SwaggerDoc() map[string]string { var map_DNSConfig = map[string]string{ "": "DNSConfig holds the necessary configuration options for DNS", - "bindAddress": "BindAddress is the ip:port to serve DNS on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "allowRecursiveQueries": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", + "bindAddress": "bindAddress is the ip:port to serve DNS on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "allowRecursiveQueries": "allowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", } func (DNSConfig) SwaggerDoc() map[string]string { @@ -196,7 +196,7 @@ func (DNSConfig) SwaggerDoc() map[string]string { var map_DefaultAdmissionConfig = map[string]string{ "": "DefaultAdmissionConfig can be used to enable or disable various admission plugins. When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, this will cause an \"off by default\" admission plugin to be enabled\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", - "disable": "Disable turns off an admission plugin that is enabled by default.", + "disable": "disable turns off an admission plugin that is enabled by default.", } func (DefaultAdmissionConfig) SwaggerDoc() map[string]string { @@ -213,9 +213,9 @@ func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string { var map_DockerConfig = map[string]string{ "": "DockerConfig holds Docker related configuration options.", - "execHandlerName": "ExecHandlerName is the name of the handler to use for executing commands in containers.", - "dockerShimSocket": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", - "dockerShimRootDirectory": "DockershimRootDirectory is the dockershim root directory.", + "execHandlerName": "execHandlerName is the name of the handler to use for executing commands in containers.", + "dockerShimSocket": "dockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. 
Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", + "dockerShimRootDirectory": "dockerShimRootDirectory is the dockershim root directory.", } func (DockerConfig) SwaggerDoc() map[string]string { @@ -224,10 +224,10 @@ func (DockerConfig) SwaggerDoc() map[string]string { var map_EtcdConfig = map[string]string{ "": "EtcdConfig holds the necessary configuration options for connecting with an etcd database", - "servingInfo": "ServingInfo describes how to start serving the etcd master", - "address": "Address is the advertised host:port for client connections to etcd", - "peerServingInfo": "PeerServingInfo describes how to start serving the etcd peer", - "peerAddress": "PeerAddress is the advertised host:port for peer connections to etcd", + "servingInfo": "servingInfo describes how to start serving the etcd master", + "address": "address is the advertised host:port for client connections to etcd", + "peerServingInfo": "peerServingInfo describes how to start serving the etcd peer", + "peerAddress": "peerAddress is the advertised host:port for peer connections to etcd", "storageDirectory": "StorageDir is the path to the etcd storage directory", } @@ -237,8 +237,8 @@ func (EtcdConfig) SwaggerDoc() map[string]string { var map_EtcdConnectionInfo = map[string]string{ "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", - "urls": "URLs are the URLs for etcd", - "ca": "CA is a file containing trusted roots for the etcd server certificates", + "urls": "urls are the URLs for etcd", + "ca": "ca is a file containing trusted roots for the etcd server certificates", } func (EtcdConnectionInfo) SwaggerDoc() map[string]string { @@ -247,10 +247,10 @@ func (EtcdConnectionInfo) SwaggerDoc() map[string]string { var map_EtcdStorageConfig = map[string]string{ "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", - "kubernetesStorageVersion": "KubernetesStorageVersion is 
the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", - "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", - "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", - "openShiftStoragePrefix": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", + "kubernetesStorageVersion": "kubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "kubernetesStoragePrefix": "kubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", + "openShiftStorageVersion": "openShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "openShiftStoragePrefix": "openShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. 
The default value is 'openshift.io'.", } func (EtcdStorageConfig) SwaggerDoc() map[string]string { @@ -259,12 +259,12 @@ func (EtcdStorageConfig) SwaggerDoc() map[string]string { var map_GitHubIdentityProvider = map[string]string{ "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "organizations": "Organizations optionally restricts which organizations are allowed to log in", - "teams": "Teams optionally restricts which teams are allowed to log in. Format is /.", - "hostname": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is /.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. 
This can only be configured when hostname is set to a non-empty value.", } func (GitHubIdentityProvider) SwaggerDoc() map[string]string { @@ -273,11 +273,11 @@ func (GitHubIdentityProvider) SwaggerDoc() map[string]string { var map_GitLabIdentityProvider = map[string]string{ "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "url": "URL is the oauth server base URL", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "legacy": "Legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "url": "url is the oauth server base URL", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "legacy": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", } func (GitLabIdentityProvider) SwaggerDoc() map[string]string { @@ -286,9 +286,9 @@ func (GitLabIdentityProvider) SwaggerDoc() map[string]string { var map_GoogleIdentityProvider = map[string]string{ "": 
"GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "hostedDomain": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", } func (GoogleIdentityProvider) SwaggerDoc() map[string]string { @@ -297,8 +297,8 @@ func (GoogleIdentityProvider) SwaggerDoc() map[string]string { var map_GrantConfig = map[string]string{ "": "GrantConfig holds the necessary configuration options for grant handlers", - "method": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", - "serviceAccountMethod": "ServiceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", + "method": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. 
Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", } func (GrantConfig) SwaggerDoc() map[string]string { @@ -307,8 +307,8 @@ func (GrantConfig) SwaggerDoc() map[string]string { var map_GroupResource = map[string]string{ "": "GroupResource points to a resource by its name and API group.", - "group": "Group is the name of an API group", - "resource": "Resource is the name of a resource.", + "group": "group is the name of an API group", + "resource": "resource is the name of a resource.", } func (GroupResource) SwaggerDoc() map[string]string { @@ -317,7 +317,7 @@ func (GroupResource) SwaggerDoc() map[string]string { var map_HTPasswdPasswordIdentityProvider = map[string]string{ "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "file": "File is a reference to your htpasswd file", + "file": "file is a reference to your htpasswd file", } func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -326,8 +326,8 @@ func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { var map_HTTPServingInfo = map[string]string{ "": "HTTPServingInfo holds configuration for serving HTTP", - "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", - "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. 
The default is 60 minutes, if -1 there is no limit on requests.", + "maxRequestsInFlight": "maxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "requestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", } func (HTTPServingInfo) SwaggerDoc() map[string]string { @@ -336,11 +336,11 @@ func (HTTPServingInfo) SwaggerDoc() map[string]string { var map_IdentityProvider = map[string]string{ "": "IdentityProvider provides identities for users authenticating using credentials", - "name": "Name is used to qualify the identities returned by this provider", + "name": "name is used to qualify the identities returned by this provider", "challenge": "UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider", "login": "UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against", - "mappingMethod": "MappingMethod determines how identities from this provider are mapped to users", - "provider": "Provider contains the information about how to set up a specific identity provider", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users", + "provider": "provider contains the information about how to set up a specific identity provider", } func (IdentityProvider) SwaggerDoc() map[string]string { @@ -349,8 +349,8 @@ func (IdentityProvider) SwaggerDoc() map[string]string { var map_ImageConfig = map[string]string{ "": "ImageConfig holds the necessary configuration options for building image names for system components", - "format": "Format is the format of the name to be built for the system component", - "latest": "Latest determines if the latest tag will be pulled from the registry", + "format": "format is the format of the name to be built for the system component", + "latest": "latest determines if the latest 
tag will be pulled from the registry", } func (ImageConfig) SwaggerDoc() map[string]string { @@ -359,14 +359,14 @@ func (ImageConfig) SwaggerDoc() map[string]string { var map_ImagePolicyConfig = map[string]string{ "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", - "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", - "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", - "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", - "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", - "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", - "internalRegistryHostname": "InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format.", - "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. 
The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", - "additionalTrustedCA": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", + "maxImagesBulkImportedPerRepository": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "disableScheduledImport": "disableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", + "maxScheduledImageImportsPerMinute": "maxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. 
The value must be in \"hostname[:port]\" format.", + "externalRegistryHostname": "externalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", } func (ImagePolicyConfig) SwaggerDoc() map[string]string { @@ -375,11 +375,11 @@ func (ImagePolicyConfig) SwaggerDoc() map[string]string { var map_JenkinsPipelineConfig = map[string]string{ "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", - "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", - "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored", - "templateName": "TemplateName is the name of the default Jenkins template", - "serviceName": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", - "parameters": "Parameters specifies a set of optional parameters to the Jenkins template.", + "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. 
When not specified this option defaults to true.", + "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "templateName is the name of the default Jenkins template", + "serviceName": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", + "parameters": "parameters specifies a set of optional parameters to the Jenkins template.", } func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { @@ -389,7 +389,7 @@ func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { var map_KeystonePasswordIdentityProvider = map[string]string{ "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "domainName": "Domain Name is required for keystone v3", - "useKeystoneIdentity": "UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", + "useKeystoneIdentity": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", } func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -398,8 +398,8 @@ func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { var map_KubeletConnectionInfo = map[string]string{ "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", - "port": "Port is the port to connect to kubelets on", - "ca": "CA is the CA for verifying TLS connections to kubelets", + "port": "port is the port to connect to kubelets on", + "ca": "ca is the CA for verifying TLS connections to kubelets", } func (KubeletConnectionInfo) SwaggerDoc() 
map[string]string { @@ -408,18 +408,18 @@ func (KubeletConnectionInfo) SwaggerDoc() map[string]string { var map_KubernetesMasterConfig = map[string]string{ "": "KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master", - "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", - "disabledAPIGroupVersions": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", - "masterIP": "MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", - "masterEndpointReconcileTTL": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", - "servicesSubnet": "ServicesSubnet is the subnet to use for assigning service IPs", - "servicesNodePortRange": "ServicesNodePortRange is the range to use for assigning service public ports on a host.", - "schedulerConfigFile": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", - "podEvictionTimeout": "PodEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. If empty, you get the default pod eviction timeout.", - "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to pods", - "apiServerArguments": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. 
These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", - "controllerArguments": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", - "schedulerArguments": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "apiLevels": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", + "disabledAPIGroupVersions": "disabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", + "masterIP": "masterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", + "masterEndpointReconcileTTL": "masterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. 
It is not recommended to set this value below 15s.", + "servicesSubnet": "servicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "servicesNodePortRange is the range to use for assigning service public ports on a host.", + "schedulerConfigFile": "schedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", + "podEvictionTimeout": "podEvictionTimeout controls grace period for deleting pods on failed nodes. It takes valid time duration string. If empty, you get the default pod eviction timeout.", + "proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to pods", + "apiServerArguments": "apiServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiservers's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "controllerArguments": "controllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "schedulerArguments": "schedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. 
These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", } func (KubernetesMasterConfig) SwaggerDoc() map[string]string { @@ -428,10 +428,10 @@ func (KubernetesMasterConfig) SwaggerDoc() map[string]string { var map_LDAPAttributeMapping = map[string]string{ "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", - "id": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", - "preferredUsername": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", - "name": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", - "email": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "id": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. 
If unspecified, no email is set for the identity", } func (LDAPAttributeMapping) SwaggerDoc() map[string]string { @@ -440,12 +440,12 @@ func (LDAPAttributeMapping) SwaggerDoc() map[string]string { var map_LDAPPasswordIdentityProvider = map[string]string{ "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "url": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", - "bindDN": "BindDN is an optional DN to bind with during the search phase.", - "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", "insecure": "Insecure, if true, indicates the connection should not use TLS. 
Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "attributes": "Attributes maps LDAP attributes to identities", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "attributes": "attributes maps LDAP attributes to identities", } func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string { @@ -458,8 +458,8 @@ var map_LDAPQuery = map[string]string{ "scope": "The (optional) scope of the search. Can be: base: only the base object, one: all object on the base level, sub: the entire subtree Defaults to the entire subtree if not set", "derefAliases": "The (optional) behavior of the search with regards to alisases. Can be: never: never dereference aliases, search: only dereference in searching, base: only dereference in finding the base object, always: always dereference Defaults to always dereferencing if not set", "timeout": "TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. If this is 0, no client-side limit is imposed", - "filter": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", - "pageSize": "PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.", + "filter": "filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", + "pageSize": "pageSize is the maximum preferred page size, measured in LDAP entries. 
A page size of 0 means no paging will be done.", } func (LDAPQuery) SwaggerDoc() map[string]string { @@ -469,10 +469,10 @@ func (LDAPQuery) SwaggerDoc() map[string]string { var map_LDAPSyncConfig = map[string]string{ "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", - "bindDN": "BindDN is an optional DN to bind to the LDAP server with", - "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "bindDN": "bindDN is an optional DN to bind to the LDAP server with", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", "insecure": "Insecure, if true, indicates the connection should not use TLS. 
Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", "groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names", "rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members", "activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of", @@ -494,9 +494,9 @@ func (LocalQuota) SwaggerDoc() map[string]string { var map_MasterAuthConfig = map[string]string{ "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", - "requestHeader": "RequestHeader holds options for setting up a front proxy against the API. It is optional.", + "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.", "webhookTokenAuthenticators": "WebhookTokenAuthnConfig, if present configures remote token reviewers", - "oauthMetadataFile": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", + "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", } func (MasterAuthConfig) SwaggerDoc() map[string]string { @@ -505,8 +505,8 @@ func (MasterAuthConfig) SwaggerDoc() map[string]string { var map_MasterClients = map[string]string{ "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", - "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", - "openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", + "openshiftLoopbackKubeConfig": "openshiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "openshiftLoopbackClientConnectionOverrides": "openshiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", } func (MasterClients) SwaggerDoc() map[string]string { @@ -515,33 +515,33 @@ func (MasterClients) SwaggerDoc() map[string]string { var map_MasterConfig = map[string]string{ "": "MasterConfig holds the necessary configuration options for the OpenShift master\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", - "servingInfo": "ServingInfo describes how to start serving", - "authConfig": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", - "aggregatorConfig": "AggregatorConfig has options for configuring the aggregator component of the API server.", + "servingInfo": "servingInfo describes how to start serving", + "authConfig": "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "aggregatorConfig has options for configuring the aggregator component of the API server.", "corsAllowedOrigins": "CORSAllowedOrigins", - "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", - "masterPublicURL": "MasterPublicURL is how clients can access the OpenShift API server", - "controllers": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", - "admissionConfig": "AdmissionConfig contains admission control plugin configuration.", - "controllerConfig": "ControllerConfig holds configuration values for controllers", - "etcdStorageConfig": "EtcdStorageConfig contains information about how API resources are stored in Etcd. 
These values are only relevant when etcd is the backing store for the cluster.", - "etcdClientInfo": "EtcdClientInfo contains information about how to connect to etcd", - "kubeletClientInfo": "KubeletClientInfo contains information about how to connect to kubelets", + "apiLevels": "apiLevels is a list of API levels that should be enabled on startup: v1 as examples", + "masterPublicURL": "masterPublicURL is how clients can access the OpenShift API server", + "controllers": "controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", + "admissionConfig": "admissionConfig contains admission control plugin configuration.", + "controllerConfig": "controllerConfig holds configuration values for controllers", + "etcdStorageConfig": "etcdStorageConfig contains information about how API resources are stored in Etcd. 
These values are only relevant when etcd is the backing store for the cluster.", + "etcdClientInfo": "etcdClientInfo contains information about how to connect to etcd", + "kubeletClientInfo": "kubeletClientInfo contains information about how to connect to kubelets", "kubernetesMasterConfig": "KubernetesMasterConfig, if present start the kubernetes master in this process", "etcdConfig": "EtcdConfig, if present start etcd in this process", "oauthConfig": "OAuthConfig, if present start the /oauth endpoint in this process", "dnsConfig": "DNSConfig, if present start the DNS server in this process", - "serviceAccountConfig": "ServiceAccountConfig holds options related to service accounts", - "masterClients": "MasterClients holds all the client connection information for controllers and other system components", - "imageConfig": "ImageConfig holds options that describe how to build image names for system components", - "imagePolicyConfig": "ImagePolicyConfig controls limits and behavior for importing images", - "policyConfig": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy", - "projectConfig": "ProjectConfig holds information about project creation and defaults", - "routingConfig": "RoutingConfig holds information about routing and route generation", - "networkConfig": "NetworkConfig to be passed to the compiled in network plugin", + "serviceAccountConfig": "serviceAccountConfig holds options related to service accounts", + "masterClients": "masterClients holds all the client connection information for controllers and other system components", + "imageConfig": "imageConfig holds options that describe how to build image names for system components", + "imagePolicyConfig": "imagePolicyConfig controls limits and behavior for importing images", + "policyConfig": "policyConfig holds information about where to locate critical pieces of bootstrapping policy", + "projectConfig": "projectConfig holds information about project creation 
and defaults", + "routingConfig": "routingConfig holds information about routing and route generation", + "networkConfig": "networkConfig to be passed to the compiled in network plugin", "volumeConfig": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", - "jenkinsPipelineConfig": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", - "auditConfig": "AuditConfig holds information related to auditing capabilities.", + "jenkinsPipelineConfig": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "auditConfig": "auditConfig holds information related to auditing capabilities.", } func (MasterConfig) SwaggerDoc() map[string]string { @@ -550,14 +550,14 @@ func (MasterConfig) SwaggerDoc() map[string]string { var map_MasterNetworkConfig = map[string]string{ "": "MasterNetworkConfig to be passed to the compiled in network plugin", - "networkPluginName": "NetworkPluginName is the name of the network plugin to use", - "clusterNetworkCIDR": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", - "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", - "hostSubnetLength": "HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. 
Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "networkPluginName": "networkPluginName is the name of the network plugin to use", + "clusterNetworkCIDR": "clusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "clusterNetworks": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addressed from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", + "hostSubnetLength": "hostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", "serviceNetworkCIDR": "ServiceNetwork is the CIDR string to specify the service networks", - "externalIPNetworkCIDRs": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", - "ingressIPNetworkCIDR": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", - "vxlanPort": "VXLANPort is the VXLAN port used by the cluster defaults. 
If it is not set, 4789 is the default value", + "externalIPNetworkCIDRs": "externalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", + "ingressIPNetworkCIDR": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", + "vxlanPort": "vxlanPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", } func (MasterNetworkConfig) SwaggerDoc() map[string]string { @@ -566,7 +566,7 @@ func (MasterNetworkConfig) SwaggerDoc() map[string]string { var map_MasterVolumeConfig = map[string]string{ "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", - "dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", + "dynamicProvisioningEnabled": "dynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", } func (MasterVolumeConfig) SwaggerDoc() map[string]string { @@ -575,7 +575,7 @@ func (MasterVolumeConfig) SwaggerDoc() map[string]string { var map_NamedCertificate = map[string]string{ "": "NamedCertificate specifies a certificate/key, and the names it should be served for", - "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, 
or can contain leading wildcard segments.", + "names": "names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", } func (NamedCertificate) SwaggerDoc() map[string]string { @@ -584,10 +584,10 @@ func (NamedCertificate) SwaggerDoc() map[string]string { var map_NodeAuthConfig = map[string]string{ "": "NodeAuthConfig holds authn/authz configuration options", - "authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", - "authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", - "authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", - "authorizationCacheSize": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", + "authenticationCacheTTL": "authenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authenticationCacheSize": "authenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", + "authorizationCacheTTL": "authorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. 
\"0m\"), caching is disabled", + "authorizationCacheSize": "authorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", } func (NodeAuthConfig) SwaggerDoc() map[string]string { @@ -596,29 +596,29 @@ func (NodeAuthConfig) SwaggerDoc() map[string]string { var map_NodeConfig = map[string]string{ "": "NodeConfig is the fully specified config starting an OpenShift node\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "nodeName": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", + "nodeName": "nodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", "nodeIP": "Node may have multiple IPs, specify the IP to use for pod traffic routing If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used", - "servingInfo": "ServingInfo describes how to start serving", - "masterKubeConfig": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", - "masterClientConnectionOverrides": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", - "dnsDomain": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", - "dnsIP": "DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. 
This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", - "dnsBindAddress": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", - "dnsNameservers": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", - "dnsRecursiveResolvConf": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.", + "servingInfo": "servingInfo describes how to start serving", + "masterKubeConfig": "masterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", + "masterClientConnectionOverrides": "masterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", + "dnsDomain": "dnsDomain holds the domain suffix that will be used for the DNS search path inside each container. 
Defaults to 'cluster.local'.", + "dnsIP": "dnsIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", + "dnsBindAddress": "dnsBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environments nameservers.", + "dnsNameservers": "dnsNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", + "dnsRecursiveResolvConf": "dnsRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. 
It adds extra nameservers to DNSNameservers if set.", "networkPluginName": "Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead", - "networkConfig": "NetworkConfig provides network options for the node", - "volumeDirectory": "VolumeDirectory is the directory that volumes will be stored under", - "imageConfig": "ImageConfig holds options that describe how to build image names for system components", - "allowDisabledDocker": "AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", - "podManifestConfig": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", - "authConfig": "AuthConfig holds authn/authz configuration options", - "dockerConfig": "DockerConfig holds Docker related configuration options.", - "kubeletArguments": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", - "proxyArguments": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. 
These values override other settings in NodeConfig which may cause invalid configurations.", - "iptablesSyncPeriod": "IPTablesSyncPeriod is how often iptable rules are refreshed", - "enableUnidling": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up", - "volumeConfig": "VolumeConfig contains options for configuring volumes on the node.", + "networkConfig": "networkConfig provides network options for the node", + "volumeDirectory": "volumeDirectory is the directory that volumes will be stored under", + "imageConfig": "imageConfig holds options that describe how to build image names for system components", + "allowDisabledDocker": "allowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", + "podManifestConfig": "podManifestConfig holds the configuration for enabling the Kubelet to create pods based from a manifest file(s) placed locally on the node", + "authConfig": "authConfig holds authn/authz configuration options", + "dockerConfig": "dockerConfig holds Docker related configuration options.", + "kubeletArguments": "kubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "proxyArguments": "proxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. 
These values override other settings in NodeConfig which may cause invalid configurations.", + "iptablesSyncPeriod": "iptablesSyncPeriod is how often iptable rules are refreshed", + "enableUnidling": "enableUnidling controls whether or not the hybrid unidling proxy will be set up", + "volumeConfig": "volumeConfig contains options for configuring volumes on the node.", } func (NodeConfig) SwaggerDoc() map[string]string { @@ -627,7 +627,7 @@ func (NodeConfig) SwaggerDoc() map[string]string { var map_NodeNetworkConfig = map[string]string{ "": "NodeNetworkConfig provides network options for the node", - "networkPluginName": "NetworkPluginName is a string specifying the networking plugin", + "networkPluginName": "networkPluginName is a string specifying the networking plugin", "mtu": "Maximum transmission unit for the network packets", } @@ -637,7 +637,7 @@ func (NodeNetworkConfig) SwaggerDoc() map[string]string { var map_NodeVolumeConfig = map[string]string{ "": "NodeVolumeConfig contains options for configuring volumes on the node.", - "localQuota": "LocalQuota contains options for controlling local volume quota on the node.", + "localQuota": "localQuota contains options for controlling local volume quota on the node.", } func (NodeVolumeConfig) SwaggerDoc() map[string]string { @@ -646,16 +646,16 @@ func (NodeVolumeConfig) SwaggerDoc() map[string]string { var map_OAuthConfig = map[string]string{ "": "OAuthConfig holds the necessary configuration options for OAuth authentication", - "masterCA": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.", - "masterURL": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens", - "masterPublicURL": "MasterPublicURL is used for building valid client redirect URLs for internal and external access", - "assetPublicURL": "AssetPublicURL is used for building valid client redirect URLs for external access", - "alwaysShowProviderSelection": 
"AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", - "identityProviders": "IdentityProviders is an ordered list of ways for a user to identify themselves", - "grantConfig": "GrantConfig describes how to handle grants", - "sessionConfig": "SessionConfig hold information about configuring sessions.", - "tokenConfig": "TokenConfig contains options for authorization and access tokens", - "templates": "Templates allow you to customize pages like the login page.", + "masterCA": "masterCA is the CA for verifying the TLS connection back to the MasterURL.", + "masterURL": "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens", + "masterPublicURL": "masterPublicURL is used for building valid client redirect URLs for internal and external access", + "assetPublicURL": "assetPublicURL is used for building valid client redirect URLs for external access", + "alwaysShowProviderSelection": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves", + "grantConfig": "grantConfig describes how to handle grants", + "sessionConfig": "sessionConfig hold information about configuring sessions.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", } func (OAuthConfig) SwaggerDoc() map[string]string { @@ -664,9 +664,9 @@ func (OAuthConfig) SwaggerDoc() map[string]string { var map_OAuthTemplates = map[string]string{ "": "OAuthTemplates allow for customization of pages like the login page", - "login": "Login is a path to a file containing a go template used to render the login page. 
If unspecified, the default login page is used.", - "providerSelection": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", - "error": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", + "login": "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "providerSelection": "providerSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", + "error": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", } func (OAuthTemplates) SwaggerDoc() map[string]string { @@ -675,10 +675,10 @@ func (OAuthTemplates) SwaggerDoc() map[string]string { var map_OpenIDClaims = map[string]string{ "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", - "id": "ID is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", - "preferredUsername": "PreferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", - "name": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", - "email": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "id": "id is the list of claims whose values should be used as the user ID. Required. 
OpenID standard identity claim is \"sub\"", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", } func (OpenIDClaims) SwaggerDoc() map[string]string { @@ -687,13 +687,13 @@ func (OpenIDClaims) SwaggerDoc() map[string]string { var map_OpenIDIdentityProvider = map[string]string{ "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", - "clientID": "ClientID is the oauth client ID", - "clientSecret": "ClientSecret is the oauth client secret", - "extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", - "extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", - "urls": "URLs to use to authenticate", - "claims": "Claims mappings", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": 
"extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "urls to use to authenticate", + "claims": "claims mappings", } func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { @@ -702,9 +702,9 @@ func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { var map_OpenIDURLs = map[string]string{ "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", - "authorize": "Authorize is the oauth authorization URL", - "token": "Token is the oauth token granting URL", - "userInfo": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", + "authorize": "authorize is the oauth authorization URL", + "token": "token is the oauth token granting URL", + "userInfo": "userInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", } func (OpenIDURLs) SwaggerDoc() map[string]string { @@ -713,8 +713,8 @@ func (OpenIDURLs) SwaggerDoc() map[string]string { var map_PodManifestConfig = map[string]string{ "": "PodManifestConfig holds the necessary configuration options for using pod manifests", - "path": "Path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", - "fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive value", + "path": "path specifies the path for the pod manifest file or directory If its a directory, its expected to contain on or more manifest files This is used by the Kubelet to create pods on the node", + "fileCheckIntervalSeconds": "fileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data The interval needs to be a positive 
value", } func (PodManifestConfig) SwaggerDoc() map[string]string { @@ -723,7 +723,7 @@ func (PodManifestConfig) SwaggerDoc() map[string]string { var map_PolicyConfig = map[string]string{ "": "holds the necessary configuration options for", - "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "userAgentMatchingConfig": "userAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", } func (PolicyConfig) SwaggerDoc() map[string]string { @@ -732,10 +732,10 @@ func (PolicyConfig) SwaggerDoc() map[string]string { var map_ProjectConfig = map[string]string{ "": "holds the necessary configuration options for", - "defaultNodeSelector": "DefaultNodeSelector holds default project node label selector", - "projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", - "projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", - "securityAllocator": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. 
If it is not specified, a default template is used.", + "securityAllocator": "securityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", } func (ProjectConfig) SwaggerDoc() map[string]string { @@ -746,13 +746,13 @@ var map_RFC2307Config = map[string]string{ "": "RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the RFC2307 schema", "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)", - "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", - "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", + "groupNameAttributes": "groupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupMembershipAttributes": "groupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", - "userUIDAttribute": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", - "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. 
The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", - "tolerateMemberNotFoundErrors": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", - "tolerateMemberOutOfScopeErrors": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", + "userUIDAttribute": "userUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", + "userNameAttributes": "userNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", + "tolerateMemberNotFoundErrors": "tolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. 
If 'true', an LDAP query for users that doesn't find any will be tolerated and an only and error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", + "tolerateMemberOutOfScopeErrors": "tolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", } func (RFC2307Config) SwaggerDoc() map[string]string { @@ -761,8 +761,8 @@ func (RFC2307Config) SwaggerDoc() map[string]string { var map_RegistryLocation = map[string]string{ "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", } func (RegistryLocation) SwaggerDoc() map[string]string { @@ -771,8 +771,8 @@ func (RegistryLocation) SwaggerDoc() map[string]string { var map_RemoteConnectionInfo = map[string]string{ "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", - "url": "URL is the remote URL to connect to", - "ca": "CA is the CA for verifying TLS connections", + "url": "url is the remote URL to connect to", + "ca": "ca is the CA for verifying TLS connections", } func (RemoteConnectionInfo) SwaggerDoc() map[string]string { @@ -781,11 +781,11 @@ func (RemoteConnectionInfo) SwaggerDoc() map[string]string { var map_RequestHeaderAuthenticationOptions = map[string]string{ "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", - "clientCA": "ClientCA is a file with the trusted signer certs. It is required.", - "clientCommonNames": "ClientCommonNames is a required list of common names to require a match from.", - "usernameHeaders": "UsernameHeaders is the list of headers to check for user information. First hit wins.", + "clientCA": "clientCA is a file with the trusted signer certs. 
It is required.", + "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.", "groupHeaders": "GroupNameHeader is the set of headers to check for group information. All are unioned.", - "extraHeaderPrefixes": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", } func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { @@ -794,14 +794,14 @@ func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { var map_RequestHeaderIdentityProvider = map[string]string{ "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", - "loginURL": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", - "challengeURL": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", - "clientCA": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", - "clientCommonNames": "ClientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", - "headers": "Headers is the set of headers to check for identity information", - "preferredUsernameHeaders": "PreferredUsernameHeaders is the set of headers to check for the preferred username", - "nameHeaders": "NameHeaders is the set of headers to check for the display name", - "emailHeaders": "EmailHeaders is the set of headers to check for the email address", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", } func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { @@ -810,7 +810,7 @@ func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { var map_RoutingConfig = map[string]string{ "": "RoutingConfig holds the necessary configuration options for routing to subdomains", - "subdomain": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", + "subdomain": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", } func (RoutingConfig) SwaggerDoc() map[string]string { @@ -819,9 +819,9 @@ func (RoutingConfig) SwaggerDoc() map[string]string { var map_SecurityAllocator = map[string]string{ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", - "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. 
The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", - "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", - "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "uidAllocatorRange": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. 
Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", } func (SecurityAllocator) SwaggerDoc() map[string]string { @@ -830,11 +830,11 @@ func (SecurityAllocator) SwaggerDoc() map[string]string { var map_ServiceAccountConfig = map[string]string{ "": "ServiceAccountConfig holds the necessary configuration options for a service account", - "managedNames": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", - "limitSecretReferences": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", - "privateKeyFile": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", - "publicKeyFiles": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", - "masterCA": "MasterCA is the CA for verifying the TLS connection back to the master. 
The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", + "managedNames": "managedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", + "limitSecretReferences": "limitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", + "privateKeyFile": "privateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", + "publicKeyFiles": "publicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "masterCA": "masterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", } func (ServiceAccountConfig) SwaggerDoc() map[string]string { @@ -843,7 +843,7 @@ func (ServiceAccountConfig) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "signer": "signer holds the signing information used to automatically sign serving certificates. 
If this value is nil, then certs are not signed automatically.", } func (ServiceServingCert) SwaggerDoc() map[string]string { @@ -852,12 +852,12 @@ func (ServiceServingCert) SwaggerDoc() map[string]string { var map_ServingInfo = map[string]string{ "": "ServingInfo holds information about serving web pages", - "bindAddress": "BindAddress is the ip:port to serve on", - "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", - "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", - "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", - "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", - "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", + "bindAddress": "bindAddress is the ip:port to serve on", + "bindNetwork": "bindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "clientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "namedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "minTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "cipherSuites contains an overridden list of ciphers for the server to support. 
Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", } func (ServingInfo) SwaggerDoc() map[string]string { @@ -866,9 +866,9 @@ func (ServingInfo) SwaggerDoc() map[string]string { var map_SessionConfig = map[string]string{ "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", - "sessionSecretsFile": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", - "sessionMaxAgeSeconds": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", - "sessionName": "SessionName is the cookie name used to store the session", + "sessionSecretsFile": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "sessionMaxAgeSeconds": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "sessionName is the cookie name used to store the session", } func (SessionConfig) SwaggerDoc() map[string]string { @@ -877,8 +877,8 @@ func (SessionConfig) SwaggerDoc() map[string]string { var map_SessionSecret = map[string]string{ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", - "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", - "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "authentication": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES-", } func (SessionSecret) SwaggerDoc() map[string]string { @@ -887,7 +887,7 @@ func (SessionSecret) SwaggerDoc() map[string]string { var map_SessionSecrets = map[string]string{ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", + "secrets": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", } func (SessionSecrets) SwaggerDoc() map[string]string { @@ -913,10 +913,10 @@ func (StringSource) SwaggerDoc() map[string]string { var map_StringSourceSpec = map[string]string{ "": "StringSourceSpec specifies a string value, or external location", - "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", - "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", - "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", - "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", + "value": "value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "file references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "keyFile 
references a file containing the key to use to decrypt the value.", } func (StringSourceSpec) SwaggerDoc() map[string]string { @@ -925,9 +925,9 @@ func (StringSourceSpec) SwaggerDoc() map[string]string { var map_TokenConfig = map[string]string{ "": "TokenConfig holds the necessary configuration options for authorization and access tokens", - "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", - "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", - "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", + "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. 
The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", } func (TokenConfig) SwaggerDoc() map[string]string { @@ -936,7 +936,7 @@ func (TokenConfig) SwaggerDoc() map[string]string { var map_UserAgentDenyRule = map[string]string{ "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", - "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", + "rejectionMessage": "rejectionMessage is the message shown when rejecting a client. If it is not a set, the default message is used.", } func (UserAgentDenyRule) SwaggerDoc() map[string]string { @@ -946,7 +946,7 @@ func (UserAgentDenyRule) SwaggerDoc() map[string]string { var map_UserAgentMatchRule = map[string]string{ "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", "regex": "UserAgentRegex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", - "httpVerbs": "HTTPVerbs specifies which HTTP verbs should be matched. 
An empty list means \"match all verbs\".", + "httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", } func (UserAgentMatchRule) SwaggerDoc() map[string]string { @@ -957,7 +957,7 @@ var map_UserAgentMatchingConfig = map[string]string{ "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", "requiredClients": "If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", "deniedClients": "If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", - "defaultRejectionMessage": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", + "defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not a set, a generic message is given.", } func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { @@ -966,8 +966,8 @@ func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { var map_WebhookTokenAuthenticator = map[string]string{ "": "WebhookTokenAuthenticators holds the necessary configuation options for external token authenticators", - "configFile": "ConfigFile is a path to a Kubeconfig file with the webhook configuration", - "cacheTTL": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. \"0m\"), caching is disabled", + "configFile": "configFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", } func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go index 4b5c8d6efb..d1396fbfb2 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go @@ -108,7 +108,7 @@ type AlibabaCloudMachineProviderConfig struct { // +optional DataDisks []DataDiskProperties `json:"dataDisk,omitempty"` - // SecurityGroups is a list of security group references to assign to the instance. + // securityGroups is a list of security group references to assign to the instance. // A reference holds either the security group ID, the resource name, or the required tags to search. // When more than one security group is returned for a tag search, all the groups are associated with the instance up to the // maximum number of security groups to which an instance can belong. @@ -116,32 +116,32 @@ type AlibabaCloudMachineProviderConfig struct { // https://www.alibabacloud.com/help/en/doc-detail/25412.htm SecurityGroups []AlibabaResourceReference `json:"securityGroups,omitempty"` - // Bandwidth describes the internet bandwidth strategy for the instance + // bandwidth describes the internet bandwidth strategy for the instance // +optional Bandwidth BandwidthProperties `json:"bandwidth,omitempty"` - // SystemDisk holds the properties regarding the system disk for the instance + // systemDisk holds the properties regarding the system disk for the instance // +optional SystemDisk SystemDiskProperties `json:"systemDisk,omitempty"` - // VSwitch is a reference to the vswitch to use for this instance. + // vSwitch is a reference to the vswitch to use for this instance. // A reference holds either the vSwitch ID, the resource name, or the required tags to search. 
// When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. // This parameter is required when you create an instance of the VPC type. // You can call the DescribeVSwitches operation to query the created vSwitches. VSwitch AlibabaResourceReference `json:"vSwitch"` - // RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role. + // ramRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role. // +optional RAMRoleName string `json:"ramRoleName,omitempty"` - // ResourceGroup references the resource group to which to assign the instance. + // resourceGroup references the resource group to which to assign the instance. // A reference holds either the resource group ID, the resource name, or the required tags to search. // When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. // Resource Groups do not support searching by tags. ResourceGroup AlibabaResourceReference `json:"resourceGroup"` - // Tenancy specifies whether to create the instance on a dedicated host. + // tenancy specifies whether to create the instance on a dedicated host. // Valid values: // // default: creates the instance on a non-dedicated host. @@ -151,12 +151,12 @@ type AlibabaCloudMachineProviderConfig struct { // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with alibabacloud credentials. 
Otherwise, defaults to permissions + // credentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions // provided by attached RAM role where the actuator is running. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` @@ -173,15 +173,15 @@ type AlibabaResourceReference struct { // type identifies the resource reference type for this entry. Type AlibabaResourceReferenceType `json:"type"` - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // Name of the resource + // name of the resource // +optional Name *string `json:"name,omitempty"` - // Tags is a set of metadata based upon ECS object tags used to identify a resource. + // tags is a set of metadata based upon ECS object tags used to identify a resource. // For details about usage when multiple resources are found, please see the owning parent field documentation. // +optional Tags *[]Tag `json:"tags,omitempty"` @@ -213,15 +213,15 @@ type AlibabaCloudMachineProviderStatus struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // InstanceID is the instance ID of the machine created in alibabacloud + // instanceId is the instance ID of the machine created in alibabacloud // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the state of the alibabacloud instance for this machine + // instanceState is the state of the alibabacloud instance for this machine // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -229,7 +229,7 @@ type AlibabaCloudMachineProviderStatus struct { // 
SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category type SystemDiskProperties struct { - // Category is the category of the system disk. + // category is the category of the system disk. // Valid values: // cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. // cloud_efficiency: ultra disk. @@ -242,7 +242,7 @@ type SystemDiskProperties struct { // +optional Category string `json:"category,omitempty"` - // PerformanceLevel is the performance level of the ESSD used as the system disk. + // performanceLevel is the performance level of the ESSD used as the system disk. // Valid values: // // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS. @@ -256,14 +256,14 @@ type SystemDiskProperties struct { // +optional PerformanceLevel string `json:"performanceLevel,omitempty"` - // Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). + // name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). // Empty value means the platform chooses a default, which is subject to change over time. // Currently the default is `""`. // +kubebuilder:validation:MaxLength=128 // +optional Name string `json:"name,omitempty"` - // Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. + // size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. // The value must be at least 20 and greater than or equal to the size of the image. 
// Empty value means the platform chooses a default, which is subject to change over time. // Currently the default is `40` or the size of the image depending on whichever is greater. @@ -357,7 +357,7 @@ type Tag struct { // Bandwidth describes the bandwidth strategy for the network of the instance type BandwidthProperties struct { - // InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: + // internetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: // When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. // Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. // When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. @@ -365,7 +365,7 @@ type BandwidthProperties struct { // +optional InternetMaxBandwidthIn int64 `json:"internetMaxBandwidthIn,omitempty"` - // InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. + // internetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. // When a value greater than 0 is used then a public IP address is assigned to the instance. // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. // Currently the default is `0` diff --git a/vendor/github.com/openshift/api/machine/v1/types_aws.go b/vendor/github.com/openshift/api/machine/v1/types_aws.go index bc8a7efce9..5ad2b923fd 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_aws.go +++ b/vendor/github.com/openshift/api/machine/v1/types_aws.go @@ -8,17 +8,17 @@ package v1 // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ARN' ? 
has(self.arn) : !has(self.arn)",message="arn is required when type is ARN, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Filters' ? has(self.filters) : !has(self.filters)",message="filters is required when type is Filters, and forbidden otherwise" type AWSResourceReference struct { - // Type determines how the reference will fetch the AWS resource. + // type determines how the reference will fetch the AWS resource. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type AWSResourceReferenceType `json:"type"` - // ID of resource. + // id of resource. // +optional ID *string `json:"id,omitempty"` - // ARN of resource. + // arn of resource. // +optional ARN *string `json:"arn,omitempty"` - // Filters is a set of filters used to identify a resource. + // filters is a set of filters used to identify a resource. // +optional // +listType=atomic Filters *[]AWSResourceFilter `json:"filters,omitempty"` @@ -41,10 +41,10 @@ const ( // AWSResourceFilter is a filter used to identify an AWS resource type AWSResourceFilter struct { - // Name of the filter. Filter names are case-sensitive. - // +kubebuilder:validation:Required + // name of the filter. Filter names are case-sensitive. + // +required Name string `json:"name"` - // Values includes one or more filter values. Filter values are case-sensitive. + // values includes one or more filter values. Filter values are case-sensitive. 
// +optional // +listType=atomic Values []string `json:"values,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index 5ac7a5aae9..12b6e5184f 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -42,7 +42,24 @@ type ControlPlaneMachineSet struct { // ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. type ControlPlaneMachineSetSpec struct { - // State defines whether the ControlPlaneMachineSet is Active or Inactive. + // machineNamePrefix is the prefix used when creating machine names. + // Each machine name will consist of this prefix, followed by + // a randomly generated string of 5 characters, and the index of the machine. + // It must be a lowercase RFC 1123 subdomain, consisting of lowercase + // alphanumeric characters, '-', or '.', and must start and end + // with an alphanumeric character. + // The prefix must be between 1 and 245 characters in length. + // For example, if machineNamePrefix is set to 'control-plane', + // and three machines are created, their names might be: + // control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2 + // +openshift:validation:FeatureGateAwareXValidation:featureGate=CPMSMachineNamePrefix,rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=245 + // +openshift:enable:FeatureGate=CPMSMachineNamePrefix + // +optional + MachineNamePrefix string `json:"machineNamePrefix,omitempty"` + + // state defines whether the ControlPlaneMachineSet is Active or Inactive. 
// When Inactive, the ControlPlaneMachineSet will not take any action on the // state of the Machines within the cluster. // When Active, the ControlPlaneMachineSet will reconcile the Machines and @@ -55,7 +72,7 @@ type ControlPlaneMachineSetSpec struct { // +optional State ControlPlaneMachineSetState `json:"state,omitempty"` - // Replicas defines how many Control Plane Machines should be + // replicas defines how many Control Plane Machines should be // created by this ControlPlaneMachineSet. // This field is immutable and cannot be changed after cluster // installation. @@ -64,10 +81,10 @@ type ControlPlaneMachineSetSpec struct { // +kubebuilder:validation:Enum:=3;5 // +kubebuilder:default:=3 // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="replicas is immutable" - // +kubebuilder:validation:Required + // +required Replicas *int32 `json:"replicas"` - // Strategy defines how the ControlPlaneMachineSet will update + // strategy defines how the ControlPlaneMachineSet will update // Machines when it detects a change to the ProviderSpec. // +kubebuilder:default:={type: RollingUpdate} // +optional @@ -78,12 +95,12 @@ type ControlPlaneMachineSetSpec struct { // It must match the template's labels. // This field is considered immutable after creation of the resource. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable" - // +kubebuilder:validation:Required + // +required Selector metav1.LabelSelector `json:"selector"` - // Template describes the Control Plane Machines that will be created + // template describes the Control Plane Machines that will be created // by this ControlPlaneMachineSet. - // +kubebuilder:validation:Required + // +required Template ControlPlaneMachineSetTemplate `json:"template"` } @@ -113,10 +130,10 @@ const ( // + future version of the Machine API Machine. 
// +kubebuilder:validation:XValidation:rule="has(self.machineType) && self.machineType == 'machines_v1beta1_machine_openshift_io' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)",message="machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise" type ControlPlaneMachineSetTemplate struct { - // MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. + // machineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. // Currently, the only valid value is machines_v1beta1_machine_openshift_io. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"` // OpenShiftMachineV1Beta1Machine defines the template for creating Machines @@ -138,7 +155,7 @@ const ( // OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create // Machines from the v1beta1.machine.openshift.io API group. type OpenShiftMachineV1Beta1MachineTemplate struct { - // FailureDomains is the list of failure domains (sometimes called + // failureDomains is the list of failure domains (sometimes called // availability zones) in which the ControlPlaneMachineSet should balance // the Control Plane Machines. // This will be merged into the ProviderSpec given in the template. @@ -149,16 +166,16 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // ObjectMeta is the standard object metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // Labels are required to match the ControlPlaneMachineSet selector. - // +kubebuilder:validation:Required + // +required ObjectMeta ControlPlaneMachineSetTemplateObjectMeta `json:"metadata"` - // Spec contains the desired configuration of the Control Plane Machines. 
+ // spec contains the desired configuration of the Control Plane Machines. // The ProviderSpec within contains platform specific details // for creating the Control Plane Machines. // The ProviderSe should be complete apart from the platform specific // failure domain field. This will be overriden when the Machines // are created based on the FailureDomains field. - // +kubebuilder:validation:Required + // +required Spec machinev1beta1.MachineSpec `json:"spec"` } @@ -175,10 +192,10 @@ type ControlPlaneMachineSetTemplateObjectMeta struct { // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-role' in self && self['machine.openshift.io/cluster-api-machine-role'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'" // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-type' in self && self['machine.openshift.io/cluster-api-machine-type'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'" // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-cluster' in self",message="label 'machine.openshift.io/cluster-api-cluster' is required" - // +kubebuilder:validation:Required + // +required Labels map[string]string `json:"labels"` - // Annotations is an unstructured key value map stored with a resource that may be + // annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. // More info: http://kubernetes.io/docs/user-guide/annotations @@ -189,7 +206,7 @@ type ControlPlaneMachineSetTemplateObjectMeta struct { // ControlPlaneMachineSetStrategy defines the strategy for applying updates to the // Control Plane Machines managed by the ControlPlaneMachineSet. 
type ControlPlaneMachineSetStrategy struct { - // Type defines the type of update strategy that should be + // type defines the type of update strategy that should be // used when updating Machines owned by the ControlPlaneMachineSet. // Valid values are "RollingUpdate" and "OnDelete". // The current default value is "RollingUpdate". @@ -240,23 +257,23 @@ const ( // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'VSphere' ? has(self.vsphere) : !has(self.vsphere)",message="vsphere configuration is required when platform is VSphere, and forbidden otherwise" // +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Nutanix' ? has(self.nutanix) : !has(self.nutanix)",message="nutanix configuration is required when platform is Nutanix, and forbidden otherwise" type FailureDomains struct { - // Platform identifies the platform for which the FailureDomain represents. + // platform identifies the platform for which the FailureDomain represents. // Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Platform configv1.PlatformType `json:"platform"` - // AWS configures failure domain information for the AWS platform. + // aws configures failure domain information for the AWS platform. // +listType=atomic // +optional AWS *[]AWSFailureDomain `json:"aws,omitempty"` - // Azure configures failure domain information for the Azure platform. + // azure configures failure domain information for the Azure platform. // +listType=atomic // +optional Azure *[]AzureFailureDomain `json:"azure,omitempty"` - // GCP configures failure domain information for the GCP platform. + // gcp configures failure domain information for the GCP platform. 
// +listType=atomic // +optional GCP *[]GCPFailureDomain `json:"gcp,omitempty"` @@ -267,7 +284,7 @@ type FailureDomains struct { // +optional VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` - // OpenStack configures failure domain information for the OpenStack platform. + // openstack configures failure domain information for the OpenStack platform. // +optional // // + --- @@ -289,19 +306,19 @@ type FailureDomains struct { // AWSFailureDomain configures failure domain information for the AWS platform. // +kubebuilder:validation:MinProperties:=1 type AWSFailureDomain struct { - // Subnet is a reference to the subnet to use for this instance. + // subnet is a reference to the subnet to use for this instance. // +optional Subnet *AWSResourceReference `json:"subnet,omitempty"` - // Placement configures the placement information for this instance. + // placement configures the placement information for this instance. // +optional Placement AWSFailureDomainPlacement `json:"placement,omitempty"` } // AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain. type AWSFailureDomainPlacement struct { - // AvailabilityZone is the availability zone of the instance. - // +kubebuilder:validation:Required + // availabilityZone is the availability zone of the instance. + // +required AvailabilityZone string `json:"availabilityZone"` } @@ -309,7 +326,7 @@ type AWSFailureDomainPlacement struct { type AzureFailureDomain struct { // Availability Zone for the virtual machine. // If nil, the virtual machine should be deployed to no zone. - // +kubebuilder:validation:Required + // +required Zone string `json:"zone"` // subnet is the name of the network subnet in which the VM will be created. @@ -322,8 +339,8 @@ type AzureFailureDomain struct { // GCPFailureDomain configures failure domain information for the GCP platform type GCPFailureDomain struct { - // Zone is the zone in which the GCP machine provider will create the VM. 
- // +kubebuilder:validation:Required + // zone is the zone in which the GCP machine provider will create the VM. + // +required Zone string `json:"zone"` } @@ -333,7 +350,7 @@ type VSphereFailureDomain struct { // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. // When balancing machines across failure domains, the control plane machine set will inject configuration from the // Infrastructure resource into the machine providerSpec to allocate the machine to a failure domain. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -367,7 +384,7 @@ type OpenStackFailureDomain struct { type NutanixFailureDomainReference struct { // name of the failure domain in which the nutanix machine provider will create the VM. // Failure domains are defined in a cluster's config.openshift.io/Infrastructure resource. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:Pattern=`[a-z0-9]([-a-z0-9]*[a-z0-9])?` @@ -400,7 +417,7 @@ type RootVolume struct { // + the control plane with a root volume. This is because the default volume type in Cinder is not guaranteed // + to be available, therefore we prefer the user to be explicit about the volume type to use. // + We apply the same logic in CPMS: if the failure domain specifies a root volume, we require the user to specify a volume type. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 VolumeType string `json:"volumeType"` @@ -408,7 +425,7 @@ type RootVolume struct { // ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. type ControlPlaneMachineSetStatus struct { - // Conditions represents the observations of the ControlPlaneMachineSet's current state. + // conditions represents the observations of the ControlPlaneMachineSet's current state. 
// Known .status.conditions.type are: Available, Degraded and Progressing. // +patchMergeKey=type // +patchStrategy=merge @@ -417,27 +434,27 @@ type ControlPlaneMachineSetStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // ObservedGeneration is the most recent generation observed for this + // observedGeneration is the most recent generation observed for this // ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, // which is updated on mutation by the API Server. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // Replicas is the number of Control Plane Machines created by the + // replicas is the number of Control Plane Machines created by the // ControlPlaneMachineSet controller. // Note that during update operations this value may differ from the // desired replica count. // +optional Replicas int32 `json:"replicas,omitempty"` - // ReadyReplicas is the number of Control Plane Machines created by the + // readyReplicas is the number of Control Plane Machines created by the // ControlPlaneMachineSet controller which are ready. // Note that this value may be higher than the desired number of replicas // while rolling updates are in-progress. // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty"` - // UpdatedReplicas is the number of non-terminated Control Plane Machines + // updatedReplicas is the number of non-terminated Control Plane Machines // created by the ControlPlaneMachineSet controller that have the desired // provider spec and are ready. // This value is set to 0 when a change is detected to the desired spec. 
@@ -448,7 +465,7 @@ type ControlPlaneMachineSetStatus struct { // +optional UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` - // UnavailableReplicas is the number of Control Plane Machines that are + // unavailableReplicas is the number of Control Plane Machines that are // still required before the ControlPlaneMachineSet reaches the desired // available capacity. When this value is non-zero, the number of // ReadyReplicas is less than the desired Replicas. diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go index e5e0ae8305..cc1a355b53 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go @@ -23,40 +23,40 @@ type NutanixMachineProviderConfig struct { // of the Prism Central), in which the Machine's VM will be created. // The cluster identifier (uuid or name) can be obtained from the Prism Central console // or using the prism_central API. - // +kubebuilder:validation:Required + // +required Cluster NutanixResourceIdentifier `json:"cluster"` // image is to identify the rhcos image uploaded to the Prism Central (PC) // The image identifier (uuid or name) can be obtained from the Prism Central console // or using the prism_central API. - // +kubebuilder:validation:Required + // +required Image NutanixResourceIdentifier `json:"image"` // subnets holds a list of identifiers (one or more) of the cluster's network subnets // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be // obtained from the Prism Central console or using the prism_central API. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 Subnets []NutanixResourceIdentifier `json:"subnets"` // vcpusPerSocket is the number of vCPUs per socket of the VM - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 VCPUsPerSocket int32 `json:"vcpusPerSocket"` // vcpuSockets is the number of vCPU sockets of the VM - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 VCPUSockets int32 `json:"vcpuSockets"` // memorySize is the memory size (in Quantity format) of the VM // The minimum memorySize is 2Gi bytes - // +kubebuilder:validation:Required + // +required MemorySize resource.Quantity `json:"memorySize"` // systemDiskSize is size (in Quantity format) of the system disk of the VM // The minimum systemDiskSize is 20Gi bytes - // +kubebuilder:validation:Required + // +required SystemDiskSize resource.Quantity `json:"systemDiskSize"` // bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot. @@ -96,7 +96,7 @@ type NutanixMachineProviderConfig struct { // credentialsSecret is a local reference to a secret that contains the // credentials data to access Nutanix PC client - // +kubebuilder:validation:Required + // +required CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"` // failureDomain refers to the name of the FailureDomain with which this Machine is associated. 
@@ -113,13 +113,13 @@ type NutanixCategory struct { // key is the prism category key name // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 - // +kubebuilder:validation:Required + // +required Key string `json:"key"` // value is the prism category value associated with the key // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 - // +kubebuilder:validation:Required + // +required Value string `json:"value"` } @@ -151,9 +151,9 @@ const ( // NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) // +union type NutanixResourceIdentifier struct { - // Type is the identifier type to use for this resource. + // type is the identifier type to use for this resource. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:=uuid;name Type NutanixIdentifierType `json:"type"` @@ -186,7 +186,7 @@ type NutanixGPU struct { // type is the identifier type of the GPU device. // Valid values are Name and DeviceID. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NutanixGPUIdentifierType `json:"type"` // deviceID is the GPU device ID with the integer value. @@ -219,7 +219,7 @@ type NutanixStorageResourceIdentifier struct { // type is the identifier type to use for this resource. // The valid value is "uuid". // +unionDiscriminator - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:=uuid Type NutanixIdentifierType `json:"type"` @@ -279,13 +279,13 @@ type NutanixVMDiskDeviceProperties struct { // deviceType specifies the disk device type. // The valid values are "Disk" and "CDRom", and the default is "Disk". // +kubebuilder:default=Disk - // +kubebuilder:validation:Required + // +required DeviceType NutanixDiskDeviceType `json:"deviceType"` // adapterType is the adapter type of the disk address. 
// If the deviceType is "Disk", the valid adapterType can be "SCSI", "IDE", "PCI", "SATA" or "SPAPR". // If the deviceType is "CDRom", the valid adapterType can be "IDE" or "SATA". - // +kubebuilder:validation:Required + // +required AdapterType NutanixDiskAdapterType `json:"adapterType,omitempty"` // deviceIndex is the index of the disk address. The valid values are non-negative integers, with the default value 0. @@ -295,7 +295,7 @@ type NutanixVMDiskDeviceProperties struct { // the deviceIndex should start from 1. // +kubebuilder:default=0 // +kubebuilder:validation:Minimum=0 - // +kubebuilder:validation:Required + // +required DeviceIndex int32 `json:"deviceIndex,omitempty"` } @@ -304,7 +304,7 @@ type NutanixVMDisk struct { // diskSize is size (in Quantity format) of the disk attached to the VM. // See https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Format for the Quantity format and example documentation. // The minimum diskSize is 1GB. - // +kubebuilder:validation:Required + // +required DiskSize resource.Quantity `json:"diskSize"` // deviceProperties are the properties of the disk device. diff --git a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go index c131139c54..b676a8d5f7 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go @@ -142,18 +142,18 @@ type PowerVSMachineProviderConfig struct { // a validation error. // +union type PowerVSResource struct { - // Type identifies the resource type for this entry. + // type identifies the resource type for this entry. 
// Valid values are ID, Name and RegEx // +kubebuilder:validation:Enum:=ID;Name;RegEx // +optional Type PowerVSResourceType `json:"type,omitempty"` - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // Name of resource + // name of resource // +optional Name *string `json:"name,omitempty"` - // Regex to find resource + // regex to find resource // Regex contains the pattern to match to find a resource // +optional RegEx *string `json:"regex,omitempty"` @@ -200,7 +200,7 @@ type PowerVSMachineProviderStatus struct { // referenced secret inside the same namespace. // +structType=atomic type PowerVSSecretReference struct { - // Name of the secret. + // name of the secret. // +optional Name string `json:"name,omitempty"` } @@ -211,7 +211,7 @@ type LoadBalancerReference struct { // The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only. // The value must not end with a hyphen. // It is a reference to existing LoadBalancer created by openshift installer component. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$` // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 @@ -221,7 +221,7 @@ type LoadBalancerReference struct { // More details about Application LoadBalancer // https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui // Supported values are Application. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Enum:="Application" Type IBMVPCLoadBalancerType `json:"type"` } diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml index b001170faf..7be04ec844 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.featuregated-crd-manifests.yaml @@ -7,6 +7,7 @@ controlplanemachinesets.machine.openshift.io: Capability: MachineAPI Category: "" FeatureGates: + - CPMSMachineNamePrefix - MachineAPIMigration FilenameOperatorName: control-plane-machine-set FilenameOperatorOrdering: "01" diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index 32b86a7e9b..4d96b3552e 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -20,15 +20,15 @@ var map_AlibabaCloudMachineProviderConfig = map[string]string{ "zoneId": "The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list.", "imageId": "The ID of the image used to create the instance.", "dataDisk": "DataDisks holds information regarding the extra disks attached to the instance", - "securityGroups": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. 
For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", - "bandwidth": "Bandwidth describes the internet bandwidth strategy for the instance", - "systemDisk": "SystemDisk holds the properties regarding the system disk for the instance", - "vSwitch": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", - "ramRoleName": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", - "resourceGroup": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", - "tenancy": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with alibabacloud credentials. 
Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", + "securityGroups": "securityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", + "bandwidth": "bandwidth describes the internet bandwidth strategy for the instance", + "systemDisk": "systemDisk holds the properties regarding the system disk for the instance", + "vSwitch": "vSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", + "ramRoleName": "ramRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", + "resourceGroup": "resourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", + "tenancy": "tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. 
If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", "tag": "Tags are the set of metadata to add to an instance.", } @@ -48,9 +48,9 @@ func (AlibabaCloudMachineProviderConfigList) SwaggerDoc() map[string]string { var map_AlibabaCloudMachineProviderStatus = map[string]string{ "": "AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "instanceId": "InstanceID is the instance ID of the machine created in alibabacloud", - "instanceState": "InstanceState is the state of the alibabacloud instance for this machine", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in alibabacloud", + "instanceState": "instanceState is the state of the alibabacloud instance for this machine", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string { var map_AlibabaResourceReference = map[string]string{ "": "ResourceTagReference is a reference to a specific AlibabaCloud resource by ID, or tags. Only one of ID or Tags may be specified. Specifying more than one will result in a validation error.", "type": "type identifies the resource reference type for this entry.", - "id": "ID of resource", - "name": "Name of the resource", - "tags": "Tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.", + "id": "id of resource", + "name": "name of the resource", + "tags": "tags is a set of metadata based upon ECS object tags used to identify a resource. 
For details about usage when multiple resources are found, please see the owning parent field documentation.", } func (AlibabaResourceReference) SwaggerDoc() map[string]string { @@ -71,8 +71,8 @@ func (AlibabaResourceReference) SwaggerDoc() map[string]string { var map_BandwidthProperties = map[string]string{ "": "Bandwidth describes the bandwidth strategy for the network of the instance", - "internetMaxBandwidthIn": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", - "internetMaxBandwidthOut": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", + "internetMaxBandwidthIn": "internetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. 
Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", + "internetMaxBandwidthOut": "internetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", } func (BandwidthProperties) SwaggerDoc() map[string]string { @@ -97,10 +97,10 @@ func (DataDiskProperties) SwaggerDoc() map[string]string { var map_SystemDiskProperties = map[string]string{ "": "SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category", - "category": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", - "performanceLevel": "PerformanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. 
For more information about ESSD performance levels, see ESSDs.", - "name": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", - "size": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", + "category": "category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", + "performanceLevel": "performanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. 
For more information about ESSD performance levels, see ESSDs.", + "name": "name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", + "size": "size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", } func (SystemDiskProperties) SwaggerDoc() map[string]string { @@ -119,8 +119,8 @@ func (Tag) SwaggerDoc() map[string]string { var map_AWSResourceFilter = map[string]string{ "": "AWSResourceFilter is a filter used to identify an AWS resource", - "name": "Name of the filter. Filter names are case-sensitive.", - "values": "Values includes one or more filter values. Filter values are case-sensitive.", + "name": "name of the filter. Filter names are case-sensitive.", + "values": "values includes one or more filter values. Filter values are case-sensitive.", } func (AWSResourceFilter) SwaggerDoc() map[string]string { @@ -129,10 +129,10 @@ func (AWSResourceFilter) SwaggerDoc() map[string]string { var map_AWSResourceReference = map[string]string{ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. 
Specifying more than one will result in a validation error.", - "type": "Type determines how the reference will fetch the AWS resource.", - "id": "ID of resource.", - "arn": "ARN of resource.", - "filters": "Filters is a set of filters used to identify a resource.", + "type": "type determines how the reference will fetch the AWS resource.", + "id": "id of resource.", + "arn": "arn of resource.", + "filters": "filters is a set of filters used to identify a resource.", } func (AWSResourceReference) SwaggerDoc() map[string]string { @@ -141,8 +141,8 @@ func (AWSResourceReference) SwaggerDoc() map[string]string { var map_AWSFailureDomain = map[string]string{ "": "AWSFailureDomain configures failure domain information for the AWS platform.", - "subnet": "Subnet is a reference to the subnet to use for this instance.", - "placement": "Placement configures the placement information for this instance.", + "subnet": "subnet is a reference to the subnet to use for this instance.", + "placement": "placement configures the placement information for this instance.", } func (AWSFailureDomain) SwaggerDoc() map[string]string { @@ -151,7 +151,7 @@ func (AWSFailureDomain) SwaggerDoc() map[string]string { var map_AWSFailureDomainPlacement = map[string]string{ "": "AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.", - "availabilityZone": "AvailabilityZone is the availability zone of the instance.", + "availabilityZone": "availabilityZone is the availability zone of the instance.", } func (AWSFailureDomainPlacement) SwaggerDoc() map[string]string { @@ -187,12 +187,13 @@ func (ControlPlaneMachineSetList) SwaggerDoc() map[string]string { } var map_ControlPlaneMachineSetSpec = map[string]string{ - "": "ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet.", - "state": "State defines whether the ControlPlaneMachineSet is Active or Inactive. 
When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", - "replicas": "Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", - "strategy": "Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", - "selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource.", - "template": "Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", + "": "ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet.", + "machineNamePrefix": "machineNamePrefix is the prefix used when creating machine names. Each machine name will consist of this prefix, followed by a randomly generated string of 5 characters, and the index of the machine. It must be a lowercase RFC 1123 subdomain, consisting of lowercase alphanumeric characters, '-', or '.', and must start and end with an alphanumeric character. The prefix must be between 1 and 245 characters in length. For example, if machineNamePrefix is set to 'control-plane', and three machines are created, their names might be: control-plane-abcde-0, control-plane-fghij-1, control-plane-klmno-2", + "state": "state defines whether the ControlPlaneMachineSet is Active or Inactive. 
When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.", + "replicas": "replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field.", + "strategy": "strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.", + "selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource.", + "template": "template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.", } func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string { @@ -201,12 +202,12 @@ func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetStatus = map[string]string{ "": "ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.", - "conditions": "Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", - "observedGeneration": "ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", - "replicas": "Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. 
Note that during update operations this value may differ from the desired replica count.", - "readyReplicas": "ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", - "updatedReplicas": "UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", - "unavailableReplicas": "UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", + "conditions": "conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.", + "observedGeneration": "observedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSets's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.", + "readyReplicas": "readyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. 
Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.", + "updatedReplicas": "updatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.", + "unavailableReplicas": "unavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", } func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string { @@ -215,7 +216,7 @@ func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetStrategy = map[string]string{ "": "ControlPlaneMachineSetStrategy defines the strategy for applying updates to the Control Plane Machines managed by the ControlPlaneMachineSet.", - "type": "Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".", + "type": "type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". 
The current default value is \"RollingUpdate\".", } func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string { @@ -224,7 +225,7 @@ func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetTemplate = map[string]string{ "": "ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet to create the Machines that it will manage in the future. ", - "machineType": "MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", + "machineType": "machineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", "machines_v1beta1_machine_openshift_io": "OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group.", } @@ -235,7 +236,7 @@ func (ControlPlaneMachineSetTemplate) SwaggerDoc() map[string]string { var map_ControlPlaneMachineSetTemplateObjectMeta = map[string]string{ "": "ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct. It allows users to specify labels and annotations that will be copied onto Machines created from this template.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'.", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "annotations": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", } func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { @@ -244,12 +245,12 @@ func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { var map_FailureDomains = map[string]string{ "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.", - "platform": "Platform identifies the platform for which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", - "aws": "AWS configures failure domain information for the AWS platform.", - "azure": "Azure configures failure domain information for the Azure platform.", - "gcp": "GCP configures failure domain information for the GCP platform.", + "platform": "platform identifies the platform for which the FailureDomain represents. 
Currently supported values are AWS, Azure, GCP, OpenStack, VSphere and Nutanix.", + "aws": "aws configures failure domain information for the AWS platform.", + "azure": "azure configures failure domain information for the Azure platform.", + "gcp": "gcp configures failure domain information for the GCP platform.", "vsphere": "vsphere configures failure domain information for the VSphere platform.", - "openstack": "OpenStack configures failure domain information for the OpenStack platform.", + "openstack": "openstack configures failure domain information for the OpenStack platform.", "nutanix": "nutanix configures failure domain information for the Nutanix platform.", } @@ -259,7 +260,7 @@ func (FailureDomains) SwaggerDoc() map[string]string { var map_GCPFailureDomain = map[string]string{ "": "GCPFailureDomain configures failure domain information for the GCP platform", - "zone": "Zone is the zone in which the GCP machine provider will create the VM.", + "zone": "zone is the zone in which the GCP machine provider will create the VM.", } func (GCPFailureDomain) SwaggerDoc() map[string]string { @@ -277,9 +278,9 @@ func (NutanixFailureDomainReference) SwaggerDoc() map[string]string { var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.", - "failureDomains": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", + "failureDomains": "failureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. 
This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", "metadata": "ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.", - "spec": "Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", + "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", } func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string { @@ -372,7 +373,7 @@ func (NutanixMachineProviderStatus) SwaggerDoc() map[string]string { var map_NutanixResourceIdentifier = map[string]string{ "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", - "type": "Type is the identifier type to use for this resource.", + "type": "type is the identifier type to use for this resource.", "uuid": "uuid is the UUID of the resource in the PC.", "name": "name is the resource name in the PC", } @@ -467,10 +468,10 @@ func (PowerVSMachineProviderStatus) SwaggerDoc() map[string]string { var map_PowerVSResource = map[string]string{ "": "PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx Only one of ID, Name or RegEx may be specified. 
Specifying more than one will result in a validation error.", - "type": "Type identifies the resource type for this entry. Valid values are ID, Name and RegEx", - "id": "ID of resource", - "name": "Name of resource", - "regex": "Regex to find resource Regex contains the pattern to match to find a resource", + "type": "type identifies the resource type for this entry. Valid values are ID, Name and RegEx", + "id": "id of resource", + "name": "name of resource", + "regex": "regex to find resource Regex contains the pattern to match to find a resource", } func (PowerVSResource) SwaggerDoc() map[string]string { @@ -479,7 +480,7 @@ func (PowerVSResource) SwaggerDoc() map[string]string { var map_PowerVSSecretReference = map[string]string{ "": "PowerVSSecretReference contains enough information to locate the referenced secret inside the same namespace.", - "name": "Name of the secret.", + "name": "name of the secret.", } func (PowerVSSecretReference) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go index da5fbc5152..89b55c7b41 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go @@ -166,9 +166,9 @@ type NetworkParam struct { Filter Filter `json:"filter,omitempty"` // Subnet within a network to use Subnets []SubnetParam `json:"subnets,omitempty"` - // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports + // noAllowedAddressPairs disables creation of allowed address pairs for the network ports NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"` - // PortTags allows users to specify a list of tags to add to ports created in a given network + // portTags allows users to specify a list of tags to add to ports created in a given network PortTags []string `json:"portTags,omitempty"` // The virtual 
network interface card (vNIC) type that is bound to the // neutron port. @@ -177,7 +177,7 @@ type NetworkParam struct { // host to pass and receive virtual network interface (VIF) port-specific // information to the plug-in. Profile map[string]string `json:"profile,omitempty"` - // PortSecurity optionally enables or disables security on ports managed by OpenStack + // portSecurity optionally enables or disables security on ports managed by OpenStack PortSecurity *bool `json:"portSecurity,omitempty"` } @@ -229,10 +229,10 @@ type SubnetParam struct { // Filters for optional network query Filter SubnetFilter `json:"filter,omitempty"` - // PortTags are tags that are added to ports created on this subnet + // portTags are tags that are added to ports created on this subnet PortTags []string `json:"portTags,omitempty"` - // PortSecurity optionally enables or disables security on ports managed by OpenStack + // portSecurity optionally enables or disables security on ports managed by OpenStack PortSecurity *bool `json:"portSecurity,omitempty"` } @@ -379,7 +379,7 @@ type RootVolume struct { type BlockDeviceStorage struct { // type is the type of block device to create. // This can be either "Volume" or "Local". - // +kubebuilder:validation:Required + // +required // +unionDiscriminator Type BlockDeviceType `json:"type"` @@ -414,16 +414,16 @@ type AdditionalBlockDevice struct { // Also, this name will be used for tagging the block device. // Information about the block device tag can be obtained from the OpenStack // metadata API or the config drive. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // sizeGiB is the size of the block device in gibibytes (GiB). - // +kubebuilder:validation:Required + // +required SizeGiB int `json:"sizeGiB"` // storage specifies the storage type of the block device and // additional storage options. 
- // +kubebuilder:validation:Required + // +required Storage BlockDeviceStorage `json:"storage"` } diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go index c8094eb269..2818d17bb8 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -79,11 +79,11 @@ var map_NetworkParam = map[string]string{ "fixedIp": "A fixed IPv4 address for the NIC.", "filter": "Filters for optional network query", "subnets": "Subnet within a network to use", - "noAllowedAddressPairs": "NoAllowedAddressPairs disables creation of allowed address pairs for the network ports", - "portTags": "PortTags allows users to specify a list of tags to add to ports created in a given network", + "noAllowedAddressPairs": "noAllowedAddressPairs disables creation of allowed address pairs for the network ports", + "portTags": "portTags allows users to specify a list of tags to add to ports created in a given network", "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.", "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.", - "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "portSecurity": "portSecurity optionally enables or disables security on ports managed by OpenStack", } func (NetworkParam) SwaggerDoc() map[string]string { @@ -217,8 +217,8 @@ func (SubnetFilter) SwaggerDoc() map[string]string { var map_SubnetParam = map[string]string{ "uuid": "The UUID of the network. 
Required if you omit the port attribute.", "filter": "Filters for optional network query", - "portTags": "PortTags are tags that are added to ports created on this subnet", - "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", + "portTags": "portTags are tags that are added to ports created on this subnet", + "portSecurity": "portSecurity optionally enables or disables security on ports managed by OpenStack", } func (SubnetParam) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index b2c66b7072..b34a87886d 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -13,38 +13,38 @@ type AWSMachineProviderConfig struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // AMI is the reference to the AMI from which to create the machine instance. + // ami is the reference to the AMI from which to create the machine instance. AMI AWSResourceReference `json:"ami"` - // InstanceType is the type of instance to create. Example: m4.xlarge + // instanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType"` - // Tags is the set of tags to add to apply to an instance, in addition to the ones + // tags is the set of tags to add to apply to an instance, in addition to the ones // added by default by the actuator. These tags are additive. The actuator will ensure // these tags are present, but will not remove any other tags that may exist on the // instance. 
// +optional Tags []TagSpecification `json:"tags,omitempty"` - // IAMInstanceProfile is a reference to an IAM role to assign to the instance + // iamInstanceProfile is a reference to an IAM role to assign to the instance // +optional IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions + // credentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions // provided by attached IAM role where the actuator is running. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // KeyName is the name of the KeyPair to use for SSH + // keyName is the name of the KeyPair to use for SSH // +optional KeyName *string `json:"keyName,omitempty"` - // DeviceIndex is the index of the device on the instance for the network interface attachment. + // deviceIndex is the index of the device on the instance for the network interface attachment. // Defaults to 0. DeviceIndex int64 `json:"deviceIndex"` - // PublicIP specifies whether the instance should get a public IP. If not present, + // publicIp specifies whether the instance should get a public IP. If not present, // it should use the default of its subnet. // +optional PublicIP *bool `json:"publicIp,omitempty"` - // NetworkInterfaceType specifies the type of network interface to be used for the primary + // networkInterfaceType specifies the type of network interface to be used for the primary // network interface. 
// Valid values are "ENA", "EFA", and omitted, which means no opinion and the platform // chooses a good default which may change over time. @@ -54,32 +54,32 @@ type AWSMachineProviderConfig struct { // +kubebuilder:validation:Enum:="ENA";"EFA" // +optional NetworkInterfaceType AWSNetworkInterfaceType `json:"networkInterfaceType,omitempty"` - // SecurityGroups is an array of references to security groups that should be applied to the + // securityGroups is an array of references to security groups that should be applied to the // instance. // +optional SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` - // Subnet is a reference to the subnet to use for this instance + // subnet is a reference to the subnet to use for this instance Subnet AWSResourceReference `json:"subnet"` - // Placement specifies where to create the instance in AWS + // placement specifies where to create the instance in AWS Placement Placement `json:"placement"` - // LoadBalancers is the set of load balancers to which the new instance + // loadBalancers is the set of load balancers to which the new instance // should be added once it is created. // +optional LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"` - // BlockDevices is the set of block device mapping associated to this instance, + // blockDevices is the set of block device mapping associated to this instance, // block device without a name will be used as a root device and only one device without a name is allowed // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html // +optional BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"` - // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. + // spotMarketOptions allows users to configure instances to be run using AWS Spot instances. 
// +optional SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` - // MetadataServiceOptions allows users to configure instance metadata service interaction options. + // metadataServiceOptions allows users to configure instance metadata service interaction options. // If nothing specified, default AWS IMDS settings will be applied. // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html // +optional MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"` - // PlacementGroupName specifies the name of the placement group in which to launch the instance. + // placementGroupName specifies the name of the placement group in which to launch the instance. // The placement group must already be created and may use any placement strategy. // When omitted, no placement group is used when creating the EC2 instance. // +optional @@ -194,7 +194,7 @@ const ( // MetadataServiceOptions defines the options available to a user when configuring // Instance Metadata Service (IMDS) Options. type MetadataServiceOptions struct { - // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + // authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. // When omitted, this means the user has no opinion and the value is left to the platform to choose a good // default, which is subject to change over time. The current default is optional. @@ -209,26 +209,26 @@ type MetadataServiceOptions struct { // Only one of ID, ARN or Filters may be specified. Specifying more than one will result in // a validation error. 
type AWSResourceReference struct { - // ID of resource + // id of resource // +optional ID *string `json:"id,omitempty"` - // ARN of resource + // arn of resource // +optional ARN *string `json:"arn,omitempty"` - // Filters is a set of filters used to identify a resource + // filters is a set of filters used to identify a resource // +optional Filters []Filter `json:"filters,omitempty"` } // Placement indicates where to create the instance in AWS type Placement struct { - // Region is the region to use to create the instance + // region is the region to use to create the instance // +optional Region string `json:"region,omitempty"` - // AvailabilityZone is the availability zone of the instance + // availabilityZone is the availability zone of the instance // +optional AvailabilityZone string `json:"availabilityZone,omitempty"` - // Tenancy indicates if instance should run on shared or single-tenant hardware. There are + // tenancy indicates if instance should run on shared or single-tenant hardware. There are // supported 3 options: default, dedicated and host. // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` @@ -236,18 +236,18 @@ type Placement struct { // Filter is a filter used to identify an AWS resource type Filter struct { - // Name of the filter. Filter names are case-sensitive. + // name of the filter. Filter names are case-sensitive. Name string `json:"name"` - // Values includes one or more filter values. Filter values are case-sensitive. + // values includes one or more filter values. Filter values are case-sensitive. 
// +optional Values []string `json:"values,omitempty"` } // TagSpecification is the name/value pair for a tag type TagSpecification struct { - // Name of the tag + // name of the tag Name string `json:"name"` - // Value of the tag + // value of the tag Value string `json:"value"` } @@ -309,13 +309,13 @@ const ( // +openshift:compatibility-gen:level=2 type AWSMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` - // InstanceID is the instance ID of the machine created in AWS + // instanceId is the instance ID of the machine created in AWS // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the state of the AWS instance for this machine + // instanceState is the state of the AWS instance for this machine // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index 00a9497fd3..db84fa2c9f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -39,32 +39,32 @@ type AzureMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with Azure credentials. 
+ // credentialsSecret is a reference to the secret with Azure credentials. // +optional CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` - // Location is the region to use to create the instance + // location is the region to use to create the instance // +optional Location string `json:"location,omitempty"` - // VMSize is the size of the VM to create. + // vmSize is the size of the VM to create. // +optional VMSize string `json:"vmSize,omitempty"` - // Image is the OS image to use to create the instance. + // image is the OS image to use to create the instance. Image Image `json:"image"` - // OSDisk represents the parameters for creating the OS disk. + // osDisk represents the parameters for creating the OS disk. OSDisk OSDisk `json:"osDisk"` // DataDisk specifies the parameters that are used to add one or more data disks to the machine. // +optional DataDisks []DataDisk `json:"dataDisks,omitempty"` - // SSHPublicKey is the public key to use to SSH to the virtual machine. + // sshPublicKey is the public key to use to SSH to the virtual machine. // +optional SSHPublicKey string `json:"sshPublicKey,omitempty"` - // PublicIP if true a public IP will be used + // publicIP if true a public IP will be used PublicIP bool `json:"publicIP"` - // Tags is a list of tags to apply to the machine. + // tags is a list of tags to apply to the machine. // +optional Tags map[string]string `json:"tags,omitempty"` // Network Security Group that needs to be attached to the machine's interface. @@ -75,40 +75,40 @@ type AzureMachineProviderSpec struct { // No application security groups will be attached if zero-length. 
// +optional ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` - // Subnet to use for this instance + // subnet to use for this instance Subnet string `json:"subnet"` - // PublicLoadBalancer to use for this instance + // publicLoadBalancer to use for this instance // +optional PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` // InternalLoadBalancerName to use for this instance // +optional InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` - // NatRule to set inbound NAT rule of the load balancer + // natRule to set inbound NAT rule of the load balancer // +optional NatRule *int64 `json:"natRule,omitempty"` - // ManagedIdentity to set managed identity name + // managedIdentity to set managed identity name // +optional ManagedIdentity string `json:"managedIdentity,omitempty"` - // Vnet to set virtual network name + // vnet to set virtual network name // +optional Vnet string `json:"vnet,omitempty"` // Availability Zone for the virtual machine. // If nil, the virtual machine should be deployed to no zone // +optional Zone string `json:"zone,omitempty"` - // NetworkResourceGroup is the resource group for the virtual machine's network + // networkResourceGroup is the resource group for the virtual machine's network // +optional NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` - // ResourceGroup is the resource group for the virtual machine + // resourceGroup is the resource group for the virtual machine // +optional ResourceGroup string `json:"resourceGroup,omitempty"` - // SpotVMOptions allows the ability to specify the Machine should use a Spot VM + // spotVMOptions allows the ability to specify the Machine should use a Spot VM // +optional SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` - // SecurityProfile specifies the Security profile settings for a virtual machine. + // securityProfile specifies the Security profile settings for a virtual machine. 
// +optional SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` - // UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. + // ultraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. // This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. // This Azure feature is subject to a specific scope and certain limitations. // More informations on this can be found in the official Azure documentation for Ultra Disks: @@ -130,16 +130,16 @@ type AzureMachineProviderSpec struct { // +kubebuilder:validation:Enum:="Enabled";"Disabled" // +optional UltraSSDCapability AzureUltraSSDCapabilityState `json:"ultraSSDCapability,omitempty"` - // AcceleratedNetworking enables or disables Azure accelerated networking feature. + // acceleratedNetworking enables or disables Azure accelerated networking feature. // Set to false by default. If true, then this will depend on whether the requested // VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error. // +optional AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"` - // AvailabilitySet specifies the availability set to use for this instance. + // availabilitySet specifies the availability set to use for this instance. // Availability set should be precreated, before using this field. // +optional AvailabilitySet string `json:"availabilitySet,omitempty"` - // Diagnostics configures the diagnostics settings for the virtual machine. + // diagnostics configures the diagnostics settings for the virtual machine. // This allows you to configure boot diagnostics such as capturing serial output from // the virtual machine on boot. // This is useful for debugging software based launch issues. 
@@ -156,7 +156,7 @@ type AzureMachineProviderSpec struct { // SpotVMOptions defines the options relevant to running the Machine on Spot VMs type SpotVMOptions struct { - // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // maxPrice defines the maximum price the user is willing to pay for Spot VM instances // +optional MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` } @@ -177,13 +177,13 @@ type AzureDiagnostics struct { // This is useful for debugging software based launch issues. // +union type AzureBootDiagnostics struct { - // StorageAccountType determines if the storage account for storing the diagnostics data + // storageAccountType determines if the storage account for storing the diagnostics data // should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged). - // +kubebuilder:validation:Required + // +required // +unionDiscriminator StorageAccountType AzureBootDiagnosticsStorageAccountType `json:"storageAccountType"` - // CustomerManaged provides reference to the customer manager storage account. + // customerManaged provides reference to the customer manager storage account. // +optional CustomerManaged *AzureCustomerManagedBootDiagnostics `json:"customerManaged,omitempty"` } @@ -191,12 +191,12 @@ type AzureBootDiagnostics struct { // AzureCustomerManagedBootDiagnostics provides reference to a customer managed // storage account. type AzureCustomerManagedBootDiagnostics struct { - // StorageAccountURI is the URI of the customer managed storage account. + // storageAccountURI is the URI of the customer managed storage account. // The URI typically will be `https://.blob.core.windows.net/` // but may differ if you are using Azure DNS zone endpoints. // You can find the correct endpoint by looking for the Blob Primary Endpoint in the // endpoints tab in the Azure console. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^https://` // +kubebuilder:validation:MaxLength=1024 StorageAccountURI string `json:"storageAccountURI"` @@ -225,13 +225,13 @@ type AzureMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // VMID is the ID of the virtual machine created in Azure. + // vmId is the ID of the virtual machine created in Azure. // +optional VMID *string `json:"vmId,omitempty"` - // VMState is the provisioning state of the Azure virtual machine. + // vmState is the provisioning state of the Azure virtual machine. // +optional VMState *AzureVMState `json:"vmState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -274,23 +274,23 @@ const ( // Image is a mirror of azure sdk compute.ImageReference type Image struct { - // Publisher is the name of the organization that created the image + // publisher is the name of the organization that created the image Publisher string `json:"publisher"` - // Offer specifies the name of a group of related images created by the publisher. + // offer specifies the name of a group of related images created by the publisher. // For example, UbuntuServer, WindowsServer Offer string `json:"offer"` - // SKU specifies an instance of an offer, such as a major release of a distribution. + // sku specifies an instance of an offer, such as a major release of a distribution. // For example, 18.04-LTS, 2019-Datacenter SKU string `json:"sku"` - // Version specifies the version of an image sku. The allowed formats + // version specifies the version of an image sku. The allowed formats // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. 
// Specify 'latest' to use the latest version of an image available at deploy time. // Even if you use 'latest', the VM image will not automatically update after deploy // time even if a new version becomes available. Version string `json:"version"` - // ResourceID specifies an image to use by ID + // resourceID specifies an image to use by ID ResourceID string `json:"resourceID"` - // Type identifies the source of the image and related information, such as purchase plans. + // type identifies the source of the image and related information, such as purchase plans. // Valid values are "ID", "MarketplaceWithPlan", "MarketplaceNoPlan", and omitted, which // means no opinion and the platform chooses a good default which may change over time. // Currently that default is "MarketplaceNoPlan" if publisher data is supplied, or "ID" if not. @@ -313,16 +313,16 @@ const ( ) type OSDisk struct { - // OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". + // osType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". OSType string `json:"osType"` - // ManagedDisk specifies the Managed Disk parameters for the OS disk. + // managedDisk specifies the Managed Disk parameters for the OS disk. ManagedDisk OSDiskManagedDiskParameters `json:"managedDisk"` - // DiskSizeGB is the size in GB to assign to the data disk. + // diskSizeGB is the size in GB to assign to the data disk. DiskSizeGB int32 `json:"diskSizeGB"` - // DiskSettings describe ephemeral disk settings for the os disk. + // diskSettings describe ephemeral disk settings for the os disk. // +optional DiskSettings DiskSettings `json:"diskSettings,omitempty"` - // CachingType specifies the caching requirements. + // cachingType specifies the caching requirements. // Possible values include: 'None', 'ReadOnly', 'ReadWrite'. // Empty value means no opinion and the platform chooses a default, which is subject to change over // time. 
Currently the default is `None`. @@ -342,43 +342,43 @@ type OSDisk struct { // Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. // For further guidance and examples, please refer to the official OpenShift docs. type DataDisk struct { - // NameSuffix is the suffix to be appended to the machine name to generate the disk name. + // nameSuffix is the suffix to be appended to the machine name to generate the disk name. // Each disk name will be in format _. // NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. // The overall disk name must not exceed 80 chars in length. // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$` // +kubebuilder:validation:MaxLength:=78 - // +kubebuilder:validation:Required + // +required NameSuffix string `json:"nameSuffix"` - // DiskSizeGB is the size in GB to assign to the data disk. + // diskSizeGB is the size in GB to assign to the data disk. // +kubebuilder:validation:Minimum=4 - // +kubebuilder:validation:Required + // +required DiskSizeGB int32 `json:"diskSizeGB"` - // ManagedDisk specifies the Managed Disk parameters for the data disk. + // managedDisk specifies the Managed Disk parameters for the data disk. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is a ManagedDisk with with storageAccountType: "Premium_LRS" and diskEncryptionSet.id: "Default". // +optional ManagedDisk DataDiskManagedDiskParameters `json:"managedDisk,omitempty"` - // Lun Specifies the logical unit number of the data disk. + // lun Specifies the logical unit number of the data disk. // This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. 
// This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). // The value must be between 0 and 63. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=63 - // +kubebuilder:validation:Required + // +required Lun int32 `json:"lun,omitempty"` - // CachingType specifies the caching requirements. + // cachingType specifies the caching requirements. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is CachingTypeNone. // +optional // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite CachingType CachingTypeOption `json:"cachingType,omitempty"` - // DeletionPolicy specifies the data disk deletion policy upon Machine deletion. + // deletionPolicy specifies the data disk deletion policy upon Machine deletion. // Possible values are "Delete","Detach". // When "Delete" is used the data disk is deleted when the Machine is deleted. // When "Detach" is used the data disk is detached from the Machine and retained when the Machine is deleted. // +kubebuilder:validation:Enum=Delete;Detach - // +kubebuilder:validation:Required + // +required DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"` } @@ -408,7 +408,7 @@ const ( // DiskSettings describe ephemeral disk settings for the os disk. type DiskSettings struct { - // EphemeralStorageLocation enables ephemeral OS when set to 'Local'. + // ephemeralStorageLocation enables ephemeral OS when set to 'Local'. // Possible values include: 'Local'. // See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. // Empty value means no opinion and the platform chooses a default, which is subject to change over @@ -420,10 +420,10 @@ type DiskSettings struct { // OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk. 
type OSDiskManagedDiskParameters struct { - // StorageAccountType is the storage account type to use. + // storageAccountType is the storage account type to use. // Possible values include "Standard_LRS", "Premium_LRS". StorageAccountType string `json:"storageAccountType"` - // DiskEncryptionSet is the disk encryption set properties + // diskEncryptionSet is the disk encryption set properties // +optional DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` // securityProfile specifies the security profile for the managed disk. @@ -460,11 +460,11 @@ type VMDiskSecurityProfile struct { // DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk. type DataDiskManagedDiskParameters struct { - // StorageAccountType is the storage account type to use. + // storageAccountType is the storage account type to use. // Possible values include "Standard_LRS", "Premium_LRS" and "UltraSSD_LRS". // +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;UltraSSD_LRS StorageAccountType StorageAccountType `json:"storageAccountType"` - // DiskEncryptionSet is the disk encryption set properties. + // diskEncryptionSet is the disk encryption set properties. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is a DiskEncryptionSet with id: "Default". // +optional @@ -486,7 +486,7 @@ const ( // DiskEncryptionSetParameters is the disk encryption set properties type DiskEncryptionSetParameters struct { - // ID is the disk encryption set ID + // id is the disk encryption set ID // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is: "Default". // +optional @@ -514,7 +514,7 @@ type SecuritySettings struct { // securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to // enable UEFISettings. 
The default behavior is: UEFISettings will not be enabled unless this property is set. // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch - // +kubebuilder:validation:Required + // +required // +unionDiscriminator SecurityType SecurityTypes `json:"securityType,omitempty"` // confidentialVM specifies the security configuration of the virtual machine. @@ -532,14 +532,14 @@ type SecuritySettings struct { // ConfidentialVM defines the UEFI settings for the virtual machine. type ConfidentialVM struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. - // +kubebuilder:validation:Required + // +required UEFISettings UEFISettings `json:"uefiSettings,omitempty"` } // TrustedLaunch defines the UEFI settings for the virtual machine. type TrustedLaunch struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. - // +kubebuilder:validation:Required + // +required UEFISettings UEFISettings `json:"uefiSettings,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go index b5bb506192..fc09a0c47b 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -76,61 +76,61 @@ type GCPMachineProviderSpec struct { // metadata is the standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with GCP credentials. + // credentialsSecret is a reference to the secret with GCP credentials. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. + // canIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. // This is required if you plan to use this instance to forward routes. CanIPForward bool `json:"canIPForward"` - // DeletionProtection whether the resource should be protected against deletion. + // deletionProtection whether the resource should be protected against deletion. DeletionProtection bool `json:"deletionProtection"` - // Disks is a list of disks to be attached to the VM. + // disks is a list of disks to be attached to the VM. // +optional Disks []*GCPDisk `json:"disks,omitempty"` - // Labels list of labels to apply to the VM. + // labels list of labels to apply to the VM. // +optional Labels map[string]string `json:"labels,omitempty"` // Metadata key/value pairs to apply to the VM. // +optional Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"` - // NetworkInterfaces is a list of network interfaces to be attached to the VM. + // networkInterfaces is a list of network interfaces to be attached to the VM. 
// +optional NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` - // ServiceAccounts is a list of GCP service accounts to be used by the VM. + // serviceAccounts is a list of GCP service accounts to be used by the VM. ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` - // Tags list of network tags to apply to the VM. + // tags list of network tags to apply to the VM. Tags []string `json:"tags,omitempty"` - // TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, + // targetPools are used for network TCP/UDP load balancing. A target pool references member instances, // an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool // +optional TargetPools []string `json:"targetPools,omitempty"` - // MachineType is the machine type to use for the VM. + // machineType is the machine type to use for the VM. MachineType string `json:"machineType"` - // Region is the region in which the GCP machine provider will create the VM. + // region is the region in which the GCP machine provider will create the VM. Region string `json:"region"` - // Zone is the zone in which the GCP machine provider will create the VM. + // zone is the zone in which the GCP machine provider will create the VM. Zone string `json:"zone"` - // ProjectID is the project in which the GCP machine provider will create the VM. + // projectID is the project in which the GCP machine provider will create the VM. // +optional ProjectID string `json:"projectID,omitempty"` - // GPUs is a list of GPUs to be attached to the VM. + // gpus is a list of GPUs to be attached to the VM. // +optional GPUs []GCPGPUConfig `json:"gpus,omitempty"` - // Preemptible indicates if created instance is preemptible. + // preemptible indicates if created instance is preemptible. 
// +optional Preemptible bool `json:"preemptible,omitempty"` - // OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. + // onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. // This is required to be set to "Terminate" if you want to provision machine with attached GPUs. // Otherwise, allowed values are "Migrate" and "Terminate". // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Migrate". // +kubebuilder:validation:Enum=Migrate;Terminate; // +optional OnHostMaintenance GCPHostMaintenanceType `json:"onHostMaintenance,omitempty"` - // RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always"). + // restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always"). // Cannot be "Always" with preemptible instances. // Otherwise, allowed values are "Always" and "Never". // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Always". @@ -139,7 +139,7 @@ type GCPMachineProviderSpec struct { // +optional RestartPolicy GCPRestartPolicyType `json:"restartPolicy,omitempty"` - // ShieldedInstanceConfig is the Shielded VM configuration for the VM + // shieldedInstanceConfig is the Shielded VM configuration for the VM // +optional ShieldedInstanceConfig GCPShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"` @@ -169,7 +169,7 @@ type ResourceManagerTag struct { // An OrganizationID can have a maximum of 32 characters and must consist of decimal numbers, and // cannot have leading zeroes. 
A ProjectID must be 6 to 30 characters in length, can only contain // lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` @@ -178,7 +178,7 @@ type ResourceManagerTag struct { // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `._-`. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` @@ -187,7 +187,7 @@ type ResourceManagerTag struct { // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` @@ -196,48 +196,48 @@ type ResourceManagerTag struct { // GCPDisk describes disks for GCP. type GCPDisk struct { - // AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). + // autoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). AutoDelete bool `json:"autoDelete"` - // Boot indicates if this is a boot disk (default false). 
+ // boot indicates if this is a boot disk (default false). Boot bool `json:"boot"` - // SizeGB is the size of the disk (in GB). + // sizeGb is the size of the disk (in GB). SizeGB int64 `json:"sizeGb"` - // Type is the type of the disk (eg: pd-standard). + // type is the type of the disk (eg: pd-standard). Type string `json:"type"` - // Image is the source image to create this disk. + // image is the source image to create this disk. Image string `json:"image"` - // Labels list of labels to apply to the disk. + // labels list of labels to apply to the disk. Labels map[string]string `json:"labels"` - // EncryptionKey is the customer-supplied encryption key of the disk. + // encryptionKey is the customer-supplied encryption key of the disk. // +optional EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` } // GCPMetadata describes metadata for GCP. type GCPMetadata struct { - // Key is the metadata key. + // key is the metadata key. Key string `json:"key"` - // Value is the metadata value. + // value is the metadata value. Value *string `json:"value"` } // GCPNetworkInterface describes network interfaces for GCP type GCPNetworkInterface struct { - // PublicIP indicates if true a public IP will be used + // publicIP indicates if true a public IP will be used PublicIP bool `json:"publicIP,omitempty"` - // Network is the network name. + // network is the network name. Network string `json:"network,omitempty"` - // ProjectID is the project in which the GCP machine provider will create the VM. + // projectID is the project in which the GCP machine provider will create the VM. ProjectID string `json:"projectID,omitempty"` - // Subnetwork is the subnetwork name. + // subnetwork is the subnetwork name. Subnetwork string `json:"subnetwork,omitempty"` } // GCPServiceAccount describes service accounts for GCP. type GCPServiceAccount struct { - // Email is the service account email. + // email is the service account email. 
Email string `json:"email"` - // Scopes list of scopes to be assigned to the service account. + // scopes list of scopes to be assigned to the service account. Scopes []string `json:"scopes"` } @@ -246,7 +246,7 @@ type GCPEncryptionKeyReference struct { // KMSKeyName is the reference KMS key, in the format // +optional KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` - // KMSKeyServiceAccount is the service account being used for the + // kmsKeyServiceAccount is the service account being used for the // encryption request for the given KMS key. If absent, the Compute // Engine default service account is used. // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account @@ -257,23 +257,23 @@ type GCPEncryptionKeyReference struct { // GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key type GCPKMSKeyReference struct { - // Name is the name of the customer managed encryption key to be used for the disk encryption. + // name is the name of the customer managed encryption key to be used for the disk encryption. Name string `json:"name"` - // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. + // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. KeyRing string `json:"keyRing"` - // ProjectID is the ID of the Project in which the KMS Key Ring exists. + // projectID is the ID of the Project in which the KMS Key Ring exists. // Defaults to the VM ProjectID if not set. // +optional ProjectID string `json:"projectID,omitempty"` - // Location is the GCP location in which the Key Ring exists. + // location is the GCP location in which the Key Ring exists. Location string `json:"location"` } // GCPGPUConfig describes type and count of GPUs attached to the instance on GCP. type GCPGPUConfig struct { - // Count is the number of GPUs to be attached to an instance. + // count is the number of GPUs to be attached to an instance. 
Count int32 `json:"count"` - // Type is the type of GPU to be attached to an instance. + // type is the type of GPU to be attached to an instance. // Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4 // +kubebuilder:validation:Pattern=`^nvidia-tesla-(k80|p100|v100|p4|t4)$` Type string `json:"type"` @@ -287,13 +287,13 @@ type GCPMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // InstanceID is the ID of the instance in GCP + // instanceId is the ID of the instance in GCP // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the provisioning state of the GCP Instance. + // instanceState is the provisioning state of the GCP Instance. // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -302,14 +302,14 @@ type GCPMachineProviderStatus struct { // GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. // Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring. type GCPShieldedInstanceConfig struct { - // SecureBoot Defines whether the instance should have secure boot enabled. + // secureBoot Defines whether the instance should have secure boot enabled. // Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled. 
// +kubebuilder:validation:Enum=Enabled;Disabled //+optional SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"` - // VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. + // virtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. // The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. // This is required to be set to "Enabled" if IntegrityMonitoring is enabled. // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled. @@ -317,7 +317,7 @@ type GCPShieldedInstanceConfig struct { // +optional VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"` - // IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. + // integrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. // Compares the most recent boot measurements to the integrity policy baseline and return // a pair of pass/fail results depending on whether they match or not. // If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled. 
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index c8fcb192bd..9bd3bdd60b 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -227,7 +227,7 @@ type MachineSpec struct { // +optional ObjectMeta `json:"metadata,omitempty"` - // LifecycleHooks allow users to pause operations on the machine at + // lifecycleHooks allow users to pause operations on the machine at // certain predefined points within the machine lifecycle. // +optional LifecycleHooks LifecycleHooks `json:"lifecycleHooks,omitempty"` @@ -242,11 +242,11 @@ type MachineSpec struct { // +listType=atomic Taints []corev1.Taint `json:"taints,omitempty"` - // ProviderSpec details Provider-specific configuration to use during node creation. + // providerSpec details Provider-specific configuration to use during node creation. // +optional ProviderSpec ProviderSpec `json:"providerSpec"` - // ProviderID is the identification ID of the machine provided by the provider. + // providerID is the identification ID of the machine provided by the provider. // This field must match the provider ID as seen on the node object corresponding to this machine. // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out @@ -277,14 +277,14 @@ type MachineSpec struct { // LifecycleHooks allow users to pause operations on the machine at // certain prefedined points within the machine lifecycle. type LifecycleHooks struct { - // PreDrain hooks prevent the machine from being drained. + // preDrain hooks prevent the machine from being drained. // This also blocks further lifecycle events, such as termination. 
// +listType=map // +listMapKey=name // +optional PreDrain []LifecycleHook `json:"preDrain,omitempty"` - // PreTerminate hooks prevent the machine from being terminated. + // preTerminate hooks prevent the machine from being terminated. // PreTerminate hooks be actioned after the Machine has been drained. // +listType=map // +listMapKey=name @@ -294,39 +294,39 @@ type LifecycleHooks struct { // LifecycleHook represents a single instance of a lifecycle hook type LifecycleHook struct { - // Name defines a unique name for the lifcycle hook. + // name defines a unique name for the lifcycle hook. // The name should be unique and descriptive, ideally 1-3 words, in CamelCase or // it may be namespaced, eg. foo.example.com/CamelCase. // Names must be unique and should only be managed by a single entity. // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` // +kubebuilder:validation:MinLength=3 // +kubebuilder:validation:MaxLength=256 - // +kubebuilder:validation:Required + // +required Name string `json:"name"` - // Owner defines the owner of the lifecycle hook. + // owner defines the owner of the lifecycle hook. // This should be descriptive enough so that users can identify // who/what is responsible for blocking the lifecycle. // This could be the name of a controller (e.g. clusteroperator/etcd) // or an administrator managing the hook. 
// +kubebuilder:validation:MinLength=3 // +kubebuilder:validation:MaxLength=512 - // +kubebuilder:validation:Required + // +required Owner string `json:"owner"` } // MachineStatus defines the observed state of Machine // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineStatus struct { - // NodeRef will point to the corresponding Node if it exists. + // nodeRef will point to the corresponding Node if it exists. // +optional NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` - // LastUpdated identifies when this status was last observed. + // lastUpdated identifies when this status was last observed. // +optional LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - // ErrorReason will be set in the event that there is a terminal problem + // errorReason will be set in the event that there is a terminal problem // reconciling the Machine and will contain a succinct value suitable // for machine interpretation. // @@ -345,7 +345,7 @@ type MachineStatus struct { // +optional ErrorReason *MachineStatusError `json:"errorReason,omitempty"` - // ErrorMessage will be set in the event that there is a terminal problem + // errorMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable // for logging and human consumption. // @@ -364,7 +364,7 @@ type MachineStatus struct { // +optional ErrorMessage *string `json:"errorMessage,omitempty"` - // ProviderStatus details a Provider-specific status. + // providerStatus details a Provider-specific status. 
// It is recommended that providers maintain their // own versioned API types that should be // serialized/deserialized from this field. @@ -372,24 +372,24 @@ type MachineStatus struct { // +kubebuilder:validation:XPreserveUnknownFields ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` - // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + // addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. // +optional // +listType=atomic Addresses []corev1.NodeAddress `json:"addresses,omitempty"` - // LastOperation describes the last-operation performed by the machine-controller. + // lastOperation describes the last-operation performed by the machine-controller. // This API should be useful as a history in terms of the latest operation performed on the // specific machine. It should also convey the state of the latest-operation for example if // it is still on-going, failed or completed successfully. // +optional LastOperation *LastOperation `json:"lastOperation,omitempty"` - // Phase represents the current phase of machine actuation. + // phase represents the current phase of machine actuation. // One of: Failed, Provisioning, Provisioned, Running, Deleting // +optional Phase *string `json:"phase,omitempty"` - // Conditions defines the current state of the Machine + // conditions defines the current state of the Machine // +listType=map // +listMapKey=type Conditions []Condition `json:"conditions,omitempty"` @@ -415,17 +415,17 @@ type MachineStatus struct { // LastOperation represents the detail of the last performed operation on the MachineObject. type LastOperation struct { - // Description is the human-readable description of the last operation. + // description is the human-readable description of the last operation. Description *string `json:"description,omitempty"` - // LastUpdated is the timestamp at which LastOperation API was last-updated. 
+ // lastUpdated is the timestamp at which LastOperation API was last-updated. LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` - // State is the current status of the last performed operation. + // state is the current status of the last performed operation. // E.g. Processing, Failed, Successful etc State *string `json:"state,omitempty"` - // Type is the type of operation which was last performed. + // type is the type of operation which was last performed. // E.g. Create, Delete, Update etc Type *string `json:"type,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 912b7dea5d..76c79acb0b 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -64,7 +64,7 @@ type MachineHealthCheckSpec struct { // Note: An empty selector will match all machines. Selector metav1.LabelSelector `json:"selector"` - // UnhealthyConditions contains a list of the conditions that determine + // unhealthyConditions contains a list of the conditions that determine // whether a node is considered unhealthy. The conditions are combined in a // logical OR, i.e. if any of the conditions is met, the node is unhealthy. // @@ -96,7 +96,7 @@ type MachineHealthCheckSpec struct { // +optional NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"` - // RemediationTemplate is a reference to a remediation template + // remediationTemplate is a reference to a remediation template // provided by an infrastructure provider. 
// // This field is completely optional, when filled, the MachineHealthCheck controller @@ -136,13 +136,13 @@ type MachineHealthCheckStatus struct { // +kubebuilder:validation:Minimum=0 CurrentHealthy *int `json:"currentHealthy"` - // RemediationsAllowed is the number of further remediations allowed by this machine health check before + // remediationsAllowed is the number of further remediations allowed by this machine health check before // maxUnhealthy short circuiting will be applied // +kubebuilder:validation:Minimum=0 // +optional RemediationsAllowed int32 `json:"remediationsAllowed"` - // Conditions defines the current state of the MachineHealthCheck + // conditions defines the current state of the MachineHealthCheck // +optional // +listType=map // +listMapKey=type diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index a14d50eb78..a29977f347 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -38,25 +38,25 @@ type MachineSet struct { // MachineSetSpec defines the desired state of MachineSet type MachineSetSpec struct { - // Replicas is the number of desired replicas. + // replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitempty"` - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + // minReadySeconds is the minimum number of seconds for which a newly created machine should be ready. // Defaults to 0 (machine will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - // DeletePolicy defines the policy used to identify nodes to delete when downscaling. 
+ // deletePolicy defines the policy used to identify nodes to delete when downscaling. // Defaults to "Random". Valid values are "Random, "Newest", "Oldest" // +kubebuilder:validation:Enum=Random;Newest;Oldest DeletePolicy string `json:"deletePolicy,omitempty"` - // Selector is a label query over machines that should match the replica count. + // selector is a label query over machines that should match the replica count. // Label keys and values that must match in order to be controlled by this MachineSet. // It must match the machine template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors Selector metav1.LabelSelector `json:"selector"` - // Template is the object that describes the machine that will be created if + // template is the object that describes the machine that will be created if // insufficient replicas are detected. // +optional Template MachineTemplateSpec `json:"template,omitempty"` @@ -113,7 +113,7 @@ type MachineTemplateSpec struct { // MachineSetStatus defines the observed state of MachineSet // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineSetStatus struct { - // Replicas is the most recently observed number of replicas. + // replicas is the most recently observed number of replicas. Replicas int32 `json:"replicas"` // The number of replicas that have labels matching the labels of the machine template of the MachineSet. // +optional @@ -124,7 +124,7 @@ type MachineSetStatus struct { // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. 
// +optional AvailableReplicas int32 `json:"availableReplicas,omitempty"` - // ObservedGeneration reflects the generation of the most recently observed MachineSet. + // observedGeneration reflects the generation of the most recently observed MachineSet. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` // In the event that there is a terminal problem reconciling the @@ -150,7 +150,7 @@ type MachineSetStatus struct { // +optional ErrorMessage *string `json:"errorMessage,omitempty"` - // Conditions defines the current state of the MachineSet + // conditions defines the current state of the MachineSet // +listType=map // +listMapKey=type Conditions []Condition `json:"conditions,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go index e9de632783..812358e89f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -11,7 +11,7 @@ type ProviderSpec struct { // No more than one of the following may be specified. - // Value is an inlined, serialized representation of the resource + // value is an inlined, serialized representation of the resource // configuration. It is recommended that providers maintain their own // versioned API types that should be serialized/deserialized from this // field, akin to component config. @@ -42,7 +42,7 @@ type ProviderSpec struct { // In future versions, controller-tools@v2 might allow overriding the type and validation for embedded // types. When that happens, this hack should be revisited. type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although + // name must be unique within a namespace. Is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name // automatically. 
Name is primarily intended for creation idempotence and configuration // definition. @@ -51,7 +51,7 @@ type ObjectMeta struct { // +optional Name string `json:"name,omitempty"` - // GenerateName is an optional prefix, used by the server, to generate a unique + // generateName is an optional prefix, used by the server, to generate a unique // name ONLY IF the Name field has not been provided. // If this field is used, the name returned to the client will be different // than the name passed. This value will also be combined with a unique suffix. @@ -69,7 +69,7 @@ type ObjectMeta struct { // +optional GenerateName string `json:"generateName,omitempty"` - // Namespace defines the space within each name must be unique. An empty namespace is + // namespace defines the space within each name must be unique. An empty namespace is // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. @@ -87,7 +87,7 @@ type ObjectMeta struct { // +optional Labels map[string]string `json:"labels,omitempty"` - // Annotations is an unstructured key value map stored with a resource that may be + // annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. // More info: http://kubernetes.io/docs/user-guide/annotations @@ -193,18 +193,17 @@ const ( // Condition defines an observation of a Machine API resource operational state. type Condition struct { - // Type of condition in CamelCase or in foo.example.com/CamelCase. + // type of condition in CamelCase or in foo.example.com/CamelCase. // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions // can be useful (see .node.status.conditions), the ability to deconflict is important. 
// +required - // +kubebuilder:validation:Required Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. + // status of the condition, one of True, False, Unknown. // +required Status corev1.ConditionStatus `json:"status"` - // Severity provides an explicit classification of Reason code, so the users or machines can immediately + // severity provides an explicit classification of Reason code, so the users or machines can immediately // understand the current situation and act accordingly. // The Severity field MUST be set only when Status=False. // +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go index f458cbf6ef..50b6e8f540 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go @@ -14,22 +14,22 @@ type VSphereMachineProviderSpec struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the + // userDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with vSphere credentials. + // credentialsSecret is a reference to the secret with vSphere credentials. // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // Template is the name, inventory path, or instance UUID of the template + // template is the name, inventory path, or instance UUID of the template // used to clone new machines. Template string `json:"template"` - // Workspace describes the workspace to use for the machine. + // workspace describes the workspace to use for the machine. 
// +optional Workspace *Workspace `json:"workspace,omitempty"` - // Network is the network configuration for this machine's VM. + // network is the network configuration for this machine's VM. Network NetworkSpec `json:"network"` - // NumCPUs is the number of virtual processors in a virtual machine. + // numCPUs is the number of virtual processors in a virtual machine. // Defaults to the analogue property value in the template from which this // machine is cloned. // +optional @@ -40,12 +40,12 @@ type VSphereMachineProviderSpec struct { // machine is cloned. // +optional NumCoresPerSocket int32 `json:"numCoresPerSocket,omitempty"` - // MemoryMiB is the size of a virtual machine's memory, in MiB. + // memoryMiB is the size of a virtual machine's memory, in MiB. // Defaults to the analogue property value in the template from which this // machine is cloned. // +optional MemoryMiB int64 `json:"memoryMiB,omitempty"` - // DiskGiB is the size of a virtual machine's disk, in GiB. + // diskGiB is the size of a virtual machine's disk, in GiB. // Defaults to the analogue property value in the template from which this // machine is cloned. // This parameter will be ignored if 'LinkedClone' CloneMode is set. @@ -57,10 +57,10 @@ type VSphereMachineProviderSpec struct { // +kubebuilder:example="urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL" // +optional TagIDs []string `json:"tagIDs,omitempty"` - // Snapshot is the name of the snapshot from which the VM was cloned + // snapshot is the name of the snapshot from which the VM was cloned // +optional Snapshot string `json:"snapshot"` - // CloneMode specifies the type of clone operation. + // cloneMode specifies the type of clone operation. // The LinkedClone mode is only support for templates that have at least // one snapshot. If the template has no snapshots, then CloneMode defaults // to FullClone. @@ -89,7 +89,7 @@ const ( // NetworkSpec defines the virtual machine's network configuration. 
type NetworkSpec struct { - // Devices defines the virtual machine's network interfaces. + // devices defines the virtual machine's network interfaces. Devices []NetworkDeviceSpec `json:"devices"` } @@ -100,19 +100,19 @@ type AddressesFromPool struct { // This should be a fully qualified domain name, for example, externalipam.controller.io. // +kubebuilder:example=externalipam.controller.io // +kubebuilder:validation:Pattern="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // resource of the IP address pool type known to an external IPAM controller. // It is normally the plural form of the resource kind in lowercase, for example, // ippools. // +kubebuilder:example=ippools // +kubebuilder:validation:Pattern="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` // name of an IP address pool, for example, pool-config-1. // +kubebuilder:example=pool-config-1 // +kubebuilder:validation:Pattern="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -175,21 +175,25 @@ type NetworkDeviceSpec struct { // WorkspaceConfig defines a workspace configuration for the vSphere cloud // provider. type Workspace struct { - // Server is the IP address or FQDN of the vSphere endpoint. + // server is the IP address or FQDN of the vSphere endpoint. // +optional Server string `gcfg:"server,omitempty" json:"server,omitempty"` - // Datacenter is the datacenter in which VMs are created/located. + // datacenter is the datacenter in which VMs are created/located. // +optional Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"` - // Folder is the folder in which VMs are created/located. + // folder is the folder in which VMs are created/located. 
// +optional Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"` - // Datastore is the datastore in which VMs are created/located. + // datastore is the datastore in which VMs are created/located. // +optional Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"` - // ResourcePool is the resource pool in which VMs are created/located. + // resourcePool is the resource pool in which VMs are created/located. // +optional ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` + // vmGroup is the cluster vm group in which virtual machines will be added for vm host group based zonal. + // +openshift:validation:featureGate=VSphereHostVMGroupZonal + // +optional + VMGroup string `gcfg:"vmGroup,omitempty" json:"vmGroup,omitempty"` } // VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. @@ -199,16 +203,16 @@ type Workspace struct { type VSphereMachineProviderStatus struct { metav1.TypeMeta `json:",inline"` - // InstanceID is the ID of the instance in VSphere + // instanceId is the ID of the instance in VSphere // +optional InstanceID *string `json:"instanceId,omitempty"` - // InstanceState is the provisioning state of the VSphere Instance. + // instanceState is the provisioning state of the VSphere Instance. // +optional InstanceState *string `json:"instanceState,omitempty"` - // Conditions is a set of conditions associated with the Machine to indicate + // conditions is a set of conditions associated with the Machine to indicate // errors or other status Conditions []metav1.Condition `json:"conditions,omitempty"` - // TaskRef is a managed object reference to a Task related to the machine. + // taskRef is a managed object reference to a Task related to the machine. // This value is set automatically at runtime and should not be set or // modified by users. 
// +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 5bba232bf7..e26f79a45f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -13,24 +13,24 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE var map_AWSMachineProviderConfig = map[string]string{ "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "ami": "AMI is the reference to the AMI from which to create the machine instance.", - "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", - "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", - "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", - "keyName": "KeyName is the name of the KeyPair to use for SSH", - "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", - "publicIp": "PublicIP specifies whether the instance should get a public IP. 
If not present, it should use the default of its subnet.", - "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", - "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", - "subnet": "Subnet is a reference to the subnet to use for this instance", - "placement": "Placement specifies where to create the instance in AWS", - "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", - "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", - "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", - "metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", - "placementGroupName": "PlacementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. 
When omitted, no placement group is used when creating the EC2 instance.", + "ami": "ami is the reference to the AMI from which to create the machine instance.", + "instanceType": "instanceType is the type of instance to create. Example: m4.xlarge", + "tags": "tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "iamInstanceProfile": "iamInstanceProfile is a reference to an IAM role to assign to the instance", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "keyName": "keyName is the name of the KeyPair to use for SSH", + "deviceIndex": "deviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "publicIp": "publicIp specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", + "networkInterfaceType": "networkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". 
Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", + "securityGroups": "securityGroups is an array of references to security groups that should be applied to the instance.", + "subnet": "subnet is a reference to the subnet to use for this instance", + "placement": "placement specifies where to create the instance in AWS", + "loadBalancers": "loadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "blockDevices": "blockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "spotMarketOptions": "spotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "metadataServiceOptions": "metadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "placementGroupName": "placementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.", "placementGroupPartition": "placementGroupPartition is the partition number within the placement group in which to launch the instance. This must be an integer value between 1 and 7. It is only valid if the placement group, referred in `PlacementGroupName` was created with strategy set to partition.", "capacityReservationId": "capacityReservationId specifies the target Capacity Reservation into which the instance should be launched. 
The field size should be greater than 0 and the field input must start with cr-***", } @@ -49,9 +49,9 @@ func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string { var map_AWSMachineProviderStatus = map[string]string{ "": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the instance ID of the machine created in AWS", - "instanceState": "InstanceState is the state of the AWS instance for this machine", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in AWS", + "instanceState": "instanceState is the state of the AWS instance for this machine", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { @@ -60,9 +60,9 @@ func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { var map_AWSResourceReference = map[string]string{ "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", - "id": "ID of resource", - "arn": "ARN of resource", - "filters": "Filters is a set of filters used to identify a resource", + "id": "id of resource", + "arn": "arn of resource", + "filters": "filters is a set of filters used to identify a resource", } func (AWSResourceReference) SwaggerDoc() map[string]string { @@ -97,8 +97,8 @@ func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string { var map_Filter = map[string]string{ "": "Filter is a filter used to identify an AWS resource", - "name": "Name of the filter. 
Filter names are case-sensitive.", - "values": "Values includes one or more filter values. Filter values are case-sensitive.", + "name": "name of the filter. Filter names are case-sensitive.", + "values": "values includes one or more filter values. Filter values are case-sensitive.", } func (Filter) SwaggerDoc() map[string]string { @@ -115,7 +115,7 @@ func (LoadBalancerReference) SwaggerDoc() map[string]string { var map_MetadataServiceOptions = map[string]string{ "": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.", - "authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", + "authentication": "authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. 
At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", } func (MetadataServiceOptions) SwaggerDoc() map[string]string { @@ -124,9 +124,9 @@ func (MetadataServiceOptions) SwaggerDoc() map[string]string { var map_Placement = map[string]string{ "": "Placement indicates where to create the instance in AWS", - "region": "Region is the region to use to create the instance", - "availabilityZone": "AvailabilityZone is the availability zone of the instance", - "tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", + "region": "region is the region to use to create the instance", + "availabilityZone": "availabilityZone is the availability zone of the instance", + "tenancy": "tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", } func (Placement) SwaggerDoc() map[string]string { @@ -144,8 +144,8 @@ func (SpotMarketOptions) SwaggerDoc() map[string]string { var map_TagSpecification = map[string]string{ "": "TagSpecification is the name/value pair for a tag", - "name": "Name of the tag", - "value": "Value of the tag", + "name": "name of the tag", + "value": "value of the tag", } func (TagSpecification) SwaggerDoc() map[string]string { @@ -154,8 +154,8 @@ func (TagSpecification) SwaggerDoc() map[string]string { var map_AzureBootDiagnostics = map[string]string{ "": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. 
This is useful for debugging software based launch issues.", - "storageAccountType": "StorageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", - "customerManaged": "CustomerManaged provides reference to the customer manager storage account.", + "storageAccountType": "storageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).", + "customerManaged": "customerManaged provides reference to the customer manager storage account.", } func (AzureBootDiagnostics) SwaggerDoc() map[string]string { @@ -164,7 +164,7 @@ func (AzureBootDiagnostics) SwaggerDoc() map[string]string { var map_AzureCustomerManagedBootDiagnostics = map[string]string{ "": "AzureCustomerManagedBootDiagnostics provides reference to a customer managed storage account.", - "storageAccountURI": "StorageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", + "storageAccountURI": "storageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.", } func (AzureCustomerManagedBootDiagnostics) SwaggerDoc() map[string]string { @@ -182,33 +182,33 @@ func (AzureDiagnostics) SwaggerDoc() map[string]string { var map_AzureMachineProviderSpec = map[string]string{ "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. 
It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.", - "location": "Location is the region to use to create the instance", - "vmSize": "VMSize is the size of the VM to create.", - "image": "Image is the OS image to use to create the instance.", - "osDisk": "OSDisk represents the parameters for creating the OS disk.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with Azure credentials.", + "location": "location is the region to use to create the instance", + "vmSize": "vmSize is the size of the VM to create.", + "image": "image is the OS image to use to create the instance.", + "osDisk": "osDisk represents the parameters for creating the OS disk.", "dataDisks": "DataDisk specifies the parameters that are used to add one or more data disks to the machine.", - "sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.", - "publicIP": "PublicIP if true a public IP will be used", - "tags": "Tags is a list of tags to apply to the machine.", + "sshPublicKey": "sshPublicKey is the public key to use to SSH to the virtual machine.", + "publicIP": "publicIP if true a public IP will be used", + "tags": "tags is a list of tags to apply to the machine.", "securityGroup": "Network Security Group that needs to be attached to the machine's interface. 
No security group will be attached if empty.", "applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.", - "subnet": "Subnet to use for this instance", - "publicLoadBalancer": "PublicLoadBalancer to use for this instance", + "subnet": "subnet to use for this instance", + "publicLoadBalancer": "publicLoadBalancer to use for this instance", "internalLoadBalancer": "InternalLoadBalancerName to use for this instance", - "natRule": "NatRule to set inbound NAT rule of the load balancer", - "managedIdentity": "ManagedIdentity to set managed identity name", - "vnet": "Vnet to set virtual network name", + "natRule": "natRule to set inbound NAT rule of the load balancer", + "managedIdentity": "managedIdentity to set managed identity name", + "vnet": "vnet to set virtual network name", "zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone", - "networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network", - "resourceGroup": "ResourceGroup is the resource group for the virtual machine", - "spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM", - "securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.", - "ultraSSDCapability": "UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. 
More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", - "acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", - "availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. 
Availability set should be precreated, before using this field.", - "diagnostics": "Diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", + "networkResourceGroup": "networkResourceGroup is the resource group for the virtual machine's network", + "resourceGroup": "resourceGroup is the resource group for the virtual machine", + "spotVMOptions": "spotVMOptions allows the ability to specify the Machine should use a Spot VM", + "securityProfile": "securityProfile specifies the Security profile settings for a virtual machine.", + "ultraSSDCapability": "ultraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More informations on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Perisistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is ommitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. 
If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", + "acceleratedNetworking": "acceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", + "availabilitySet": "availabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", + "diagnostics": "diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.", "capacityReservationGroupID": "capacityReservationGroupID specifies the capacity reservation group resource id that should be used for allocating the virtual machine. The field size should be greater than 0 and the field input must start with '/'. The input for capacityReservationGroupID must be similar to '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}'. 
The keys which are used should be among 'subscriptions', 'providers' and 'resourcegroups' followed by valid ID or names respectively.", } @@ -218,9 +218,9 @@ func (AzureMachineProviderSpec) SwaggerDoc() map[string]string { var map_AzureMachineProviderStatus = map[string]string{ "": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "vmId": "VMID is the ID of the virtual machine created in Azure.", - "vmState": "VMState is the provisioning state of the Azure virtual machine.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.", + "vmId": "vmId is the ID of the virtual machine created in Azure.", + "vmState": "vmState is the provisioning state of the Azure virtual machine.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status.", } func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { @@ -238,12 +238,12 @@ func (ConfidentialVM) SwaggerDoc() map[string]string { var map_DataDisk = map[string]string{ "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. 
This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", - "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", - "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", - "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", - "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", - "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", - "deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". 
When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", + "nameSuffix": "nameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", + "diskSizeGB": "diskSizeGB is the size in GB to assign to the data disk.", + "managedDisk": "managedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", + "lun": "lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", + "cachingType": "cachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", + "deletionPolicy": "deletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. 
When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", } func (DataDisk) SwaggerDoc() map[string]string { @@ -252,8 +252,8 @@ func (DataDisk) SwaggerDoc() map[string]string { var map_DataDiskManagedDiskParameters = map[string]string{ "": "DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.", - "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", - "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", + "storageAccountType": "storageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", + "diskEncryptionSet": "diskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", } func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string { @@ -262,7 +262,7 @@ func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string { var map_DiskEncryptionSetParameters = map[string]string{ "": "DiskEncryptionSetParameters is the disk encryption set properties", - "id": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".", + "id": "id is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. 
Currently the default is: \"Default\".", } func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { @@ -271,7 +271,7 @@ func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { var map_DiskSettings = map[string]string{ "": "DiskSettings describe ephemeral disk settings for the os disk.", - "ephemeralStorageLocation": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", + "ephemeralStorageLocation": "ephemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", } func (DiskSettings) SwaggerDoc() map[string]string { @@ -280,12 +280,12 @@ func (DiskSettings) SwaggerDoc() map[string]string { var map_Image = map[string]string{ "": "Image is a mirror of azure sdk compute.ImageReference", - "publisher": "Publisher is the name of the organization that created the image", - "offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", - "sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", - "version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. 
Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", - "resourceID": "ResourceID specifies an image to use by ID", - "type": "Type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", + "publisher": "publisher is the name of the organization that created the image", + "offer": "offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", + "sku": "sku specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", + "version": "version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", + "resourceID": "resourceID specifies an image to use by ID", + "type": "type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. 
For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", } func (Image) SwaggerDoc() map[string]string { @@ -293,11 +293,11 @@ func (Image) SwaggerDoc() map[string]string { } var map_OSDisk = map[string]string{ - "osType": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", - "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.", - "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", - "diskSettings": "DiskSettings describe ephemeral disk settings for the os disk.", - "cachingType": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", + "osType": "osType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", + "managedDisk": "managedDisk specifies the Managed Disk parameters for the OS disk.", + "diskSizeGB": "diskSizeGB is the size in GB to assign to the data disk.", + "diskSettings": "diskSettings describe ephemeral disk settings for the os disk.", + "cachingType": "cachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", } func (OSDisk) SwaggerDoc() map[string]string { @@ -306,8 +306,8 @@ func (OSDisk) SwaggerDoc() map[string]string { var map_OSDiskManagedDiskParameters = map[string]string{ "": "OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.", - "storageAccountType": "StorageAccountType is the storage account type to use. 
Possible values include \"Standard_LRS\", \"Premium_LRS\".", - "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties", + "storageAccountType": "storageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".", + "diskEncryptionSet": "diskEncryptionSet is the disk encryption set properties", "securityProfile": "securityProfile specifies the security profile for the managed disk.", } @@ -338,7 +338,7 @@ func (SecuritySettings) SwaggerDoc() map[string]string { var map_SpotVMOptions = map[string]string{ "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs", - "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", + "maxPrice": "maxPrice defines the maximum price the user is willing to pay for Spot VM instances", } func (SpotVMOptions) SwaggerDoc() map[string]string { @@ -376,13 +376,13 @@ func (VMDiskSecurityProfile) SwaggerDoc() map[string]string { var map_GCPDisk = map[string]string{ "": "GCPDisk describes disks for GCP.", - "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", - "boot": "Boot indicates if this is a boot disk (default false).", - "sizeGb": "SizeGB is the size of the disk (in GB).", - "type": "Type is the type of the disk (eg: pd-standard).", - "image": "Image is the source image to create this disk.", - "labels": "Labels list of labels to apply to the disk.", - "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.", + "autoDelete": "autoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "boot": "boot indicates if this is a boot disk (default false).", + "sizeGb": "sizeGb is the size of the disk (in GB).", + "type": "type is the type of the disk (eg: pd-standard).", + "image": "image is the source image to create this disk.", + "labels": "labels list of labels to 
apply to the disk.", + "encryptionKey": "encryptionKey is the customer-supplied encryption key of the disk.", } func (GCPDisk) SwaggerDoc() map[string]string { @@ -392,7 +392,7 @@ func (GCPDisk) SwaggerDoc() map[string]string { var map_GCPEncryptionKeyReference = map[string]string{ "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.", "kmsKey": "KMSKeyName is the reference KMS key, in the format", - "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", + "kmsKeyServiceAccount": "kmsKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", } func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { @@ -401,8 +401,8 @@ func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { var map_GCPGPUConfig = map[string]string{ "": "GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.", - "count": "Count is the number of GPUs to be attached to an instance.", - "type": "Type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", + "count": "count is the number of GPUs to be attached to an instance.", + "type": "type is the type of GPU to be attached to an instance. 
Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", } func (GCPGPUConfig) SwaggerDoc() map[string]string { @@ -411,10 +411,10 @@ func (GCPGPUConfig) SwaggerDoc() map[string]string { var map_GCPKMSKeyReference = map[string]string{ "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", - "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.", - "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", - "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", - "location": "Location is the GCP location in which the Key Ring exists.", + "name": "name is the name of the customer managed encryption key to be used for the disk encryption.", + "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", + "location": "location is the GCP location in which the Key Ring exists.", } func (GCPKMSKeyReference) SwaggerDoc() map[string]string { @@ -424,26 +424,26 @@ func (GCPKMSKeyReference) SwaggerDoc() map[string]string { var map_GCPMachineProviderSpec = map[string]string{ "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.", - "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", - "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.", - "disks": "Disks is a list of disks to be attached to the VM.", - "labels": "Labels list of labels to apply to the VM.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with GCP credentials.", + "canIPForward": "canIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", + "deletionProtection": "deletionProtection whether the resource should be protected against deletion.", + "disks": "disks is a list of disks to be attached to the VM.", + "labels": "labels list of labels to apply to the VM.", "gcpMetadata": "Metadata key/value pairs to apply to the VM.", - "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", - "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", - "tags": "Tags list of network tags to apply to the VM.", - "targetPools": "TargetPools are used for network TCP/UDP load balancing. 
A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", - "machineType": "MachineType is the machine type to use for the VM.", - "region": "Region is the region in which the GCP machine provider will create the VM.", - "zone": "Zone is the zone in which the GCP machine provider will create the VM.", - "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", - "gpus": "GPUs is a list of GPUs to be attached to the VM.", - "preemptible": "Preemptible indicates if created instance is preemptible.", - "onHostMaintenance": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", - "restartPolicy": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", - "shieldedInstanceConfig": "ShieldedInstanceConfig is the Shielded VM configuration for the VM", + "networkInterfaces": "networkInterfaces is a list of network interfaces to be attached to the VM.", + "serviceAccounts": "serviceAccounts is a list of GCP service accounts to be used by the VM.", + "tags": "tags list of network tags to apply to the VM.", + "targetPools": "targetPools are used for network TCP/UDP load balancing. 
A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", + "machineType": "machineType is the machine type to use for the VM.", + "region": "region is the region in which the GCP machine provider will create the VM.", + "zone": "zone is the zone in which the GCP machine provider will create the VM.", + "projectID": "projectID is the project in which the GCP machine provider will create the VM.", + "gpus": "gpus is a list of GPUs to be attached to the VM.", + "preemptible": "preemptible indicates if created instance is preemptible.", + "onHostMaintenance": "onHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", + "restartPolicy": "restartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", + "shieldedInstanceConfig": "shieldedInstanceConfig is the Shielded VM configuration for the VM", "confidentialCompute": "confidentialCompute Defines whether the instance should have confidential compute enabled. If enabled OnHostMaintenance is required to be set to \"Terminate\". 
If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.", "resourceManagerTags": "resourceManagerTags is an optional list of tags to apply to the GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", } @@ -454,9 +454,9 @@ func (GCPMachineProviderSpec) SwaggerDoc() map[string]string { var map_GCPMachineProviderStatus = map[string]string{ "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the ID of the instance in GCP", - "instanceState": "InstanceState is the provisioning state of the GCP Instance.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the ID of the instance in GCP", + "instanceState": "instanceState is the provisioning state of the GCP Instance.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", } func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { @@ -465,8 +465,8 @@ func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { var map_GCPMetadata = map[string]string{ "": "GCPMetadata describes metadata for GCP.", - "key": "Key is the metadata key.", - "value": "Value is the metadata value.", + "key": "key is the metadata key.", + "value": "value is the metadata value.", } func (GCPMetadata) SwaggerDoc() map[string]string { @@ -475,10 +475,10 @@ func (GCPMetadata) SwaggerDoc() map[string]string { var map_GCPNetworkInterface = map[string]string{ "": "GCPNetworkInterface describes network interfaces for GCP", - "publicIP": 
"PublicIP indicates if true a public IP will be used", - "network": "Network is the network name.", - "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", - "subnetwork": "Subnetwork is the subnetwork name.", + "publicIP": "publicIP indicates if true a public IP will be used", + "network": "network is the network name.", + "projectID": "projectID is the project in which the GCP machine provider will create the VM.", + "subnetwork": "subnetwork is the subnetwork name.", } func (GCPNetworkInterface) SwaggerDoc() map[string]string { @@ -487,8 +487,8 @@ func (GCPNetworkInterface) SwaggerDoc() map[string]string { var map_GCPServiceAccount = map[string]string{ "": "GCPServiceAccount describes service accounts for GCP.", - "email": "Email is the service account email.", - "scopes": "Scopes list of scopes to be assigned to the service account.", + "email": "email is the service account email.", + "scopes": "scopes list of scopes to be assigned to the service account.", } func (GCPServiceAccount) SwaggerDoc() map[string]string { @@ -497,9 +497,9 @@ func (GCPServiceAccount) SwaggerDoc() map[string]string { var map_GCPShieldedInstanceConfig = map[string]string{ "": "GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. Shielded VM configuration allow users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.", - "secureBoot": "SecureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", - "virtualizedTrustedPlatformModule": "VirtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. 
The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", - "integrityMonitoring": "IntegrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "secureBoot": "secureBoot Defines whether the instance should have secure boot enabled. Secure Boot verify the digital signature of all boot components, and halting the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", + "virtualizedTrustedPlatformModule": "virtualizedTrustedPlatformModule enable virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "integrityMonitoring": "integrityMonitoring determines whether the instance should have integrity monitoring that verify the runtime boot integrity. Compares the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. 
If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", } func (GCPShieldedInstanceConfig) SwaggerDoc() map[string]string { @@ -519,10 +519,10 @@ func (ResourceManagerTag) SwaggerDoc() map[string]string { var map_LastOperation = map[string]string{ "": "LastOperation represents the detail of the last performed operation on the MachineObject.", - "description": "Description is the human-readable description of the last operation.", - "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.", - "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", - "type": "Type is the type of operation which was last performed. E.g. Create, Delete, Update etc", + "description": "description is the human-readable description of the last operation.", + "lastUpdated": "lastUpdated is the timestamp at which LastOperation API was last-updated.", + "state": "state is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", + "type": "type is the type of operation which was last performed. E.g. Create, Delete, Update etc", } func (LastOperation) SwaggerDoc() map[string]string { @@ -531,8 +531,8 @@ func (LastOperation) SwaggerDoc() map[string]string { var map_LifecycleHook = map[string]string{ "": "LifecycleHook represents a single instance of a lifecycle hook", - "name": "Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", - "owner": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. 
clusteroperator/etcd) or an administrator managing the hook.", + "name": "name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", + "owner": "owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", } func (LifecycleHook) SwaggerDoc() map[string]string { @@ -541,8 +541,8 @@ func (LifecycleHook) SwaggerDoc() map[string]string { var map_LifecycleHooks = map[string]string{ "": "LifecycleHooks allow users to pause operations on the machine at certain prefedined points within the machine lifecycle.", - "preDrain": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", - "preTerminate": "PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", + "preDrain": "preDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", + "preTerminate": "preTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained.", } func (LifecycleHooks) SwaggerDoc() map[string]string { @@ -570,10 +570,10 @@ func (MachineList) SwaggerDoc() map[string]string { var map_MachineSpec = map[string]string{ "": "MachineSpec defines the desired state of Machine", "metadata": "ObjectMeta will autopopulate the Node created. 
Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.", - "lifecycleHooks": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "lifecycleHooks": "lifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints", - "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.", - "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. 
This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", + "providerSpec": "providerSpec details Provider-specific configuration to use during node creation.", + "providerID": "providerID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI and ClusterAPI. When set to MachineAPI, writes to the spec of the machine.openshift.io copy of this resource will be reflected into the cluster.x-k8s.io copy. When set to ClusterAPI, writes to the spec of the cluster.x-k8s.io copy of this resource will be reflected into the machine.openshift.io copy. Updates to the status will be reflected in both copies of the resource, based on the controller implementing the functionality of the API. Currently the authoritative API determines which controller will manage the resource, this will change in a future release. 
To ensure the change has been accepted, please verify that the `status.authoritativeAPI` field has been updated to the desired value and that the `Synchronized` condition is present and set to `True`.", } @@ -583,15 +583,15 @@ func (MachineSpec) SwaggerDoc() map[string]string { var map_MachineStatus = map[string]string{ "": "MachineStatus defines the observed state of Machine", - "nodeRef": "NodeRef will point to the corresponding Node if it exists.", - "lastUpdated": "LastUpdated identifies when this status was last observed.", - "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", - "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. 
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", - "providerStatus": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", - "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", - "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", - "phase": "Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", - "conditions": "Conditions defines the current state of the Machine", + "nodeRef": "nodeRef will point to the corresponding Node if it exists.", + "lastUpdated": "lastUpdated identifies when this status was last observed.", + "errorReason": "errorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. 
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "errorMessage": "errorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "providerStatus": "providerStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "addresses": "addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "lastOperation": "lastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", + "phase": "phase represents the current phase of machine actuation. 
One of: Failed, Provisioning, Provisioned, Running, Deleting", + "conditions": "conditions defines the current state of the Machine", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -623,10 +623,10 @@ func (MachineHealthCheckList) SwaggerDoc() map[string]string { var map_MachineHealthCheckSpec = map[string]string{ "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", - "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", + "unhealthyConditions": "unhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", "maxUnhealthy": "Any farther remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. 
Both 0 and 0% are valid and will block all remediation.", "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", - "remediationTemplate": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", + "remediationTemplate": "remediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", } func (MachineHealthCheckSpec) SwaggerDoc() map[string]string { @@ -637,8 +637,8 @@ var map_MachineHealthCheckStatus = map[string]string{ "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck", "expectedMachines": "total number of machines counted by this machine health check", "currentHealthy": "total number of machines counted by this machine health check", - "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", - "conditions": "Conditions defines the current state of the MachineHealthCheck", + "remediationsAllowed": "remediationsAllowed is the number of further remediations allowed by 
this machine health check before maxUnhealthy short circuiting will be applied", + "conditions": "conditions defines the current state of the MachineHealthCheck", } func (MachineHealthCheckStatus) SwaggerDoc() map[string]string { @@ -674,11 +674,11 @@ func (MachineSetList) SwaggerDoc() map[string]string { var map_MachineSetSpec = map[string]string{ "": "MachineSetSpec defines the desired state of MachineSet", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", - "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", - "deletePolicy": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random, \"Newest\", \"Oldest\"", - "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", + "replicas": "replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + "deletePolicy": "deletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". 
Valid values are \"Random, \"Newest\", \"Oldest\"", + "selector": "selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the machine that will be created if insufficient replicas are detected.", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI and ClusterAPI. When set to MachineAPI, writes to the spec of the machine.openshift.io copy of this resource will be reflected into the cluster.x-k8s.io copy. When set to ClusterAPI, writes to the spec of the cluster.x-k8s.io copy of this resource will be reflected into the machine.openshift.io copy. Updates to the status will be reflected in both copies of the resource, based on the controller implementing the functionality of the API. Currently the authoritative API determines which controller will manage the resource, this will change in a future release. To ensure the change has been accepted, please verify that the `status.authoritativeAPI` field has been updated to the desired value and that the `Synchronized` condition is present and set to `True`.", } @@ -688,13 +688,13 @@ func (MachineSetSpec) SwaggerDoc() map[string]string { var map_MachineSetStatus = map[string]string{ "": "MachineSetStatus defines the observed state of MachineSet", - "replicas": "Replicas is the most recently observed number of replicas.", + "replicas": "replicas is the most recently observed number of replicas.", "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.", "readyReplicas": "The number of ready replicas for this MachineSet. 
A machine is considered ready when the node has been created and is \"Ready\".", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "observedGeneration": "observedGeneration reflects the generation of the most recently observed MachineSet.", "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", - "conditions": "Conditions defines the current state of the MachineSet", + "conditions": "conditions defines the current state of the MachineSet", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. 
When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", "synchronizedGeneration": "synchronizedGeneration is the generation of the authoritative resource that the non-authoritative resource is synchronised with. This field is set when the authoritative resource is updated and the sync controller has updated the non-authoritative resource to match.", } @@ -715,9 +715,9 @@ func (MachineTemplateSpec) SwaggerDoc() map[string]string { var map_Condition = map[string]string{ "": "Condition defines an observation of a Machine API resource operational state.", - "type": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", - "status": "Status of the condition, one of True, False, Unknown.", - "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "type": "type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "status": "status of the condition, one of True, False, Unknown.", + "severity": "severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", "lastTransitionTime": "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable.", "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", "message": "A human readable message indicating details about the transition. This field may be empty.", @@ -729,11 +729,11 @@ func (Condition) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. 
When that happens, this hack should be revisited.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "name": "name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. 
Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "generateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations", + "annotations": "annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", } @@ -743,7 +743,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_ProviderSpec = map[string]string{ "": "ProviderSpec defines the configuration to use during node creation.", - "value": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", + "value": "value is an inlined, serialized representation of the resource configuration. 
It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", } func (ProviderSpec) SwaggerDoc() map[string]string { @@ -776,7 +776,7 @@ func (NetworkDeviceSpec) SwaggerDoc() map[string]string { var map_NetworkSpec = map[string]string{ "": "NetworkSpec defines the virtual machine's network configuration.", - "devices": "Devices defines the virtual machine's network interfaces.", + "devices": "devices defines the virtual machine's network interfaces.", } func (NetworkSpec) SwaggerDoc() map[string]string { @@ -785,18 +785,18 @@ func (NetworkSpec) SwaggerDoc() map[string]string { var map_VSphereMachineProviderSpec = map[string]string{ "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", - "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.", - "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", - "workspace": "Workspace describes the workspace to use for the machine.", - "network": "Network is the network configuration for this machine's VM.", - "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. 
Defaults to the analogue property value in the template from which this machine is cloned.", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "credentialsSecret is a reference to the secret with vSphere credentials.", + "template": "template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "workspace": "workspace describes the workspace to use for the machine.", + "network": "network is the network configuration for this machine's VM.", + "numCPUs": "numCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", "numCoresPerSocket": "NumCPUs is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", - "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", - "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", + "memoryMiB": "memoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "diskGiB": "diskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", "tagIDs": "tagIDs is an optional set of tags to add to an instance. Specified tagIDs must use URN-notation instead of display names. 
A maximum of 10 tag IDs may be specified.", - "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned", - "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", + "snapshot": "snapshot is the name of the snapshot from which the VM was cloned", + "cloneMode": "cloneMode specifies the type of clone operation. The LinkedClone mode is only support for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", } func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { @@ -805,10 +805,10 @@ func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { var map_VSphereMachineProviderStatus = map[string]string{ "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", - "instanceId": "InstanceID is the ID of the instance in VSphere", - "instanceState": "InstanceState is the provisioning state of the VSphere Instance.", - "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", - "taskRef": "TaskRef is a managed object reference to a Task related to the machine. 
This value is set automatically at runtime and should not be set or modified by users.", + "instanceId": "instanceId is the ID of the instance in VSphere", + "instanceState": "instanceState is the provisioning state of the VSphere Instance.", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "taskRef": "taskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.", } func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { @@ -817,11 +817,12 @@ func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { var map_Workspace = map[string]string{ "": "WorkspaceConfig defines a workspace configuration for the vSphere cloud provider.", - "server": "Server is the IP address or FQDN of the vSphere endpoint.", - "datacenter": "Datacenter is the datacenter in which VMs are created/located.", - "folder": "Folder is the folder in which VMs are created/located.", - "datastore": "Datastore is the datastore in which VMs are created/located.", - "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.", + "server": "server is the IP address or FQDN of the vSphere endpoint.", + "datacenter": "datacenter is the datacenter in which VMs are created/located.", + "folder": "folder is the folder in which VMs are created/located.", + "datastore": "datastore is the datastore in which VMs are created/located.", + "resourcePool": "resourcePool is the resource pool in which VMs are created/located.", + "vmGroup": "vmGroup is the cluster vm group in which virtual machines will be added for vm host group based zonal.", } func (Workspace) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/register.go b/vendor/github.com/openshift/api/machineconfiguration/v1/register.go index bbafc28dea..cddaa85301 100644 --- 
a/vendor/github.com/openshift/api/machineconfiguration/v1/register.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/register.go @@ -34,6 +34,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MachineConfigList{}, &MachineConfigPool{}, &MachineConfigPoolList{}, + &MachineOSConfig{}, + &MachineOSConfigList{}, + &MachineOSBuild{}, + &MachineOSBuildList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index 01644fcf77..5ba1b9d59a 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -38,8 +38,11 @@ type ControllerConfig struct { // TODO(jkyros): inconsistent historical generation resulted in the controllerconfig CRD being // generated with all fields required, while everything else was generated with optional - // +kubebuilder:validation:Required + // spec contains the desired controller config configuration. + // +required Spec ControllerConfigSpec `json:"spec"` + + // status contains observed information about the controller config. // +optional Status ControllerConfigStatus `json:"status"` } @@ -47,11 +50,11 @@ type ControllerConfig struct { // ControllerConfigSpec is the spec for ControllerConfig resource. type ControllerConfigSpec struct { // clusterDNSIP is the cluster DNS IP address - // +kubebuilder:validation:Required + // +required ClusterDNSIP string `json:"clusterDNSIP"` // cloudProviderConfig is the configuration for the given cloud provider - // +kubebuilder:validation:Required + // +required CloudProviderConfig string `json:"cloudProviderConfig"` // platform is deprecated, use Infra.Status.PlatformStatus.Type instead @@ -65,21 +68,21 @@ type ControllerConfigSpec struct { // TODO: Use string for CA data // kubeAPIServerServingCAData managed Kubelet to API Server Cert... 
Rotated automatically - // +kubebuilder:validation:Required + // +required KubeAPIServerServingCAData []byte `json:"kubeAPIServerServingCAData"` // rootCAData specifies the root CA data - // +kubebuilder:validation:Required + // +required RootCAData []byte `json:"rootCAData"` - // cloudProvider specifies the cloud provider CA data - // +kubebuilder:validation:Required + // cloudProviderCAData specifies the cloud provider CA data + // +required // +nullable CloudProviderCAData []byte `json:"cloudProviderCAData"` // additionalTrustBundle is a certificate bundle that will be added to the nodes // trusted certificate store. - // +kubebuilder:validation:Required + // +required // +nullable AdditionalTrustBundle []byte `json:"additionalTrustBundle"` @@ -107,44 +110,44 @@ type ControllerConfigSpec struct { InternalRegistryPullSecret []byte `json:"internalRegistryPullSecret"` // images is map of images that are used by the controller to render templates under ./templates/ - // +kubebuilder:validation:Required + // +required Images map[string]string `json:"images"` - // BaseOSContainerImage is the new-format container image for operating system updates. - // +kubebuilder:validation:Required + // baseOSContainerImage is the new-format container image for operating system updates. + // +required BaseOSContainerImage string `json:"baseOSContainerImage"` - // BaseOSExtensionsContainerImage is the matching extensions container for the new-format container + // baseOSExtensionsContainerImage is the matching extensions container for the new-format container // +optional BaseOSExtensionsContainerImage string `json:"baseOSExtensionsContainerImage"` - // OSImageURL is the old-format container image that contains the OS update payload. + // osImageURL is the old-format container image that contains the OS update payload. 
// +optional OSImageURL string `json:"osImageURL"` // releaseImage is the image used when installing the cluster - // +kubebuilder:validation:Required + // +required ReleaseImage string `json:"releaseImage"` // proxy holds the current proxy configuration for the nodes - // +kubebuilder:validation:Required + // +required // +nullable Proxy *configv1.ProxyStatus `json:"proxy"` // infra holds the infrastructure details // +kubebuilder:validation:EmbeddedResource - // +kubebuilder:validation:Required + // +required // +nullable Infra *configv1.Infrastructure `json:"infra"` // dns holds the cluster dns details // +kubebuilder:validation:EmbeddedResource - // +kubebuilder:validation:Required + // +required // +nullable DNS *configv1.DNS `json:"dns"` // ipFamilies indicates the IP families in use by the cluster network - // +kubebuilder:validation:Required + // +required IPFamilies IPFamiliesType `json:"ipFamilies"` // networkType holds the type of network the cluster is using @@ -155,8 +158,8 @@ type ControllerConfigSpec struct { // +optional NetworkType string `json:"networkType,omitempty"` - // Network contains additional network related information - // +kubebuilder:validation:Required + // network contains additional network related information + // +required // +nullable Network *NetworkInfo `json:"network"` } @@ -164,10 +167,10 @@ type ControllerConfigSpec struct { // ImageRegistryBundle contains information for writing image registry certificates type ImageRegistryBundle struct { // file holds the name of the file where the bundle will be written to disk - // +kubebuilder:validation:Required + // +required File string `json:"file"` // data holds the contents of the bundle that will be written to the file location - // +kubebuilder:validation:Required + // +required Data []byte `json:"data"` } @@ -183,8 +186,8 @@ const ( // Network contains network related configuration type NetworkInfo struct { - // MTUMigration contains the MTU migration configuration. 
- // +kubebuilder:validation:Required + // mtuMigration contains the MTU migration configuration. + // +required // +nullable MTUMigration *configv1.MTUMigration `json:"mtuMigration"` } @@ -210,11 +213,11 @@ type ControllerConfigStatus struct { // ControllerCertificate contains info about a specific cert. type ControllerCertificate struct { // subject is the cert subject - // +kubebuilder:validation:Required + // +required Subject string `json:"subject"` // signer is the cert Issuer - // +kubebuilder:validation:Required + // +required Signer string `json:"signer"` // notBefore is the lower boundary for validity @@ -226,22 +229,22 @@ type ControllerCertificate struct { NotAfter *metav1.Time `json:"notAfter"` // bundleFile is the larger bundle a cert comes from - // +kubebuilder:validation:Required + // +required BundleFile string `json:"bundleFile"` } // ControllerConfigStatusCondition contains condition information for ControllerConfigStatus type ControllerConfigStatusCondition struct { // type specifies the state of the operator's reconciliation functionality. - // +kubebuilder:validation:Required + // +required Type ControllerConfigStatusConditionType `json:"type"` // status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required + // +required Status corev1.ConditionStatus `json:"status"` // lastTransitionTime is the time of the last update to the current status object. - // +kubebuilder:validation:Required + // +required // +nullable LastTransitionTime metav1.Time `json:"lastTransitionTime"` @@ -308,17 +311,17 @@ type MachineConfig struct { // MachineConfigSpec is the spec for MachineConfig type MachineConfigSpec struct { - // OSImageURL specifies the remote location that will be used to + // osImageURL specifies the remote location that will be used to // fetch the OS. 
// +optional OSImageURL string `json:"osImageURL"` - // BaseOSExtensionsContainerImage specifies the remote location that will be used + // baseOSExtensionsContainerImage specifies the remote location that will be used // to fetch the extensions container matching a new-format OS image // +optional BaseOSExtensionsContainerImage string `json:"baseOSExtensionsContainerImage"` - // Config is a Ignition Config object. + // config is a Ignition Config object. // +optional Config runtime.RawExtension `json:"config"` @@ -383,8 +386,11 @@ type MachineConfigPool struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required + // spec contains the desired machine config pool configuration. + // +required Spec MachineConfigPoolSpec `json:"spec"` + + // status contains observed information about the machine config pool. // +optional Status MachineConfigPoolStatus `json:"status"` } @@ -459,7 +465,7 @@ type PinnedImageSetRef struct { // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -520,26 +526,26 @@ type MachineConfigPoolStatus struct { // +kubebuilder:validation:XValidation:rule="self.availableMachineCount >= self.readyMachineCount", message="availableMachineCount must be greater than or equal to readyMachineCount" type PoolSynchronizerStatus struct { // poolSynchronizerType describes the type of the pool synchronizer. - // +kubebuilder:validation:Required + // +required PoolSynchronizerType PoolSynchronizerType `json:"poolSynchronizerType"` // machineCount is the number of machines that are managed by the node synchronizer. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=0 MachineCount int64 `json:"machineCount"` // updatedMachineCount is the number of machines that have been updated by the node synchronizer. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=0 UpdatedMachineCount int64 `json:"updatedMachineCount"` // readyMachineCount is the number of machines managed by the node synchronizer that are in a ready state. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=0 ReadyMachineCount int64 `json:"readyMachineCount"` // availableMachineCount is the number of machines managed by the node synchronizer which are available. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=0 AvailableMachineCount int64 `json:"availableMachineCount"` // unavailableMachineCount is the number of machines managed by the node synchronizer but are unavailable. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=0 UnavailableMachineCount int64 `json:"unavailableMachineCount"` // +kubebuilder:validation:XValidation:rule="self >= oldSelf || (self == 0 && oldSelf > 0)", message="observedGeneration must not move backwards except to zero" @@ -563,10 +569,10 @@ const ( // ceryExpiry contains the bundle name and the expiry date type CertExpiry struct { // bundle is the name of the bundle in which the subject certificate resides - // +kubebuilder:validation:Required + // +required Bundle string `json:"bundle"` // subject is the subject of the certificate - // +kubebuilder:validation:Required + // +required Subject string `json:"subject"` // expiry is the date after which the certificate will no longer be valid // +optional @@ -683,8 +689,11 @@ type KubeletConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required + // spec contains the desired kubelet 
configuration. + // +required Spec KubeletConfigSpec `json:"spec"` + + // status contains observed information about the kubelet configuration. // +optional Status KubeletConfigStatus `json:"status"` } @@ -696,7 +705,7 @@ type KubeletConfigSpec struct { // +optional LogLevel *int32 `json:"logLevel,omitempty"` - // MachineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. + // machineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. // A nil selector will result in no pools being selected. // +optional MachineConfigPoolSelector *metav1.LabelSelector `json:"machineConfigPoolSelector,omitempty"` @@ -792,20 +801,24 @@ type ContainerRuntimeConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required + // spec contains the desired container runtime configuration. + // +required Spec ContainerRuntimeConfigSpec `json:"spec"` + + // status contains observed information about the container runtime configuration. // +optional Status ContainerRuntimeConfigStatus `json:"status"` } // ContainerRuntimeConfigSpec defines the desired state of ContainerRuntimeConfig type ContainerRuntimeConfigSpec struct { - // MachineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. + // machineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. // A nil selector will result in no pools being selected. // +optional MachineConfigPoolSelector *metav1.LabelSelector `json:"machineConfigPoolSelector,omitempty"` - // +kubebuilder:validation:Required + // containerRuntimeConfig defines the tuneables of the container runtime. 
+ // +required ContainerRuntimeConfig *ContainerRuntimeConfiguration `json:"containerRuntimeConfig,omitempty"` } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go new file mode 100644 index 0000000000..8dcebebb8d --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosbuild.go @@ -0,0 +1,217 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machineosbuilds,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2090 +// +openshift:enable:FeatureGate=OnClusterBuild +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +kubebuilder:metadata:labels=openshift.io/operator-managed= +// +kubebuilder:printcolumn:name="Prepared",type="string",JSONPath=.status.conditions[?(@.type=="Prepared")].status +// +kubebuilder:printcolumn:name="Building",type="string",JSONPath=.status.conditions[?(@.type=="Building")].status +// +kubebuilder:printcolumn:name="Succeeded",type="string",JSONPath=.status.conditions[?(@.type=="Succeeded")].status +// +kubebuilder:printcolumn:name="Interrupted",type="string",JSONPath=.status.conditions[?(@.type=="Interrupted")].status +// +kubebuilder:printcolumn:name="Failed",type="string",JSONPath=.status.conditions[?(@.type=="Failed")].status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// MachineOSBuild describes a build process managed and deployed by the MCO +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type MachineOSBuild struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the configuration of the machine os build. + // It is immutable once set. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="machineOSBuildSpec is immutable once set" + // +required + Spec MachineOSBuildSpec `json:"spec"` + + // status describes the last observed state of this machine os build. + // +optional + Status MachineOSBuildStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineOSBuildList describes all of the Builds on the system +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineOSBuildList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // items contains a collection of MachineOSBuild resources. + // +optional + Items []MachineOSBuild `json:"items,omitempty"` +} + +// MachineOSBuildSpec describes information about a build process primarily populated from a MachineOSConfig object. +type MachineOSBuildSpec struct { + // machineConfig points to the rendered MachineConfig resource to be included in this image build. + // +required + MachineConfig MachineConfigReference `json:"machineConfig"` + // machineOSConfig references the MachineOSConfig resource that this image build extends. + // +required + MachineOSConfig MachineOSConfigReference `json:"machineOSConfig"` + // renderedImagePushSpec is set by the Machine Config Operator from the MachineOSConfig object this build is attached to. + // This field describes the location of the final image, which will be pushed by the build once complete. 
+ // The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + // The length of the push spec must be between 1 to 447 characters. + // +required + RenderedImagePushSpec ImageTagFormat `json:"renderedImagePushSpec"` +} + +// MachineOSBuildStatus describes the state of a build and other helpful information. +// +kubebuilder:validation:XValidation:rule="has(self.buildEnd) ? has(self.buildStart) && timestamp(self.buildStart) < timestamp(self.buildEnd) : true",message="buildEnd must be after buildStart" +type MachineOSBuildStatus struct { + // conditions are state related conditions for the build. Valid types are: + // Prepared, Building, Failed, Interrupted, and Succeeded. + // Once a Build is marked as Failed, Interrupted or Succeeded, no future conditions can be set. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="oldSelf.exists(x, x.type=='Failed' && x.status=='True') ? self==oldSelf : true",message="once a Failed condition is set, conditions are immutable" + // +kubebuilder:validation:XValidation:rule="oldSelf.exists(x, x.type=='Interrupted' && x.status=='True') ? self==oldSelf : true",message="once an Interrupted condition is set, conditions are immutable" + // +kubebuilder:validation:XValidation:rule="oldSelf.exists(x, x.type=='Succeeded' && x.status=='True') ? self==oldSelf : true",message="once an Succeeded condition is set, conditions are immutable" + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // builder describes the image builder backend used for this build. + // +optional + Builder *MachineOSBuilderReference `json:"builder,omitempty"` + // relatedObjects is a list of references to ephemeral objects such as ConfigMaps or Secrets that are meant to be consumed while the build process runs. 
+ // After a successful build or when this MachineOSBuild is deleted, these ephemeral objects will be removed. + // In the event of a failed build, the objects will remain until the build is removed to allow for inspection. + // +kubebuilder:validation:MaxItems=10 + // +listType=map + // +listMapKey=name + // +listMapKey=resource + // +optional + RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` + // buildStart is the timestamp corresponding to the build controller initiating the build backend for this MachineOSBuild. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildStart is immutable once set" + // +optional + BuildStart *metav1.Time `json:"buildStart,omitempty"` + // buildEnd is the timestamp corresponding to completion of the builder backend. + // When omitted the build has either not been started, or is in progress. + // It will be populated once the build completes, fails or is interrupted. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildEnd is immutable once set" + // +optional + BuildEnd *metav1.Time `json:"buildEnd,omitempty"` + // digestedImagePushSpec describes the fully qualified push spec produced by this build. + // The format of the push spec is: host[:port][/namespace]/name@sha256:, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The length of the whole spec must be between 1 to 447 characters. + // +optional + DigestedImagePushSpec ImageDigestFormat `json:"digestedImagePushSpec,omitempty"` +} + +// MachineOSBuilderReference describes which ImageBuilder backend to use for this build +// +union +// +kubebuilder:validation:XValidation:rule="has(self.imageBuilderType) && self.imageBuilderType == 'Job' ? 
has(self.job) : !has(self.job)",message="job is required when imageBuilderType is Job, and forbidden otherwise" +type MachineOSBuilderReference struct { + // imageBuilderType describes the type of image builder used to build this image. + // Valid values are Job only. + // When set to Job, a pod based builder, using buildah, is launched to build the specified image. + // +unionDiscriminator + // +required + ImageBuilderType MachineOSImageBuilderType `json:"imageBuilderType"` + + // job is a reference to the job object that is managing the image build. + // This is required if the imageBuilderType is Job, and forbidden otherwise. + // +unionMember + // +optional + Job *ObjectReference `json:"job,omitempty"` +} + +// BuildProgess highlights some of the key phases of a build to be tracked in Conditions. +type BuildProgress string + +const ( + // prepared indicates that the build has finished preparing. A build is prepared + // by gathering the build inputs, validating them, and making sure we can do an update as specified. + MachineOSBuildPrepared BuildProgress = "Prepared" + // building indicates that the build has been kicked off with the specified image builder + MachineOSBuilding BuildProgress = "Building" + // failed indicates that during the build or preparation process, the build failed. + MachineOSBuildFailed BuildProgress = "Failed" + // interrupted indicates that the user stopped the build process by modifying part of the build config + MachineOSBuildInterrupted BuildProgress = "Interrupted" + // succeeded indicates that the build has completed and the image is ready to roll out. + MachineOSBuildSucceeded BuildProgress = "Succeeded" +) + +// Refers to the name of a rendered MachineConfig (e.g., "rendered-worker-ec40d2965ff81bce7cd7a7e82a680739", etc.): +// the build targets this MachineConfig, this is often used to tell us whether we need an update. +type MachineConfigReference struct { + // name is the name of the rendered MachineConfig object. 
+ // This value should be between 10 and 253 characters, and must contain only lowercase + // alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + // +kubebuilder:validation:MinLength:=10 + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // group of the referent. + // The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + // Example: "", "apps", "build.openshift.io", etc. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MaxLength:=253 + // +required + Group string `json:"group"` + // resource of the referent. + // This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + // and should start and end with an alphanumeric character. + // Example: "deployments", "deploymentconfigs", "pods", etc. + // +required + // +kubebuilder:validation:XValidation:rule=`!format.dns1123Label().validate(self).hasValue()`,message="the value must consist of only lowercase alphanumeric characters and hyphens" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + Resource string `json:"resource"` + // namespace of the referent. 
+ // This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + // and should start and end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule=`!format.dns1123Label().validate(self).hasValue()`,message="the value must consist of only lowercase alphanumeric characters and hyphens" + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +optional + Namespace string `json:"namespace,omitempty"` + // name of the referent. + // The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name"` +} + +// MachineOSConfigReference refers to the MachineOSConfig this build is based off of +type MachineOSConfigReference struct { + // name of the MachineOSConfig. + // The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." 
+ // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +required + Name string `json:"name"` +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosconfig.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosconfig.go new file mode 100644 index 0000000000..9cf1553d9d --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types_machineosconfig.go @@ -0,0 +1,217 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machineosconfigs,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2090 +// +openshift:enable:FeatureGate=OnClusterBuild +// +openshift:file-pattern=cvoRunLevel=0000_80,operatorName=machine-config,operatorOrdering=01 +// +kubebuilder:metadata:labels=openshift.io/operator-managed= + +// MachineOSConfig describes the configuration for a build process managed by the MCO +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineOSConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the configuration of the machineosconfig + // +required + Spec MachineOSConfigSpec `json:"spec"` + + // status describes the status of the machineosconfig + // +optional + Status MachineOSConfigStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineOSConfigList describes all configurations for image builds on the system +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type MachineOSConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // items contains a collection of MachineOSConfig resources. + // +optional + Items []MachineOSConfig `json:"items"` +} + +// MachineOSConfigSpec describes user-configurable options as well as information about a build process. +type MachineOSConfigSpec struct { + // machineConfigPool is the pool which the build is for. + // The Machine Config Operator will perform the build and roll out the built image to the specified pool. + // +required + MachineConfigPool MachineConfigPoolReference `json:"machineConfigPool"` + // imageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig. + // Currently supported type(s): Job + // +required + ImageBuilder MachineOSImageBuilder `json:"imageBuilder"` + // baseImagePullSecret is the secret used to pull the base image. + // Must live in the openshift-machine-config-operator namespace if provided. + // Defaults to using the cluster-wide pull secret if not specified. This is provided during install time of the cluster, and lives in the openshift-config namespace as a secret. 
+ // +optional + BaseImagePullSecret *ImageSecretObjectReference `json:"baseImagePullSecret,omitempty"` + // renderedImagePushSecret is the secret used to connect to a user registry. + // The final image push and pull secrets should be separate and assume the principal of least privilege. + // The push secret with write privilege is only required to be present on the node hosting the MachineConfigController pod. + // The pull secret with read only privileges is required on all nodes. + // By separating the two secrets, the risk of write credentials becoming compromised is reduced. + // +required + RenderedImagePushSecret ImageSecretObjectReference `json:"renderedImagePushSecret"` + // renderedImagePushSpec describes the location of the final image. + // The MachineOSConfig object will use the in cluster image registry configuration. + // If you wish to use a mirror or any other settings specific to registries.conf, please specify those in the cluster wide registries.conf via the cluster image.config, ImageContentSourcePolicies, ImageDigestMirrorSet, or ImageTagMirrorSet objects. + // The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + // The length of the push spec must be between 1 to 447 characters. + // +required + RenderedImagePushSpec ImageTagFormat `json:"renderedImagePushSpec"` + // containerFile describes the custom data the user has specified to build into the image. + // This is also commonly called a Dockerfile and you can treat it as such. The content is the content of your Dockerfile. + // See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + // This is a list indexed by architecture name (e.g. AMD64), and allows specifying one containerFile per arch, up to 4. 
+ // +patchMergeKey=containerfileArch + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerfileArch + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=4 + // +optional + Containerfile []MachineOSContainerfile `json:"containerFile" patchStrategy:"merge" patchMergeKey:"containerfileArch"` +} + +// MachineOSConfigStatus describes the status this config object and relates it to the builds associated with this MachineOSConfig +type MachineOSConfigStatus struct { + // conditions are state related conditions for the object. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + // TODO(jerzhang): add godoc after conditions are finalized. Also consider adding printer columns. + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // observedGeneration represents the generation of the MachineOSConfig object observed by the Machine Config Operator's build controller. + // +kubebuilder:validation:XValidation:rule="self >= oldSelf", message="observedGeneration must not move backwards" + // +kubebuilder:validation:Minimum=0 + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // currentImagePullSpec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This includes the sha256 image digest. + // This is generated when the Machine Config Operator's build controller successfully completes the build, and is populated from the corresponding + // MachineOSBuild object's FinalImagePushSpec. This may change after completion in reaction to spec changes that would cause a new image build, + // but will not be removed. + // The format of the image pull spec is: host[:port][/namespace]/name@sha256:, + // where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. 
+ // The length of the whole spec must be between 1 to 447 characters. + // +optional + CurrentImagePullSpec ImageDigestFormat `json:"currentImagePullSpec,omitempty"` + // machineOSBuild is a reference to the MachineOSBuild object for this MachineOSConfig, which contains the status for the image build. + // +optional + MachineOSBuild *ObjectReference `json:"machineOSBuild,omitempty"` +} + +type MachineOSImageBuilder struct { + // imageBuilderType specifies the backend to be used to build the image. + // +kubebuilder:validation:Enum:=Job + // Valid options are: Job + // +required + ImageBuilderType MachineOSImageBuilderType `json:"imageBuilderType"` +} + +// MachineOSContainerfile contains all custom content the user wants built into the image +type MachineOSContainerfile struct { + // containerfileArch describes the architecture this containerfile is to be built for. + // This arch is optional. If the user does not specify an architecture, it is assumed + // that the content can be applied to all architectures, or in a single arch cluster: the only architecture. + // +kubebuilder:validation:Enum:=ARM64;AMD64;PPC64LE;S390X;NoArch + // +kubebuilder:default:=NoArch + // +optional + ContainerfileArch ContainerfileArch `json:"containerfileArch,omitempty"` + // content is an embedded Containerfile/Dockerfile that defines the contents to be built into your image. + // See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + // for example, this would add the tree package to your hosts: + // FROM configs AS final + // RUN rpm-ostree install tree && \ + // ostree container commit + // This is a required field and can have a maximum length of **4096** characters. 
+ // +required + // +kubebuilder:validation:MaxLength=4096 + Content string `json:"content"` +} + +// +enum +type ContainerfileArch string + +const ( + // describes the arm64 architecture + Arm64 ContainerfileArch = "ARM64" + // describes the amd64 architecture + Amd64 ContainerfileArch = "AMD64" + // describes the ppc64le architecture + Ppc ContainerfileArch = "PPC64LE" + // describes the s390x architecture + S390 ContainerfileArch = "S390X" + // describes a containerfile that can be applied to any arch + NoArch ContainerfileArch = "NoArch" +) + +// Refers to the name of a MachineConfigPool (e.g., "worker", "infra", etc.): +// the MachineOSBuilder pod validates that the user has provided a valid pool +type MachineConfigPoolReference struct { + // name of the MachineConfigPool object. + // This value should be at most 253 characters, and must contain only lowercase + // alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` +} + +// Refers to the name of an image registry push/pull secret needed in the build process. +type ImageSecretObjectReference struct { + // name is the name of the secret used to push or pull this MachineOSConfig object. + // Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + // This secret must be in the openshift-machine-config-operator namespace. 
+ // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +required + Name string `json:"name"` +} + +// ImageTagFormat is a type that conforms to the format host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. +// The length of the field must be between 1 to 447 characters. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=447 +// +kubebuilder:validation:XValidation:rule=`self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme. Or it must be a valid .svc followed by a port, repository, image name, and tag." +type ImageTagFormat string + +// ImageDigestFormat is a type that conforms to the format host[:port][/namespace]/name@sha256:. +// The digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. +// The length of the field must be between 1 to 447 characters. 
+// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=447 +// +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" +// +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme" +type ImageDigestFormat string + +// +enum +type MachineOSImageBuilderType string + +const ( + // describes that the machine-os-builder will use a Job to spin up a custom pod builder that uses buildah + JobBuilder MachineOSImageBuilderType = "Job" +) diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_containerruntimeconfigs.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_containerruntimeconfigs.crd.yaml index bd5297b1e6..37dd845354 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_containerruntimeconfigs.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_containerruntimeconfigs.crd.yaml @@ -46,11 +46,11 @@ spec: metadata: type: object spec: - description: ContainerRuntimeConfigSpec defines the desired state of ContainerRuntimeConfig + description: spec contains the desired container runtime configuration. properties: containerRuntimeConfig: - description: ContainerRuntimeConfiguration defines the tuneables of - the container runtime + description: containerRuntimeConfig defines the tuneables of the container + runtime. 
properties: defaultRuntime: description: defaultRuntime is the name of the OCI runtime to @@ -88,7 +88,7 @@ spec: type: object machineConfigPoolSelector: description: |- - MachineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. + machineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. A nil selector will result in no pools being selected. properties: matchExpressions: @@ -138,8 +138,8 @@ spec: - containerRuntimeConfig type: object status: - description: ContainerRuntimeConfigStatus defines the observed state of - a ContainerRuntimeConfig + description: status contains observed information about the container + runtime configuration. properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml index 989d964276..b51ea7a749 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml @@ -46,7 +46,7 @@ spec: metadata: type: object spec: - description: ControllerConfigSpec is the spec for ControllerConfig resource. + description: spec contains the desired controller config configuration. properties: additionalTrustBundle: description: |- @@ -56,15 +56,15 @@ spec: nullable: true type: string baseOSContainerImage: - description: BaseOSContainerImage is the new-format container image + description: baseOSContainerImage is the new-format container image for operating system updates. 
type: string baseOSExtensionsContainerImage: - description: BaseOSExtensionsContainerImage is the matching extensions + description: baseOSExtensionsContainerImage is the matching extensions container for the new-format container type: string cloudProviderCAData: - description: cloudProvider specifies the cloud provider CA data + description: cloudProviderCAData specifies the cloud provider CA data format: byte nullable: true type: string @@ -337,7 +337,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string @@ -350,11 +350,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -386,11 +386,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIPs: @@ -477,7 +477,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
type: object external: @@ -488,7 +488,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -496,19 +496,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -569,6 +569,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -605,17 +606,20 @@ spec: when type is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? 
has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == + y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -699,7 +703,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIPs: @@ -786,11 +790,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -809,7 +813,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -858,7 +876,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. 
properties: apiServerInternalIPs: @@ -895,9 +913,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. properties: name: description: |- @@ -914,6 +932,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: description: server is the fully-qualified domain name or the IP address of the vCenter server. @@ -921,7 +958,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -1010,6 +1047,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. 
Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is + HostGroup, and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? 
has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -1239,6 +1337,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1312,7 +1411,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. properties: region: @@ -1356,7 +1455,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1501,7 +1600,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1528,7 +1627,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1604,7 +1703,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1714,7 +1813,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
properties: apiServerInternalIP: @@ -1731,7 +1830,7 @@ spec: type: string type: object external: - description: External contains settings specific to the + description: external contains settings specific to the generic External infrastructure provider. properties: cloudControllerManager: @@ -1771,7 +1870,7 @@ spec: removed once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1999,29 +2098,29 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
type: string serviceEndpoints: @@ -2076,7 +2175,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. properties: apiServerInternalIP: @@ -2093,7 +2192,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -2177,7 +2276,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2292,7 +2391,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2381,17 +2480,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2427,7 +2526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2487,7 +2600,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIP: @@ -2619,39 +2732,39 @@ spec: format: byte type: string network: - description: Network contains additional network related information + description: network contains additional network related information nullable: true properties: mtuMigration: - description: MTUMigration contains the MTU migration configuration. + description: mtuMigration contains the MTU migration configuration. nullable: true properties: machine: - description: Machine contains MTU migration configuration + description: machine contains MTU migration configuration for the machine's uplink. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. 
format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer type: object network: - description: Network contains MTU migration configuration + description: network contains MTU migration configuration for the default network. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer @@ -2669,7 +2782,7 @@ spec: regeneration if this changes. type: string osImageURL: - description: OSImageURL is the old-format container image that contains + description: osImageURL is the old-format container image that contains the OS update payload. type: string platform: @@ -2760,7 +2873,8 @@ spec: - rootCAData type: object status: - description: ControllerConfigStatus is the status for ControllerConfig + description: status contains observed information about the controller + config. properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml index c30a63e346..5886525c9a 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml @@ -46,7 +46,7 @@ spec: metadata: type: object spec: - description: ControllerConfigSpec is the spec for ControllerConfig resource. 
+ description: spec contains the desired controller config configuration. properties: additionalTrustBundle: description: |- @@ -56,15 +56,15 @@ spec: nullable: true type: string baseOSContainerImage: - description: BaseOSContainerImage is the new-format container image + description: baseOSContainerImage is the new-format container image for operating system updates. type: string baseOSExtensionsContainerImage: - description: BaseOSExtensionsContainerImage is the matching extensions + description: baseOSExtensionsContainerImage is the matching extensions container for the new-format container type: string cloudProviderCAData: - description: cloudProvider specifies the cloud provider CA data + description: cloudProviderCAData specifies the cloud provider CA data format: byte nullable: true type: string @@ -337,7 +337,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string @@ -350,11 +350,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -386,11 +386,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. 
properties: apiServerInternalIPs: @@ -477,7 +477,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -488,7 +488,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -496,19 +496,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. type: object ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -569,6 +569,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -608,9 +609,7 @@ spec: maxItems: 1 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic required: - cluster - name @@ -699,7 +698,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIPs: @@ -786,11 +785,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -809,7 +808,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -858,7 +871,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -895,9 +908,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. 
items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. properties: name: description: |- @@ -914,6 +927,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. + When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: description: server is the fully-qualified domain name or the IP address of the vCenter server. @@ -921,7 +953,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -1010,6 +1042,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. 
+ properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is + HostGroup, and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? 
has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -1184,7 +1277,7 @@ spec: - datacenters - server type: object - maxItems: 1 + maxItems: 3 minItems: 0 type: array x-kubernetes-list-type: atomic @@ -1312,7 +1405,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. properties: region: @@ -1356,7 +1449,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: region: @@ -1397,7 +1490,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1424,7 +1517,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1500,7 +1593,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1610,7 +1703,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. 
properties: apiServerInternalIP: @@ -1627,7 +1720,7 @@ spec: type: string type: object external: - description: External contains settings specific to the + description: external contains settings specific to the generic External infrastructure provider. properties: cloudControllerManager: @@ -1667,7 +1760,7 @@ spec: removed once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. properties: projectID: @@ -1791,29 +1884,29 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. 
type: string serviceEndpoints: @@ -1868,7 +1961,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. properties: apiServerInternalIP: @@ -1885,7 +1978,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: apiServerInternalIP: @@ -1969,7 +2062,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2084,7 +2177,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2173,17 +2266,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. 
properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2219,7 +2312,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2279,7 +2386,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIP: @@ -2411,39 +2518,39 @@ spec: format: byte type: string network: - description: Network contains additional network related information + description: network contains additional network related information nullable: true properties: mtuMigration: - description: MTUMigration contains the MTU migration configuration. + description: mtuMigration contains the MTU migration configuration. nullable: true properties: machine: - description: Machine contains MTU migration configuration + description: machine contains MTU migration configuration for the machine's uplink. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. 
format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer type: object network: - description: Network contains MTU migration configuration + description: network contains MTU migration configuration for the default network. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer @@ -2461,7 +2568,7 @@ spec: regeneration if this changes. type: string osImageURL: - description: OSImageURL is the old-format container image that contains + description: osImageURL is the old-format container image that contains the OS update payload. type: string platform: @@ -2552,7 +2659,8 @@ spec: - rootCAData type: object status: - description: ControllerConfigStatus is the status for ControllerConfig + description: status contains observed information about the controller + config. 
properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml index ccce2d520c..dbc9258d23 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml @@ -46,7 +46,7 @@ spec: metadata: type: object spec: - description: ControllerConfigSpec is the spec for ControllerConfig resource. + description: spec contains the desired controller config configuration. properties: additionalTrustBundle: description: |- @@ -56,15 +56,15 @@ spec: nullable: true type: string baseOSContainerImage: - description: BaseOSContainerImage is the new-format container image + description: baseOSContainerImage is the new-format container image for operating system updates. type: string baseOSExtensionsContainerImage: - description: BaseOSExtensionsContainerImage is the matching extensions + description: baseOSExtensionsContainerImage is the matching extensions container for the new-format container type: string cloudProviderCAData: - description: cloudProvider specifies the cloud provider CA data + description: cloudProviderCAData specifies the cloud provider CA data format: byte nullable: true type: string @@ -337,7 +337,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value + description: key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references. type: string @@ -350,11 +350,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -386,11 +386,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIPs: @@ -477,7 +477,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -488,7 +488,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -496,19 +496,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. 
type: object ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -569,6 +569,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -605,17 +606,20 @@ spec: when type is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == + y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -699,7 +703,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. 
properties: apiServerInternalIPs: @@ -786,11 +790,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -809,7 +813,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -858,7 +876,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -895,9 +913,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. properties: name: description: |- @@ -914,6 +932,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. 
+ When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: description: server is the fully-qualified domain name or the IP address of the vCenter server. @@ -921,7 +958,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -1010,6 +1047,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. 
+ This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is + HostGroup, and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -1239,6 +1337,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1312,7 +1411,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
properties: region: @@ -1356,7 +1455,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1501,7 +1600,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1528,7 +1627,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1604,7 +1703,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1714,7 +1813,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1731,7 +1830,7 @@ spec: type: string type: object external: - description: External contains settings specific to the + description: external contains settings specific to the generic External infrastructure provider. properties: cloudControllerManager: @@ -1771,7 +1870,7 @@ spec: removed once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. 
properties: cloudLoadBalancerConfig: @@ -1999,29 +2098,29 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -2076,7 +2175,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. properties: apiServerInternalIP: @@ -2093,7 +2192,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. 
properties: apiServerInternalIP: @@ -2177,7 +2276,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2292,7 +2391,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2381,17 +2480,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2427,7 +2526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2487,7 +2600,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. 
properties: apiServerInternalIP: @@ -2619,39 +2732,39 @@ spec: format: byte type: string network: - description: Network contains additional network related information + description: network contains additional network related information nullable: true properties: mtuMigration: - description: MTUMigration contains the MTU migration configuration. + description: mtuMigration contains the MTU migration configuration. nullable: true properties: machine: - description: Machine contains MTU migration configuration + description: machine contains MTU migration configuration for the machine's uplink. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer type: object network: - description: Network contains MTU migration configuration + description: network contains MTU migration configuration for the default network. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer @@ -2669,7 +2782,7 @@ spec: regeneration if this changes. type: string osImageURL: - description: OSImageURL is the old-format container image that contains + description: osImageURL is the old-format container image that contains the OS update payload. type: string platform: @@ -2760,7 +2873,8 @@ spec: - rootCAData type: object status: - description: ControllerConfigStatus is the status for ControllerConfig + description: status contains observed information about the controller + config. 
properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml index 8fad75fc37..4fc2a73abc 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml @@ -46,7 +46,7 @@ spec: metadata: type: object spec: - description: ControllerConfigSpec is the spec for ControllerConfig resource. + description: spec contains the desired controller config configuration. properties: additionalTrustBundle: description: |- @@ -56,15 +56,15 @@ spec: nullable: true type: string baseOSContainerImage: - description: BaseOSContainerImage is the new-format container image + description: baseOSContainerImage is the new-format container image for operating system updates. type: string baseOSExtensionsContainerImage: - description: BaseOSExtensionsContainerImage is the matching extensions + description: baseOSExtensionsContainerImage is the matching extensions container for the new-format container type: string cloudProviderCAData: - description: cloudProvider specifies the cloud provider CA data + description: cloudProviderCAData specifies the cloud provider CA data format: byte nullable: true type: string @@ -337,7 +337,7 @@ spec: All the clients are expected to use the generated ConfigMap only. properties: key: - description: Key allows pointing to a specific key/value + description: key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references. type: string @@ -350,11 +350,11 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: serviceEndpoints: @@ -386,11 +386,11 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. type: object baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIPs: @@ -477,7 +477,7 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. type: object external: @@ -488,7 +488,7 @@ spec: platformName: default: Unknown description: |- - PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. + platformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. type: string x-kubernetes-validations: @@ -496,19 +496,19 @@ spec: rule: oldSelf == 'Unknown' || self == oldSelf type: object gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. 
type: object ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. properties: failureDomains: @@ -569,6 +569,7 @@ spec: subnets: description: |- subnets holds a list of identifiers (one or more) of the cluster's network subnets + If the feature gate NutanixMultiSubnets is enabled, up to 32 subnets may be configured. for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API. items: @@ -605,17 +606,20 @@ spec: when type is Name, and forbidden otherwise rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) : !has(self.name)' - maxItems: 1 + maxItems: 32 minItems: 1 type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: each subnet must be unique + rule: self.all(x, self.exists_one(y, x == + y)) required: - cluster - name - subnets type: object + maxItems: 32 type: array x-kubernetes-list-map-keys: - name @@ -699,7 +703,7 @@ spec: - prismElements type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. 
properties: apiServerInternalIPs: @@ -786,11 +790,11 @@ spec: - message: ingressIPs list is required once set rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. properties: serviceEndpoints: @@ -809,7 +813,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -858,7 +876,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. properties: apiServerInternalIPs: @@ -895,9 +913,9 @@ spec: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. items: - description: |- - VSpherePlatformFailureDomainSpec holds the region and zone failure domain and - the vCenter topology of that failure domain. + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. properties: name: description: |- @@ -914,6 +932,25 @@ spec: maxLength: 80 minLength: 1 type: string + regionAffinity: + description: |- + regionAffinity holds the type of region, Datacenter or ComputeCluster. 
+ When set to Datacenter, this means the region is a vCenter Datacenter as defined in topology. + When set to ComputeCluster, this means the region is a vCenter Cluster as defined in topology. + properties: + type: + description: |- + type determines the vSphere object type for a region within this failure domain. + Available types are Datacenter and ComputeCluster. + When set to Datacenter, this means the vCenter Datacenter defined is the region. + When set to ComputeCluster, this means the vCenter cluster defined is the region. + enum: + - ComputeCluster + - Datacenter + type: string + required: + - type + type: object server: description: server is the fully-qualified domain name or the IP address of the vCenter server. @@ -921,7 +958,7 @@ spec: minLength: 1 type: string topology: - description: Topology describes a given failure + description: topology describes a given failure domain using vSphere constructs properties: computeCluster: @@ -1010,6 +1047,67 @@ spec: maxLength: 80 minLength: 1 type: string + zoneAffinity: + description: |- + zoneAffinity holds the type of the zone and the hostGroup which + vmGroup and the hostGroup names in vCenter corresponds to + a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup holds the vmGroup and the hostGroup names in vCenter + corresponds to a vm-host group of type Virtual Machine and Host respectively. Is also + contains the vmHostRule which is an affinity vm-host rule in vCenter. + properties: + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. 
+ This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmGroup: + description: |- + vmGroup is the name of the vm-host group of type virtual machine within vCenter for this failure domain. + vmGroup is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + vmHostRule: + description: |- + vmHostRule is the name of the affinity vm-host rule within vCenter for this failure domain. + vmHostRule is limited to 80 characters. + This field is required when the VSphereFailureDomain ZoneType is HostGroup + maxLength: 80 + minLength: 1 + type: string + required: + - hostGroup + - vmGroup + - vmHostRule + type: object + type: + description: |- + type determines the vSphere object type for a zone within this failure domain. + Available types are ComputeCluster and HostGroup. + When set to ComputeCluster, this means the vCenter cluster defined is the zone. + When set to HostGroup, hostGroup must be configured with hostGroup, vmGroup and vmHostRule and + this means the zone is defined by the grouping of those fields. + enum: + - HostGroup + - ComputeCluster + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: hostGroup is required when type is + HostGroup, and forbidden otherwise + rule: 'has(self.type) && self.type == ''HostGroup'' + ? has(self.hostGroup) : !has(self.hostGroup)' required: - name - region @@ -1239,6 +1337,7 @@ spec: its components are not visible within the cluster. enum: - HighlyAvailable + - HighlyAvailableArbiter - SingleReplica - External type: string @@ -1312,7 +1411,7 @@ spec: infrastructure provider. properties: alibabaCloud: - description: AlibabaCloud contains settings specific to + description: alibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
properties: region: @@ -1356,7 +1455,7 @@ spec: - region type: object aws: - description: AWS contains settings specific to the Amazon + description: aws contains settings specific to the Amazon Web Services infrastructure provider. properties: cloudLoadBalancerConfig: @@ -1501,7 +1600,7 @@ spec: x-kubernetes-list-type: atomic serviceEndpoints: description: |- - ServiceEndpoints list contains custom endpoints which will override default + serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. items: @@ -1528,7 +1627,7 @@ spec: x-kubernetes-list-type: atomic type: object azure: - description: Azure contains settings specific to the Azure + description: azure contains settings specific to the Azure infrastructure provider. properties: armEndpoint: @@ -1604,7 +1703,7 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: - description: BareMetal contains settings specific to the + description: baremetal contains settings specific to the BareMetal platform. properties: apiServerInternalIP: @@ -1714,7 +1813,7 @@ spec: type: string type: object equinixMetal: - description: EquinixMetal contains settings specific to + description: equinixMetal contains settings specific to the Equinix Metal infrastructure provider. properties: apiServerInternalIP: @@ -1731,7 +1830,7 @@ spec: type: string type: object external: - description: External contains settings specific to the + description: external contains settings specific to the generic External infrastructure provider. properties: cloudControllerManager: @@ -1771,7 +1870,7 @@ spec: removed once set rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: - description: GCP contains settings specific to the Google + description: gcp contains settings specific to the Google Cloud Platform infrastructure provider. 
properties: cloudLoadBalancerConfig: @@ -1999,29 +2098,29 @@ spec: rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: - description: IBMCloud contains settings specific to the + description: ibmcloud contains settings specific to the IBMCloud infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string location: - description: Location is where the cluster has been + description: location is where the cluster has been deployed type: string providerType: - description: ProviderType indicates the type of cluster + description: providerType indicates the type of cluster that was created type: string resourceGroupName: - description: ResourceGroupName is the Resource Group + description: resourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string serviceEndpoints: @@ -2076,7 +2175,7 @@ spec: x-kubernetes-list-type: map type: object kubevirt: - description: Kubevirt contains settings specific to the + description: kubevirt contains settings specific to the kubevirt infrastructure provider. properties: apiServerInternalIP: @@ -2093,7 +2192,7 @@ spec: type: string type: object nutanix: - description: Nutanix contains settings specific to the + description: nutanix contains settings specific to the Nutanix infrastructure provider. 
properties: apiServerInternalIP: @@ -2177,7 +2276,7 @@ spec: type: object type: object openstack: - description: OpenStack contains settings specific to the + description: openstack contains settings specific to the OpenStack infrastructure provider. properties: apiServerInternalIP: @@ -2292,7 +2391,7 @@ spec: type: string type: object ovirt: - description: Ovirt contains settings specific to the oVirt + description: ovirt contains settings specific to the oVirt infrastructure provider. properties: apiServerInternalIP: @@ -2381,17 +2480,17 @@ spec: type: string type: object powervs: - description: PowerVS contains settings specific to the + description: powervs contains settings specific to the Power Systems Virtual Servers infrastructure provider. properties: cisInstanceCRN: description: |- - CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain type: string dnsInstanceCRN: description: |- - DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone + dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain type: string region: @@ -2427,7 +2526,21 @@ spec: IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - pattern: ^[a-z0-9-]+$ + enum: + - CIS + - COS + - COSConfig + - DNSServices + - GlobalCatalog + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - Power + - ResourceController + - ResourceManager + - VPC type: string url: description: |- @@ -2487,7 +2600,7 @@ spec: - External type: string vsphere: - description: VSphere contains settings specific to the + description: vsphere contains settings specific to the VSphere infrastructure provider. 
properties: apiServerInternalIP: @@ -2619,39 +2732,39 @@ spec: format: byte type: string network: - description: Network contains additional network related information + description: network contains additional network related information nullable: true properties: mtuMigration: - description: MTUMigration contains the MTU migration configuration. + description: mtuMigration contains the MTU migration configuration. nullable: true properties: machine: - description: Machine contains MTU migration configuration + description: machine contains MTU migration configuration for the machine's uplink. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer type: object network: - description: Network contains MTU migration configuration + description: network contains MTU migration configuration for the default network. properties: from: - description: From is the MTU to migrate from. + description: from is the MTU to migrate from. format: int32 minimum: 0 type: integer to: - description: To is the MTU to migrate to. + description: to is the MTU to migrate to. format: int32 minimum: 0 type: integer @@ -2669,7 +2782,7 @@ spec: regeneration if this changes. type: string osImageURL: - description: OSImageURL is the old-format container image that contains + description: osImageURL is the old-format container image that contains the OS update payload. type: string platform: @@ -2760,7 +2873,8 @@ spec: - rootCAData type: object status: - description: ControllerConfigStatus is the status for ControllerConfig + description: status contains observed information about the controller + config. 
properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_kubeletconfigs.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_kubeletconfigs.crd.yaml index 2da51f8c75..b056dc99c1 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_kubeletconfigs.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_kubeletconfigs.crd.yaml @@ -44,7 +44,7 @@ spec: metadata: type: object spec: - description: KubeletConfigSpec defines the desired state of KubeletConfig + description: spec contains the desired kubelet configuration. properties: autoSizingReserved: type: boolean @@ -61,7 +61,7 @@ spec: type: integer machineConfigPoolSelector: description: |- - MachineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. + machineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. A nil selector will result in no pools being selected. properties: matchExpressions: @@ -308,7 +308,7 @@ spec: type: object type: object status: - description: KubeletConfigStatus defines the observed state of a KubeletConfig + description: status contains observed information about the kubelet configuration. 
properties: conditions: description: conditions represents the latest available observations diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml index 5d8bf7f135..e4cd170511 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml @@ -86,7 +86,7 @@ spec: metadata: type: object spec: - description: MachineConfigPoolSpec is the spec for MachineConfigPool resource. + description: spec contains the desired machine config pool configuration. properties: configuration: description: The targeted MachineConfig object for the machine config @@ -341,8 +341,8 @@ spec: x-kubernetes-list-type: map type: object status: - description: MachineConfigPoolStatus is the status for MachineConfigPool - resource. + description: status contains observed information about the machine config + pool. 
properties: certExpirys: description: certExpirys keeps track of important certificate expiration diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml index 07c16fb0ac..a198447e08 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-Default.crd.yaml @@ -86,7 +86,7 @@ spec: metadata: type: object spec: - description: MachineConfigPoolSpec is the spec for MachineConfigPool resource. + description: spec contains the desired machine config pool configuration. properties: configuration: description: The targeted MachineConfig object for the machine config @@ -300,8 +300,8 @@ spec: type: boolean type: object status: - description: MachineConfigPoolStatus is the status for MachineConfigPool - resource. + description: status contains observed information about the machine config + pool. 
properties: certExpirys: description: certExpirys keeps track of important certificate expiration diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml index 1098ff23fa..ddd7c3b682 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml @@ -86,7 +86,7 @@ spec: metadata: type: object spec: - description: MachineConfigPoolSpec is the spec for MachineConfigPool resource. + description: spec contains the desired machine config pool configuration. properties: configuration: description: The targeted MachineConfig object for the machine config @@ -341,8 +341,8 @@ spec: x-kubernetes-list-type: map type: object status: - description: MachineConfigPoolStatus is the status for MachineConfigPool - resource. + description: status contains observed information about the machine config + pool. 
properties: certExpirys: description: certExpirys keeps track of important certificate expiration diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml index e1da019008..ef048f6a84 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml @@ -86,7 +86,7 @@ spec: metadata: type: object spec: - description: MachineConfigPoolSpec is the spec for MachineConfigPool resource. + description: spec contains the desired machine config pool configuration. properties: configuration: description: The targeted MachineConfig object for the machine config @@ -341,8 +341,8 @@ spec: x-kubernetes-list-type: map type: object status: - description: MachineConfigPoolStatus is the status for MachineConfigPool - resource. + description: status contains observed information about the machine config + pool. 
properties: certExpirys: description: certExpirys keeps track of important certificate expiration diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigs.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigs.crd.yaml index f878dd7ed5..df90d44d02 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigs.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigs.crd.yaml @@ -63,11 +63,11 @@ spec: properties: baseOSExtensionsContainerImage: description: |- - BaseOSExtensionsContainerImage specifies the remote location that will be used + baseOSExtensionsContainerImage specifies the remote location that will be used to fetch the extensions container matching a new-format OS image type: string config: - description: Config is a Ignition Config object. + description: config is a Ignition Config object. type: object x-kubernetes-preserve-unknown-fields: true extensions: @@ -95,7 +95,7 @@ spec: type: string osImageURL: description: |- - OSImageURL specifies the remote location that will be used to + osImageURL specifies the remote location that will be used to fetch the OS. 
type: string type: object diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..a550b96c59 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosbuilds.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSBuild + listKind: MachineOSBuildList + plural: machineosbuilds + singular: machineosbuild + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSBuild describes a build process managed and deployed by the MCO + Compatibility 
level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + spec describes the configuration of the machine os build. + It is immutable once set. + properties: + machineConfig: + description: machineConfig points to the rendered MachineConfig resource + to be included in this image build. + properties: + name: + description: |- + name is the name of the rendered MachineConfig object. + This value should be between 10 and 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + minLength: 10 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + machineOSConfig: + description: machineOSConfig references the MachineOSConfig resource + that this image build extends. + properties: + name: + description: |- + name of the MachineOSConfig. + The name must contain only lowercase alphanumeric characters, '-' or '.' 
and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec is set by the Machine Config Operator from the MachineOSConfig object this build is attached to. + This field describes the location of the final image, which will be pushed by the build once complete. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. + rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - machineConfig + - machineOSConfig + - renderedImagePushSpec + type: object + x-kubernetes-validations: + - message: machineOSBuildSpec is immutable once set + rule: self == oldSelf + status: + description: status describes the last observed state of this machine + os build. + properties: + buildEnd: + description: |- + buildEnd is the timestamp corresponding to completion of the builder backend. + When omitted the build has either not been started, or is in progress. + It will be populated once the build completes, fails or is interrupted. 
+ format: date-time + type: string + x-kubernetes-validations: + - message: buildEnd is immutable once set + rule: self == oldSelf + buildStart: + description: buildStart is the timestamp corresponding to the build + controller initiating the build backend for this MachineOSBuild. + format: date-time + type: string + x-kubernetes-validations: + - message: buildStart is immutable once set + rule: self == oldSelf + builder: + description: builder describes the image builder backend used for + this build. + properties: + imageBuilderType: + description: |- + imageBuilderType describes the type of image builder used to build this image. + Valid values are Job only. + When set to Job, a pod based builder, using buildah, is launched to build the specified image. + type: string + job: + description: |- + job is a reference to the job object that is managing the image build. + This is required if the imageBuilderType is Job, and forbidden otherwise. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. 
+ rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + required: + - imageBuilderType + type: object + x-kubernetes-validations: + - message: job is required when imageBuilderType is Job, and forbidden + otherwise + rule: 'has(self.imageBuilderType) && self.imageBuilderType == ''Job'' + ? has(self.job) : !has(self.job)' + conditions: + description: |- + conditions are state related conditions for the build. Valid types are: + Prepared, Building, Failed, Interrupted, and Succeeded. + Once a Build is marked as Failed, Interrupted or Succeeded, no future conditions can be set. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: once a Failed condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Failed'' && x.status==''True'') + ? 
self==oldSelf : true' + - message: once an Interrupted condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Interrupted'' && x.status==''True'') + ? self==oldSelf : true' + - message: once an Succeeded condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Succeeded'' && x.status==''True'') + ? self==oldSelf : true' + digestedImagePushSpec: + description: |- + digestedImagePushSpec describes the fully qualified push spec produced by this build. + The format of the push spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + relatedObjects: + description: |- + relatedObjects is a list of references to ephemeral objects such as ConfigMaps or Secrets that are meant to be consumed while the build process runs. + After a successful build or when this MachineOSBuild is deleted, these ephemeral objects will be removed. + In the event of a failed build, the objects will remain until the build is removed to allow for inspection. + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' 
and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + maxItems: 10 + type: array + x-kubernetes-list-map-keys: + - name + - resource + x-kubernetes-list-type: map + type: object + x-kubernetes-validations: + - message: buildEnd must be after buildStart + rule: 'has(self.buildEnd) ? has(self.buildStart) && timestamp(self.buildStart) + < timestamp(self.buildEnd) : true' + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..c4dbe5637f --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosbuilds.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSBuild + listKind: MachineOSBuildList + plural: machineosbuilds + singular: machineosbuild + scope: Cluster + versions: + - additionalPrinterColumns: + - 
jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSBuild describes a build process managed and deployed by the MCO + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + spec describes the configuration of the machine os build. + It is immutable once set. + properties: + machineConfig: + description: machineConfig points to the rendered MachineConfig resource + to be included in this image build. + properties: + name: + description: |- + name is the name of the rendered MachineConfig object. 
+ This value should be between 10 and 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + minLength: 10 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + machineOSConfig: + description: machineOSConfig references the MachineOSConfig resource + that this image build extends. + properties: + name: + description: |- + name of the MachineOSConfig. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec is set by the Machine Config Operator from the MachineOSConfig object this build is attached to. + This field describes the location of the final image, which will be pushed by the build once complete. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. 
+ rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - machineConfig + - machineOSConfig + - renderedImagePushSpec + type: object + x-kubernetes-validations: + - message: machineOSBuildSpec is immutable once set + rule: self == oldSelf + status: + description: status describes the last observed state of this machine + os build. + properties: + buildEnd: + description: |- + buildEnd is the timestamp corresponding to completion of the builder backend. + When omitted the build has either not been started, or is in progress. + It will be populated once the build completes, fails or is interrupted. + format: date-time + type: string + x-kubernetes-validations: + - message: buildEnd is immutable once set + rule: self == oldSelf + buildStart: + description: buildStart is the timestamp corresponding to the build + controller initiating the build backend for this MachineOSBuild. + format: date-time + type: string + x-kubernetes-validations: + - message: buildStart is immutable once set + rule: self == oldSelf + builder: + description: builder describes the image builder backend used for + this build. + properties: + imageBuilderType: + description: |- + imageBuilderType describes the type of image builder used to build this image. + Valid values are Job only. + When set to Job, a pod based builder, using buildah, is launched to build the specified image. + type: string + job: + description: |- + job is a reference to the job object that is managing the image build. + This is required if the imageBuilderType is Job, and forbidden otherwise. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. 
+ maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + required: + - imageBuilderType + type: object + x-kubernetes-validations: + - message: job is required when imageBuilderType is Job, and forbidden + otherwise + rule: 'has(self.imageBuilderType) && self.imageBuilderType == ''Job'' + ? has(self.job) : !has(self.job)' + conditions: + description: |- + conditions are state related conditions for the build. Valid types are: + Prepared, Building, Failed, Interrupted, and Succeeded. + Once a Build is marked as Failed, Interrupted or Succeeded, no future conditions can be set. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: once a Failed condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Failed'' && x.status==''True'') + ? self==oldSelf : true' + - message: once an Interrupted condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Interrupted'' && x.status==''True'') + ? self==oldSelf : true' + - message: once an Succeeded condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Succeeded'' && x.status==''True'') + ? self==oldSelf : true' + digestedImagePushSpec: + description: |- + digestedImagePushSpec describes the fully qualified push spec produced by this build. + The format of the push spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. 
+ maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + relatedObjects: + description: |- + relatedObjects is a list of references to ephemeral objects such as ConfigMaps or Secrets that are meant to be consumed while the build process runs. + After a successful build or when this MachineOSBuild is deleted, these ephemeral objects will be removed. + In the event of a failed build, the objects will remain until the build is removed to allow for inspection. + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + maxItems: 10 + type: array + x-kubernetes-list-map-keys: + - name + - resource + x-kubernetes-list-type: map + type: object + x-kubernetes-validations: + - message: buildEnd must be after buildStart + rule: 'has(self.buildEnd) ? 
has(self.buildStart) && timestamp(self.buildStart) + < timestamp(self.buildEnd) : true' + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..f3ef8ff277 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,402 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosbuilds.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSBuild + listKind: MachineOSBuildList + plural: machineosbuilds + singular: machineosbuild + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + - jsonPath: 
.metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSBuild describes a build process managed and deployed by the MCO + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + spec describes the configuration of the machine os build. + It is immutable once set. + properties: + machineConfig: + description: machineConfig points to the rendered MachineConfig resource + to be included in this image build. + properties: + name: + description: |- + name is the name of the rendered MachineConfig object. + This value should be between 10 and 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + minLength: 10 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. 
+ rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + machineOSConfig: + description: machineOSConfig references the MachineOSConfig resource + that this image build extends. + properties: + name: + description: |- + name of the MachineOSConfig. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec is set by the Machine Config Operator from the MachineOSConfig object this build is attached to. + This field describes the location of the final image, which will be pushed by the build once complete. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. + rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - machineConfig + - machineOSConfig + - renderedImagePushSpec + type: object + x-kubernetes-validations: + - message: machineOSBuildSpec is immutable once set + rule: self == oldSelf + status: + description: status describes the last observed state of this machine + os build. 
+ properties: + buildEnd: + description: |- + buildEnd is the timestamp corresponding to completion of the builder backend. + When omitted the build has either not been started, or is in progress. + It will be populated once the build completes, fails or is interrupted. + format: date-time + type: string + x-kubernetes-validations: + - message: buildEnd is immutable once set + rule: self == oldSelf + buildStart: + description: buildStart is the timestamp corresponding to the build + controller initiating the build backend for this MachineOSBuild. + format: date-time + type: string + x-kubernetes-validations: + - message: buildStart is immutable once set + rule: self == oldSelf + builder: + description: builder describes the image builder backend used for + this build. + properties: + imageBuilderType: + description: |- + imageBuilderType describes the type of image builder used to build this image. + Valid values are Job only. + When set to Job, a pod based builder, using buildah, is launched to build the specified image. + type: string + job: + description: |- + job is a reference to the job object that is managing the image build. + This is required if the imageBuilderType is Job, and forbidden otherwise. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of + lower case alphanumeric characters, '-' or '.', and must + start and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + required: + - imageBuilderType + type: object + x-kubernetes-validations: + - message: job is required when imageBuilderType is Job, and forbidden + otherwise + rule: 'has(self.imageBuilderType) && self.imageBuilderType == ''Job'' + ? has(self.job) : !has(self.job)' + conditions: + description: |- + conditions are state related conditions for the build. Valid types are: + Prepared, Building, Failed, Interrupted, and Succeeded. + Once a Build is marked as Failed, Interrupted or Succeeded, no future conditions can be set. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 8 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: once a Failed condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Failed'' && x.status==''True'') + ? self==oldSelf : true' + - message: once an Interrupted condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Interrupted'' && x.status==''True'') + ? self==oldSelf : true' + - message: once an Succeeded condition is set, conditions are immutable + rule: 'oldSelf.exists(x, x.type==''Succeeded'' && x.status==''True'') + ? self==oldSelf : true' + digestedImagePushSpec: + description: |- + digestedImagePushSpec describes the fully qualified push spec produced by this build. + The format of the push spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + relatedObjects: + description: |- + relatedObjects is a list of references to ephemeral objects such as ConfigMaps or Secrets that are meant to be consumed while the build process runs. 
+ After a successful build or when this MachineOSBuild is deleted, these ephemeral objects will be removed. + In the event of a failed build, the objects will remain until the build is removed to allow for inspection. + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start + and end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. 
+ This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + maxItems: 10 + type: array + x-kubernetes-list-map-keys: + - name + - resource + x-kubernetes-list-type: map + type: object + x-kubernetes-validations: + - message: buildEnd must be after buildStart + rule: 'has(self.buildEnd) ? has(self.buildStart) && timestamp(self.buildStart) + < timestamp(self.buildEnd) : true' + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..98a3599d50 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-CustomNoUpgrade.crd.yaml @@ -0,0 +1,351 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosconfigs.machineconfiguration.openshift.io +spec: + 
group: machineconfiguration.openshift.io + names: + kind: MachineOSConfig + listKind: MachineOSConfigList + plural: machineosconfigs + singular: machineosconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSConfig describes the configuration for a build process managed by the MCO + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machineosconfig + properties: + baseImagePullSecret: + description: |- + baseImagePullSecret is the secret used to pull the base image. + Must live in the openshift-machine-config-operator namespace if provided. + Defaults to using the cluster-wide pull secret if not specified. This is provided during install time of the cluster, and lives in the openshift-config namespace as a secret. + properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. 
+ maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + containerFile: + description: |- + containerFile describes the custom data the user has specified to build into the image. + This is also commonly called a Dockerfile and you can treat it as such. The content is the content of your Dockerfile. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + This is a list indexed by architecture name (e.g. AMD64), and allows specifying one containerFile per arch, up to 4. + items: + description: MachineOSContainerfile contains all custom content + the user wants built into the image + properties: + containerfileArch: + default: NoArch + description: |- + containerfileArch describes the architecture this containerfile is to be built for. + This arch is optional. If the user does not specify an architecture, it is assumed + that the content can be applied to all architectures, or in a single arch cluster: the only architecture. + enum: + - ARM64 + - AMD64 + - PPC64LE + - S390X + - NoArch + type: string + content: + description: |- + content is an embedded Containerfile/Dockerfile that defines the contents to be built into your image. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + for example, this would add the tree package to your hosts: + FROM configs AS final + RUN rpm-ostree install tree && \ + ostree container commit + This is a required field and can have a maximum length of **4096** characters. 
+ maxLength: 4096 + type: string + required: + - content + type: object + maxItems: 4 + minItems: 0 + type: array + x-kubernetes-list-map-keys: + - containerfileArch + x-kubernetes-list-type: map + imageBuilder: + description: |- + imageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig. + Currently supported type(s): Job + properties: + imageBuilderType: + description: |- + imageBuilderType specifies the backend to be used to build the image. + Valid options are: Job + enum: + - Job + type: string + required: + - imageBuilderType + type: object + machineConfigPool: + description: |- + machineConfigPool is the pool which the build is for. + The Machine Config Operator will perform the build and roll out the built image to the specified pool. + properties: + name: + description: |- + name of the MachineConfigPool object. + This value should be at most 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSecret: + description: |- + renderedImagePushSecret is the secret used to connect to a user registry. + The final image push and pull secrets should be separate and assume the principal of least privilege. + The push secret with write privilege is only required to be present on the node hosting the MachineConfigController pod. + The pull secret with read only privileges is required on all nodes. + By separating the two secrets, the risk of write credentials becoming compromised is reduced. 
+ properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec describes the location of the final image. + The MachineOSConfig object will use the in cluster image registry configuration. + If you wish to use a mirror or any other settings specific to registries.conf, please specify those in the cluster wide registries.conf via the cluster image.config, ImageContentSourcePolicies, ImageDigestMirrorSet, or ImageTagMirrorSet objects. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. 
+ rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - imageBuilder + - machineConfigPool + - renderedImagePushSecret + - renderedImagePushSpec + type: object + status: + description: status describes the status of the machineosconfig + properties: + conditions: + description: conditions are state related conditions for the object. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentImagePullSpec: + description: |- + currentImagePullSpec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This includes the sha256 image digest. + This is generated when the Machine Config Operator's build controller successfully completes the build, and is populated from the corresponding + MachineOSBuild object's FinalImagePushSpec. This may change after completion in reaction to spec changes that would cause a new image build, + but will not be removed. + The format of the image pull spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. 
+ maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + machineOSBuild: + description: machineOSBuild is a reference to the MachineOSBuild object + for this MachineOSConfig, which contains the status for the image + build. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + observedGeneration: + description: observedGeneration represents the generation of the MachineOSConfig + object observed by the Machine Config Operator's build controller. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards + rule: self >= oldSelf + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..bdfa6fcaba --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,351 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + 
api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSConfig + listKind: MachineOSConfigList + plural: machineosconfigs + singular: machineosconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSConfig describes the configuration for a build process managed by the MCO + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machineosconfig + properties: + baseImagePullSecret: + description: |- + baseImagePullSecret is the secret used to pull the base image. + Must live in the openshift-machine-config-operator namespace if provided. + Defaults to using the cluster-wide pull secret if not specified. This is provided during install time of the cluster, and lives in the openshift-config namespace as a secret. 
+ properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + containerFile: + description: |- + containerFile describes the custom data the user has specified to build into the image. + This is also commonly called a Dockerfile and you can treat it as such. The content is the content of your Dockerfile. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + This is a list indexed by architecture name (e.g. AMD64), and allows specifying one containerFile per arch, up to 4. + items: + description: MachineOSContainerfile contains all custom content + the user wants built into the image + properties: + containerfileArch: + default: NoArch + description: |- + containerfileArch describes the architecture this containerfile is to be built for. + This arch is optional. If the user does not specify an architecture, it is assumed + that the content can be applied to all architectures, or in a single arch cluster: the only architecture. + enum: + - ARM64 + - AMD64 + - PPC64LE + - S390X + - NoArch + type: string + content: + description: |- + content is an embedded Containerfile/Dockerfile that defines the contents to be built into your image. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. 
+ for example, this would add the tree package to your hosts: + FROM configs AS final + RUN rpm-ostree install tree && \ + ostree container commit + This is a required field and can have a maximum length of **4096** characters. + maxLength: 4096 + type: string + required: + - content + type: object + maxItems: 4 + minItems: 0 + type: array + x-kubernetes-list-map-keys: + - containerfileArch + x-kubernetes-list-type: map + imageBuilder: + description: |- + imageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig. + Currently supported type(s): Job + properties: + imageBuilderType: + description: |- + imageBuilderType specifies the backend to be used to build the image. + Valid options are: Job + enum: + - Job + type: string + required: + - imageBuilderType + type: object + machineConfigPool: + description: |- + machineConfigPool is the pool which the build is for. + The Machine Config Operator will perform the build and roll out the built image to the specified pool. + properties: + name: + description: |- + name of the MachineConfigPool object. + This value should be at most 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSecret: + description: |- + renderedImagePushSecret is the secret used to connect to a user registry. + The final image push and pull secrets should be separate and assume the principal of least privilege. + The push secret with write privilege is only required to be present on the node hosting the MachineConfigController pod. 
+ The pull secret with read only privileges is required on all nodes. + By separating the two secrets, the risk of write credentials becoming compromised is reduced. + properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec describes the location of the final image. + The MachineOSConfig object will use the in cluster image registry configuration. + If you wish to use a mirror or any other settings specific to registries.conf, please specify those in the cluster wide registries.conf via the cluster image.config, ImageContentSourcePolicies, ImageDigestMirrorSet, or ImageTagMirrorSet objects. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. 
+ rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - imageBuilder + - machineConfigPool + - renderedImagePushSecret + - renderedImagePushSpec + type: object + status: + description: status describes the status of the machineosconfig + properties: + conditions: + description: conditions are state related conditions for the object. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentImagePullSpec: + description: |- + currentImagePullSpec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This includes the sha256 image digest. + This is generated when the Machine Config Operator's build controller successfully completes the build, and is populated from the corresponding + MachineOSBuild object's FinalImagePushSpec. This may change after completion in reaction to spec changes that would cause a new image build, + but will not be removed. + The format of the image pull spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. 
+ maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + machineOSBuild: + description: machineOSBuild is a reference to the MachineOSBuild object + for this MachineOSConfig, which contains the status for the image + build. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + observedGeneration: + description: observedGeneration represents the generation of the MachineOSConfig + object observed by the Machine Config Operator's build controller. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards + rule: self >= oldSelf + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..d420b15b84 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,351 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2090 + 
api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSConfig + listKind: MachineOSConfigList + plural: machineosconfigs + singular: machineosconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + MachineOSConfig describes the configuration for a build process managed by the MCO + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machineosconfig + properties: + baseImagePullSecret: + description: |- + baseImagePullSecret is the secret used to pull the base image. + Must live in the openshift-machine-config-operator namespace if provided. + Defaults to using the cluster-wide pull secret if not specified. This is provided during install time of the cluster, and lives in the openshift-config namespace as a secret. 
+ properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + containerFile: + description: |- + containerFile describes the custom data the user has specified to build into the image. + This is also commonly called a Dockerfile and you can treat it as such. The content is the content of your Dockerfile. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. + This is a list indexed by architecture name (e.g. AMD64), and allows specifying one containerFile per arch, up to 4. + items: + description: MachineOSContainerfile contains all custom content + the user wants built into the image + properties: + containerfileArch: + default: NoArch + description: |- + containerfileArch describes the architecture this containerfile is to be built for. + This arch is optional. If the user does not specify an architecture, it is assumed + that the content can be applied to all architectures, or in a single arch cluster: the only architecture. + enum: + - ARM64 + - AMD64 + - PPC64LE + - S390X + - NoArch + type: string + content: + description: |- + content is an embedded Containerfile/Dockerfile that defines the contents to be built into your image. + See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. 
+ for example, this would add the tree package to your hosts: + FROM configs AS final + RUN rpm-ostree install tree && \ + ostree container commit + This is a required field and can have a maximum length of **4096** characters. + maxLength: 4096 + type: string + required: + - content + type: object + maxItems: 4 + minItems: 0 + type: array + x-kubernetes-list-map-keys: + - containerfileArch + x-kubernetes-list-type: map + imageBuilder: + description: |- + imageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig. + Currently supported type(s): Job + properties: + imageBuilderType: + description: |- + imageBuilderType specifies the backend to be used to build the image. + Valid options are: Job + enum: + - Job + type: string + required: + - imageBuilderType + type: object + machineConfigPool: + description: |- + machineConfigPool is the pool which the build is for. + The Machine Config Operator will perform the build and roll out the built image to the specified pool. + properties: + name: + description: |- + name of the MachineConfigPool object. + This value should be at most 253 characters, and must contain only lowercase + alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSecret: + description: |- + renderedImagePushSecret is the secret used to connect to a user registry. + The final image push and pull secrets should be separate and assume the principal of least privilege. + The push secret with write privilege is only required to be present on the node hosting the MachineConfigController pod. 
+ The pull secret with read only privileges is required on all nodes. + By separating the two secrets, the risk of write credentials becoming compromised is reduced. + properties: + name: + description: |- + name is the name of the secret used to push or pull this MachineOSConfig object. + Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. + This secret must be in the openshift-machine-config-operator namespace. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + renderedImagePushSpec: + description: |- + renderedImagePushSpec describes the location of the final image. + The MachineOSConfig object will use the in cluster image registry configuration. + If you wish to use a mirror or any other settings specific to registries.conf, please specify those in the cluster wide registries.conf via the cluster image.config, ImageContentSourcePolicies, ImageDigestMirrorSet, or ImageTagMirrorSet objects. + The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. + The length of the push spec must be between 1 to 447 characters. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. 
+ rule: self.matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_]{1,61})*/[a-zA-Z0-9-_.]+:[a-zA-Z0-9._-]+$') + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$') + required: + - imageBuilder + - machineConfigPool + - renderedImagePushSecret + - renderedImagePushSpec + type: object + status: + description: status describes the status of the machineosconfig + properties: + conditions: + description: conditions are state related conditions for the object. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentImagePullSpec: + description: |- + currentImagePullSpec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This includes the sha256 image digest. + This is generated when the Machine Config Operator's build controller successfully completes the build, and is populated from the corresponding + MachineOSBuild object's FinalImagePushSpec. This may change after completion in reaction to spec changes that would cause a new image build, + but will not be removed. + The format of the image pull spec is: host[:port][/namespace]/name@sha256:, + where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. + The length of the whole spec must be between 1 to 447 characters. 
+ maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + machineOSBuild: + description: machineOSBuild is a reference to the MachineOSBuild object + for this MachineOSConfig, which contains the status for the image + build. + properties: + group: + description: |- + group of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + Example: "", "apps", "build.openshift.io", etc. + maxLength: 253 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + name: + description: |- + name of the referent. + The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: a lowercase RFC 1123 subdomain must consist of lower + case alphanumeric characters, '-' or '.', and must start and + end with an alphanumeric character. + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + namespace: + description: |- + namespace of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. 
+ maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + resource: + description: |- + resource of the referent. + This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, + and should start and end with an alphanumeric character. + Example: "deployments", "deploymentconfigs", "pods", etc. + maxLength: 63 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the value must consist of only lowercase alphanumeric + characters and hyphens + rule: '!format.dns1123Label().validate(self).hasValue()' + required: + - group + - name + - resource + type: object + observedGeneration: + description: observedGeneration represents the generation of the MachineOSConfig + object observed by the Machine Config Operator's build controller. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards + rule: self >= oldSelf + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go index 9ad13130fe..94f9acbd5a 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go @@ -431,6 +431,22 @@ func (in *ImageRegistryBundle) DeepCopy() *ImageRegistryBundle { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageSecretObjectReference) DeepCopyInto(out *ImageSecretObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSecretObjectReference. +func (in *ImageSecretObjectReference) DeepCopy() *ImageSecretObjectReference { + if in == nil { + return nil + } + out := new(ImageSecretObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { *out = *in @@ -711,6 +727,22 @@ func (in *MachineConfigPoolList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigPoolReference) DeepCopyInto(out *MachineConfigPoolReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigPoolReference. +func (in *MachineConfigPoolReference) DeepCopy() *MachineConfigPoolReference { + if in == nil { + return nil + } + out := new(MachineConfigPoolReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineConfigPoolSpec) DeepCopyInto(out *MachineConfigPoolSpec) { *out = *in @@ -806,6 +838,22 @@ func (in *MachineConfigPoolStatusConfiguration) DeepCopy() *MachineConfigPoolSta return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineConfigReference) DeepCopyInto(out *MachineConfigReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineConfigReference. 
+func (in *MachineConfigReference) DeepCopy() *MachineConfigReference { + if in == nil { + return nil + } + out := new(MachineConfigReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineConfigSpec) DeepCopyInto(out *MachineConfigSpec) { *out = *in @@ -833,6 +881,313 @@ func (in *MachineConfigSpec) DeepCopy() *MachineConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSBuild) DeepCopyInto(out *MachineOSBuild) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSBuild. +func (in *MachineOSBuild) DeepCopy() *MachineOSBuild { + if in == nil { + return nil + } + out := new(MachineOSBuild) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineOSBuild) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSBuildList) DeepCopyInto(out *MachineOSBuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineOSBuild, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSBuildList. 
+func (in *MachineOSBuildList) DeepCopy() *MachineOSBuildList { + if in == nil { + return nil + } + out := new(MachineOSBuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineOSBuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSBuildSpec) DeepCopyInto(out *MachineOSBuildSpec) { + *out = *in + out.MachineConfig = in.MachineConfig + out.MachineOSConfig = in.MachineOSConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSBuildSpec. +func (in *MachineOSBuildSpec) DeepCopy() *MachineOSBuildSpec { + if in == nil { + return nil + } + out := new(MachineOSBuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSBuildStatus) DeepCopyInto(out *MachineOSBuildStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Builder != nil { + in, out := &in.Builder, &out.Builder + *out = new(MachineOSBuilderReference) + (*in).DeepCopyInto(*out) + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.BuildStart != nil { + in, out := &in.BuildStart, &out.BuildStart + *out = (*in).DeepCopy() + } + if in.BuildEnd != nil { + in, out := &in.BuildEnd, &out.BuildEnd + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSBuildStatus. 
+func (in *MachineOSBuildStatus) DeepCopy() *MachineOSBuildStatus { + if in == nil { + return nil + } + out := new(MachineOSBuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSBuilderReference) DeepCopyInto(out *MachineOSBuilderReference) { + *out = *in + if in.Job != nil { + in, out := &in.Job, &out.Job + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSBuilderReference. +func (in *MachineOSBuilderReference) DeepCopy() *MachineOSBuilderReference { + if in == nil { + return nil + } + out := new(MachineOSBuilderReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSConfig) DeepCopyInto(out *MachineOSConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSConfig. +func (in *MachineOSConfig) DeepCopy() *MachineOSConfig { + if in == nil { + return nil + } + out := new(MachineOSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineOSConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineOSConfigList) DeepCopyInto(out *MachineOSConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineOSConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSConfigList. +func (in *MachineOSConfigList) DeepCopy() *MachineOSConfigList { + if in == nil { + return nil + } + out := new(MachineOSConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineOSConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSConfigReference) DeepCopyInto(out *MachineOSConfigReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSConfigReference. +func (in *MachineOSConfigReference) DeepCopy() *MachineOSConfigReference { + if in == nil { + return nil + } + out := new(MachineOSConfigReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineOSConfigSpec) DeepCopyInto(out *MachineOSConfigSpec) { + *out = *in + out.MachineConfigPool = in.MachineConfigPool + out.ImageBuilder = in.ImageBuilder + if in.BaseImagePullSecret != nil { + in, out := &in.BaseImagePullSecret, &out.BaseImagePullSecret + *out = new(ImageSecretObjectReference) + **out = **in + } + out.RenderedImagePushSecret = in.RenderedImagePushSecret + if in.Containerfile != nil { + in, out := &in.Containerfile, &out.Containerfile + *out = make([]MachineOSContainerfile, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSConfigSpec. +func (in *MachineOSConfigSpec) DeepCopy() *MachineOSConfigSpec { + if in == nil { + return nil + } + out := new(MachineOSConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSConfigStatus) DeepCopyInto(out *MachineOSConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MachineOSBuild != nil { + in, out := &in.MachineOSBuild, &out.MachineOSBuild + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSConfigStatus. +func (in *MachineOSConfigStatus) DeepCopy() *MachineOSConfigStatus { + if in == nil { + return nil + } + out := new(MachineOSConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineOSContainerfile) DeepCopyInto(out *MachineOSContainerfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSContainerfile. +func (in *MachineOSContainerfile) DeepCopy() *MachineOSContainerfile { + if in == nil { + return nil + } + out := new(MachineOSContainerfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineOSImageBuilder) DeepCopyInto(out *MachineOSImageBuilder) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineOSImageBuilder. +func (in *MachineOSImageBuilder) DeepCopy() *MachineOSImageBuilder { + if in == nil { + return nil + } + out := new(MachineOSImageBuilder) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkInfo) DeepCopyInto(out *NetworkInfo) { *out = *in @@ -854,6 +1209,22 @@ func (in *NetworkInfo) DeepCopy() *NetworkInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PinnedImageSetRef) DeepCopyInto(out *PinnedImageSetRef) { *out = *in diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml index 0766b10691..bb386656df 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.featuregated-crd-manifests.yaml @@ -32,6 +32,8 @@ controllerconfigs.machineconfiguration.openshift.io: - BareMetalLoadBalancer - GCPClusterHostedDNS - GCPLabelsTags + - HighlyAvailableArbiter + - NutanixMultiSubnets - VSphereControlPlaneMachineSet - VSphereMultiNetworks - VSphereMultiVCenters @@ -169,3 +171,69 @@ machineconfigpools.machineconfiguration.openshift.io: TopLevelFeatureGates: [] Version: v1 +machineosbuilds.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2090 + CRDName: machineosbuilds.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OnClusterBuild + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: MachineOSBuild + Labels: + openshift.io/operator-managed: "" + PluralName: machineosbuilds + PrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + - jsonPath: .metadata.creationTimestamp + name: 
Age + type: date + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - OnClusterBuild + Version: v1 + +machineosconfigs.machineconfiguration.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2090 + CRDName: machineosconfigs.machineconfiguration.openshift.io + Capability: "" + Category: "" + FeatureGates: + - OnClusterBuild + FilenameOperatorName: machine-config + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_80" + GroupName: machineconfiguration.openshift.io + HasStatus: true + KindName: MachineOSConfig + Labels: + openshift.io/operator-managed: "" + PluralName: machineosconfigs + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - OnClusterBuild + Version: v1 + diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go index 29a3a2a902..22ead771e6 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go @@ -23,7 +23,9 @@ func (CertExpiry) SwaggerDoc() map[string]string { } var map_ContainerRuntimeConfig = map[string]string{ - "": "ContainerRuntimeConfig describes a customized Container Runtime configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ContainerRuntimeConfig describes a customized Container Runtime configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec contains the desired container runtime configuration.", + "status": "status contains observed information about the container runtime configuration.", } func (ContainerRuntimeConfig) SwaggerDoc() map[string]string { @@ -53,7 +55,8 
@@ func (ContainerRuntimeConfigList) SwaggerDoc() map[string]string { var map_ContainerRuntimeConfigSpec = map[string]string{ "": "ContainerRuntimeConfigSpec defines the desired state of ContainerRuntimeConfig", - "machineConfigPoolSelector": "MachineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. A nil selector will result in no pools being selected.", + "machineConfigPoolSelector": "machineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. A nil selector will result in no pools being selected.", + "containerRuntimeConfig": "containerRuntimeConfig defines the tuneables of the container runtime.", } func (ContainerRuntimeConfigSpec) SwaggerDoc() map[string]string { @@ -97,7 +100,9 @@ func (ControllerCertificate) SwaggerDoc() map[string]string { } var map_ControllerConfig = map[string]string{ - "": "ControllerConfig describes configuration for MachineConfigController. This is currently only used to drive the MachineConfig objects generated by the TemplateController.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ControllerConfig describes configuration for MachineConfigController. This is currently only used to drive the MachineConfig objects generated by the TemplateController.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec contains the desired controller config configuration.", + "status": "status contains observed information about the controller config.", } func (ControllerConfig) SwaggerDoc() map[string]string { @@ -120,23 +125,23 @@ var map_ControllerConfigSpec = map[string]string{ "etcdDiscoveryDomain": "etcdDiscoveryDomain is deprecated, use Infra.Status.EtcdDiscoveryDomain instead", "kubeAPIServerServingCAData": "kubeAPIServerServingCAData managed Kubelet to API Server Cert... 
Rotated automatically", "rootCAData": "rootCAData specifies the root CA data", - "cloudProviderCAData": "cloudProvider specifies the cloud provider CA data", + "cloudProviderCAData": "cloudProviderCAData specifies the cloud provider CA data", "additionalTrustBundle": "additionalTrustBundle is a certificate bundle that will be added to the nodes trusted certificate store.", "imageRegistryBundleUserData": "imageRegistryBundleUserData is Image Registry Data provided by the user", "imageRegistryBundleData": "imageRegistryBundleData is the ImageRegistryData", "pullSecret": "pullSecret is the default pull secret that needs to be installed on all machines.", "internalRegistryPullSecret": "internalRegistryPullSecret is the pull secret for the internal registry, used by rpm-ostree to pull images from the internal registry if present", "images": "images is map of images that are used by the controller to render templates under ./templates/", - "baseOSContainerImage": "BaseOSContainerImage is the new-format container image for operating system updates.", - "baseOSExtensionsContainerImage": "BaseOSExtensionsContainerImage is the matching extensions container for the new-format container", - "osImageURL": "OSImageURL is the old-format container image that contains the OS update payload.", + "baseOSContainerImage": "baseOSContainerImage is the new-format container image for operating system updates.", + "baseOSExtensionsContainerImage": "baseOSExtensionsContainerImage is the matching extensions container for the new-format container", + "osImageURL": "osImageURL is the old-format container image that contains the OS update payload.", "releaseImage": "releaseImage is the image used when installing the cluster", "proxy": "proxy holds the current proxy configuration for the nodes", "infra": "infra holds the infrastructure details", "dns": "dns holds the cluster dns details", "ipFamilies": "ipFamilies indicates the IP families in use by the cluster network", "networkType": 
"networkType holds the type of network the cluster is using XXX: this is temporary and will be dropped as soon as possible in favor of a better support to start network related services the proper way. Nobody is also changing this once the cluster is up and running the first time, so, disallow regeneration if this changes.", - "network": "Network contains additional network related information", + "network": "network contains additional network related information", } func (ControllerConfigSpec) SwaggerDoc() map[string]string { @@ -178,7 +183,9 @@ func (ImageRegistryBundle) SwaggerDoc() map[string]string { } var map_KubeletConfig = map[string]string{ - "": "KubeletConfig describes a customized Kubelet configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "KubeletConfig describes a customized Kubelet configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec contains the desired kubelet configuration.", + "status": "status contains observed information about the kubelet configuration.", } func (KubeletConfig) SwaggerDoc() map[string]string { @@ -208,7 +215,7 @@ func (KubeletConfigList) SwaggerDoc() map[string]string { var map_KubeletConfigSpec = map[string]string{ "": "KubeletConfigSpec defines the desired state of KubeletConfig", - "machineConfigPoolSelector": "MachineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. A nil selector will result in no pools being selected.", + "machineConfigPoolSelector": "machineConfigPoolSelector selects which pools the KubeletConfig shoud apply to. A nil selector will result in no pools being selected.", "kubeletConfig": "kubeletConfig fields are defined in kubernetes upstream. Please refer to the types defined in the version/commit used by OpenShift of the upstream kubernetes. 
It's important to note that, since the fields of the kubelet configuration are directly fetched from upstream the validation of those values is handled directly by the kubelet. Please refer to the upstream version of the relevant kubernetes for the valid values of these fields. Invalid values of the kubelet configuration fields may render cluster nodes unusable.", "tlsSecurityProfile": "If unset, the default is based on the apiservers.config.openshift.io/cluster resource. Note that only Old and Intermediate profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", } @@ -244,7 +251,9 @@ func (MachineConfigList) SwaggerDoc() map[string]string { } var map_MachineConfigPool = map[string]string{ - "": "MachineConfigPool describes a pool of MachineConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "MachineConfigPool describes a pool of MachineConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec contains the desired machine config pool configuration.", + "status": "status contains observed information about the machine config pool.", } func (MachineConfigPool) SwaggerDoc() map[string]string { @@ -315,9 +324,9 @@ func (MachineConfigPoolStatusConfiguration) SwaggerDoc() map[string]string { var map_MachineConfigSpec = map[string]string{ "": "MachineConfigSpec is the spec for MachineConfig", - "osImageURL": "OSImageURL specifies the remote location that will be used to fetch the OS.", - "baseOSExtensionsContainerImage": "BaseOSExtensionsContainerImage specifies the remote location that will be used to fetch the extensions container matching a new-format OS image", - "config": "Config is a Ignition Config object.", + "osImageURL": "osImageURL specifies the remote location that will be used to fetch the OS.", + "baseOSExtensionsContainerImage": 
"baseOSExtensionsContainerImage specifies the remote location that will be used to fetch the extensions container matching a new-format OS image", + "config": "config is a Ignition Config object.", "kernelArguments": "kernelArguments contains a list of kernel arguments to be added", "extensions": "extensions contains a list of additional features that can be enabled on host", "fips": "fips controls FIPS mode", @@ -330,7 +339,7 @@ func (MachineConfigSpec) SwaggerDoc() map[string]string { var map_NetworkInfo = map[string]string{ "": "Network contains network related configuration", - "mtuMigration": "MTUMigration contains the MTU migration configuration.", + "mtuMigration": "mtuMigration contains the MTU migration configuration.", } func (NetworkInfo) SwaggerDoc() map[string]string { @@ -359,4 +368,173 @@ func (PoolSynchronizerStatus) SwaggerDoc() map[string]string { return map_PoolSynchronizerStatus } +var map_MachineConfigReference = map[string]string{ + "": "Refers to the name of a rendered MachineConfig (e.g., \"rendered-worker-ec40d2965ff81bce7cd7a7e82a680739\", etc.): the build targets this MachineConfig, this is often used to tell us whether we need an update.", + "name": "name is the name of the rendered MachineConfig object. This value should be between 10 and 253 characters, and must contain only lowercase alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character.", +} + +func (MachineConfigReference) SwaggerDoc() map[string]string { + return map_MachineConfigReference +} + +var map_MachineOSBuild = map[string]string{ + "": "MachineOSBuild describes a build process managed and deployed by the MCO Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object metadata.", + "spec": "spec describes the configuration of the machine os build. 
It is immutable once set.", + "status": "status describes the last observed state of this machine os build.", +} + +func (MachineOSBuild) SwaggerDoc() map[string]string { + return map_MachineOSBuild +} + +var map_MachineOSBuildList = map[string]string{ + "": "MachineOSBuildList describes all of the Builds on the system\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list metadata.", + "items": "items contains a collection of MachineOSBuild resources.", +} + +func (MachineOSBuildList) SwaggerDoc() map[string]string { + return map_MachineOSBuildList +} + +var map_MachineOSBuildSpec = map[string]string{ + "": "MachineOSBuildSpec describes information about a build process primarily populated from a MachineOSConfig object.", + "machineConfig": "machineConfig points to the rendered MachineConfig resource to be included in this image build.", + "machineOSConfig": "machineOSConfig references the MachineOSConfig resource that this image build extends.", + "renderedImagePushSpec": "renderedImagePushSpec is set by the Machine Config Operator from the MachineOSConfig object this build is attached to. This field describes the location of the final image, which will be pushed by the build once complete. The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. The length of the push spec must be between 1 to 447 characters.", +} + +func (MachineOSBuildSpec) SwaggerDoc() map[string]string { + return map_MachineOSBuildSpec +} + +var map_MachineOSBuildStatus = map[string]string{ + "": "MachineOSBuildStatus describes the state of a build and other helpful information.", + "conditions": "conditions are state related conditions for the build. Valid types are: Prepared, Building, Failed, Interrupted, and Succeeded. 
Once a Build is marked as Failed, Interrupted or Succeeded, no future conditions can be set.", + "builder": "builder describes the image builder backend used for this build.", + "relatedObjects": "relatedObjects is a list of references to ephemeral objects such as ConfigMaps or Secrets that are meant to be consumed while the build process runs. After a successful build or when this MachineOSBuild is deleted, these ephemeral objects will be removed. In the event of a failed build, the objects will remain until the build is removed to allow for inspection.", + "buildStart": "buildStart is the timestamp corresponding to the build controller initiating the build backend for this MachineOSBuild.", + "buildEnd": "buildEnd is the timestamp corresponding to completion of the builder backend. When omitted the build has either not been started, or is in progress. It will be populated once the build completes, fails or is interrupted.", + "digestedImagePushSpec": "digestedImagePushSpec describes the fully qualified push spec produced by this build. The format of the push spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. The length of the whole spec must be between 1 to 447 characters.", +} + +func (MachineOSBuildStatus) SwaggerDoc() map[string]string { + return map_MachineOSBuildStatus +} + +var map_MachineOSBuilderReference = map[string]string{ + "": "MachineOSBuilderReference describes which ImageBuilder backend to use for this build", + "imageBuilderType": "imageBuilderType describes the type of image builder used to build this image. Valid values are Job only. When set to Job, a pod based builder, using buildah, is launched to build the specified image.", + "job": "job is a reference to the job object that is managing the image build. 
This is required if the imageBuilderType is Job, and forbidden otherwise.", +} + +func (MachineOSBuilderReference) SwaggerDoc() map[string]string { + return map_MachineOSBuilderReference +} + +var map_MachineOSConfigReference = map[string]string{ + "": "MachineOSConfigReference refers to the MachineOSConfig this build is based off of", + "name": "name of the MachineOSConfig. The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character.", +} + +func (MachineOSConfigReference) SwaggerDoc() map[string]string { + return map_MachineOSConfigReference +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "group": "group of the referent. The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character. Example: \"\", \"apps\", \"build.openshift.io\", etc.", + "resource": "resource of the referent. This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, and should start and end with an alphanumeric character. Example: \"deployments\", \"deploymentconfigs\", \"pods\", etc.", + "namespace": "namespace of the referent. This value should consist of at most 63 characters, and of only lowercase alphanumeric characters and hyphens, and should start and end with an alphanumeric character.", + "name": "name of the referent. The name must contain only lowercase alphanumeric characters, '-' or '.' and start/end with an alphanumeric character.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_ImageSecretObjectReference = map[string]string{ + "": "Refers to the name of an image registry push/pull secret needed in the build process.", + "name": "name is the name of the secret used to push or pull this MachineOSConfig object. 
Must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character. This secret must be in the openshift-machine-config-operator namespace.", +} + +func (ImageSecretObjectReference) SwaggerDoc() map[string]string { + return map_ImageSecretObjectReference +} + +var map_MachineConfigPoolReference = map[string]string{ + "": "Refers to the name of a MachineConfigPool (e.g., \"worker\", \"infra\", etc.): the MachineOSBuilder pod validates that the user has provided a valid pool", + "name": "name of the MachineConfigPool object. This value should be at most 253 characters, and must contain only lowercase alphanumeric characters, hyphens and periods, and should start and end with an alphanumeric character.", +} + +func (MachineConfigPoolReference) SwaggerDoc() map[string]string { + return map_MachineConfigPoolReference +} + +var map_MachineOSConfig = map[string]string{ + "": "MachineOSConfig describes the configuration for a build process managed by the MCO Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object metadata.", + "spec": "spec describes the configuration of the machineosconfig", + "status": "status describes the status of the machineosconfig", +} + +func (MachineOSConfig) SwaggerDoc() map[string]string { + return map_MachineOSConfig +} + +var map_MachineOSConfigList = map[string]string{ + "": "MachineOSConfigList describes all configurations for image builds on the system\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list metadata.", + "items": "items contains a collection of MachineOSConfig resources.", +} + +func (MachineOSConfigList) SwaggerDoc() map[string]string { + return map_MachineOSConfigList +} + +var map_MachineOSConfigSpec = map[string]string{ + "": 
"MachineOSConfigSpec describes user-configurable options as well as information about a build process.", + "machineConfigPool": "machineConfigPool is the pool which the build is for. The Machine Config Operator will perform the build and roll out the built image to the specified pool.", + "imageBuilder": "imageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig. Currently supported type(s): Job", + "baseImagePullSecret": "baseImagePullSecret is the secret used to pull the base image. Must live in the openshift-machine-config-operator namespace if provided. Defaults to using the cluster-wide pull secret if not specified. This is provided during install time of the cluster, and lives in the openshift-config namespace as a secret.", + "renderedImagePushSecret": "renderedImagePushSecret is the secret used to connect to a user registry. The final image push and pull secrets should be separate and assume the principal of least privilege. The push secret with write privilege is only required to be present on the node hosting the MachineConfigController pod. The pull secret with read only privileges is required on all nodes. By separating the two secrets, the risk of write credentials becoming compromised is reduced.", + "renderedImagePushSpec": "renderedImagePushSpec describes the location of the final image. The MachineOSConfig object will use the in cluster image registry configuration. If you wish to use a mirror or any other settings specific to registries.conf, please specify those in the cluster wide registries.conf via the cluster image.config, ImageContentSourcePolicies, ImageDigestMirrorSet, or ImageTagMirrorSet objects. The format of the image push spec is: host[:port][/namespace]/name: or svc_name.namespace.svc[:port]/repository/name:. The length of the push spec must be between 1 to 447 characters.", + "containerFile": "containerFile describes the custom data the user has specified to build into the image. 
This is also commonly called a Dockerfile and you can treat it as such. The content is the content of your Dockerfile. See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. This is a list indexed by architecture name (e.g. AMD64), and allows specifying one containerFile per arch, up to 4.", +} + +func (MachineOSConfigSpec) SwaggerDoc() map[string]string { + return map_MachineOSConfigSpec +} + +var map_MachineOSConfigStatus = map[string]string{ + "": "MachineOSConfigStatus describes the status this config object and relates it to the builds associated with this MachineOSConfig", + "conditions": "conditions are state related conditions for the object.", + "observedGeneration": "observedGeneration represents the generation of the MachineOSConfig object observed by the Machine Config Operator's build controller.", + "currentImagePullSpec": "currentImagePullSpec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This includes the sha256 image digest. This is generated when the Machine Config Operator's build controller successfully completes the build, and is populated from the corresponding MachineOSBuild object's FinalImagePushSpec. This may change after completion in reaction to spec changes that would cause a new image build, but will not be removed. The format of the image pull spec is: host[:port][/namespace]/name@sha256:, where the digest must be 64 characters long, and consist only of lowercase hexadecimal characters, a-f and 0-9. 
The length of the whole spec must be between 1 to 447 characters.", + "machineOSBuild": "machineOSBuild is a reference to the MachineOSBuild object for this MachineOSConfig, which contains the status for the image build.", +} + +func (MachineOSConfigStatus) SwaggerDoc() map[string]string { + return map_MachineOSConfigStatus +} + +var map_MachineOSContainerfile = map[string]string{ + "": "MachineOSContainerfile contains all custom content the user wants built into the image", + "containerfileArch": "containerfileArch describes the architecture this containerfile is to be built for. This arch is optional. If the user does not specify an architecture, it is assumed that the content can be applied to all architectures, or in a single arch cluster: the only architecture.", + "content": "content is an embedded Containerfile/Dockerfile that defines the contents to be built into your image. See https://github.com/containers/common/blob/main/docs/Containerfile.5.md for the spec reference. for example, this would add the tree package to your hosts:\n FROM configs AS final\n RUN rpm-ostree install tree && \\n ostree container commit\nThis is a required field and can have a maximum length of **4096** characters.", +} + +func (MachineOSContainerfile) SwaggerDoc() map[string]string { + return map_MachineOSContainerfile +} + +var map_MachineOSImageBuilder = map[string]string{ + "imageBuilderType": "imageBuilderType specifies the backend to be used to build the image. 
Valid options are: Job", +} + +func (MachineOSImageBuilder) SwaggerDoc() map[string]string { + return map_MachineOSImageBuilder +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go index 98b3c48f71..050b5f683e 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineconfignode.go @@ -40,7 +40,7 @@ type MachineConfigNode struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the configuration of the machine config node. - // +kubebuilder:validation:Required + // +required Spec MachineConfigNodeSpec `json:"spec"` // status describes the last observed state of this machine config node. @@ -70,25 +70,25 @@ type MCOObjectReference struct { // and must be at most 253 characters in length. // +kubebuilder:validation:MaxLength:=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } // MachineConfigNodeSpec describes the MachineConfigNode we are managing. type MachineConfigNodeSpec struct { // node contains a reference to the node for this machine config node. - // +kubebuilder:validation:Required + // +required Node MCOObjectReference `json:"node"` // pool contains a reference to the machine config pool that this machine config node's // referenced node belongs to. - // +kubebuilder:validation:Required + // +required Pool MCOObjectReference `json:"pool"` // configVersion holds the desired config version for the node targeted by this machine config node resource. // The desired version represents the machine config the node will attempt to update to. 
This gets set before the machine config operator validates // the new machine config against the current machine config. - // +kubebuilder:validation:Required + // +required ConfigVersion MachineConfigNodeSpecMachineConfigVersion `json:"configVersion"` // pinnedImageSets holds the desired pinned image sets that this node should pin and pull. @@ -109,13 +109,13 @@ type MachineConfigNodeStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // observedGeneration represents the generation observed by the controller. // This field is updated when the controller observes a change to the desiredConfig in the configVersion of the machine config node spec. - // +kubebuilder:validation:Required + // +required ObservedGeneration int64 `json:"observedGeneration,omitempty"` // configVersion describes the current and desired machine config for this node. // The current version represents the current machine config for the node and is updated after a successful update. // The desired version represents the machine config the node will attempt to update to. // This desired machine config has been compared to the current machine config and has been validated by the machine config operator as one that is valid and that exists. - // +kubebuilder:validation:Required + // +required ConfigVersion MachineConfigNodeStatusMachineConfigVersion `json:"configVersion"` // pinnedImageSets describes the current and desired pinned image sets for this node. // The current version is the generation of the pinned image set that has most recently been successfully pulled and pinned on this node. @@ -137,7 +137,7 @@ type MachineConfigNodeStatusPinnedImageSet struct { // and must be at most 253 characters in length. 
// +kubebuilder:validation:MaxLength:=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // currentGeneration is the generation of the pinned image set that has most recently been successfully pulled and pinned on this node. // +optional @@ -180,7 +180,7 @@ type MachineConfigNodeStatusMachineConfigVersion struct { // and must be at most 253 characters in length. // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Desired string `json:"desired"` } @@ -198,7 +198,7 @@ type MachineConfigNodeSpecMachineConfigVersion struct { // and must be at most 253 characters in length. // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Desired string `json:"desired"` } @@ -209,7 +209,7 @@ type MachineConfigNodeSpecPinnedImageSet struct { // and must be at most 253 characters in length. 
// +kubebuilder:validation:MaxLength:=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go index 82ae150c82..d65fd4bce0 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosbuild.go @@ -29,7 +29,7 @@ type MachineOSBuild struct { // spec describes the configuration of the machine os build // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="machineOSBuildSpec is immutable once set" - // +kubebuilder:validation:Required + // +required Spec MachineOSBuildSpec `json:"spec"` // status describes the lst observed state of this machine os build @@ -54,17 +54,17 @@ type MachineOSBuildList struct { type MachineOSBuildSpec struct { // configGeneration tracks which version of MachineOSConfig this build is based off of // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Required + // +required ConfigGeneration int64 `json:"configGeneration"` // desiredConfig is the desired config we want to build an image for. 
- // +kubebuilder:validation:Required + // +required DesiredConfig RenderedMachineConfigReference `json:"desiredConfig"` // machineOSConfig is the config object which the build is based off of - // +kubebuilder:validation:Required + // +required MachineOSConfig MachineOSConfigReference `json:"machineOSConfig"` // version tracks the newest MachineOSBuild for each MachineOSConfig // +kubebuilder:validation:Minimum=1 - // +kubebuilder:validation:Required + // +required Version int64 `json:"version"` // renderedImagePushspec is set from the MachineOSConfig // The format of the image pullspec is: @@ -73,7 +73,7 @@ type MachineOSBuildSpec struct { // +kubebuilder:validation:MaxLength=447 // +kubebuilder:validation:XValidation:rule=`((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$'))`,message="the OCI Image reference must end with a valid :, where '' is 64 characters long and '' is any valid string Or it must be a valid .svc followed by a port, repository, image name, and tag." // +kubebuilder:validation:XValidation:rule=`((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme. Or it must be a valid .svc followed by a port, repository, image name, and tag." - // +kubebuilder:validation:Required + // +required RenderedImagePushspec string `json:"renderedImagePushspec"` } @@ -95,7 +95,7 @@ type MachineOSBuildStatus struct { RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` // buildStart describes when the build started. 
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildStart is immutable once set" - // +kubebuilder:validation:Required + // +required BuildStart *metav1.Time `json:"buildStart"` // buildEnd describes when the build ended. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="buildEnd is immutable once set" @@ -111,7 +111,7 @@ type MachineOSBuildStatus struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.imageBuilderType) && self.imageBuilderType == 'PodImageBuilder' ? true : !has(self.buildPod)",message="buildPod is required when imageBuilderType is PodImageBuilder, and forbidden otherwise" type MachineOSBuilderReference struct { - // ImageBuilderType describes the image builder set in the MachineOSConfig + // imageBuilderType describes the image builder set in the MachineOSConfig // +unionDiscriminator ImageBuilderType MachineOSImageBuilderType `json:"imageBuilderType"` @@ -143,29 +143,29 @@ type RenderedMachineConfigReference struct { // name is the name of the rendered MachineConfig object. // +kubebuilder:validation:MaxLength:=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // group of the referent. - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // resource of the referent. - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` // namespace of the referent. // +optional Namespace string `json:"namespace,omitempty"` // name of the referent. 
- // +kubebuilder:validation:Required + // +required Name string `json:"name"` } // MachineOSConfigReference refers to the MachineOSConfig this build is based off of type MachineOSConfigReference struct { // name of the MachineOSConfig - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go index 35863517a5..1d9f36c36b 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_machineosconfig.go @@ -23,7 +23,7 @@ type MachineOSConfig struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the configuration of the machineosconfig - // +kubebuilder:validation:Required + // +required Spec MachineOSConfigSpec `json:"spec"` // status describes the status of the machineosconfig @@ -47,10 +47,10 @@ type MachineOSConfigList struct { // MachineOSConfigSpec describes user-configurable options as well as information about a build process. type MachineOSConfigSpec struct { // machineConfigPool is the pool which the build is for - // +kubebuilder:validation:Required + // +required MachineConfigPool MachineConfigPoolReference `json:"machineConfigPool"` // buildInputs is where user input options for the build live - // +kubebuilder:validation:Required + // +required BuildInputs BuildInputs `json:"buildInputs"` // buildOutputs is where user input options for the build live // +optional @@ -68,7 +68,7 @@ type MachineOSConfigStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // observedGeneration represents the generation observed by the controller. // this field is updated when the user changes the configuration in BuildSettings or the MCP this object is associated with. 
- // +kubebuilder:validation:Required + // +required ObservedGeneration int64 `json:"observedGeneration,omitempty"` // currentImagePullspec is the fully qualified image pull spec used by the MCO to pull down the new OSImage. This must include sha256. // +kubebuilder:validation:MinLength=1 @@ -105,10 +105,10 @@ type BuildInputs struct { BaseOSImagePullspec string `json:"baseOSImagePullspec,omitempty"` // baseImagePullSecret is the secret used to pull the base image. // must live in the openshift-machine-config-operator namespace - // +kubebuilder:validation:Required + // +required BaseImagePullSecret ImageSecretObjectReference `json:"baseImagePullSecret"` // machineOSImageBuilder describes which image builder will be used in each build triggered by this MachineOSConfig - // +kubebuilder:validation:Required + // +required ImageBuilder *MachineOSImageBuilder `json:"imageBuilder"` // renderedImagePushSecret is the secret used to connect to a user registry. // the final image push and pull secrets should be separate for security concerns. If the final image push secret is somehow exfiltrated, @@ -116,7 +116,7 @@ type BuildInputs struct { // that only gives someone to pull images from the image repository. It's basically the principle of least permissions. // this push secret will be used only by the MachineConfigController pod to push the image to the final destination. Not all nodes will need to push this image, most of them // will only need to pull the image in order to use it. - // +kubebuilder:validation:Required + // +required RenderedImagePushSecret ImageSecretObjectReference `json:"renderedImagePushSecret"` // renderedImagePushspec describes the location of the final image. // the MachineOSConfig object will use the in cluster image registry configuration. 
@@ -127,7 +127,7 @@ type BuildInputs struct { // +kubebuilder:validation:MaxLength=447 // +kubebuilder:validation:XValidation:rule=`((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$'))`,message="the OCI Image reference must end with a valid :, where '' is 64 characters long and '' is any valid string Or it must be a valid .svc followed by a port, repository, image name, and tag." // +kubebuilder:validation:XValidation:rule=`((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme. Or it must be a valid .svc followed by a port, repository, image name, and tag." - // +kubebuilder:validation:Required + // +required RenderedImagePushspec string `json:"renderedImagePushspec"` // releaseVersion is associated with the base OS Image. This is the version of Openshift that the Base Image is associated with. // This field is populated from the machine-config-osimageurl configmap in the openshift-machine-config-operator namespace. @@ -178,7 +178,7 @@ type MachineOSContainerfile struct { // +optional ContainerfileArch ContainerfileArch `json:"containerfileArch"` // content is the custom content to be built - // +kubebuilder:validation:Required + // +required Content string `json:"content"` } @@ -207,7 +207,7 @@ type MachineConfigPoolReference struct { // name of the MachineConfigPool object. 
// +kubebuilder:validation:MaxLength:=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } @@ -215,7 +215,7 @@ type MachineConfigPoolReference struct { type ImageSecretObjectReference struct { // name is the name of the secret used to push or pull this MachineOSConfig object. // this secret must be in the openshift-machine-config-operator namespace. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go index 2718d98deb..9d097311dd 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/types_pinnedimageset.go @@ -25,7 +25,7 @@ type PinnedImageSet struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the configuration of this pinned image set. - // +kubebuilder:validation:Required + // +required Spec PinnedImageSetSpec `json:"spec"` // status describes the last observed state of this pinned image set. @@ -58,7 +58,7 @@ type PinnedImageSetSpec struct { // ] // // These image references should all be by digest, tags aren't allowed. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=500 // +listType=map @@ -71,7 +71,7 @@ type PinnedImageRef struct { // // The format of the image ref is: // host[:port][/namespace]/name@sha256: - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=447 // +kubebuilder:validation:XValidation:rule=`self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml index aee396dfe3..b153e19ba2 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-CustomNoUpgrade.crd.yaml @@ -170,7 +170,7 @@ spec: - resource type: object imageBuilderType: - description: ImageBuilderType describes the image builder set + description: imageBuilderType describes the image builder set in the MachineOSConfig type: string required: diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml index 4cf5f00163..d64704b194 100644 --- 
a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml @@ -170,7 +170,7 @@ spec: - resource type: object imageBuilderType: - description: ImageBuilderType describes the image builder set + description: imageBuilderType describes the image builder set in the MachineOSConfig type: string required: diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml index 0e2fe374f1..886173c58e 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-TechPreviewNoUpgrade.crd.yaml @@ -170,7 +170,7 @@ spec: - resource type: object imageBuilderType: - description: ImageBuilderType describes the image builder set + description: imageBuilderType describes the image builder set in the MachineOSConfig type: string required: diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go index a51ffee142..f2592eaf73 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.swagger_doc_generated.go @@ -148,7 +148,7 
@@ func (MachineOSBuildStatus) SwaggerDoc() map[string]string { var map_MachineOSBuilderReference = map[string]string{ "": "MachineOSBuilderReference describes which ImageBuilder backend to use for this build/", - "imageBuilderType": "ImageBuilderType describes the image builder set in the MachineOSConfig", + "imageBuilderType": "imageBuilderType describes the image builder set in the MachineOSConfig", "buildPod": "relatedObjects is a list of objects that are related to the build process.", } diff --git a/vendor/github.com/openshift/api/monitoring/v1/types.go b/vendor/github.com/openshift/api/monitoring/v1/types.go index 111538ba78..fc650d9616 100644 --- a/vendor/github.com/openshift/api/monitoring/v1/types.go +++ b/vendor/github.com/openshift/api/monitoring/v1/types.go @@ -44,7 +44,7 @@ type AlertingRule struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertingRule object. - // +kubebuilder:validation:Required + // +required Spec AlertingRuleSpec `json:"spec"` // status describes the current state of this AlertOverrides object. @@ -67,8 +67,8 @@ type AlertingRuleList struct { metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertingRule objects. - // +kubebuilder:validation:Required - Items []AlertingRule `json:"items"` + // +optional + Items []AlertingRule `json:"items,omitempty"` } // AlertingRuleSpec is the desired state of an AlertingRule resource. @@ -93,7 +93,7 @@ type AlertingRuleSpec struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Groups []RuleGroup `json:"groups"` } @@ -110,7 +110,7 @@ type Duration string type RuleGroup struct { // name is the name of the group. 
// - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` @@ -130,7 +130,7 @@ type RuleGroup struct { // processed sequentially, and all rules are processed. // // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Rules []Rule `json:"rules"` } @@ -143,7 +143,7 @@ type Rule struct { // alert is the name of the alert. Must be a valid label value, i.e. may // contain any Unicode character. // - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Alert string `json:"alert"` @@ -157,7 +157,7 @@ type Rule struct { // to create an always-firing "Watchdog" alert in order to ensure the alerting // pipeline is functional. // - // +kubebuilder:validation:Required + // +required Expr intstr.IntOrString `json:"expr"` // for is the time period after which alerts are considered firing after first @@ -210,7 +210,7 @@ type PrometheusRuleRef struct { // the reference should we ever need to. // name of the referenced PrometheusRule. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` @@ -238,7 +238,7 @@ type AlertRelabelConfig struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertRelabelConfig object. - // +kubebuilder:validation:Required + // +required Spec AlertRelabelConfigSpec `json:"spec"` // status describes the current state of this AlertRelabelConfig object. @@ -254,7 +254,7 @@ type AlertRelabelConfigSpec struct { // configs is a list of sequentially evaluated alert relabel configs. 
// // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required + // +required Configs []RelabelConfig `json:"configs"` } @@ -285,9 +285,8 @@ type AlertRelabelConfigList struct { metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertRelabelConfigs. - // +kubebuilder:validation:MinItems:=1 - // +kubebuilder:validation:Required - Items []*AlertRelabelConfig `json:"items"` + // +optional + Items []AlertRelabelConfig `json:"items,omitempty"` } // LabelName is a valid Prometheus label name which may only contain ASCII diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go index cb472ccf54..67af0e0828 100644 --- a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go @@ -45,13 +45,9 @@ func (in *AlertRelabelConfigList) DeepCopyInto(out *AlertRelabelConfigList) { in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]*AlertRelabelConfig, len(*in)) + *out = make([]AlertRelabelConfig, len(*in)) for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(AlertRelabelConfig) - (*in).DeepCopyInto(*out) - } + (*in)[i].DeepCopyInto(&(*out)[i]) } } return diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto index a429f04c02..4fc68a9740 100644 --- a/vendor/github.com/openshift/api/network/v1/generated.proto +++ b/vendor/github.com/openshift/api/network/v1/generated.proto @@ -29,39 +29,37 @@ message ClusterNetwork { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Network is a CIDR string specifying the global overlay network's L3 space + // network is a CIDR 
string specifying the global overlay network's L3 space // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string network = 2; - // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods // +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 optional uint32 hostsubnetlength = 3; - // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // serviceNetwork is the CIDR range that Service IP addresses are allocated from // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string serviceNetwork = 4; - // PluginName is the name of the network plugin being used + // pluginName is the name of the network plugin being used optional string pluginName = 5; - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. repeated ClusterNetworkEntry clusterNetworks = 6; - // VXLANPort sets the VXLAN destination port used by the cluster. + // vxlanPort sets the VXLAN destination port used by the cluster. // It is set by the master configuration file on startup and cannot be edited manually. // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. 
// Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +optional optional uint32 vxlanPort = 7; - // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 - // +kubebuilder:validation:Optional // +optional optional uint32 mtu = 8; } @@ -72,7 +70,7 @@ message ClusterNetworkEntry { // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string cidr = 1; - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. 
// +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 optional uint32 hostSubnetLength = 2; @@ -87,7 +85,7 @@ message ClusterNetworkList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of cluster networks + // items is the list of cluster networks repeated ClusterNetwork items = 2; } @@ -125,14 +123,14 @@ message EgressNetworkPolicyList { // EgressNetworkPolicyPeer specifies a target to apply egress network policy to message EgressNetworkPolicyPeer { - // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset // Ideally we would have liked to use the cidr openapi format for this property. // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs // We are therefore using a regex pattern to validate instead. // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string cidrSelector = 1; - // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` optional string dnsName = 2; } @@ -172,25 +170,25 @@ message HostSubnet { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // host is the name of the node. 
(This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` optional string host = 2; - // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` optional string hostIP = 3; - // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // subnet is the CIDR range of the overlay network assigned to the node for its pods // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` optional string subnet = 4; - // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the // master will overwrite the value here with its own allocation of egress IPs. // +optional repeated string egressIPs = 5; - // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egressCIDRs is the list of CIDR ranges available for automatically assigning // egress IPs to this node from. If this field is set then EgressIPs should be // treated as read-only. 
// +optional @@ -206,7 +204,7 @@ message HostSubnetList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of host subnets + // items is the list of host subnets repeated HostSubnet items = 2; } @@ -227,16 +225,16 @@ message NetNamespace { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` optional string netname = 2; - // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=16777215 optional uint32 netid = 3; - // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. // (If empty, external traffic will be masqueraded to Node IPs.) 
// +optional repeated string egressIPs = 4; @@ -251,7 +249,7 @@ message NetNamespaceList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of net namespaces + // items is the list of net namespaces repeated NetNamespace items = 2; } diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go index 89015cf6b1..7790802138 100644 --- a/vendor/github.com/openshift/api/network/v1/types.go +++ b/vendor/github.com/openshift/api/network/v1/types.go @@ -32,39 +32,37 @@ type ClusterNetwork struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Network is a CIDR string specifying the global overlay network's L3 space + // network is a CIDR string specifying the global overlay network's L3 space // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"` - // HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // hostsubnetlength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods // +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"` - // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // serviceNetwork is the CIDR range that Service IP addresses are allocated from // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"` - // PluginName is the name of the network plugin being used + // pluginName is the name of the network plugin being used PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"` - // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + // clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"` - // VXLANPort sets the VXLAN destination port used by the cluster. + // vxlanPort sets the VXLAN destination port used by the cluster. // It is set by the master configuration file on startup and cannot be edited manually. // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. 
// +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +optional VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"` - // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. // +kubebuilder:validation:Minimum=576 // +kubebuilder:validation:Maximum=65536 - // +kubebuilder:validation:Optional // +optional MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"` } @@ -75,7 +73,7 @@ type ClusterNetworkEntry struct { // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. 
// +kubebuilder:validation:Minimum=2 // +kubebuilder:validation:Maximum=30 HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"` @@ -94,7 +92,7 @@ type ClusterNetworkList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of cluster networks + // items is the list of cluster networks Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -134,25 +132,25 @@ type HostSubnet struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // host is the name of the node. (This is the same as the object's name, but both fields must be set.) 
// +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` Host string `json:"host" protobuf:"bytes,2,opt,name=host"` - // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // hostIP is the IP address to be used as a VTEP by other nodes in the overlay network // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"` - // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // subnet is the CIDR range of the overlay network assigned to the node for its pods // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"` - // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // egressIPs is the list of automatic egress IP addresses currently hosted by this node. // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the // master will overwrite the value here with its own allocation of egress IPs. // +optional EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"` - // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egressCIDRs is the list of CIDR ranges available for automatically assigning // egress IPs to this node from. If this field is set then EgressIPs should be // treated as read-only. 
// +optional @@ -172,7 +170,7 @@ type HostSubnetList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of host subnets + // items is the list of host subnets Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -204,16 +202,16 @@ type NetNamespace struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"` - // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=16777215 NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"` - // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. // (If empty, external traffic will be masqueraded to Node IPs.) 
// +optional EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"` @@ -232,7 +230,7 @@ type NetNamespaceList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of net namespaces + // items is the list of net namespaces Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -247,13 +245,13 @@ const ( // EgressNetworkPolicyPeer specifies a target to apply egress network policy to type EgressNetworkPolicyPeer struct { - // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset // Ideally we would have liked to use the cidr openapi format for this property. // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs // We are therefore using a regex pattern to validate instead. // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"` - // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // dnsName is the domain name to allow/deny traffic to. 
If this is set, cidrSelector must be unset // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"` } diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go index 743ddeab57..a0e1240962 100644 --- a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go @@ -14,13 +14,13 @@ package v1 var map_ClusterNetwork = map[string]string{ "": "ClusterNetwork was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "network": "Network is a CIDR string specifying the global overlay network's L3 space", - "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", - "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", - "pluginName": "PluginName is the name of the network plugin being used", - "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", - "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. 
Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", - "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", + "network": "network is a CIDR string specifying the global overlay network's L3 space", + "hostsubnetlength": "hostsubnetlength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "serviceNetwork": "serviceNetwork is the CIDR range that Service IP addresses are allocated from", + "pluginName": "pluginName is the name of the network plugin being used", + "clusterNetworks": "clusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "vxlanPort": "vxlanPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "mtu": "mtu is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", } func (ClusterNetwork) SwaggerDoc() map[string]string { @@ -30,7 +30,7 @@ func (ClusterNetwork) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. 
The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", "CIDR": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_ClusterNetworkList = map[string]string{ "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of cluster networks", + "items": "items is the list of cluster networks", } func (ClusterNetworkList) SwaggerDoc() map[string]string { @@ -69,8 +69,8 @@ func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { var map_EgressNetworkPolicyPeer = map[string]string{ "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", - "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. 
But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", - "dnsName": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", + "cidrSelector": "cidrSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "dnsName": "dnsName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", } func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { @@ -99,11 +99,11 @@ func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { var map_HostSubnet = map[string]string{ "": "HostSubnet was used by OpenShift SDN. DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", - "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", - "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", - "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. 
If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", - "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", + "host": "host is the name of the node. (This is the same as the object's name, but both fields must be set.)", + "hostIP": "hostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "subnet": "subnet is the CIDR range of the overlay network assigned to the node for its pods", + "egressIPs": "egressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "egressCIDRs": "egressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. If this field is set then EgressIPs should be treated as read-only.", } func (HostSubnet) SwaggerDoc() map[string]string { @@ -113,7 +113,7 @@ func (HostSubnet) SwaggerDoc() map[string]string { var map_HostSubnetList = map[string]string{ "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of host subnets", + "items": "items is the list of host subnets", } func (HostSubnetList) SwaggerDoc() map[string]string { @@ -123,9 +123,9 @@ func (HostSubnetList) SwaggerDoc() map[string]string { var map_NetNamespace = map[string]string{ "": "NetNamespace was used by OpenShift SDN. 
DEPRECATED: OpenShift SDN is no longer supported and this object is no longer used in any way by OpenShift.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", - "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", - "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", + "netname": "netname is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "netid": "netid is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "egressIPs": "egressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", } func (NetNamespace) SwaggerDoc() map[string]string { @@ -135,7 +135,7 @@ func (NetNamespace) SwaggerDoc() map[string]string { var map_NetNamespaceList = map[string]string{ "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of net namespaces", + "items": "items is the list of net namespaces", } func (NetNamespaceList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go index 394f2e4ac0..cd0d1b31a5 100644 --- a/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go +++ b/vendor/github.com/openshift/api/network/v1alpha1/types_dnsnameresolver.go @@ -26,7 +26,7 @@ type DNSNameResolver struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired behavior of the DNSNameResolver. - // +kubebuilder:validation:Required + // +required Spec DNSNameResolverSpec `json:"spec"` // status is the most recently observed status of the DNSNameResolver. // +optional @@ -47,7 +47,7 @@ type DNSNameResolverSpec struct { // For a wildcard DNS name, the '*' will match only one label. Additionally, only a single // '*' can be used at the beginning of the wildcard DNS name. For example, '*.example.com.' // will match 'sub1.example.com.' but won't match 'sub2.sub1.example.com.' - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="spec.name is immutable" Name DNSName `json:"name"` } @@ -82,12 +82,12 @@ type DNSNameResolverResolvedName struct { // will store the regular DNS names which match the wildcard DNS name and have been successfully resolved. // If the wildcard DNS name can also be successfully resolved, then this field will store the wildcard // DNS name as well. - // +kubebuilder:validation:Required + // +required DNSName DNSName `json:"dnsName"` // resolvedAddresses gives the list of associated IP addresses and their corresponding TTLs and last // lookup times for the dnsName. 
- // +kubebuilder:validation:Required + // +required // +listType=map // +listMapKey=ip ResolvedAddresses []DNSNameResolverResolvedAddress `json:"resolvedAddresses"` @@ -106,21 +106,21 @@ type DNSNameResolverResolvedAddress struct { // lastLookupTime + ttlSeconds. To refresh the information, a DNS lookup will be performed upon // the expiration of the IP address's validity. If the information is not refreshed then it will // be removed with a grace period after the expiration of the IP address's validity. - // +kubebuilder:validation:Required + // +required IP string `json:"ip"` // ttlSeconds is the time-to-live value of the IP address. The validity of the IP address expires after // lastLookupTime + ttlSeconds. On a successful DNS lookup the value of this field will be updated with // the current time-to-live value. If the information is not refreshed then it will be removed with a // grace period after the expiration of the IP address's validity. - // +kubebuilder:validation:Required + // +required TTLSeconds int32 `json:"ttlSeconds"` // lastLookupTime is the timestamp when the last DNS lookup was completed successfully. The validity of // the IP address expires after lastLookupTime + ttlSeconds. The value of this field will be updated to // the current time on a successful DNS lookup. If the information is not refreshed then it will be // removed with a grace period after the expiration of the IP address's validity. 
- // +kubebuilder:validation:Required + // +required LastLookupTime *metav1.Time `json:"lastLookupTime"` } diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto index 37c374557d..ebf09e7e9a 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto +++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto @@ -43,7 +43,7 @@ message EgressRouter { optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired egress router. - // +kubebuilder:validation:Required + // +required optional EgressRouterSpec spec = 2; // Observed status of EgressRouter. @@ -53,8 +53,8 @@ message EgressRouter { // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface // +kubebuilder:validation:Required message EgressRouterAddress { - // IP is the address to configure on the router's interface. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. + // +required optional string ip = 1; // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. @@ -86,30 +86,30 @@ message EgressRouterList { // +k8s:openapi-gen=true // +kubebuilder:validation:Required message EgressRouterSpec { - // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + // +required // +kubebuilder:validation:Enum="Redirect" // +kubebuilder:default:="Redirect" optional string mode = 1; - // Redirect represents the configuration parameters specific to redirect mode. 
+ // redirect represents the configuration parameters specific to redirect mode. optional RedirectConfig redirect = 2; // Specification of interface to create/use. The default is macvlan. // Currently only macvlan is supported. - // +kubebuilder:validation:Required + // +required // +kubebuilder:default:={macvlan: {mode: Bridge}} optional EgressRouterInterface networkInterface = 3; // List of IP addresses to configure on the pod's secondary interface. - // +kubebuilder:validation:Required + // +required repeated EgressRouterAddress addresses = 4; } // EgressRouterStatus contains the observed status of EgressRouter. Read-only. message EgressRouterStatus { // Observed status of the egress router - // +kubebuilder:validation:Required + // +required repeated EgressRouterStatusCondition conditions = 1; } @@ -117,28 +117,25 @@ message EgressRouterStatus { // managed and monitored components. // +k8s:deepcopy-gen=true message EgressRouterStatusCondition { - // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded - // +kubebuilder:validation:Required + // type specifies the aspect reported by this condition; one of Available, Progressing, Degraded // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded" // +required optional string type = 1; - // Status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required + // status of the condition, one of True, False, Unknown. // +kubebuilder:validation:Enum="True";"False";"Unknown" // +required optional string status = 2; - // LastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required + // lastTransitionTime is the time of the last update to the current status property. // +required // +nullable optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Reason is the CamelCase reason for the condition's current status. 
+ // reason is the CamelCase reason for the condition's current status. optional string reason = 4; - // Message provides additional information about the current condition. + // message provides additional information about the current condition. // This is only to be consumed by humans. It may contain Line Feed // characters (U+000A), which should be rendered as new lines. optional string message = 5; @@ -147,21 +144,21 @@ message EgressRouterStatusCondition { // L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. message L4RedirectRule { // IP specifies the remote destination's IP address. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // +required optional string destinationIP = 1; - // Port is the port number to which clients should send traffic to be redirected. - // +kubebuilder:validation:Required + // port is the port number to which clients should send traffic to be redirected. + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 optional int32 port = 2; - // Protocol can be TCP, SCTP or UDP. - // +kubebuilder:validation:Required + // protocol can be TCP, SCTP or UDP. + // +required // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP" optional string protocol = 3; - // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. + // targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. // If unspecified, the value from "Port" is used. // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 @@ -170,8 +167,8 @@ message L4RedirectRule { // MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType message MacvlanConfig { - // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". 
- // +kubebuilder:validation:Required + // mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". + // +required // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru" // +kubebuilder:default:="Bridge" optional string mode = 1; @@ -185,7 +182,7 @@ message RedirectConfig { // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. repeated L4RedirectRule redirectRules = 1; - // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. + // fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. // If no redirect rules are specified, all traffic from the router are redirected to this IP. // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go index 9f11590e05..87e279eda8 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go @@ -38,7 +38,7 @@ type EgressRouter struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired egress router. - // +kubebuilder:validation:Required + // +required Spec EgressRouterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Observed status of EgressRouter. @@ -51,23 +51,23 @@ type EgressRouter struct { // +k8s:openapi-gen=true // +kubebuilder:validation:Required type EgressRouterSpec struct { - // Mode depicts the mode that is used for the egress router. 
The default mode is "Redirect" and is the only supported mode currently. - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. + // +required // +kubebuilder:validation:Enum="Redirect" // +kubebuilder:default:="Redirect" Mode EgressRouterMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=EgressRouterMode"` - // Redirect represents the configuration parameters specific to redirect mode. + // redirect represents the configuration parameters specific to redirect mode. Redirect *RedirectConfig `json:"redirect,omitempty" protobuf:"bytes,2,opt,name=redirect"` // Specification of interface to create/use. The default is macvlan. // Currently only macvlan is supported. - // +kubebuilder:validation:Required + // +required // +kubebuilder:default:={macvlan: {mode: Bridge}} NetworkInterface EgressRouterInterface `json:"networkInterface" protobuf:"bytes,3,opt,name=networkInterface"` // List of IP addresses to configure on the pod's secondary interface. - // +kubebuilder:validation:Required + // +required Addresses []EgressRouterAddress `json:"addresses" protobuf:"bytes,4,rep,name=addresses"` } @@ -86,7 +86,7 @@ type RedirectConfig struct { // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode. RedirectRules []L4RedirectRule `json:"redirectRules,omitempty" protobuf:"bytes,1,rep,name=redirectRules"` - // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. + // fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. // If no redirect rules are specified, all traffic from the router are redirected to this IP. // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. 
// If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected. @@ -96,21 +96,21 @@ type RedirectConfig struct { // L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. type L4RedirectRule struct { // IP specifies the remote destination's IP address. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // +required DestinationIP string `json:"destinationIP" protobuf:"bytes,1,opt,name=destinationIP"` - // Port is the port number to which clients should send traffic to be redirected. - // +kubebuilder:validation:Required + // port is the port number to which clients should send traffic to be redirected. + // +required // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` - // Protocol can be TCP, SCTP or UDP. - // +kubebuilder:validation:Required + // protocol can be TCP, SCTP or UDP. + // +required // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP" Protocol ProtocolType `json:"protocol" protobuf:"bytes,3,opt,name=protocol,casttype=ProtocolType"` - // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. + // targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. // If unspecified, the value from "Port" is used. // +kubebuilder:validation:Maximum:=65535 // +kubebuilder:validation:Minimum:=1 @@ -165,8 +165,8 @@ const ( // MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType type MacvlanConfig struct { - // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". - // +kubebuilder:validation:Required + // mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge". 
+ // +required // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru" // +kubebuilder:default:="Bridge" Mode MacvlanMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=MacvlanMode"` @@ -178,8 +178,8 @@ type MacvlanConfig struct { // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface // +kubebuilder:validation:Required type EgressRouterAddress struct { - // IP is the address to configure on the router's interface. Can be IPv4 or IPv6. - // +kubebuilder:validation:Required + // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. + // +required IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6. Gateway string `json:"gateway,omitempty" protobuf:"bytes,2,opt,name=gateway"` @@ -219,28 +219,25 @@ const ( // managed and monitored components. // +k8s:deepcopy-gen=true type EgressRouterStatusCondition struct { - // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded - // +kubebuilder:validation:Required + // type specifies the aspect reported by this condition; one of Available, Progressing, Degraded // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded" // +required Type EgressRouterStatusConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=EgressRouterStatusConditionType"` - // Status of the condition, one of True, False, Unknown. - // +kubebuilder:validation:Required + // status of the condition, one of True, False, Unknown. // +kubebuilder:validation:Enum="True";"False";"Unknown" // +required Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // LastTransitionTime is the time of the last update to the current status property. - // +kubebuilder:validation:Required + // lastTransitionTime is the time of the last update to the current status property. 
// +required // +nullable LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is the CamelCase reason for the condition's current status. + // reason is the CamelCase reason for the condition's current status. Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Message provides additional information about the current condition. + // message provides additional information about the current condition. // This is only to be consumed by humans. It may contain Line Feed // characters (U+000A), which should be rendered as new lines. Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` @@ -249,7 +246,7 @@ type EgressRouterStatusCondition struct { // EgressRouterStatus contains the observed status of EgressRouter. Read-only. type EgressRouterStatus struct { // Observed status of the egress router - // +kubebuilder:validation:Required + // +required Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` } diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go index 97bec9e29d..000cb19033 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go @@ -24,7 +24,7 @@ func (EgressRouter) SwaggerDoc() map[string]string { var map_EgressRouterAddress = map[string]string{ "": "EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface", - "ip": "IP is the address to configure on the router's interface. Can be IPv4 or IPv6.", + "ip": "ip is the address to configure on the router's interface. 
Can be IPv4 or IPv6.", "gateway": "IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.", } @@ -52,8 +52,8 @@ func (EgressRouterList) SwaggerDoc() map[string]string { var map_EgressRouterSpec = map[string]string{ "": "EgressRouterSpec contains the configuration for an egress router. Mode, networkInterface and addresses fields must be specified along with exactly one \"Config\" that matches the mode. Each config consists of parameters specific to that mode.", - "mode": "Mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", - "redirect": "Redirect represents the configuration parameters specific to redirect mode.", + "mode": "mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", + "redirect": "redirect represents the configuration parameters specific to redirect mode.", "networkInterface": "Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported.", "addresses": "List of IP addresses to configure on the pod's secondary interface.", } @@ -73,11 +73,11 @@ func (EgressRouterStatus) SwaggerDoc() map[string]string { var map_EgressRouterStatusCondition = map[string]string{ "": "EgressRouterStatusCondition represents the state of the egress router's managed and monitored components.", - "type": "Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "LastTransitionTime is the time of the last update to the current status property.", - "reason": "Reason is the CamelCase reason for the condition's current status.", - "message": "Message provides additional information about the current condition. This is only to be consumed by humans. 
It may contain Line Feed characters (U+000A), which should be rendered as new lines.", + "type": "type specifies the aspect reported by this condition; one of Available, Progressing, Degraded", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", + "reason": "reason is the CamelCase reason for the condition's current status.", + "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", } func (EgressRouterStatusCondition) SwaggerDoc() map[string]string { @@ -87,9 +87,9 @@ func (EgressRouterStatusCondition) SwaggerDoc() map[string]string { var map_L4RedirectRule = map[string]string{ "": "L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.", "destinationIP": "IP specifies the remote destination's IP address. Can be IPv4 or IPv6.", - "port": "Port is the port number to which clients should send traffic to be redirected.", - "protocol": "Protocol can be TCP, SCTP or UDP.", - "targetPort": "TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from \"Port\" is used.", + "port": "port is the port number to which clients should send traffic to be redirected.", + "protocol": "protocol can be TCP, SCTP or UDP.", + "targetPort": "targetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. 
If unspecified, the value from \"Port\" is used.", } func (L4RedirectRule) SwaggerDoc() map[string]string { @@ -98,7 +98,7 @@ func (L4RedirectRule) SwaggerDoc() map[string]string { var map_MacvlanConfig = map[string]string{ "": "MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType", - "mode": "Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".", + "mode": "mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".", "master": "Name of the master interface. Need not be specified if it can be inferred from the IP address.", } @@ -109,7 +109,7 @@ func (MacvlanConfig) SwaggerDoc() map[string]string { var map_RedirectConfig = map[string]string{ "": "RedirectConfig represents the configuration parameters specific to redirect mode.", "redirectRules": "List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.", - "fallbackIP": "FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.", + "fallbackIP": "fallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. 
If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.", } func (RedirectConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto index 7630d896da..4a5474e0c6 100644 --- a/vendor/github.com/openshift/api/oauth/v1/generated.proto +++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto @@ -13,13 +13,13 @@ option go_package = "github.com/openshift/api/oauth/v1"; // ClusterRoleScopeRestriction describes restrictions on cluster role scopes message ClusterRoleScopeRestriction { - // RoleNames is the list of cluster roles that can referenced. * means anything + // roleNames is the list of cluster roles that can referenced. * means anything repeated string roleNames = 1; - // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + // namespaces is the list of namespaces that can be referenced. * means any of them (including *) repeated string namespaces = 2; - // AllowEscalation indicates whether you can request roles and their escalating resources + // allowEscalation indicates whether you can request roles and their escalating resources optional bool allowEscalation = 3; } @@ -37,31 +37,31 @@ message OAuthAccessToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. 
repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. optional string redirectURI = 5; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 6; - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token optional string userUID = 7; - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token optional string authorizeToken = 8; - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. optional string refreshToken = 9; - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. optional int32 inactivityTimeoutSeconds = 10; @@ -76,7 +76,7 @@ message OAuthAccessTokenList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth access tokens + // items is the list of OAuth access tokens repeated OAuthAccessToken items = 2; } @@ -89,32 +89,32 @@ message OAuthAuthorizeToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. 
+ // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. optional string redirectURI = 5; - // State data from request + // state data from request optional string state = 6; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 7; - // UserUID is the unique UID associated with this token. UserUID and UserName must both match + // userUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. optional string userUID = 8; - // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 optional string codeChallenge = 9; - // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 optional string codeChallengeMethod = 10; } @@ -127,7 +127,7 @@ message OAuthAuthorizeTokenList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth authorization tokens + // items is the list of OAuth authorization tokens repeated OAuthAuthorizeToken items = 2; } @@ -140,36 +140,36 @@ message OAuthClient { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional 
.k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Secret is the unique secret associated with a client + // secret is the unique secret associated with a client optional string secret = 2; - // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation // and for service account token validation repeated string additionalSecrets = 3; - // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects optional bool respondWithChallenges = 4; - // RedirectURIs is the valid redirection URIs associated with a client + // redirectURIs is the valid redirection URIs associated with a client // +patchStrategy=merge repeated string redirectURIs = 5; - // GrantMethod is a required field which determines how to handle grants for this client. + // grantMethod is a required field which determines how to handle grants for this client. // Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients // - prompt: prompts the end user for approval of grant requests, useful for third-party clients optional string grantMethod = 6; - // ScopeRestrictions describes which scopes this client can request. Each requested scope + // scopeRestrictions describes which scopes this client can request. Each requested scope // is checked against each restriction. If any restriction matches, then the scope is allowed. // If no restriction matches, then the scope is denied. repeated ScopeRestriction scopeRestrictions = 7; - // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 
+ // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. // 0 means no expiration. optional int32 accessTokenMaxAgeSeconds = 8; - // AccessTokenInactivityTimeoutSeconds overrides the default token + // accessTokenInactivityTimeoutSeconds overrides the default token // inactivity timeout for tokens granted to this client. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not @@ -194,17 +194,17 @@ message OAuthClientAuthorization { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this authorization + // clientName references the client that created this authorization optional string clientName = 2; - // UserName is the user name that authorized this client + // userName is the user name that authorized this client optional string userName = 3; - // UserUID is the unique UID associated with this authorization. UserUID and UserName + // userUID is the unique UID associated with this authorization. UserUID and UserName // must both match for this authorization to be valid. optional string userUID = 4; - // Scopes is an array of the granted scopes. + // scopes is an array of the granted scopes. 
repeated string scopes = 5; } @@ -217,7 +217,7 @@ message OAuthClientAuthorizationList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth client authorizations + // items is the list of OAuth client authorizations repeated OAuthClientAuthorization items = 2; } @@ -230,7 +230,7 @@ message OAuthClientList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of OAuth clients + // items is the list of OAuth clients repeated OAuthClient items = 2; } @@ -264,7 +264,7 @@ message ScopeRestriction { // ExactValues means the scope has to match a particular set of strings exactly repeated string literals = 1; - // ClusterRole describes a set of restrictions for cluster role scoping. + // clusterRole describes a set of restrictions for cluster role scoping. optional ClusterRoleScopeRestriction clusterRole = 2; } @@ -276,31 +276,31 @@ message UserOAuthAccessToken { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ClientName references the client that created this token. + // clientName references the client that created this token. optional string clientName = 2; - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. optional int64 expiresIn = 3; - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. repeated string scopes = 4; - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. 
optional string redirectURI = 5; - // UserName is the user name associated with this token + // userName is the user name associated with this token optional string userName = 6; - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token optional string userUID = 7; - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token optional string authorizeToken = 8; - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. optional string refreshToken = 9; - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. optional int32 inactivityTimeoutSeconds = 10; diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go index 026c527f5b..5a70b47749 100644 --- a/vendor/github.com/openshift/api/oauth/v1/types.go +++ b/vendor/github.com/openshift/api/oauth/v1/types.go @@ -24,31 +24,31 @@ type OAuthAccessToken struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this token. + // clientName references the client that created this token. ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` - // Scopes is an array of the requested scopes. 
+ // scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` - // UserName is the user name associated with this token + // userName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"` - // UserUID is the unique UID associated with this token + // userUID is the unique UID associated with this token UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"` - // AuthorizeToken contains the token that authorized this token + // authorizeToken contains the token that authorized this token AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"` - // RefreshToken is the value by which this token can be renewed. Can be blank. + // refreshToken is the value by which this token can be renewed. Can be blank. RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"` - // InactivityTimeoutSeconds is the value in seconds, from the + // inactivityTimeoutSeconds is the value in seconds, from the // CreationTimestamp, after which this token can no longer be used. // The value is automatically incremented when the token is used. InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"` @@ -69,32 +69,32 @@ type OAuthAuthorizeToken struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this token. + // clientName references the client that created this token. 
ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // ExpiresIn is the seconds from CreationTime before this token expires. + // expiresIn is the seconds from CreationTime before this token expires. ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` - // Scopes is an array of the requested scopes. + // scopes is an array of the requested scopes. Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` - // RedirectURI is the redirection associated with the token. + // redirectURI is the redirection associated with the token. RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` - // State data from request + // state data from request State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"` - // UserName is the user name associated with this token + // userName is the user name associated with this token UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"` - // UserUID is the unique UID associated with this token. UserUID and UserName must both match + // userUID is the unique UID associated with this token. UserUID and UserName must both match // for this token to be valid. 
UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"` - // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + // codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"` - // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + // codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"` } @@ -113,36 +113,36 @@ type OAuthClient struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Secret is the unique secret associated with a client + // secret is the unique secret associated with a client Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` - // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // additionalSecrets holds other secrets that may be used to identify the client. 
This is useful for rotation // and for service account token validation AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"` - // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + // respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"` - // RedirectURIs is the valid redirection URIs associated with a client + // redirectURIs is the valid redirection URIs associated with a client // +patchStrategy=merge RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"` - // GrantMethod is a required field which determines how to handle grants for this client. + // grantMethod is a required field which determines how to handle grants for this client. // Valid grant handling methods are: // - auto: always approves grant requests, useful for trusted clients // - prompt: prompts the end user for approval of grant requests, useful for third-party clients GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"` - // ScopeRestrictions describes which scopes this client can request. Each requested scope + // scopeRestrictions describes which scopes this client can request. Each requested scope // is checked against each restriction. If any restriction matches, then the scope is allowed. // If no restriction matches, then the scope is denied. ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"` - // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 
+ // accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. // 0 means no expiration. AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"` - // AccessTokenInactivityTimeoutSeconds overrides the default token + // accessTokenInactivityTimeoutSeconds overrides the default token // inactivity timeout for tokens granted to this client. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not @@ -174,17 +174,17 @@ type ScopeRestriction struct { // ExactValues means the scope has to match a particular set of strings exactly ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"` - // ClusterRole describes a set of restrictions for cluster role scoping. + // clusterRole describes a set of restrictions for cluster role scoping. ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"` } // ClusterRoleScopeRestriction describes restrictions on cluster role scopes type ClusterRoleScopeRestriction struct { - // RoleNames is the list of cluster roles that can referenced. * means anything + // roleNames is the list of cluster roles that can referenced. * means anything RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"` - // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + // namespaces is the list of namespaces that can be referenced. 
* means any of them (including *) Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` - // AllowEscalation indicates whether you can request roles and their escalating resources + // allowEscalation indicates whether you can request roles and their escalating resources AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"` } @@ -203,17 +203,17 @@ type OAuthClientAuthorization struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ClientName references the client that created this authorization + // clientName references the client that created this authorization ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` - // UserName is the user name that authorized this client + // userName is the user name that authorized this client UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"` - // UserUID is the unique UID associated with this authorization. UserUID and UserName + // userUID is the unique UID associated with this authorization. UserUID and UserName // must both match for this authorization to be valid. UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"` - // Scopes is an array of the granted scopes. + // scopes is an array of the granted scopes. 
Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` } @@ -230,7 +230,7 @@ type OAuthAccessTokenList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth access tokens + // items is the list of OAuth access tokens Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -247,7 +247,7 @@ type OAuthAuthorizeTokenList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth authorization tokens + // items is the list of OAuth authorization tokens Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -264,7 +264,7 @@ type OAuthClientList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth clients + // items is the list of OAuth clients Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -281,7 +281,7 @@ type OAuthClientAuthorizationList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of OAuth client authorizations + // items is the list of OAuth client authorizations Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go index f62b715c01..171b5221f6 100644 --- 
a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go @@ -13,9 +13,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_ClusterRoleScopeRestriction = map[string]string{ "": "ClusterRoleScopeRestriction describes restrictions on cluster role scopes", - "roleNames": "RoleNames is the list of cluster roles that can referenced. * means anything", - "namespaces": "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)", - "allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources", + "roleNames": "roleNames is the list of cluster roles that can referenced. * means anything", + "namespaces": "namespaces is the list of namespaces that can be referenced. * means any of them (including *)", + "allowEscalation": "allowEscalation indicates whether you can request roles and their escalating resources", } func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { @@ -25,15 +25,15 @@ func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string { var map_OAuthAccessToken = map[string]string{ "": "OAuthAccessToken describes an OAuth access token. The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this token.", - "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", - "scopes": "Scopes is an array of the requested scopes.", - "redirectURI": "RedirectURI is the redirection associated with the token.", - "userName": "UserName is the user name associated with this token", - "userUID": "UserUID is the unique UID associated with this token", - "authorizeToken": "AuthorizeToken contains the token that authorized this token", - "refreshToken": "RefreshToken is the value by which this token can be renewed. Can be blank.", - "inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. The value is automatically incremented when the token is used.", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token", + "authorizeToken": "authorizeToken contains the token that authorized this token", + "refreshToken": "refreshToken is the value by which this token can be renewed. Can be blank.", + "inactivityTimeoutSeconds": "inactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used. 
The value is automatically incremented when the token is used.", } func (OAuthAccessToken) SwaggerDoc() map[string]string { @@ -43,7 +43,7 @@ func (OAuthAccessToken) SwaggerDoc() map[string]string { var map_OAuthAccessTokenList = map[string]string{ "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth access tokens", + "items": "items is the list of OAuth access tokens", } func (OAuthAccessTokenList) SwaggerDoc() map[string]string { @@ -53,15 +53,15 @@ func (OAuthAccessTokenList) SwaggerDoc() map[string]string { var map_OAuthAuthorizeToken = map[string]string{ "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this token.", - "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", - "scopes": "Scopes is an array of the requested scopes.", - "redirectURI": "RedirectURI is the redirection associated with the token.", - "state": "State data from request", - "userName": "UserName is the user name associated with this token", - "userUID": "UserUID is the unique UID associated with this token. 
UserUID and UserName must both match for this token to be valid.", - "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", - "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", + "clientName": "clientName references the client that created this token.", + "expiresIn": "expiresIn is the seconds from CreationTime before this token expires.", + "scopes": "scopes is an array of the requested scopes.", + "redirectURI": "redirectURI is the redirection associated with the token.", + "state": "state data from request", + "userName": "userName is the user name associated with this token", + "userUID": "userUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "codeChallenge": "codeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "codeChallengeMethod": "codeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", } func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { @@ -71,7 +71,7 @@ func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { var map_OAuthAuthorizeTokenList = map[string]string{ "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth authorization tokens", + "items": "items is the list of OAuth authorization tokens", } func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { @@ -81,14 +81,14 @@ func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { var map_OAuthClient = map[string]string{ "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "secret": "Secret is the unique secret associated with a client", - "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", - "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", - "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client", - "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", - "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", - "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 
0 means no expiration.", - "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", + "secret": "secret is the unique secret associated with a client", + "additionalSecrets": "additionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation and for service account token validation", + "respondWithChallenges": "respondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "redirectURIs": "redirectURIs is the valid redirection URIs associated with a client", + "grantMethod": "grantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "scopeRestrictions": "scopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. 
If no restriction matches, then the scope is denied.", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", } func (OAuthClient) SwaggerDoc() map[string]string { @@ -98,10 +98,10 @@ func (OAuthClient) SwaggerDoc() map[string]string { var map_OAuthClientAuthorization = map[string]string{ "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "clientName": "ClientName references the client that created this authorization", - "userName": "UserName is the user name that authorized this client", - "userUID": "UserUID is the unique UID associated with this authorization. 
UserUID and UserName must both match for this authorization to be valid.", - "scopes": "Scopes is an array of the granted scopes.", + "clientName": "clientName references the client that created this authorization", + "userName": "userName is the user name that authorized this client", + "userUID": "userUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "scopes": "scopes is an array of the granted scopes.", } func (OAuthClientAuthorization) SwaggerDoc() map[string]string { @@ -111,7 +111,7 @@ func (OAuthClientAuthorization) SwaggerDoc() map[string]string { var map_OAuthClientAuthorizationList = map[string]string{ "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth client authorizations", + "items": "items is the list of OAuth client authorizations", } func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { @@ -121,7 +121,7 @@ func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string { var map_OAuthClientList = map[string]string{ "": "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of OAuth clients", + "items": "items is the list of OAuth clients", } func (OAuthClientList) SwaggerDoc() map[string]string { @@ -152,7 +152,7 @@ func (RedirectReference) SwaggerDoc() map[string]string { var map_ScopeRestriction = map[string]string{ "": "ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil.", "literals": "ExactValues means the scope has to match a particular set of strings exactly", - "clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.", + "clusterRole": "clusterRole describes a set of restrictions for cluster role scoping.", } func (ScopeRestriction) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 33e3cf2912..498f78df6d 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -123,15 +123,15 @@ type RoutingConfig struct { type ImportModeType string const ( - // ImportModeLegacy indicates that the legacy behaviour should be used. - // For manifest lists, the legacy behaviour will discard the manifest list and import a single - // sub-manifest. In this case, the platform is chosen in the following order of priority: - // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. - // This mode is the default. - ImportModeLegacy ImportModeType = "Legacy" - // ImportModePreserveOriginal indicates that the original manifest will be preserved. - // For manifest lists, the manifest list and all its sub-manifests will be imported. - ImportModePreserveOriginal ImportModeType = "PreserveOriginal" + // ImportModeLegacy indicates that the legacy behaviour should be used. 
+ // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // This mode is the default. + ImportModeLegacy ImportModeType = "Legacy" + // ImportModePreserveOriginal indicates that the original manifest will be preserved. + // For manifest lists, the manifest list and all its sub-manifests will be imported. + ImportModePreserveOriginal ImportModeType = "PreserveOriginal" ) type ImagePolicyConfig struct { @@ -180,11 +180,11 @@ type AllowedRegistries []RegistryLocation // RegistryLocation contains a location of the registry specified by the registry domain // name. The domain name might include wildcards, like '*' or '??'. type RegistryLocation struct { - // DomainName specifies a domain name for the registry + // domainName specifies a domain name for the registry // In case the registry use non-standard (80 or 443) port, the port should be included // in the domain name as well. DomainName string `json:"domainName"` - // Insecure indicates whether the registry is secure (https) or insecure (http) + // insecure indicates whether the registry is secure (https) or insecure (http) // By default (if not specified) the registry is assumed as secure. 
Insecure bool `json:"insecure,omitempty"` } @@ -440,36 +440,36 @@ type BuildOverridesConfig struct { // ImageConfig holds the necessary configuration options for building image names for system components type ImageConfig struct { - // Format is the format of the name to be built for the system component + // format is the format of the name to be built for the system component Format string `json:"format"` - // Latest determines if the latest tag will be pulled from the registry + // latest determines if the latest tag will be pulled from the registry Latest bool `json:"latest"` } // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for // pods fulfilling a service to serve with. type ServiceServingCert struct { - // Signer holds the signing information used to automatically sign serving certificates. + // signer holds the signing information used to automatically sign serving certificates. // If this value is nil, then certs are not signed automatically. Signer *configv1.CertInfo `json:"signer"` } // ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. type ClusterNetworkEntry struct { - // CIDR defines the total range of a cluster networks address space. + // cidr defines the total range of a cluster networks address space. CIDR string `json:"cidr"` - // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. + // hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod. 
HostSubnetLength uint32 `json:"hostSubnetLength"` } // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. type SecurityAllocator struct { - // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the // ranges container images will use once user namespaces are started). UIDAllocatorRange string `json:"uidAllocatorRange"` - // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is // "/[,]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated // to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default @@ -480,7 +480,7 @@ type SecurityAllocator struct { // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 // MCSAllocatorRange string `json:"mcsAllocatorRange"` - // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS // ranges (100k namespaces, 535k/5 labels). 
MCSLabelsPerProject int `json:"mcsLabelsPerProject"` } diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go index 5162e46ba0..0c73046ee1 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -59,8 +59,8 @@ func (BuildOverridesConfig) SwaggerDoc() map[string]string { var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", - "cidr": "CIDR defines the total range of a cluster networks address space.", - "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", + "cidr": "cidr defines the total range of a cluster networks address space.", + "hostSubnetLength": "hostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", } func (ClusterNetworkEntry) SwaggerDoc() map[string]string { @@ -90,8 +90,8 @@ func (FrontProxyConfig) SwaggerDoc() map[string]string { var map_ImageConfig = map[string]string{ "": "ImageConfig holds the necessary configuration options for building image names for system components", - "format": "Format is the format of the name to be built for the system component", - "latest": "Latest determines if the latest tag will be pulled from the registry", + "format": "format is the format of the name to be built for the system component", + "latest": "latest determines if the latest tag will be pulled from the registry", } func (ImageConfig) SwaggerDoc() map[string]string { @@ -201,8 +201,8 @@ func (ProjectConfig) SwaggerDoc() map[string]string { var map_RegistryLocation = map[string]string{ "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", - "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", - "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", } func (RegistryLocation) SwaggerDoc() map[string]string { @@ -220,9 +220,9 @@ func (RoutingConfig) SwaggerDoc() map[string]string { var map_SecurityAllocator = map[string]string{ "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a 
project. If nil, allocation is disabled.", - "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", - "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", - "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", + "uidAllocatorRange": "uidAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. 
The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "mcsAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "mcsLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", } func (SecurityAllocator) SwaggerDoc() map[string]string { @@ -239,7 +239,7 @@ func (ServiceAccountControllerConfig) SwaggerDoc() map[string]string { var map_ServiceServingCert = map[string]string{ "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", - "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", + "signer": "signer holds the signing information used to automatically sign serving certificates. 
If this value is nil, then certs are not signed automatically.", } func (ServiceServingCert) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go index 21919f9a8b..5920c4fca7 100644 --- a/vendor/github.com/openshift/api/operator/v1/register.go +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -62,6 +62,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &OpenShiftAPIServerList{}, &OpenShiftControllerManager{}, &OpenShiftControllerManagerList{}, + &OLM{}, + &OLMList{}, &ServiceCA{}, &ServiceCAList{}, &ServiceCatalogAPIServer{}, diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index f04b6846ae..284dfe54dd 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -16,7 +16,6 @@ type MyOperatorResource struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec MyOperatorResourceSpec `json:"spec"` Status MyOperatorResourceStatus `json:"status"` @@ -145,19 +144,19 @@ type OperatorStatus struct { // GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
type GenerationStatus struct { // group is the group of the thing you're tracking - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // resource is the resource type of the thing you're tracking - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` // namespace is where the thing you're tracking is - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` // name is the name of the thing you're tracking - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // TODO: Add validation for lastGeneration. The value for this field should generally increase, except when the associated @@ -194,21 +193,18 @@ type OperatorCondition struct { // useful (see .node.status.conditions), the ability to deconflict is important. // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` // +kubebuilder:validation:MaxLength=316 Type string `json:"type" protobuf:"bytes,1,opt,name=type"` // status of the condition, one of True, False, Unknown. // +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=True;False;Unknown Status ConditionStatus `json:"status"` // lastTransitionTime is the last time the condition transitioned from one status to another. // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
// +required - // +kubebuilder:validation:Required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=date-time LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` @@ -256,16 +252,19 @@ type StaticPodOperatorStatus struct { // +listType=map // +listMapKey=nodeName // +optional + // +kubebuilder:validation:XValidation:rule="size(self.filter(status, status.?targetRevision.orValue(0) != 0)) <= 1",message="no more than 1 node status may have a nonzero targetRevision" NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` } // NodeStatus provides information about the current state of a particular node managed by this operator. +// +kubebuilder:validation:XValidation:rule="has(self.currentRevision) || !has(oldSelf.currentRevision)",message="cannot be unset once set",fieldPath=".currentRevision" type NodeStatus struct { // nodeName is the name of the node - // +kubebuilder:validation:Required + // +required NodeName string `json:"nodeName"` // currentRevision is the generation of the most recently successful deployment + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="must only increase" CurrentRevision int32 `json:"currentRevision"` // targetRevision is the generation of the deployment we're trying to apply TargetRevision int32 `json:"targetRevision,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index 58d8748d97..bf103f19bb 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -25,7 +25,6 @@ type Authentication struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec AuthenticationSpec `json:"spec,omitempty"` // +optional @@ -37,7 
+36,7 @@ type AuthenticationSpec struct { } type AuthenticationStatus struct { - // OAuthAPIServer holds status specific only to oauth-apiserver + // oauthAPIServer holds status specific only to oauth-apiserver // +optional OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` @@ -45,7 +44,7 @@ type AuthenticationStatus struct { } type OAuthAPIServerStatus struct { - // LatestAvailableRevision is the latest revision used as suffix of revisioned + // latestAvailableRevision is the latest revision used as suffix of revisioned // secrets like encryption-config. A new revision causes a new deployment of pods. // +optional // +kubebuilder:validation:Minimum=0 diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go index 9666b27922..b6ef52e937 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -25,7 +25,6 @@ type CloudCredential struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec CloudCredentialSpec `json:"spec"` // +optional @@ -60,7 +59,7 @@ const ( // CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. type CloudCredentialSpec struct { OperatorSpec `json:",inline"` - // CredentialsMode allows informing CCO that it should not attempt to dynamically + // credentialsMode allows informing CCO that it should not attempt to dynamically // determine the root cloud credentials capabilities, and it should just run in // the specified mode. // It also allows putting the operator into "manual" mode if desired. 
diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go index e7c6d59dbb..f0d190e6db 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_config.go +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -26,7 +26,6 @@ type Config struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Config Operator. - // +kubebuilder:validation:Required // +required Spec ConfigSpec `json:"spec"` @@ -56,6 +55,6 @@ type ConfigList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []Config `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index aa39b2f950..68d9daa450 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -26,7 +26,6 @@ type Console struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ConsoleSpec `json:"spec,omitempty"` // +optional @@ -128,7 +127,7 @@ type CapabilityVisibility struct { // Disabling the capability in the console UI is represented by the "Disabled" value. // +unionDiscriminator // +kubebuilder:validation:Enum:="Enabled";"Disabled" - // +kubebuilder:validation:Required + // +required State CapabilityState `json:"state"` } @@ -137,10 +136,10 @@ type Capability struct { // name is the unique name of a capability. // Available capabilities are LightspeedButton and GettingStartedBanner. 
// +kubebuilder:validation:Enum:="LightspeedButton";"GettingStartedBanner" - // +kubebuilder:validation:Required + // +required Name ConsoleCapabilityName `json:"name"` // visibility defines the visibility state of the capability. - // +kubebuilder:validation:Required + // +required Visibility CapabilityVisibility `json:"visibility"` } @@ -184,21 +183,17 @@ type ConsoleCustomization struct { // +optional CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` // developerCatalog allows to configure the shown developer catalog categories (filters) and types (sub-catalogs). - // +kubebuilder:validation:Optional // +optional DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"` // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective // Project access page which can be used by a project admin to specify roles to other users and // restrict access within the project. If set, the list will replace the default ClusterRole options. - // +kubebuilder:validation:Optional // +optional ProjectAccess ProjectAccess `json:"projectAccess,omitempty"` // quickStarts allows customization of available ConsoleQuickStart resources in console. - // +kubebuilder:validation:Optional // +optional QuickStarts QuickStarts `json:"quickStarts,omitempty"` // addPage allows customizing actions on the Add page in developer perspective. - // +kubebuilder:validation:Optional // +optional AddPage AddPage `json:"addPage,omitempty"` // perspectives allows enabling/disabling of perspective(s) that user can see in the Perspective switcher dropdown. @@ -212,7 +207,6 @@ type ConsoleCustomization struct { type ProjectAccess struct { // availableClusterRoles is the list of ClusterRole names that are assignable to users // through the project access tab. 
- // +kubebuilder:validation:Optional // +optional AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` } @@ -235,7 +229,7 @@ type DeveloperConsoleCatalogTypes struct { // +kubebuilder:validation:Enum:="Enabled";"Disabled"; // +kubebuilder:default:="Enabled" // +default="Enabled" - // +kubebuilder:validation:Required + // +required State CatalogTypesState `json:"state,omitempty"` // enabled is a list of developer catalog types (sub-catalogs IDs) that will be shown to users. // Types (sub-catalogs) are added via console plugins, the available types (sub-catalog IDs) are available @@ -259,7 +253,6 @@ type DeveloperConsoleCatalogTypes struct { // DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog. type DeveloperConsoleCatalogCustomization struct { // categories which are shown in the developer catalog. - // +kubebuilder:validation:Optional // +optional Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"` // types allows enabling or disabling of sub-catalog types that user can see in the Developer catalog. @@ -270,23 +263,20 @@ type DeveloperConsoleCatalogCustomization struct { // DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. type DeveloperConsoleCatalogCategoryMeta struct { - // ID is an identifier used in the URL to enable deep linking in console. + // id is an identifier used in the URL to enable deep linking in console. // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. - // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=32 // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` // +required ID string `json:"id"` // label defines a category display label. It is required and must have 1-64 characters. 
- // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=64 // +required Label string `json:"label"` // tags is a list of strings that will match the category. A selected category // show all items which has at least one overlapping tag between category and item. - // +kubebuilder:validation:Optional // +optional Tags []string `json:"tags,omitempty"` } @@ -296,7 +286,6 @@ type DeveloperConsoleCatalogCategory struct { // defines top level category ID, label and filter tags. DeveloperConsoleCatalogCategoryMeta `json:",inline"` // subcategories defines a list of child categories. - // +kubebuilder:validation:Optional // +optional Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"` } @@ -304,7 +293,6 @@ type DeveloperConsoleCatalogCategory struct { // QuickStarts allow cluster admins to customize available ConsoleQuickStart resources. type QuickStarts struct { // disabled is a list of ConsoleQuickStart resource names that are not shown to users. - // +kubebuilder:validation:Optional // +optional Disabled []string `json:"disabled,omitempty"` } @@ -313,7 +301,6 @@ type QuickStarts struct { type AddPage struct { // disabledActions is a list of actions that are not shown to users. // Each action in the list is represented by its ID. - // +kubebuilder:validation:Optional // +kubebuilder:validation:MinItems=1 // +optional DisabledActions []string `json:"disabledActions,omitempty"` @@ -350,7 +337,7 @@ type PerspectiveVisibility struct { // state defines the perspective is enabled or disabled or access review check is required. // +unionDiscriminator // +kubebuilder:validation:Enum:="Enabled";"Disabled";"AccessReview" - // +kubebuilder:validation:Required + // +required State PerspectiveState `json:"state"` // accessReview defines required and missing access review checks. // +optional @@ -365,10 +352,10 @@ type Perspective struct { // Example: "dev", "admin". 
// The available perspective ids can be found in the code snippet section next to the yaml editor. // Incorrect or unknown ids will be ignored. - // +kubebuilder:validation:Required + // +required ID string `json:"id"` // visibility defines the state of perspective along with access review checks if needed for that perspective. - // +kubebuilder:validation:Required + // +required Visibility PerspectiveVisibility `json:"visibility"` // pinnedResources defines the list of default pinned resources that users will see on the perspective navigation if they have not customized these pinned resources themselves. // The list of available Kubernetes resources could be read via `kubectl api-resources`. @@ -386,20 +373,20 @@ type PinnedResourceReference struct { // This value should consist of only lowercase alphanumeric characters, hyphens and periods. // Example: "", "apps", "build.openshift.io", etc. // +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" - // +kubebuilder:validation:Required + // +required Group string `json:"group"` // version is the API Version of the Resource. // This value should consist of only lowercase alphanumeric characters. // Example: "v1", "v1beta1", etc. // +kubebuilder:validation:Pattern:="^[a-z0-9]+$" - // +kubebuilder:validation:Required + // +required Version string `json:"version"` // resource is the type that is being referenced. // It is normally the plural form of the resource kind in lowercase. // This value should consist of only lowercase alphanumeric characters and hyphens. // Example: "deployments", "deploymentconfigs", "pods", etc. 
// +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - // +kubebuilder:validation:Required + // +required Resource string `json:"resource"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 0644b6a93c..731323750a 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -20,7 +20,7 @@ import ( // +kubebuilder:resource:path=clustercsidrivers,scope=Cluster // +kubebuilder:subresource:status // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/701 -// +openshift:file-pattern=cvoRunLevel=0000_90,operatorName=csi-driver,operatorOrdering=01 +// +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=csi-driver,operatorOrdering=01 // ClusterCSIDriver object allows management and configuration of a CSI driver operator // installed by default in OpenShift. Name of the object must be name of the CSI driver @@ -36,7 +36,6 @@ type ClusterCSIDriver struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ClusterCSIDriverSpec `json:"spec"` @@ -71,7 +70,7 @@ const ( RemovedStorageClass StorageClassStateName = "Removed" ) -// If you are adding a new driver name here, ensure that 0000_90_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. +// If you are adding a new driver name here, ensure that 0000_50_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. 
const ( AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com" @@ -95,7 +94,7 @@ const ( // ClusterCSIDriverSpec is the desired behavior of CSI driver operator type ClusterCSIDriverSpec struct { OperatorSpec `json:",inline"` - // StorageClassState determines if CSI operator should create and manage storage classes. + // storageClassState determines if CSI operator should create and manage storage classes. // If this field value is empty or Managed - CSI operator will continuously reconcile // storage class and create if necessary. // If this field value is Unmanaged - CSI operator will not reconcile any previously created @@ -135,7 +134,7 @@ type CSIDriverConfigSpec struct { // driverConfig is being applied to. // Valid values are: AWS, Azure, GCP, IBMCloud, vSphere and omitted. // Consumers should treat unknown values as a NO-OP. - // +kubebuilder:validation:Required + // +required // +unionDiscriminator DriverType CSIDriverType `json:"driverType"` @@ -155,7 +154,7 @@ type CSIDriverConfigSpec struct { // +optional IBMCloud *IBMCloudCSIDriverConfigSpec `json:"ibmcloud,omitempty"` - // vsphere is used to configure the vsphere CSI driver. + // vSphere is used to configure the vsphere CSI driver. // +optional VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` } @@ -198,7 +197,7 @@ type AWSEFSVolumeMetrics struct { // RecursiveWalk means the AWS EFS CSI Driver will recursively scan volumes to collect metrics. // This process may result in high CPU and memory usage, depending on the volume size. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required State AWSEFSVolumeMetricsState `json:"state"` // recursiveWalk provides additional configuration for collecting volume metrics in the AWS EFS CSI Driver @@ -240,7 +239,7 @@ type AzureDiskEncryptionSet struct { // 5. The second, third, and fourth groups should be 4 characters long. // 6. The fifth group should be 12 characters long. 
// An Example SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378 - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=36 // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$` SubscriptionID string `json:"subscriptionID"` @@ -250,7 +249,7 @@ type AzureDiskEncryptionSet struct { // underscores (_), parentheses, hyphens and periods. // The value should not end in a period and be at most 90 characters in // length. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=90 // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$` ResourceGroup string `json:"resourceGroup"` @@ -258,7 +257,7 @@ type AzureDiskEncryptionSet struct { // name is the name of the disk encryption set that will be set on the default storage class. // The value should consist of only alphanumberic characters, // underscores (_), hyphens, and be at most 80 characters in length. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=80 // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` Name string `json:"name"` @@ -281,7 +280,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` // +kubebuilder:validation:MinLength:=1 // +kubebuilder:validation:MaxLength:=63 - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. @@ -291,7 +290,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` // +kubebuilder:validation:MinLength:=1 // +kubebuilder:validation:MaxLength:=63 - // +kubebuilder:validation:Required + // +required KeyRing string `json:"keyRing"` // projectID is the ID of the Project in which the KMS Key Ring exists. 
@@ -300,7 +299,7 @@ type GCPKMSKeyReference struct { // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$` // +kubebuilder:validation:MinLength:=6 // +kubebuilder:validation:MaxLength:=30 - // +kubebuilder:validation:Required + // +required ProjectID string `json:"projectID"` // location is the GCP location in which the Key Ring exists. @@ -323,7 +322,7 @@ type GCPCSIDriverConfigSpec struct { type IBMCloudCSIDriverConfigSpec struct { // encryptionKeyCRN is the IBM Cloud CRN of the customer-managed root key to use // for disk encryption of volumes for the default storage classes. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength:=154 // +kubebuilder:validation:MinLength:=144 // +kubebuilder:validation:Pattern:=`^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$` diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go index f96384819c..d6d283d365 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -25,7 +25,6 @@ type CSISnapshotController struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec CSISnapshotControllerSpec `json:"spec"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index 3d7cbb6c00..2588047868 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -226,7 +226,7 @@ type DNSOverTLSConfig struct { // // + --- // + Inspired by the DNS1123 patterns in Kubernetes: 
https://github.com/kubernetes/kubernetes/blob/7c46f40bdf89a437ecdbc01df45e235b5f6d9745/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L218 - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` ServerName string `json:"serverName"` @@ -319,7 +319,7 @@ type ForwardPlugin struct { // * At least one upstream should be specified. // * the default policy is Sequential type UpstreamResolvers struct { - // Upstreams is a list of resolvers to forward name queries for the "." domain. + // upstreams is a list of resolvers to forward name queries for the "." domain. // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream // returns an error during the exchange, another resolver is tried from Upstreams. The // Upstreams are selected in the order specified in Policy. @@ -332,7 +332,7 @@ type UpstreamResolvers struct { // +kubebuilder:default={{"type":"SystemResolvConf"}} Upstreams []Upstream `json:"upstreams"` - // Policy is used to determine the order in which upstream servers are selected for querying. + // policy is used to determine the order in which upstream servers are selected for querying. // Any one of the following values may be specified: // // * "Random" picks a random upstream server for each query. @@ -381,31 +381,28 @@ type UpstreamResolvers struct { // with an IP address or IP:port if the upstream listens on a port other than 53. type Upstream struct { - // Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. + // type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. // Type accepts 2 possible values: SystemResolvConf or Network. 
// // * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined: // /etc/resolv.conf will be used // * When Network is used, the Upstream structure must contain at least an Address // - // +kubebuilder:validation:Required // +required Type UpstreamType `json:"type"` - // Address must be defined when Type is set to Network. It will be ignored otherwise. + // address must be defined when Type is set to Network. It will be ignored otherwise. // It must be a valid ipv4 or ipv6 address. // // +optional - // +kubebuilder:validation:Optional Address string `json:"address,omitempty"` - // Port may be defined when Type is set to Network. It will be ignored otherwise. + // port may be defined when Type is set to Network. It will be ignored otherwise. // Port must be between 65535 // // +optional // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 - // +kubebuilder:validation:Optional // +kubebuilder:default=53 Port uint32 `json:"port,omitempty"` } @@ -483,7 +480,6 @@ type DNSStatus struct { // // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // - // +kubebuilder:validation:Required // +required ClusterIP string `json:"clusterIP"` @@ -494,7 +490,6 @@ type DNSStatus struct { // // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service // - // +kubebuilder:validation:Required // +required ClusterDomain string `json:"clusterDomain"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index 71345d7d78..375ec5fb7f 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -24,7 +24,6 @@ type Etcd struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // 
+kubebuilder:validation:Required // +required Spec EtcdSpec `json:"spec"` // +optional @@ -39,7 +38,6 @@ type EtcdSpec struct { // Valid values are "", "Standard" and "Slower". // "" means no opinion and the platform is left to choose a reasonable default // which is subject to change without notice. - // +kubebuilder:validation:Optional // +openshift:enable:FeatureGate=HardwareSpeed // +optional HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` @@ -93,6 +91,6 @@ type EtcdList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []Etcd `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 1f56643456..a8ea2d695b 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -258,6 +258,75 @@ type IngressControllerSpec struct { // // +optional HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"` + + // idleConnectionTerminationPolicy maps directly to HAProxy's + // idle-close-on-response option and controls whether HAProxy + // keeps idle frontend connections open during a soft stop + // (router reload). + // + // Allowed values for this field are "Immediate" and + // "Deferred". The default value is "Immediate". + // + // When set to "Immediate", idle connections are closed + // immediately during router reloads. This ensures immediate + // propagation of route changes but may impact clients + // sensitive to connection resets. + // + // When set to "Deferred", HAProxy will maintain idle + // connections during a soft reload instead of closing them + // immediately. 
These connections remain open until any of the + // following occurs: + // + // - A new request is received on the connection, in which + // case HAProxy handles it in the old process and closes + // the connection after sending the response. + // + // - HAProxy's `timeout http-keep-alive` duration expires + // (300 seconds in OpenShift's configuration, not + // configurable). + // + // - The client's keep-alive timeout expires, causing the + // client to close the connection. + // + // Setting Deferred can help prevent errors in clients or load + // balancers that do not properly handle connection resets. + // Additionally, this option allows you to retain the pre-2.4 + // HAProxy behaviour: in HAProxy version 2.2 (OpenShift + // versions < 4.14), maintaining idle connections during a + // soft reload was the default behaviour, but starting with + // HAProxy 2.4, the default changed to closing idle + // connections immediately. + // + // Important Consideration: + // + // - Using Deferred will result in temporary inconsistencies + // for the first request on each persistent connection + // after a route update and router reload. This request + // will be processed by the old HAProxy process using its + // old configuration. Subsequent requests will use the + // updated configuration. + // + // Operational Considerations: + // + // - Keeping idle connections open during reloads may lead + // to an accumulation of old HAProxy processes if + // connections remain idle for extended periods, + // especially in environments where frequent reloads + // occur. + // + // - Consider monitoring the number of HAProxy processes in + // the router pods when Deferred is set. + // + // - You may need to enable or adjust the + // `ingress.operator.openshift.io/hard-stop-after` + // duration (configured via an annotation on the + // IngressController resource) in environments with + // frequent reloads to prevent resource exhaustion. 
+ // + // +optional + // +kubebuilder:default:="Immediate" + // +default="Immediate" + IdleConnectionTerminationPolicy IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` } // httpCompressionPolicy turns on compression for the specified MIME types. @@ -397,7 +466,6 @@ type LoadBalancerStrategy struct { // scope indicates the scope at which the load balancer is exposed. // Possible values are "External" and "Internal". // - // +kubebuilder:validation:Required // +required Scope LoadBalancerScope `json:"scope"` @@ -434,7 +502,7 @@ type LoadBalancerStrategy struct { // Valid values are: Managed and Unmanaged. // // +kubebuilder:default:="Managed" - // +kubebuilder:validation:Required + // +required // +default="Managed" DNSManagementPolicy LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` } @@ -464,7 +532,6 @@ type ProviderLoadBalancerParameters struct { // "OpenStack", and "VSphere". // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type LoadBalancerProviderType `json:"type"` @@ -544,7 +611,6 @@ type AWSLoadBalancerParameters struct { // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type AWSLoadBalancerType `json:"type"` @@ -713,7 +779,6 @@ type AWSClassicLoadBalancerParameters struct { // means no opinion, in which case a default value is used. The default // value for this field is 60s. This default is subject to change. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ConnectionIdleTimeout metav1.Duration `json:"connectionIdleTimeout,omitempty"` @@ -828,7 +893,6 @@ type HostNetworkStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. Note that the default is subject to change. 
// - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` @@ -836,7 +900,6 @@ type HostNetworkStrategy struct { // HTTP requests. This field should be set when port 80 is already in use. // The value should not coincide with the NodePort range of the cluster. // When the value is 0 or is not specified it defaults to 80. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=80 @@ -847,7 +910,6 @@ type HostNetworkStrategy struct { // HTTPS requests. This field should be set when port 443 is already in use. // The value should not coincide with the NodePort range of the cluster. // When the value is 0 or is not specified it defaults to 443. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=443 @@ -868,7 +930,6 @@ type HostNetworkStrategy struct { // a threshold of two successful or failed requests to become healthy or // unhealthy respectively, are well-tested values. When the value is 0 or // is not specified it defaults to 1936. - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=65535 // +kubebuilder:validation:Minimum=0 // +kubebuilder:default=1936 @@ -904,7 +965,6 @@ type PrivateStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. Note that the default is subject to change. // - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` } @@ -936,7 +996,6 @@ type NodePortStrategy struct { // The empty string specifies the default, which is TCP without PROXY // protocol. Note that the default is subject to change. 
// - // +kubebuilder:validation:Optional // +optional Protocol IngressControllerProtocol `json:"protocol,omitempty"` } @@ -1004,7 +1063,6 @@ type EndpointPublishingStrategy struct { // field of the managed NodePort Service will preserved. // // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type EndpointPublishingStrategyType `json:"type"` @@ -1054,7 +1112,6 @@ type ClientTLS struct { // edge-terminated and reencrypt TLS routes; it cannot check // certificates for cleartext HTTP or passthrough TLS routes. // - // +kubebuilder:validation:Required // +required ClientCertificatePolicy ClientCertificatePolicy `json:"clientCertificatePolicy"` @@ -1063,7 +1120,6 @@ type ClientTLS struct { // certificate. The administrator must create this configmap in the // openshift-config namespace. // - // +kubebuilder:validation:Required // +required ClientCA configv1.ConfigMapNameReference `json:"clientCA"` @@ -1167,14 +1223,12 @@ type SyslogLoggingDestinationParameters struct { // address is the IP address of the syslog endpoint that receives log // messages. // - // +kubebuilder:validation:Required // +required Address string `json:"address"` // port is the UDP port number of the syslog endpoint that receives log // messages. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=65535 // +required @@ -1184,7 +1238,6 @@ type SyslogLoggingDestinationParameters struct { // // If this field is empty, the facility is "local1". // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 // +optional Facility string `json:"facility,omitempty"` @@ -1244,7 +1297,6 @@ type LoggingDestination struct { // that the administrator has configured a custom syslog instance. 
// // +unionDiscriminator - // +kubebuilder:validation:Required // +required Type LoggingDestinationType `json:"type"` @@ -1267,7 +1319,6 @@ type IngressControllerCaptureHTTPHeader struct { // name specifies a header name. Its value must be a valid HTTP header // name as defined in RFC 2616 section 4.2. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" // +required Name string `json:"name"` @@ -1277,7 +1328,6 @@ type IngressControllerCaptureHTTPHeader struct { // log message. Note that the ingress controller may impose a separate // bound on the total length of HTTP headers in a request. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +required MaxLength int `json:"maxLength"` @@ -1331,7 +1381,6 @@ type IngressControllerCaptureHTTPCookie struct { // controller may impose a separate bound on the total length of HTTP // headers in a request. // - // +kubebuilder:validation:Required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=1024 // +required @@ -1351,7 +1400,6 @@ type IngressControllerCaptureHTTPCookieUnion struct { // matching cookie is captured. // // +unionDiscriminator - // +kubebuilder:validation:Required // +required MatchType CookieMatchType `json:"matchType,omitempty"` @@ -1389,7 +1437,6 @@ const ( type AccessLogging struct { // destination is where access logs go. // - // +kubebuilder:validation:Required // +required Destination LoggingDestination `json:"destination"` @@ -1646,7 +1693,7 @@ type IngressControllerHTTPHeader struct { // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -1657,7 +1704,7 @@ type IngressControllerHTTPHeader struct { // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" Name string `json:"name"` // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required Action IngressControllerHTTPHeaderActionUnion `json:"action"` } @@ -1671,7 +1718,7 @@ type IngressControllerHTTPHeaderActionUnion struct { // Delete allows you to delete HTTP request and response headers. // +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required Type IngressControllerHTTPHeaderActionType `json:"type"` // set specifies how the HTTP header should be set. @@ -1704,7 +1751,7 @@ type IngressControllerSetHTTPHeader struct { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 Value string `json:"value"` @@ -1725,7 +1772,6 @@ type IngressControllerTuningOptions struct { // headerBufferBytes values that are too large could cause the // IngressController to use significantly more memory than necessary. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=16384 // +optional HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` @@ -1745,7 +1791,6 @@ type IngressControllerTuningOptions struct { // large could cause the IngressController to use significantly more memory // than necessary. 
// - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=4096 // +optional HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` @@ -1763,7 +1808,6 @@ type IngressControllerTuningOptions struct { // Reducing the number of threads may cause the ingress controller to // perform poorly. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=64 // +optional @@ -1773,7 +1817,6 @@ type IngressControllerTuningOptions struct { // waiting for a client response. // // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ClientTimeout *metav1.Duration `json:"clientTimeout,omitempty"` @@ -1783,7 +1826,6 @@ type IngressControllerTuningOptions struct { // connection. // // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ClientFinTimeout *metav1.Duration `json:"clientFinTimeout,omitempty"` @@ -1792,7 +1834,6 @@ type IngressControllerTuningOptions struct { // waiting for a server/backend response. // // If unset, the default timeout is 30s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ServerTimeout *metav1.Duration `json:"serverTimeout,omitempty"` @@ -1802,7 +1843,6 @@ type IngressControllerTuningOptions struct { // connection. // // If unset, the default timeout is 1s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional ServerFinTimeout *metav1.Duration `json:"serverFinTimeout,omitempty"` @@ -1811,12 +1851,11 @@ type IngressControllerTuningOptions struct { // websockets) will be held open while the tunnel is idle. 
// // If unset, the default timeout is 1h - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional TunnelTimeout *metav1.Duration `json:"tunnelTimeout,omitempty"` - // ConnectTimeout defines the maximum time to wait for + // connectTimeout defines the maximum time to wait for // a connection attempt to a server/backend to succeed. // // This field expects an unsigned duration string of decimal numbers, each with optional @@ -1827,7 +1866,6 @@ type IngressControllerTuningOptions struct { // to choose a reasonable default. This default is subject to change over time. // The current default is 5s. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -1841,7 +1879,6 @@ type IngressControllerTuningOptions struct { // matching certificate could be used. // // If unset, the default inspect delay is 5s - // +kubebuilder:validation:Optional // +kubebuilder:validation:Format=duration // +optional TLSInspectDelay *metav1.Duration `json:"tlsInspectDelay,omitempty"` @@ -1867,7 +1904,6 @@ type IngressControllerTuningOptions struct { // Currently the minimum allowed value is 1s and the maximum allowed value is // 2147483647ms (24.85 days). Both are subject to change over time. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -1914,7 +1950,6 @@ type IngressControllerTuningOptions struct { // processes in router containers with the following metric: // 'container_memory_working_set_bytes{container="router",namespace="openshift-ingress"}/container_processes{container="router",namespace="openshift-ingress"}'. 
// - // +kubebuilder:validation:Optional // +optional MaxConnections int32 `json:"maxConnections,omitempty"` @@ -1945,7 +1980,6 @@ type IngressControllerTuningOptions struct { // be reloaded less frequently, and newly created routes will not be served until the // subsequent reload. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Pattern=^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ // +kubebuilder:validation:Type:=string // +optional @@ -2068,3 +2102,23 @@ type IngressControllerList struct { Items []IngressController `json:"items"` } + +// IngressControllerConnectionTerminationPolicy defines the behaviour +// for handling idle connections during a soft reload of the router. +// +// +kubebuilder:validation:Enum=Immediate;Deferred +type IngressControllerConnectionTerminationPolicy string + +const ( + // IngressControllerConnectionTerminationPolicyImmediate specifies + // that idle connections should be closed immediately during a + // router reload. + IngressControllerConnectionTerminationPolicyImmediate IngressControllerConnectionTerminationPolicy = "Immediate" + + // IngressControllerConnectionTerminationPolicyDeferred + // specifies that idle connections should remain open until a + // terminating event, such as a new request, the expiration of + // the proxy keep-alive timeout, or the client closing the + // connection. + IngressControllerConnectionTerminationPolicyDeferred IngressControllerConnectionTerminationPolicy = "Deferred" +) diff --git a/vendor/github.com/openshift/api/operator/v1/types_insights.go b/vendor/github.com/openshift/api/operator/v1/types_insights.go index 56e2b51c14..ed59bb438b 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_insights.go +++ b/vendor/github.com/openshift/api/operator/v1/types_insights.go @@ -25,7 +25,7 @@ type InsightsOperator struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Insights. 
- // +kubebuilder:validation:Required + // +required Spec InsightsOperatorSpec `json:"spec"` // status is the most recently observed status of the Insights operator. @@ -58,7 +58,7 @@ type GatherStatus struct { // lastGatherDuration is the total time taken to process // all gatherers during the last gather event. // +optional - // +kubebuilder:validation:Pattern="^0|([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Pattern="^(0|([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$" // +kubebuilder:validation:Type=string LastGatherDuration metav1.Duration `json:"lastGatherDuration,omitempty"` // gatherers is a list of active gatherers (and their statuses) in the last gathering. @@ -85,25 +85,25 @@ type InsightsReport struct { // healthCheck represents an Insights health check attributes. type HealthCheck struct { // description provides basic description of the healtcheck. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:MinLength=10 Description string `json:"description"` // totalRisk of the healthcheck. Indicator of the total risk posed // by the detected issue; combination of impact and likelihood. The values can be from 1 to 4, // and the higher the number, the more important the issue. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=4 TotalRisk int32 `json:"totalRisk"` // advisorURI provides the URL link to the Insights Advisor. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern=`^https:\/\/\S+` AdvisorURI string `json:"advisorURI"` // state determines what the current state of the health check is. // Health check is enabled by default and can be disabled // by the user in the Insights advisor user interface. 
- // +kubebuilder:validation:Required + // +required State HealthCheckState `json:"state"` } @@ -124,18 +124,18 @@ const ( type GathererStatus struct { // conditions provide details on the status of each gatherer. // +listType=atomic - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 Conditions []metav1.Condition `json:"conditions"` // name is the name of the gatherer. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MaxLength=256 // +kubebuilder:validation:MinLength=5 Name string `json:"name"` // lastGatherDuration represents the time spent gathering. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Type=string - // +kubebuilder:validation:Pattern="^([1-9][0-9]*(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Pattern="^(([0-9]+(?:\\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$" LastGatherDuration metav1.Duration `json:"lastGatherDuration"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index 5c9d43a2a2..ce00b4b62c 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -26,7 +26,6 @@ type KubeAPIServer struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes API Server - // +kubebuilder:validation:Required // +required Spec KubeAPIServerSpec `json:"spec"` @@ -78,6 +77,6 @@ type KubeAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go 
b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go index 93ab209a0d..ee104aa506 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -25,7 +25,6 @@ type KubeControllerManager struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Controller Manager - // +kubebuilder:validation:Required // +required Spec KubeControllerManagerSpec `json:"spec"` @@ -63,6 +62,6 @@ type KubeControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go index 470dc5097d..f3add49101 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -24,7 +24,6 @@ type KubeStorageVersionMigrator struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec KubeStorageVersionMigratorSpec `json:"spec"` // +optional @@ -52,6 +51,6 @@ type KubeStorageVersionMigratorList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeStorageVersionMigrator `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go 
b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 8bd41eb69d..88b89f8188 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -25,7 +25,7 @@ type MachineConfiguration struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Machine Config Operator - // +kubebuilder:validation:Required + // +required Spec MachineConfigurationSpec `json:"spec"` // status is the most recently observed status of the Machine Config Operator @@ -111,7 +111,7 @@ type MachineConfigurationList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []MachineConfiguration `json:"items"` } @@ -131,17 +131,17 @@ type MachineManager struct { // resource is the machine management resource's type. // The only current valid value is machinesets. // machinesets means that the machine manager will only register resources of the kind MachineSet. - // +kubebuilder:validation:Required + // +required Resource MachineManagerMachineSetsResourceType `json:"resource"` // apiGroup is name of the APIGroup that the machine management resource belongs to. // The only current valid value is machine.openshift.io. // machine.openshift.io means that the machine manager will only register resources that belong to OpenShift machine API group. - // +kubebuilder:validation:Required + // +required APIGroup MachineManagerMachineSetsAPIGroupType `json:"apiGroup"` // selection allows granular control of the machine management resources that will be registered for boot image updates. 
- // +kubebuilder:validation:Required + // +required Selection MachineManagerSelector `json:"selection"` } @@ -153,7 +153,7 @@ type MachineManagerSelector struct { // All means that every resource matched by the machine manager will be updated. // Partial requires specified selector(s) and allows customisation of which resources matched by the machine manager will be updated. // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Mode MachineManagerSelectorMode `json:"mode"` // partial provides label selector(s) that can be used to match machine management resources. @@ -165,7 +165,7 @@ type MachineManagerSelector struct { // PartialSelector provides label selector(s) that can be used to match machine management resources. type PartialSelector struct { // machineResourceSelector is a label selector that can be used to select machine resources like MachineSets. - // +kubebuilder:validation:Required + // +required MachineResourceSelector *metav1.LabelSelector `json:"machineResourceSelector,omitempty"` } @@ -255,7 +255,7 @@ type NodeDisruptionPolicyClusterStatus struct { type NodeDisruptionPolicySpecFile struct { // path is the location of a file being managed through a MachineConfig. // The Actions in the policy will apply to changes to the file at this path. - // +kubebuilder:validation:Required + // +required Path string `json:"path"` // actions represents the series of commands to be executed on changes to the file at // the corresponding file path. Actions will be applied in the order that @@ -264,7 +264,7 @@ type NodeDisruptionPolicySpecFile struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. 
- // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -276,7 +276,7 @@ type NodeDisruptionPolicySpecFile struct { type NodeDisruptionPolicyStatusFile struct { // path is the location of a file being managed through a MachineConfig. // The Actions in the policy will apply to changes to the file at this path. - // +kubebuilder:validation:Required + // +required Path string `json:"path"` // actions represents the series of commands to be executed on changes to the file at // the corresponding file path. Actions will be applied in the order that @@ -285,7 +285,7 @@ type NodeDisruptionPolicyStatusFile struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -300,7 +300,7 @@ type NodeDisruptionPolicySpecUnit struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". 
- // +kubebuilder:validation:Required + // +required Name NodeDisruptionPolicyServiceName `json:"name"` // actions represents the series of commands to be executed on changes to the file at @@ -310,7 +310,7 @@ type NodeDisruptionPolicySpecUnit struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -325,7 +325,7 @@ type NodeDisruptionPolicyStatusUnit struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required Name NodeDisruptionPolicyServiceName `json:"name"` // actions represents the series of commands to be executed on changes to the file at @@ -335,7 +335,7 @@ type NodeDisruptionPolicyStatusUnit struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? 
size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -352,7 +352,7 @@ type NodeDisruptionPolicySpecSSHKey struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -369,7 +369,7 @@ type NodeDisruptionPolicyStatusSSHKey struct { // Valid actions are Reboot, Drain, Reload, DaemonReload and None. // The Reboot action and the None action cannot be used in conjunction with any of the other actions. // This list supports a maximum of 10 entries. - // +kubebuilder:validation:Required + // +required // +listType=atomic // +kubebuilder:validation:MaxItems=10 // +kubebuilder:validation:XValidation:rule="self.exists(x, x.type=='Reboot') ? size(self) == 1 : true", message="Reboot action can only be specified standalone, as it will override any other actions" @@ -386,7 +386,7 @@ type NodeDisruptionPolicySpecAction struct { // reload/restart requires a corresponding service target specified in the reload/restart field. // Other values require no further configuration // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NodeDisruptionPolicySpecActionType `json:"type"` // reload specifies the service to reload, only valid if type is reload // +optional @@ -405,7 +405,7 @@ type NodeDisruptionPolicyStatusAction struct { // reload/restart requires a corresponding service target specified in the reload/restart field. 
// Other values require no further configuration // +unionDiscriminator - // +kubebuilder:validation:Required + // +required Type NodeDisruptionPolicyStatusActionType `json:"type"` // reload specifies the service to reload, only valid if type is reload // +optional @@ -421,7 +421,7 @@ type ReloadService struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"` } @@ -431,7 +431,7 @@ type RestartService struct { // Service names should be of the format ${NAME}${SERVICETYPE} and can up to 255 characters long. // ${NAME} must be atleast 1 character long and can only consist of alphabets, digits, ":", "-", "_", ".", and "\". // ${SERVICETYPE} must be one of ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" or ".scope". - // +kubebuilder:validation:Required + // +required ServiceName NodeDisruptionPolicyServiceName `json:"serviceName"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 9b1588bc25..b4b0a6d6d6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -54,7 +54,7 @@ type NetworkList struct { // NetworkSpec is the top-level network configuration object. 
// +kubebuilder:validation:XValidation:rule="!has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding) || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == oldSelf.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Restricted' || self.defaultNetwork.ovnKubernetesConfig.gatewayConfig.ipForwarding == 'Global'",message="invalid value for IPForwarding, valid values are 'Restricted' or 'Global'" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=AdditionalRoutingCapabilities,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=RouteAdvertisements,rule="(has(self.additionalRoutingCapabilities) && ('FRR' in self.additionalRoutingCapabilities.providers)) || !has(self.defaultNetwork) || !has(self.defaultNetwork.ovnKubernetesConfig) || !has(self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements) || self.defaultNetwork.ovnKubernetesConfig.routeAdvertisements != 'Enabled'",message="Route advertisements cannot be Enabled if 'FRR' routing capability provider is not available" type NetworkSpec struct { OperatorSpec `json:",inline"` @@ -250,7 +250,7 @@ type DefaultNetworkDefinition struct { // All NetworkTypes are supported except for NetworkTypeRaw Type NetworkType `json:"type"` - // openShiftSDNConfig was previously used to configure the openshift-sdn plugin. 
+ // openshiftSDNConfig was previously used to configure the openshift-sdn plugin. // DEPRECATED: OpenShift SDN is no longer supported. // +optional OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"` @@ -267,7 +267,7 @@ type SimpleMacvlanConfig struct { // +optional Master string `json:"master,omitempty"` - // IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). + // ipamConfig configures IPAM module will be used for IP Address Management (IPAM). // +optional IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"` @@ -284,19 +284,19 @@ type SimpleMacvlanConfig struct { // StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses type StaticIPAMAddresses struct { - // Address is the IP address in CIDR format + // address is the IP address in CIDR format // +optional Address string `json:"address"` - // Gateway is IP inside of subnet to designate as the gateway + // gateway is IP inside of subnet to designate as the gateway // +optional Gateway string `json:"gateway,omitempty"` } // StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes type StaticIPAMRoutes struct { - // Destination points the IP route destination + // destination points the IP route destination Destination string `json:"destination"` - // Gateway is the route's next-hop IP address + // gateway is the route's next-hop IP address // If unset, a default gateway is assumed (as determined by the CNI plugin). 
// +optional Gateway string `json:"gateway,omitempty"` @@ -304,14 +304,14 @@ type StaticIPAMRoutes struct { // StaticIPAMDNS provides DNS related information for static IPAM type StaticIPAMDNS struct { - // Nameservers points DNS servers for IP lookup + // nameservers points DNS servers for IP lookup // +optional // +listType=atomic Nameservers []string `json:"nameservers,omitempty"` - // Domain configures the domainname the local domain used for short hostname lookups + // domain configures the domainname the local domain used for short hostname lookups // +optional Domain string `json:"domain,omitempty"` - // Search configures priority ordered search domains for short hostname lookups + // search configures priority ordered search domains for short hostname lookups // +optional // +listType=atomic Search []string `json:"search,omitempty"` @@ -319,26 +319,26 @@ type StaticIPAMDNS struct { // StaticIPAMConfig contains configurations for static IPAM (IP Address Management) type StaticIPAMConfig struct { - // Addresses configures IP address for the interface + // addresses configures IP address for the interface // +optional // +listType=atomic Addresses []StaticIPAMAddresses `json:"addresses,omitempty"` - // Routes configures IP routes for the interface + // routes configures IP routes for the interface // +optional // +listType=atomic Routes []StaticIPAMRoutes `json:"routes,omitempty"` - // DNS configures DNS for the interface + // dns configures DNS for the interface // +optional DNS *StaticIPAMDNS `json:"dns,omitempty"` } // IPAMConfig contains configurations for IPAM (IP Address Management) type IPAMConfig struct { - // Type is the type of IPAM module will be used for IP Address Management(IPAM). + // type is the type of IPAM module will be used for IP Address Management(IPAM). 
// The supported values are IPAMTypeDHCP, IPAMTypeStatic Type IPAMType `json:"type"` - // StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic + // staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic // +optional StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"` } @@ -353,7 +353,7 @@ type AdditionalNetworkDefinition struct { // name is the name of the network. This will be populated in the resulting CRD // This must be unique. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace is the namespace of the network. This will be populated in the resulting CRD @@ -364,7 +364,7 @@ type AdditionalNetworkDefinition struct { // NetworkAttachmentDefinition CRD RawCNIConfig string `json:"rawCNIConfig,omitempty"` - // SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan + // simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan // +optional SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"` } @@ -410,7 +410,7 @@ type OVNKubernetesConfig struct { // +kubebuilder:validation:Minimum=1 // +optional GenevePort *uint32 `json:"genevePort,omitempty"` - // HybridOverlayConfig configures an additional overlay network for peers that are + // hybridOverlayConfig configures an additional overlay network for peers that are // not using OVN. // +optional HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"` @@ -540,16 +540,18 @@ type IPv6OVNKubernetesConfig struct { } type HybridOverlayConfig struct { - // HybridClusterNetwork defines a network space given to nodes on an additional overlay network. + // hybridClusterNetwork defines a network space given to nodes on an additional overlay network. 
// +listType=atomic HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"` - // HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. + // hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. // Default is 4789 // +optional HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"` } // +kubebuilder:validation:XValidation:rule="self == oldSelf || has(self.mode)",message="ipsecConfig.mode is required" +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Full' ? true : !has(self.full)",message="full is forbidden when mode is not Full" +// +union type IPsecConfig struct { // mode defines the behaviour of the ipsec configuration within the platform. // Valid values are `Disabled`, `External` and `Full`. @@ -561,7 +563,43 @@ type IPsecConfig struct { // this is left to the user to configure. // +kubebuilder:validation:Enum=Disabled;External;Full // +optional + // +unionDiscriminator Mode IPsecMode `json:"mode,omitempty"` + + // full defines configuration parameters for the IPsec `Full` mode. + // This is permitted only when mode is configured with `Full`, + // and forbidden otherwise. + // +unionMember,optional + // +optional + Full *IPsecFullModeConfig `json:"full,omitempty"` +} + +type Encapsulation string + +const ( + // EncapsulationAlways always enable UDP encapsulation regardless of whether NAT is detected. + EncapsulationAlways = "Always" + // EncapsulationNever never enable UDP encapsulation even if NAT is present. + EncapsulationNever = "Never" + // EncapsulationAuto enable UDP encapsulation based on the detection of NAT. + EncapsulationAuto = "Auto" +) + +// IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode. 
+// +kubebuilder:validation:MinProperties:=1 +type IPsecFullModeConfig struct { + // encapsulation option to configure libreswan on how inter-pod traffic across nodes + // are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + // for the encapsulation. + // Valid values are Always, Never, Auto and omitted. + // Always means enable UDP encapsulation regardless of whether NAT is detected. + // Disable means never enable UDP encapsulation even if NAT is present. + // Auto means enable UDP encapsulation based on the detection of NAT. + // When omitted, this means no opinion and the platform is left to choose a reasonable + // default, which is subject to change over time. The current default is Auto. + // +kubebuilder:validation:Enum:=Always;Never;Auto + // +optional + Encapsulation Encapsulation `json:"encapsulation,omitempty"` } type IPForwardingMode string @@ -577,14 +615,14 @@ const ( // GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides type GatewayConfig struct { - // RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + // routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port // into the host before sending it out. If this is not set, traffic will always egress directly // from OVN to outside without touching the host stack. Setting this to true means hardware // offload will not be supported. Default is false if GatewayConfig is specified. // +kubebuilder:default:=false // +optional RoutingViaHost bool `json:"routingViaHost,omitempty"` - // IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + // ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). // By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other // IP traffic will not be routed by the OCP node. 
If there is a desire to allow the host to forward traffic across // OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -862,7 +900,7 @@ type AdditionalRoutingCapabilities struct { // is currrently "FRR" which provides FRR routing capabilities through the // deployment of FRR. // +listType=atomic - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=1 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" diff --git a/vendor/github.com/openshift/api/operator/v1/types_olm.go b/vendor/github.com/openshift/api/operator/v1/types_olm.go new file mode 100644 index 0000000000..07c94ece2e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_olm.go @@ -0,0 +1,61 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLM provides information to configure an operator to manage the OLM controllers +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=olms,scope=Cluster +// +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 +// +openshift:enable:FeatureGate=NewOLM +// +openshift:capability=OperatorLifecycleManagerV1 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'" +type OLM struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + //spec holds user settable values for configuration + //+kubebuilder:validation:Required + Spec OLMSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OLMStatus `json:"status"` +} + +type OLMSpec struct { + OperatorSpec `json:",inline"` +} + +type OLMStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLMList is a collection of items +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OLMList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items contains the items + Items []OLM `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go index cd2c8a588f..a96e033cb7 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go @@ -25,7 +25,6 @@ type OpenShiftAPIServer struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the OpenShift API Server. - // +kubebuilder:validation:Required // +required Spec OpenShiftAPIServerSpec `json:"spec"` @@ -55,6 +54,6 @@ type OpenShiftAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OpenShiftAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go index 8e8929a903..8a553a0579 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -24,7 +24,6 @@ type OpenShiftControllerManager struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec OpenShiftControllerManagerSpec `json:"spec"` // +optional @@ -52,6 +51,6 @@ type OpenShiftControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata 
metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OpenShiftControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go index 448c458c19..cfb04e8d94 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go +++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go @@ -25,7 +25,6 @@ type KubeScheduler struct { metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Scheduler - // +kubebuilder:validation:Required // +required Spec KubeSchedulerSpec `json:"spec"` @@ -55,6 +54,6 @@ type KubeSchedulerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []KubeScheduler `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go index e4d8d1d7ad..48534d4c63 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go +++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go @@ -25,7 +25,6 @@ type ServiceCA struct { metav1.ObjectMeta `json:"metadata"` //spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ServiceCASpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
@@ -54,6 +53,6 @@ type ServiceCAList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCA `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go index 006b8bb99d..e058c065a6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -20,7 +20,6 @@ type ServiceCatalogAPIServer struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` - // +kubebuilder:validation:Required // +required Spec ServiceCatalogAPIServerSpec `json:"spec"` // +optional @@ -49,6 +48,6 @@ type ServiceCatalogAPIServerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCatalogAPIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go index 859965408b..4fe2aa46a3 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -20,7 +20,6 @@ type ServiceCatalogControllerManager struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // +kubebuilder:validation:Required // +required Spec ServiceCatalogControllerManagerSpec `json:"spec"` 
// +optional @@ -49,6 +48,6 @@ type ServiceCatalogControllerManagerList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []ServiceCatalogControllerManager `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go index aa48b0c84f..69691a83ad 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_storage.go +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -25,7 +25,6 @@ type Storage struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec StorageSpec `json:"spec"` @@ -47,7 +46,7 @@ const ( type StorageSpec struct { OperatorSpec `json:",inline"` - // VSphereStorageDriver indicates the storage driver to use on VSphere clusters. + // vsphereStorageDriver indicates the storage driver to use on VSphere clusters. // Once this field is set to CSIWithMigrationDriver, it can not be changed. // If this is empty, the platform will choose a good default, // which may change over time without notice. 
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml similarity index 96% rename from vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml index 5cb724f282..ec93248567 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml @@ -4,9 +4,8 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/1504 api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" + capability.openshift.io/name: OperatorLifecycleManagerV1 include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/feature-set: DevPreviewNoUpgrade name: olms.operator.openshift.io spec: group: operator.openshift.io @@ -17,13 +16,13 @@ spec: singular: olm scope: Cluster versions: - - name: v1alpha1 + - name: v1 schema: openAPIV3Schema: description: |- OLM provides information to configure an operator to manage the OLM controllers - Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
properties: apiVersion: description: |- @@ -204,7 +203,6 @@ spec: type: string type: object required: - - metadata - spec type: object x-kubernetes-validations: diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-CustomNoUpgrade.crd.yaml index b68cce4db3..294921e476 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-CustomNoUpgrade.crd.yaml @@ -256,6 +256,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. @@ -296,10 +299,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-Default.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-Default.crd.yaml index ebe2486ef5..7a41ad8bfa 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-Default.crd.yaml +++ 
b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-Default.crd.yaml @@ -243,6 +243,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. @@ -283,10 +286,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml index bc49df765d..71376efe1c 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml @@ -256,6 +256,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. 
@@ -296,10 +299,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-TechPreviewNoUpgrade.crd.yaml index 8449c20a6c..53ee94c57c 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-TechPreviewNoUpgrade.crd.yaml @@ -256,6 +256,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. 
@@ -296,10 +299,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml index 435a8a81ee..189f127048 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_20_kube-apiserver_01_kubeapiservers.crd.yaml @@ -225,6 +225,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. 
@@ -265,10 +268,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-controller-manager_01_kubecontrollermanagers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-controller-manager_01_kubecontrollermanagers.crd.yaml index 7cd18e09b5..7b6b138b7f 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-controller-manager_01_kubecontrollermanagers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-controller-manager_01_kubecontrollermanagers.crd.yaml @@ -234,6 +234,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. 
@@ -274,10 +277,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-scheduler_01_kubeschedulers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-scheduler_01_kubeschedulers.crd.yaml index 9654facc4d..ced827d0a7 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-scheduler_01_kubeschedulers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_25_kube-scheduler_01_kubeschedulers.crd.yaml @@ -225,6 +225,9 @@ spec: successful deployment format: int32 type: integer + x-kubernetes-validations: + - message: must only increase + rule: self >= oldSelf lastFailedCount: description: lastFailedCount is how often the installer pod of the last failed revision failed. 
@@ -265,10 +268,18 @@ spec: required: - nodeName type: object + x-kubernetes-validations: + - fieldPath: .currentRevision + message: cannot be unset once set + rule: has(self.currentRevision) || !has(oldSelf.currentRevision) type: array x-kubernetes-list-map-keys: - nodeName x-kubernetes-list-type: map + x-kubernetes-validations: + - message: no more than 1 node status may have a nonzero targetRevision + rule: size(self.filter(status, status.?targetRevision.orValue(0) + != 0)) <= 1 observedGeneration: description: observedGeneration is the last generation change you've dealt with diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_40_cloud-credential_00_cloudcredentials.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_40_cloud-credential_00_cloudcredentials.crd.yaml index bf6c616afc..1414aebd7d 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_40_cloud-credential_00_cloudcredentials.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_40_cloud-credential_00_cloudcredentials.crd.yaml @@ -48,7 +48,7 @@ spec: properties: credentialsMode: description: |- - CredentialsMode allows informing CCO that it should not attempt to dynamically + credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into "manual" mode if desired. 
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_authentication_01_authentications.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_authentication_01_authentications.crd.yaml index 3fab2ff1f5..029c91ac0f 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_authentication_01_authentications.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_authentication_01_authentications.crd.yaml @@ -185,11 +185,11 @@ spec: - message: must only increase rule: self >= oldSelf oauthAPIServer: - description: OAuthAPIServer holds status specific only to oauth-apiserver + description: oauthAPIServer holds status specific only to oauth-apiserver properties: latestAvailableRevision: description: |- - LatestAvailableRevision is the latest revision used as suffix of revisioned + latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods. format: int32 minimum: 0 diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml index 505332e4b5..6578035ed1 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_console_01_consoles.crd.yaml @@ -141,7 +141,7 @@ spec: SVG format preferred properties: key: - description: Key allows pointing to a specific key/value inside + description: key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. 
type: string name: @@ -164,7 +164,7 @@ spec: properties: id: description: |- - ID is an identifier used in the URL to enable deep linking in console. + id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. maxLength: 32 minLength: 1 @@ -184,7 +184,7 @@ spec: properties: id: description: |- - ID is an identifier used in the URL to enable deep linking in console. + id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. maxLength: 32 minLength: 1 diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_csi-driver_01_clustercsidrivers.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_csi-driver_01_clustercsidrivers.crd.yaml index 9a65a695ac..1db446757e 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_csi-driver_01_clustercsidrivers.crd.yaml @@ -253,7 +253,7 @@ spec: - encryptionKeyCRN type: object vSphere: - description: vsphere is used to configure the vsphere CSI driver. + description: vSphere is used to configure the vsphere CSI driver. properties: globalMaxSnapshotsPerBlockVolume: description: |- @@ -348,7 +348,7 @@ spec: type: string storageClassState: description: |- - StorageClassState determines if CSI operator should create and manage storage classes. + storageClassState determines if CSI operator should create and manage storage classes. 
If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml index 2524b5a84a..10ca42895c 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_ingress_00_ingresscontrollers.crd.yaml @@ -1255,6 +1255,76 @@ spec: type: string type: object type: object + idleConnectionTerminationPolicy: + default: Immediate + description: |- + idleConnectionTerminationPolicy maps directly to HAProxy's + idle-close-on-response option and controls whether HAProxy + keeps idle frontend connections open during a soft stop + (router reload). + + Allowed values for this field are "Immediate" and + "Deferred". The default value is "Immediate". + + When set to "Immediate", idle connections are closed + immediately during router reloads. This ensures immediate + propagation of route changes but may impact clients + sensitive to connection resets. + + When set to "Deferred", HAProxy will maintain idle + connections during a soft reload instead of closing them + immediately. These connections remain open until any of the + following occurs: + + - A new request is received on the connection, in which + case HAProxy handles it in the old process and closes + the connection after sending the response. + + - HAProxy's `timeout http-keep-alive` duration expires + (300 seconds in OpenShift's configuration, not + configurable). + + - The client's keep-alive timeout expires, causing the + client to close the connection. 
+ + Setting Deferred can help prevent errors in clients or load + balancers that do not properly handle connection resets. + Additionally, this option allows you to retain the pre-2.4 + HAProxy behaviour: in HAProxy version 2.2 (OpenShift + versions < 4.14), maintaining idle connections during a + soft reload was the default behaviour, but starting with + HAProxy 2.4, the default changed to closing idle + connections immediately. + + Important Consideration: + + - Using Deferred will result in temporary inconsistencies + for the first request on each persistent connection + after a route update and router reload. This request + will be processed by the old HAProxy process using its + old configuration. Subsequent requests will use the + updated configuration. + + Operational Considerations: + + - Keeping idle connections open during reloads may lead + to an accumulation of old HAProxy processes if + connections remain idle for extended periods, + especially in environments where frequent reloads + occur. + + - Consider monitoring the number of HAProxy processes in + the router pods when Deferred is set. + + - You may need to enable or adjust the + `ingress.operator.openshift.io/hard-stop-after` + duration (configured via an annotation on the + IngressController resource) in environments with + frequent reloads to prevent resource exhaustion. + enum: + - Immediate + - Deferred + type: string logging: description: |- logging defines parameters for what should be logged where. If this @@ -2089,7 +2159,7 @@ spec: type: string connectTimeout: description: |- - ConnectTimeout defines the maximum time to wait for + connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed. 
This field expects an unsigned duration string of decimal numbers, each with optional diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_insights_00_insightsoperators.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_insights_00_insightsoperators.crd.yaml index b7ce165e3f..0beb83b190 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_insights_00_insightsoperators.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_insights_00_insightsoperators.crd.yaml @@ -216,7 +216,7 @@ spec: lastGatherDuration: description: lastGatherDuration represents the time spent gathering. - pattern: ^([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + pattern: ^(([0-9]+(?:\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ type: string name: description: name is the name of the gatherer. @@ -234,7 +234,7 @@ spec: description: |- lastGatherDuration is the total time taken to process all gatherers during the last gather event. - pattern: ^0|([1-9][0-9]*(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + pattern: ^(0|([0-9]+(?:\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ type: string lastGatherTime: description: |- diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_storage_01_storages.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_storage_01_storages.crd.yaml index e0f8418814..2432d6c8bd 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_storage_01_storages.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_50_storage_01_storages.crd.yaml @@ -98,7 +98,7 @@ spec: x-kubernetes-preserve-unknown-fields: true vsphereStorageDriver: description: |- - VSphereStorageDriver indicates the storage driver to use on VSphere clusters. + vsphereStorageDriver indicates the storage driver to use on VSphere clusters. 
Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_dns_00_dnses.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_dns_00_dnses.crd.yaml index 7d2acd004f..946f6aaade 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_dns_00_dnses.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_dns_00_dnses.crd.yaml @@ -365,7 +365,7 @@ spec: policy: default: Sequential description: |- - Policy is used to determine the order in which upstream servers are selected for querying. + policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: * "Random" picks a random upstream server for each query. @@ -466,7 +466,7 @@ spec: default: - type: SystemResolvConf description: |- - Upstreams is a list of resolvers to forward name queries for the "." domain. + upstreams is a list of resolvers to forward name queries for the "." domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. @@ -505,13 +505,13 @@ spec: - format: ipv4 - format: ipv6 description: |- - Address must be defined when Type is set to Network. It will be ignored otherwise. + address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address. type: string port: default: 53 description: |- - Port may be defined when Type is set to Network. It will be ignored otherwise. + port may be defined when Type is set to Network. It will be ignored otherwise. 
Port must be between 65535 format: int32 maximum: 65535 @@ -519,7 +519,7 @@ spec: type: integer type: description: |- - Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. + type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network. * When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined: diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-CustomNoUpgrade.crd.yaml index 146c684056..2fbc8d62fc 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-CustomNoUpgrade.crd.yaml @@ -72,51 +72,51 @@ spec: NetworkAttachmentDefinition CRD type: string simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface + description: simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan properties: ipamConfig: - description: IPAMConfig configures IPAM module will be used + description: ipamConfig configures IPAM module will be used for IP Address Management (IPAM). 
properties: staticIPAMConfig: - description: StaticIPAMConfig configures the static + description: staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic properties: addresses: - description: Addresses configures IP address for + description: addresses configures IP address for the interface items: description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses properties: address: - description: Address is the IP address in + description: address is the IP address in CIDR format type: string gateway: - description: Gateway is IP inside of subnet + description: gateway is IP inside of subnet to designate as the gateway type: string type: object type: array x-kubernetes-list-type: atomic dns: - description: DNS configures DNS for the interface + description: dns configures DNS for the interface properties: domain: - description: Domain configures the domainname + description: domain configures the domainname the local domain used for short hostname lookups type: string nameservers: - description: Nameservers points DNS servers + description: nameservers points DNS servers for IP lookup items: type: string type: array x-kubernetes-list-type: atomic search: - description: Search configures priority ordered + description: search configures priority ordered search domains for short hostname lookups items: type: string @@ -124,19 +124,19 @@ spec: x-kubernetes-list-type: atomic type: object routes: - description: Routes configures IP routes for the + description: routes configures IP routes for the interface items: description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes properties: destination: - description: Destination points the IP route + description: destination points the IP route destination type: string gateway: description: |- - Gateway is the route's next-hop IP address + gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI 
plugin). type: string type: object @@ -145,7 +145,7 @@ spec: type: object type: description: |- - Type is the type of IPAM module will be used for IP Address Management(IPAM). + type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic type: string type: object @@ -238,7 +238,7 @@ spec: properties: openshiftSDNConfig: description: |- - openShiftSDNConfig was previously used to configure the openshift-sdn plugin. + openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported. properties: enableUnidling: @@ -297,7 +297,7 @@ spec: properties: ipForwarding: description: |- - IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -361,7 +361,7 @@ spec: routingViaHost: default: false description: |- - RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. @@ -376,11 +376,11 @@ spec: type: integer hybridOverlayConfig: description: |- - HybridOverlayConfig configures an additional overlay network for peers that are + hybridOverlayConfig configures an additional overlay network for peers that are not using OVN. 
properties: hybridClusterNetwork: - description: HybridClusterNetwork defines a network space + description: hybridClusterNetwork defines a network space given to nodes on an additional overlay network. items: description: |- @@ -400,7 +400,7 @@ spec: x-kubernetes-list-type: atomic hybridOverlayVXLANPort: description: |- - HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. + hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 format: int32 type: integer @@ -412,6 +412,30 @@ spec: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. properties: + full: + description: |- + full defines configuration parameters for the IPsec `Full` mode. + This is permitted only when mode is configured with `Full`, + and forbidden otherwise. + minProperties: 1 + properties: + encapsulation: + description: |- + encapsulation option to configure libreswan on how inter-pod traffic across nodes + are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + for the encapsulation. + Valid values are Always, Never, Auto and omitted. + Always means enable UDP encapsulation regardless of whether NAT is detected. + Disable means never enable UDP encapsulation even if NAT is present. + Auto means enable UDP encapsulation based on the detection of NAT. + When omitted, this means no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is Auto. + enum: + - Always + - Never + - Auto + type: string + type: object mode: description: |- mode defines the behaviour of the ipsec configuration within the platform. @@ -431,6 +455,9 @@ spec: x-kubernetes-validations: - message: ipsecConfig.mode is required rule: self == oldSelf || has(self.mode) + - message: full is forbidden when mode is not Full + rule: 'has(self.mode) && self.mode == ''Full'' ? 
true : + !has(self.full)' ipv4: description: |- ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-Default.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-Default.crd.yaml index 3f150defe7..513d6b7339 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-Default.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-Default.crd.yaml @@ -72,51 +72,51 @@ spec: NetworkAttachmentDefinition CRD type: string simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface + description: simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan properties: ipamConfig: - description: IPAMConfig configures IPAM module will be used + description: ipamConfig configures IPAM module will be used for IP Address Management (IPAM). 
properties: staticIPAMConfig: - description: StaticIPAMConfig configures the static + description: staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic properties: addresses: - description: Addresses configures IP address for + description: addresses configures IP address for the interface items: description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses properties: address: - description: Address is the IP address in + description: address is the IP address in CIDR format type: string gateway: - description: Gateway is IP inside of subnet + description: gateway is IP inside of subnet to designate as the gateway type: string type: object type: array x-kubernetes-list-type: atomic dns: - description: DNS configures DNS for the interface + description: dns configures DNS for the interface properties: domain: - description: Domain configures the domainname + description: domain configures the domainname the local domain used for short hostname lookups type: string nameservers: - description: Nameservers points DNS servers + description: nameservers points DNS servers for IP lookup items: type: string type: array x-kubernetes-list-type: atomic search: - description: Search configures priority ordered + description: search configures priority ordered search domains for short hostname lookups items: type: string @@ -124,19 +124,19 @@ spec: x-kubernetes-list-type: atomic type: object routes: - description: Routes configures IP routes for the + description: routes configures IP routes for the interface items: description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes properties: destination: - description: Destination points the IP route + description: destination points the IP route destination type: string gateway: description: |- - Gateway is the route's next-hop IP address + gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI 
plugin). type: string type: object @@ -145,7 +145,7 @@ spec: type: object type: description: |- - Type is the type of IPAM module will be used for IP Address Management(IPAM). + type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic type: string type: object @@ -178,6 +178,39 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + additionalRoutingCapabilities: + description: |- + additionalRoutingCapabilities describes components and relevant + configuration providing additional routing capabilities. When set, it + enables such components and the usage of the routing capabilities they + provide for the machine network. Upstream operators, like MetalLB + operator, requiring these capabilities may rely on, or automatically set + this attribute. Network plugins may leverage advanced routing + capabilities acquired through the enablement of these components but may + require specific configuration on their side to do so; refer to their + respective documentation and configuration options. + properties: + providers: + description: |- + providers is a set of enabled components that provide additional routing + capabilities. Entries on this list must be unique. The only valid value + is currrently "FRR" which provides FRR routing capabilities through the + deployment of FRR. + items: + description: RoutingCapabilitiesProvider is a component providing + routing capabilities. + enum: + - FRR + type: string + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - rule: self.all(x, self.exists_one(y, x == y)) + required: + - providers + type: object clusterNetwork: description: |- clusterNetwork is the IP address pool to use for pod IPs. @@ -205,7 +238,7 @@ spec: properties: openshiftSDNConfig: description: |- - openShiftSDNConfig was previously used to configure the openshift-sdn plugin. 
+ openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported. properties: enableUnidling: @@ -264,7 +297,7 @@ spec: properties: ipForwarding: description: |- - IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -328,7 +361,7 @@ spec: routingViaHost: default: false description: |- - RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. @@ -343,11 +376,11 @@ spec: type: integer hybridOverlayConfig: description: |- - HybridOverlayConfig configures an additional overlay network for peers that are + hybridOverlayConfig configures an additional overlay network for peers that are not using OVN. properties: hybridClusterNetwork: - description: HybridClusterNetwork defines a network space + description: hybridClusterNetwork defines a network space given to nodes on an additional overlay network. items: description: |- @@ -367,7 +400,7 @@ spec: x-kubernetes-list-type: atomic hybridOverlayVXLANPort: description: |- - HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. 
+ hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 format: int32 type: integer @@ -379,6 +412,30 @@ spec: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. properties: + full: + description: |- + full defines configuration parameters for the IPsec `Full` mode. + This is permitted only when mode is configured with `Full`, + and forbidden otherwise. + minProperties: 1 + properties: + encapsulation: + description: |- + encapsulation option to configure libreswan on how inter-pod traffic across nodes + are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + for the encapsulation. + Valid values are Always, Never, Auto and omitted. + Always means enable UDP encapsulation regardless of whether NAT is detected. + Disable means never enable UDP encapsulation even if NAT is present. + Auto means enable UDP encapsulation based on the detection of NAT. + When omitted, this means no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is Auto. + enum: + - Always + - Never + - Auto + type: string + type: object mode: description: |- mode defines the behaviour of the ipsec configuration within the platform. @@ -398,6 +455,9 @@ spec: x-kubernetes-validations: - message: ipsecConfig.mode is required rule: self == oldSelf || has(self.mode) + - message: full is forbidden when mode is not Full + rule: 'has(self.mode) && self.mode == ''Full'' ? true : + !has(self.full)' ipv4: description: |- ipv4 allows users to configure IP settings for IPv4 connections. 
When ommitted, diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-DevPreviewNoUpgrade.crd.yaml index fd06797632..86128eaf40 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-DevPreviewNoUpgrade.crd.yaml @@ -72,51 +72,51 @@ spec: NetworkAttachmentDefinition CRD type: string simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface + description: simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan properties: ipamConfig: - description: IPAMConfig configures IPAM module will be used + description: ipamConfig configures IPAM module will be used for IP Address Management (IPAM). 
properties: staticIPAMConfig: - description: StaticIPAMConfig configures the static + description: staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic properties: addresses: - description: Addresses configures IP address for + description: addresses configures IP address for the interface items: description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses properties: address: - description: Address is the IP address in + description: address is the IP address in CIDR format type: string gateway: - description: Gateway is IP inside of subnet + description: gateway is IP inside of subnet to designate as the gateway type: string type: object type: array x-kubernetes-list-type: atomic dns: - description: DNS configures DNS for the interface + description: dns configures DNS for the interface properties: domain: - description: Domain configures the domainname + description: domain configures the domainname the local domain used for short hostname lookups type: string nameservers: - description: Nameservers points DNS servers + description: nameservers points DNS servers for IP lookup items: type: string type: array x-kubernetes-list-type: atomic search: - description: Search configures priority ordered + description: search configures priority ordered search domains for short hostname lookups items: type: string @@ -124,19 +124,19 @@ spec: x-kubernetes-list-type: atomic type: object routes: - description: Routes configures IP routes for the + description: routes configures IP routes for the interface items: description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes properties: destination: - description: Destination points the IP route + description: destination points the IP route destination type: string gateway: description: |- - Gateway is the route's next-hop IP address + gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI 
plugin). type: string type: object @@ -145,7 +145,7 @@ spec: type: object type: description: |- - Type is the type of IPAM module will be used for IP Address Management(IPAM). + type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic type: string type: object @@ -238,7 +238,7 @@ spec: properties: openshiftSDNConfig: description: |- - openShiftSDNConfig was previously used to configure the openshift-sdn plugin. + openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported. properties: enableUnidling: @@ -297,7 +297,7 @@ spec: properties: ipForwarding: description: |- - IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -361,7 +361,7 @@ spec: routingViaHost: default: false description: |- - RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. @@ -376,11 +376,11 @@ spec: type: integer hybridOverlayConfig: description: |- - HybridOverlayConfig configures an additional overlay network for peers that are + hybridOverlayConfig configures an additional overlay network for peers that are not using OVN. 
properties: hybridClusterNetwork: - description: HybridClusterNetwork defines a network space + description: hybridClusterNetwork defines a network space given to nodes on an additional overlay network. items: description: |- @@ -400,7 +400,7 @@ spec: x-kubernetes-list-type: atomic hybridOverlayVXLANPort: description: |- - HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. + hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 format: int32 type: integer @@ -412,6 +412,30 @@ spec: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. properties: + full: + description: |- + full defines configuration parameters for the IPsec `Full` mode. + This is permitted only when mode is configured with `Full`, + and forbidden otherwise. + minProperties: 1 + properties: + encapsulation: + description: |- + encapsulation option to configure libreswan on how inter-pod traffic across nodes + are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + for the encapsulation. + Valid values are Always, Never, Auto and omitted. + Always means enable UDP encapsulation regardless of whether NAT is detected. + Disable means never enable UDP encapsulation even if NAT is present. + Auto means enable UDP encapsulation based on the detection of NAT. + When omitted, this means no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is Auto. + enum: + - Always + - Never + - Auto + type: string + type: object mode: description: |- mode defines the behaviour of the ipsec configuration within the platform. @@ -431,6 +455,9 @@ spec: x-kubernetes-validations: - message: ipsecConfig.mode is required rule: self == oldSelf || has(self.mode) + - message: full is forbidden when mode is not Full + rule: 'has(self.mode) && self.mode == ''Full'' ? 
true : + !has(self.full)' ipv4: description: |- ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-TechPreviewNoUpgrade.crd.yaml index e55b94afc3..5990e70638 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_70_network_01_networks-TechPreviewNoUpgrade.crd.yaml @@ -72,51 +72,51 @@ spec: NetworkAttachmentDefinition CRD type: string simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface + description: simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan properties: ipamConfig: - description: IPAMConfig configures IPAM module will be used + description: ipamConfig configures IPAM module will be used for IP Address Management (IPAM). 
properties: staticIPAMConfig: - description: StaticIPAMConfig configures the static + description: staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic properties: addresses: - description: Addresses configures IP address for + description: addresses configures IP address for the interface items: description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses properties: address: - description: Address is the IP address in + description: address is the IP address in CIDR format type: string gateway: - description: Gateway is IP inside of subnet + description: gateway is IP inside of subnet to designate as the gateway type: string type: object type: array x-kubernetes-list-type: atomic dns: - description: DNS configures DNS for the interface + description: dns configures DNS for the interface properties: domain: - description: Domain configures the domainname + description: domain configures the domainname the local domain used for short hostname lookups type: string nameservers: - description: Nameservers points DNS servers + description: nameservers points DNS servers for IP lookup items: type: string type: array x-kubernetes-list-type: atomic search: - description: Search configures priority ordered + description: search configures priority ordered search domains for short hostname lookups items: type: string @@ -124,19 +124,19 @@ spec: x-kubernetes-list-type: atomic type: object routes: - description: Routes configures IP routes for the + description: routes configures IP routes for the interface items: description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes properties: destination: - description: Destination points the IP route + description: destination points the IP route destination type: string gateway: description: |- - Gateway is the route's next-hop IP address + gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI 
plugin). type: string type: object @@ -145,7 +145,7 @@ spec: type: object type: description: |- - Type is the type of IPAM module will be used for IP Address Management(IPAM). + type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic type: string type: object @@ -238,7 +238,7 @@ spec: properties: openshiftSDNConfig: description: |- - openShiftSDNConfig was previously used to configure the openshift-sdn plugin. + openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported. properties: enableUnidling: @@ -297,7 +297,7 @@ spec: properties: ipForwarding: description: |- - IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". @@ -361,7 +361,7 @@ spec: routingViaHost: default: false description: |- - RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port + routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. @@ -376,11 +376,11 @@ spec: type: integer hybridOverlayConfig: description: |- - HybridOverlayConfig configures an additional overlay network for peers that are + hybridOverlayConfig configures an additional overlay network for peers that are not using OVN. 
properties: hybridClusterNetwork: - description: HybridClusterNetwork defines a network space + description: hybridClusterNetwork defines a network space given to nodes on an additional overlay network. items: description: |- @@ -400,7 +400,7 @@ spec: x-kubernetes-list-type: atomic hybridOverlayVXLANPort: description: |- - HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. + hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 format: int32 type: integer @@ -412,6 +412,30 @@ spec: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. properties: + full: + description: |- + full defines configuration parameters for the IPsec `Full` mode. + This is permitted only when mode is configured with `Full`, + and forbidden otherwise. + minProperties: 1 + properties: + encapsulation: + description: |- + encapsulation option to configure libreswan on how inter-pod traffic across nodes + are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 + for the encapsulation. + Valid values are Always, Never, Auto and omitted. + Always means enable UDP encapsulation regardless of whether NAT is detected. + Disable means never enable UDP encapsulation even if NAT is present. + Auto means enable UDP encapsulation based on the detection of NAT. + When omitted, this means no opinion and the platform is left to choose a reasonable + default, which is subject to change over time. The current default is Auto. + enum: + - Always + - Never + - Auto + type: string + type: object mode: description: |- mode defines the behaviour of the ipsec configuration within the platform. @@ -431,6 +455,9 @@ spec: x-kubernetes-validations: - message: ipsecConfig.mode is required rule: self == oldSelf || has(self.mode) + - message: full is forbidden when mode is not Full + rule: 'has(self.mode) && self.mode == ''Full'' ? 
true : + !has(self.full)' ipv4: description: |- ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 84edc0cab3..700ae5e695 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -1912,6 +1912,11 @@ func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { *out = *in + if in.Full != nil { + in, out := &in.Full, &out.Full + *out = new(IPsecFullModeConfig) + **out = **in + } return } @@ -1925,6 +1930,22 @@ func (in *IPsecConfig) DeepCopy() *IPsecConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecFullModeConfig) DeepCopyInto(out *IPsecFullModeConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecFullModeConfig. +func (in *IPsecFullModeConfig) DeepCopy() *IPsecFullModeConfig { + if in == nil { + return nil + } + out := new(IPsecFullModeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPv4GatewayConfig) DeepCopyInto(out *IPv4GatewayConfig) { *out = *in @@ -3834,6 +3855,101 @@ func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. 
+func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { *out = *in @@ -3855,7 +3971,7 @@ func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { if in.IPsecConfig != nil { in, out := &in.IPsecConfig, &out.IPsecConfig *out = new(IPsecConfig) - **out = **in + (*in).DeepCopyInto(*out) } if in.PolicyAuditConfig != nil { in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 9ed8975177..d45d8ac300 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -73,7 +73,7 @@ clustercsidrivers.operator.openshift.io: - VSphereDriverConfiguration FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_90" + FilenameRunLevel: "0000_50" GroupName: operator.openshift.io HasStatus: true KindName: ClusterCSIDriver @@ -346,6 +346,31 @@ networks.operator.openshift.io: TopLevelFeatureGates: [] Version: v1 +olms.operator.openshift.io: + Annotations: + 
include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" + ApprovedPRNumber: https://github.com/openshift/api/pull/1504 + CRDName: olms.operator.openshift.io + Capability: OperatorLifecycleManagerV1 + Category: "" + FeatureGates: + - NewOLM + FilenameOperatorName: operator-lifecycle-manager + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: operator.openshift.io + HasStatus: true + KindName: OLM + Labels: {} + PluralName: olms + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - NewOLM + Version: v1 + openshiftapiservers.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/475 diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 03d9e16edb..93eca5730f 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -128,7 +128,7 @@ func (AuthenticationList) SwaggerDoc() map[string]string { } var map_AuthenticationStatus = map[string]string{ - "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver", + "oauthAPIServer": "oauthAPIServer holds status specific only to oauth-apiserver", } func (AuthenticationStatus) SwaggerDoc() map[string]string { @@ -136,7 +136,7 @@ func (AuthenticationStatus) SwaggerDoc() map[string]string { } var map_OAuthAPIServerStatus = map[string]string{ - "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. A new revision causes a new deployment of pods.", + "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. 
A new revision causes a new deployment of pods.", } func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { @@ -163,7 +163,7 @@ func (CloudCredentialList) SwaggerDoc() map[string]string { var map_CloudCredentialSpec = map[string]string{ "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", - "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", + "credentialsMode": "credentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", } func (CloudCredentialSpec) SwaggerDoc() map[string]string { @@ -192,7 +192,7 @@ func (Config) SwaggerDoc() map[string]string { var map_ConfigList = map[string]string{ "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ConfigList) SwaggerDoc() map[string]string { @@ -314,7 +314,7 @@ func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", - "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "id": "id is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", "label": "label defines a category display label. It is required and must have 1-64 characters.", "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", } @@ -480,7 +480,7 @@ var map_CSIDriverConfigSpec = map[string]string{ "azure": "azure is used to configure the Azure CSI driver.", "gcp": "gcp is used to configure the GCP CSI driver.", "ibmcloud": "ibmcloud is used to configure the IBM Cloud CSI driver.", - "vSphere": "vsphere is used to configure the vsphere CSI driver.", + "vSphere": "vSphere is used to configure the vsphere CSI driver.", } func (CSIDriverConfigSpec) SwaggerDoc() map[string]string { @@ -509,7 +509,7 @@ func (ClusterCSIDriverList) SwaggerDoc() map[string]string { var map_ClusterCSIDriverSpec = map[string]string{ "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", - "storageClassState": "StorageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. 
If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", + "storageClassState": "storageClassState determines if CSI operator should create and manage storage classes. If this field value is empty or Managed - CSI operator will continuously reconcile storage class and create if necessary. If this field value is Unmanaged - CSI operator will not reconcile any previously created storage class. If this field value is Removed - CSI operator will delete the storage class it created previously. When omitted, this means the user has no opinion and the platform chooses a reasonable default, which is subject to change over time. The current default behaviour is Managed.", "driverConfig": "driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", } @@ -714,9 +714,9 @@ func (Server) SwaggerDoc() map[string]string { var map_Upstream = map[string]string{ "": "Upstream can either be of type SystemResolvConf, or of type Network.\n\n - For an Upstream of type SystemResolvConf, no further fields are necessary:\n The upstream will be configured to use /etc/resolv.conf.\n - For an Upstream of type Network, a NetworkResolver field needs to be defined\n with an IP address or IP:port if the upstream listens on a port other than 53.", - "type": "Type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. 
Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", - "address": "Address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", - "port": "Port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", + "type": "type defines whether this upstream contains an IP/IP:port resolver or the local /etc/resolv.conf. Type accepts 2 possible values: SystemResolvConf or Network.\n\n* When SystemResolvConf is used, the Upstream structure does not require any further fields to be defined:\n /etc/resolv.conf will be used\n* When Network is used, the Upstream structure must contain at least an Address", + "address": "address must be defined when Type is set to Network. It will be ignored otherwise. It must be a valid ipv4 or ipv6 address.", + "port": "port may be defined when Type is set to Network. It will be ignored otherwise. Port must be between 65535", } func (Upstream) SwaggerDoc() map[string]string { @@ -725,8 +725,8 @@ func (Upstream) SwaggerDoc() map[string]string { var map_UpstreamResolvers = map[string]string{ "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", - "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. 
The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", - "policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", + "upstreams": "upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. 
* \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"", "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", } @@ -747,7 +747,7 @@ func (Etcd) SwaggerDoc() map[string]string { var map_EtcdList = map[string]string{ "": "KubeAPISOperatorConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (EtcdList) SwaggerDoc() map[string]string { @@ -1020,24 +1020,25 @@ func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { } var map_IngressControllerSpec = map[string]string{ - "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", - "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", - "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", - "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", - "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", - "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. 
The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", - "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", - "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", - "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", - "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", - "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", - "logging": "logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled.", - "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", - "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. 
In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", - "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", - "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", - "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. 
Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status.\n\nThe value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively.\n\nThese defaults are subject to change.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. 
When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", + "clientTLS": "clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. 
In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", + "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", + "idleConnectionTerminationPolicy": "idleConnectionTerminationPolicy maps directly to HAProxy's idle-close-on-response option and controls whether HAProxy keeps idle frontend connections open during a soft stop (router reload).\n\nAllowed values for this field are \"Immediate\" and \"Deferred\". The default value is \"Immediate\".\n\nWhen set to \"Immediate\", idle connections are closed immediately during router reloads. This ensures immediate propagation of route changes but may impact clients sensitive to connection resets.\n\nWhen set to \"Deferred\", HAProxy will maintain idle connections during a soft reload instead of closing them immediately. These connections remain open until any of the following occurs:\n\n - A new request is received on the connection, in which\n case HAProxy handles it in the old process and closes\n the connection after sending the response.\n\n - HAProxy's `timeout http-keep-alive` duration expires\n (300 seconds in OpenShift's configuration, not\n configurable).\n\n - The client's keep-alive timeout expires, causing the\n client to close the connection.\n\nSetting Deferred can help prevent errors in clients or load balancers that do not properly handle connection resets. 
Additionally, this option allows you to retain the pre-2.4 HAProxy behaviour: in HAProxy version 2.2 (OpenShift versions < 4.14), maintaining idle connections during a soft reload was the default behaviour, but starting with HAProxy 2.4, the default changed to closing idle connections immediately.\n\nImportant Consideration:\n\n - Using Deferred will result in temporary inconsistencies\n for the first request on each persistent connection\n after a route update and router reload. This request\n will be processed by the old HAProxy process using its\n old configuration. Subsequent requests will use the\n updated configuration.\n\nOperational Considerations:\n\n - Keeping idle connections open during reloads may lead\n to an accumulation of old HAProxy processes if\n connections remain idle for extended periods,\n especially in environments where frequent reloads\n occur.\n\n - Consider monitoring the number of HAProxy processes in\n the router pods when Deferred is set.\n\n - You may need to enable or adjust the\n `ingress.operator.openshift.io/hard-stop-after`\n duration (configured via an annotation on the\n IngressController resource) in environments with\n frequent reloads to prevent resource exhaustion.", } func (IngressControllerSpec) SwaggerDoc() map[string]string { @@ -1071,7 +1072,7 @@ var map_IngressControllerTuningOptions = map[string]string{ "serverTimeout": "serverTimeout defines how long a connection will be held open while waiting for a server/backend response.\n\nIf unset, the default timeout is 30s", "serverFinTimeout": "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection.\n\nIf unset, the default timeout is 1s", "tunnelTimeout": "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle.\n\nIf unset, the default timeout is 1h", - "connectTimeout": "ConnectTimeout defines the maximum time to wait 
for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", + "connectTimeout": "connectTimeout defines the maximum time to wait for a connection attempt to a server/backend to succeed.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. This default is subject to change over time. The current default is 5s.", "tlsInspectDelay": "tlsInspectDelay defines how long the router can hold data to find a matching route.\n\nSetting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used.\n\nIf unset, the default inspect delay is 5s", "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". 
Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. 
If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", @@ -1264,7 +1265,7 @@ func (KubeAPIServer) SwaggerDoc() map[string]string { var map_KubeAPIServerList = map[string]string{ "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeAPIServerList) SwaggerDoc() map[string]string { @@ -1302,7 +1303,7 @@ func (KubeControllerManager) SwaggerDoc() map[string]string { var map_KubeControllerManagerList = map[string]string{ "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeControllerManagerList) SwaggerDoc() map[string]string { @@ -1329,7 +1330,7 @@ func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { var map_KubeStorageVersionMigratorList = map[string]string{ "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { @@ -1350,7 +1351,7 @@ func (MachineConfiguration) SwaggerDoc() map[string]string { var map_MachineConfigurationList = map[string]string{ "": "MachineConfigurationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (MachineConfigurationList) SwaggerDoc() map[string]string { @@ -1545,7 +1546,7 @@ var map_AdditionalNetworkDefinition = map[string]string{ "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", "namespace": "namespace is the namespace of the network. 
This will be populated in the resulting CRD If not given the network will be created in the default namespace.", "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", - "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", + "simpleMacvlanConfig": "simpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", } func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { @@ -1572,7 +1573,7 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string { var map_DefaultNetworkDefinition = map[string]string{ "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", - "openshiftSDNConfig": "openShiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", + "openshiftSDNConfig": "openshiftSDNConfig was previously used to configure the openshift-sdn plugin. DEPRECATED: OpenShift SDN is no longer supported.", "ovnKubernetesConfig": "ovnKubernetesConfig configures the ovn-kubernetes plugin.", } @@ -1611,8 +1612,8 @@ func (FeaturesMigration) SwaggerDoc() map[string]string { var map_GatewayConfig = map[string]string{ "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides", - "routingViaHost": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. 
Default is false if GatewayConfig is specified.", - "ipForwarding": "IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", + "routingViaHost": "routingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified.", + "ipForwarding": "ipForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When omitted, this means no opinion and the default configuration is used. Check individual members fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When omitted, this means no opinion and the default configuration is used. 
Check individual members fields within ipv6 for details of default values.", } @@ -1622,8 +1623,8 @@ func (GatewayConfig) SwaggerDoc() map[string]string { } var map_HybridOverlayConfig = map[string]string{ - "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", - "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", + "hybridClusterNetwork": "hybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "hybridOverlayVXLANPort": "hybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789", } func (HybridOverlayConfig) SwaggerDoc() map[string]string { @@ -1632,8 +1633,8 @@ func (HybridOverlayConfig) SwaggerDoc() map[string]string { var map_IPAMConfig = map[string]string{ "": "IPAMConfig contains configurations for IPAM (IP Address Management)", - "type": "Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", - "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", + "type": "type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic", + "staticIPAMConfig": "staticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic", } func (IPAMConfig) SwaggerDoc() map[string]string { @@ -1650,12 +1651,22 @@ func (IPFIXConfig) SwaggerDoc() map[string]string { var map_IPsecConfig = map[string]string{ "mode": "mode defines the behaviour of the ipsec configuration within the platform. Valid values are `Disabled`, `External` and `Full`. When 'Disabled', ipsec will not be enabled at the node level. When 'External', ipsec is enabled on the node level but requires the user to configure the secure communication parameters. 
This mode is for external secure communications and the configuration can be done using the k8s-nmstate operator. When 'Full', ipsec is configured on the node level and inter-pod secure communication within the cluster is configured. Note with `Full`, if ipsec is desired for communication with external (to the cluster) entities (such as storage arrays), this is left to the user to configure.", + "full": "full defines configuration parameters for the IPsec `Full` mode. This is permitted only when mode is configured with `Full`, and forbidden otherwise.", } func (IPsecConfig) SwaggerDoc() map[string]string { return map_IPsecConfig } +var map_IPsecFullModeConfig = map[string]string{ + "": "IPsecFullModeConfig defines configuration parameters for the IPsec `Full` mode.", + "encapsulation": "encapsulation option to configure libreswan on how inter-pod traffic across nodes are encapsulated to handle NAT traversal. When configured it uses UDP port 4500 for the encapsulation. Valid values are Always, Never, Auto and omitted. Always means enable UDP encapsulation regardless of whether NAT is detected. Disable means never enable UDP encapsulation even if NAT is present. Auto means enable UDP encapsulation based on the detection of NAT. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Auto.", +} + +func (IPsecFullModeConfig) SwaggerDoc() map[string]string { + return map_IPsecFullModeConfig +} + var map_IPv4GatewayConfig = map[string]string{ "": "IPV4GatewayConfig holds the configuration paramaters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. 
The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format", @@ -1782,7 +1793,7 @@ var map_OVNKubernetesConfig = map[string]string{ "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", "genevePort": "geneve port is the UDP port to be used by geneve encapulation. Default is 6081", - "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", + "hybridOverlayConfig": "hybridOverlayConfig configures an additional overlay network for peers that are not using OVN.", "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.", "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.", "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", @@ -1845,7 +1856,7 @@ func (SFlowConfig) SwaggerDoc() map[string]string { var map_SimpleMacvlanConfig = map[string]string{ "": "SimpleMacvlanConfig contains configurations for macvlan interface.", "master": "master is the host interface to create the macvlan interface from. 
If not specified, it will be default route interface", - "ipamConfig": "IPAMConfig configures IPAM module will be used for IP Address Management (IPAM).", + "ipamConfig": "ipamConfig configures IPAM module will be used for IP Address Management (IPAM).", "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge", "mtu": "mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value.", } @@ -1856,8 +1867,8 @@ func (SimpleMacvlanConfig) SwaggerDoc() map[string]string { var map_StaticIPAMAddresses = map[string]string{ "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses", - "address": "Address is the IP address in CIDR format", - "gateway": "Gateway is IP inside of subnet to designate as the gateway", + "address": "address is the IP address in CIDR format", + "gateway": "gateway is IP inside of subnet to designate as the gateway", } func (StaticIPAMAddresses) SwaggerDoc() map[string]string { @@ -1866,9 +1877,9 @@ func (StaticIPAMAddresses) SwaggerDoc() map[string]string { var map_StaticIPAMConfig = map[string]string{ "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)", - "addresses": "Addresses configures IP address for the interface", - "routes": "Routes configures IP routes for the interface", - "dns": "DNS configures DNS for the interface", + "addresses": "addresses configures IP address for the interface", + "routes": "routes configures IP routes for the interface", + "dns": "dns configures DNS for the interface", } func (StaticIPAMConfig) SwaggerDoc() map[string]string { @@ -1877,9 +1888,9 @@ func (StaticIPAMConfig) SwaggerDoc() map[string]string { var map_StaticIPAMDNS = map[string]string{ "": "StaticIPAMDNS provides DNS related information for static IPAM", - "nameservers": "Nameservers points DNS servers for IP lookup", - "domain": "Domain configures the domainname the local domain used for short hostname lookups", - "search": 
"Search configures priority ordered search domains for short hostname lookups", + "nameservers": "nameservers points DNS servers for IP lookup", + "domain": "domain configures the domainname the local domain used for short hostname lookups", + "search": "search configures priority ordered search domains for short hostname lookups", } func (StaticIPAMDNS) SwaggerDoc() map[string]string { @@ -1888,14 +1899,35 @@ func (StaticIPAMDNS) SwaggerDoc() map[string]string { var map_StaticIPAMRoutes = map[string]string{ "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes", - "destination": "Destination points the IP route destination", - "gateway": "Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", + "destination": "destination points the IP route destination", + "gateway": "gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin).", } func (StaticIPAMRoutes) SwaggerDoc() map[string]string { return map_StaticIPAMRoutes } +var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OLM) SwaggerDoc() map[string]string { + return map_OLM +} + +var map_OLMList = map[string]string{ + "": "OLMList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items contains the items", +} + +func (OLMList) SwaggerDoc() map[string]string { + return map_OLMList +} + var map_OpenShiftAPIServer = map[string]string{ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1910,7 +1942,7 @@ func (OpenShiftAPIServer) SwaggerDoc() map[string]string { var map_OpenShiftAPIServerList = map[string]string{ "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OpenShiftAPIServerList) SwaggerDoc() map[string]string { @@ -1929,7 +1961,7 @@ func (OpenShiftControllerManager) SwaggerDoc() map[string]string { var map_OpenShiftControllerManagerList = map[string]string{ "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { @@ -1950,7 +1982,7 @@ func (KubeScheduler) SwaggerDoc() map[string]string { var map_KubeSchedulerList = map[string]string{ "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (KubeSchedulerList) SwaggerDoc() map[string]string { @@ -1971,7 +2003,7 @@ func (ServiceCA) SwaggerDoc() map[string]string { var map_ServiceCAList = map[string]string{ "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCAList) SwaggerDoc() map[string]string { @@ -1990,7 +2022,7 @@ func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { var map_ServiceCatalogAPIServerList = map[string]string{ "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { @@ -2009,7 +2041,7 @@ func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { var map_ServiceCatalogControllerManagerList = map[string]string{ "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { @@ -2038,7 +2070,7 @@ func (StorageList) SwaggerDoc() map[string]string { var map_StorageSpec = map[string]string{ "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", - "vsphereStorageDriver": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", + "vsphereStorageDriver": "vsphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. 
DEPRECATED: This field will be removed in a future release.", } func (StorageSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go index 4d5a207e6a..932e8c5834 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go @@ -149,7 +149,7 @@ type OperatorStatus struct { type GenericOperatorConfig struct { metav1.TypeMeta `json:",inline"` - // ServingInfo is the HTTP serving information for the controller's endpoints + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo configv1.HTTPServingInfo `json:"servingInfo,omitempty"` // leaderElection provides information to elect a leader. Only override this if you have a specific need diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go new file mode 100644 index 0000000000..ec9cfea9f6 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterversion.go @@ -0,0 +1,76 @@ +package v1alpha1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=cluster-version-operator,operatorOrdering=01 +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusterversionoperators,scope=Cluster +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2044 +// +openshift:enable:FeatureGate=ClusterVersionOperatorConfiguration +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="ClusterVersionOperator is a singleton; the .metadata.name field must be 'cluster'" +type ClusterVersionOperator struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Cluster Version Operator. + // +required + Spec ClusterVersionOperatorSpec `json:"spec"` + + // status is the most recently observed status of the Cluster Version Operator. + // +optional + Status ClusterVersionOperatorStatus `json:"status"` +} + +// ClusterVersionOperatorSpec is the specification of the desired behavior of the Cluster Version Operator. +type ClusterVersionOperatorSpec struct { + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel operatorv1.LogLevel `json:"operatorLogLevel,omitempty"` +} + +// ClusterVersionOperatorStatus defines the observed status of the Cluster Version Operator. 
+type ClusterVersionOperatorStatus struct { + // observedGeneration represents the most recent generation observed by the operator and specifies the version of + // the spec field currently being synced. + // +optional + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="observedGeneration must only increase" + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersionOperatorList is a collection of ClusterVersionOperators. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type ClusterVersionOperatorList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // items is a list of ClusterVersionOperators. + // +optional + Items []ClusterVersionOperator `json:"items,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go index 2654f57008..3c6f344b1e 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go @@ -23,22 +23,19 @@ type EtcdBackup struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec EtcdBackupSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
- // +kubebuilder:validation:Optional // +optional Status EtcdBackupStatus `json:"status"` } type EtcdBackupSpec struct { - // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the // etcd backup file would be saved // The PVC itself must always be created in the "openshift-etcd" namespace // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. // In the future this would be backups saved across the control-plane master nodes. - // +kubebuilder:validation:Optional // +optional // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="pvcName is immutable once set" PVCName string `json:"pvcName"` @@ -56,7 +53,7 @@ type EtcdBackupStatus struct { // backupJob is the reference to the Job that executes the backup. // Optional - // +kubebuilder:validation:Optional + // +optional BackupJob *BackupJobReference `json:"backupJob"` } @@ -67,13 +64,13 @@ type BackupJobReference struct { // this is always expected to be "openshift-etcd" since the user provided PVC // is also required to be in "openshift-etcd" // Required - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:Pattern:=`^openshift-etcd$` Namespace string `json:"namespace"` // name is the name of the Job. 
// Required - // +kubebuilder:validation:Required + // +required Name string `json:"name"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go index 0830ed7448..d4f7e17e67 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go @@ -25,7 +25,6 @@ type ImageContentSourcePolicy struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration - // +kubebuilder:validation:Required // +required Spec ImageContentSourcePolicySpec `json:"spec"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go index f29385b9fa..41d160a205 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go @@ -17,6 +17,8 @@ import ( // +kubebuilder:object:root=true // +kubebuilder:resource:path=olms,scope=Cluster // +kubebuilder:subresource:status +// +kubebuilder:metadata:annotations=include.release.openshift.io/ibm-cloud-managed=false +// +kubebuilder:metadata:annotations=include.release.openshift.io/self-managed-high-availability=true // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1504 // +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=operator-lifecycle-manager,operatorOrdering=01 // +openshift:enable:FeatureGate=NewOLM @@ -29,7 +31,7 @@ type OLM struct { metav1.ObjectMeta `json:"metadata"` //spec holds user settable values for configuration - // +kubebuilder:validation:Required + // +required Spec OLMSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. 
// +optional @@ -57,6 +59,6 @@ type OLMList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []OLM `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..53b1377cb4 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-CustomNoUpgrade.crd.yaml @@ -0,0 +1,90 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2044 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: clusterversionoperators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterVersionOperator + listKind: ClusterVersionOperatorList + plural: clusterversionoperators + singular: clusterversionoperator + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Cluster Version Operator. + properties: + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + type: object + status: + description: status is the most recently observed status of the Cluster + Version Operator. + properties: + observedGeneration: + description: |- + observedGeneration represents the most recent generation observed by the operator and specifies the version of + the spec field currently being synced. 
+ format: int64 + type: integer + x-kubernetes-validations: + - message: observedGeneration must only increase + rule: self >= oldSelf + type: object + required: + - metadata + - spec + type: object + x-kubernetes-validations: + - message: ClusterVersionOperator is a singleton; the .metadata.name field + must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..0ddb0e6352 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversionoperators-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,90 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/2044 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: clusterversionoperators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterVersionOperator + listKind: ClusterVersionOperatorList + plural: clusterversionoperators + singular: clusterversionoperator + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator. + + Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Cluster Version Operator. + properties: + operatorLogLevel: + default: Normal + description: |- + operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + simple way to manage coarse grained logging choices that operators have to interpret for themselves. + + Valid values are: "Normal", "Debug", "Trace", "TraceAll". + Defaults to "Normal". + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + type: object + status: + description: status is the most recently observed status of the Cluster + Version Operator. + properties: + observedGeneration: + description: |- + observedGeneration represents the most recent generation observed by the operator and specifies the version of + the spec field currently being synced. 
+ format: int64 + type: integer + x-kubernetes-validations: + - message: observedGeneration must only increase + rule: self >= oldSelf + type: object + required: + - metadata + - spec + type: object + x-kubernetes-validations: + - message: ClusterVersionOperator is a singleton; the .metadata.name field + must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-CustomNoUpgrade.crd.yaml index cc2e015c73..49bba6f6cb 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-CustomNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. 
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml index 68df34081b..d015b811ce 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. 
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml index 0bb78ad6d8..9d56894fdb 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-TechPreviewNoUpgrade.crd.yaml @@ -47,7 +47,7 @@ spec: properties: pvcName: description: |- - PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. 
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-TechPreviewNoUpgrade.crd.yaml deleted file mode 100644 index 3961906274..0000000000 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-TechPreviewNoUpgrade.crd.yaml +++ /dev/null @@ -1,216 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/1504 - api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/feature-set: TechPreviewNoUpgrade - name: olms.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: OLM - listKind: OLMList - plural: olms - singular: olm - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - OLM provides information to configure an operator to manage the OLM controllers - - Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: spec holds user settable values for configuration - properties: - logLevel: - default: Normal - description: |- - logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a - simple way to manage coarse grained logging choices that operators have to interpret for their operands. - - Valid values are: "Normal", "Debug", "Trace", "TraceAll". - Defaults to "Normal". - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - managementState: - description: managementState indicates whether and how the operator - should manage the component - pattern: ^(Managed|Unmanaged|Force|Removed)$ - type: string - observedConfig: - description: |- - observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because - it is an input to the level for the operator - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - default: Normal - description: |- - operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a - simple way to manage coarse grained logging choices that operators have to interpret for themselves. - - Valid values are: "Normal", "Debug", "Trace", "TraceAll". - Defaults to "Normal". - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - type: string - unsupportedConfigOverrides: - description: |- - unsupportedConfigOverrides overrides the final configuration that was computed by the operator. - Red Hat does not support the use of this field. - Misuse of this field could lead to unexpected behavior or conflict with other configuration options. 
- Seek guidance from the Red Hat support before using this field. - Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. - nullable: true - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - status: - description: status holds observed values from the cluster. They may not - be overridden. - properties: - conditions: - description: conditions is a list of conditions and their status - items: - description: OperatorCondition is just the standard condition fields. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - type: string - reason: - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - generations: - description: generations are used to determine when an item needs - to be reconciled or has changed in a way that needs a reaction. - items: - description: GenerationStatus keeps track of the generation for - a given resource so that decisions about forced updates can be - made. 
- properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without - generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload - controller involved - format: int64 - type: integer - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're - tracking - type: string - required: - - group - - name - - namespace - - resource - type: object - type: array - x-kubernetes-list-map-keys: - - group - - resource - - namespace - - name - x-kubernetes-list-type: map - latestAvailableRevision: - description: latestAvailableRevision is the deploymentID of the most - recent deployment - format: int32 - type: integer - x-kubernetes-validations: - - message: must only increase - rule: self >= oldSelf - observedGeneration: - description: observedGeneration is the last generation change you've - dealt with - format: int64 - type: integer - readyReplicas: - description: readyReplicas indicates how many replicas are ready and - at the desired state - format: int32 - type: integer - version: - description: version is the level this availability applies to - type: string - type: object - required: - - metadata - - spec - type: object - x-kubernetes-validations: - - message: olm is a singleton, .metadata.name must be 'cluster' - rule: self.metadata.name == 'cluster' - served: true - storage: true - subresources: - status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-CustomNoUpgrade.crd.yaml 
b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml similarity index 98% rename from vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-CustomNoUpgrade.crd.yaml rename to vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml index 3dc0e298c1..3d7242f13c 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms.crd.yaml @@ -4,9 +4,7 @@ metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/1504 api.openshift.io/merged-by-featuregates: "true" - include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" - release.openshift.io/feature-set: CustomNoUpgrade name: olms.operator.openshift.io spec: group: operator.openshift.io diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index 08ef2811a5..f8daa0576b 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,99 @@ func (in *BackupJobReference) DeepCopy() *BackupJobReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterVersionOperator) DeepCopyInto(out *ClusterVersionOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperator. +func (in *ClusterVersionOperator) DeepCopy() *ClusterVersionOperator { + if in == nil { + return nil + } + out := new(ClusterVersionOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionOperatorList) DeepCopyInto(out *ClusterVersionOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersionOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorList. +func (in *ClusterVersionOperatorList) DeepCopy() *ClusterVersionOperatorList { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterVersionOperatorSpec) DeepCopyInto(out *ClusterVersionOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorSpec. +func (in *ClusterVersionOperatorSpec) DeepCopy() *ClusterVersionOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionOperatorStatus) DeepCopyInto(out *ClusterVersionOperatorStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionOperatorStatus. +func (in *ClusterVersionOperatorStatus) DeepCopy() *ClusterVersionOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterVersionOperatorStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml index 30c058236d..0d595be801 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.featuregated-crd-manifests.yaml @@ -1,3 +1,26 @@ +clusterversionoperators.operator.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2044 + CRDName: clusterversionoperators.operator.openshift.io + Capability: "" + Category: "" + FeatureGates: + - ClusterVersionOperatorConfiguration + FilenameOperatorName: cluster-version-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_00" + GroupName: operator.openshift.io + HasStatus: true + KindName: ClusterVersionOperator + Labels: {} + PluralName: clusterversionoperators + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - ClusterVersionOperatorConfiguration + Version: v1alpha1 + etcdbackups.operator.openshift.io: Annotations: {} ApprovedPRNumber: https://github.com/openshift/api/pull/1482 @@ -44,7 +67,9 @@ imagecontentsourcepolicies.operator.openshift.io: Version: v1alpha1 olms.operator.openshift.io: - Annotations: {} + Annotations: + include.release.openshift.io/ibm-cloud-managed: "false" + include.release.openshift.io/self-managed-high-availability: "true" ApprovedPRNumber: https://github.com/openshift/api/pull/1504 CRDName: olms.operator.openshift.io Capability: "" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index c8cce688f4..9060bf9981 100644 --- 
a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -44,7 +44,7 @@ func (GenerationHistory) SwaggerDoc() map[string]string { var map_GenericOperatorConfig = map[string]string{ "": "GenericOperatorConfig provides information to configure an operator\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", "authorization": "authorization allows configuration of authentication for the endpoints", @@ -135,6 +135,45 @@ func (VersionAvailability) SwaggerDoc() map[string]string { return map_VersionAvailability } +var map_ClusterVersionOperator = map[string]string{ + "": "ClusterVersionOperator holds cluster-wide information about the Cluster Version Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Cluster Version Operator.", + "status": "status is the most recently observed status of the Cluster Version Operator.", +} + +func (ClusterVersionOperator) SwaggerDoc() map[string]string { + return map_ClusterVersionOperator +} + +var map_ClusterVersionOperatorList = map[string]string{ + "": "ClusterVersionOperatorList is a collection of ClusterVersionOperators.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterVersionOperators.", +} + +func (ClusterVersionOperatorList) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorList +} + +var map_ClusterVersionOperatorSpec = map[string]string{ + "": "ClusterVersionOperatorSpec is the specification of the desired behavior of the Cluster Version Operator.", + "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". 
Defaults to \"Normal\".", +} + +func (ClusterVersionOperatorSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorSpec +} + +var map_ClusterVersionOperatorStatus = map[string]string{ + "": "ClusterVersionOperatorStatus defines the observed status of the Cluster Version Operator.", + "observedGeneration": "observedGeneration represents the most recent generation observed by the operator and specifies the version of the spec field currently being synced.", +} + +func (ClusterVersionOperatorStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionOperatorStatus +} + var map_BackupJobReference = map[string]string{ "": "BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup", "namespace": "namespace is the namespace of the Job. this is always expected to be \"openshift-etcd\" since the user provided PVC is also required to be in \"openshift-etcd\" Required", @@ -164,7 +203,7 @@ func (EtcdBackupList) SwaggerDoc() map[string]string { } var map_EtcdBackupSpec = map[string]string{ - "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", + "pvcName": "pvcName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. 
In the future this would be backups saved across the control-plane master nodes.", } func (EtcdBackupSpec) SwaggerDoc() map[string]string { @@ -232,7 +271,7 @@ func (OLM) SwaggerDoc() map[string]string { var map_OLMList = map[string]string{ "": "OLMList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (OLMList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go index f4b48e8545..ba92985c13 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go @@ -26,19 +26,17 @@ type PodNetworkConnectivityCheck struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // Spec defines the source and target of the connectivity check - // +kubebuilder:validation:Required + // spec defines the source and target of the connectivity check // +required Spec PodNetworkConnectivityCheckSpec `json:"spec"` - // Status contains the observed status of the connectivity check + // status contains the observed status of the connectivity check // +optional Status PodNetworkConnectivityCheckStatus `json:"status,omitempty"` } type PodNetworkConnectivityCheckSpec struct { - // SourcePod names the pod from which the condition will be checked - // +kubebuilder:validation:Required + // sourcePod names the pod 
from which the condition will be checked // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` // +required SourcePod string `json:"sourcePod"` @@ -46,7 +44,6 @@ type PodNetworkConnectivityCheckSpec struct { // EndpointAddress to check. A TCP address of the form host:port. Note that // if host is a DNS name, then the check would fail if the DNS name cannot // be resolved. Specify an IP address for host to bypass DNS name lookup. - // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^\S+:\d*$` // +required TargetEndpoint string `json:"targetEndpoint"` @@ -62,19 +59,19 @@ type PodNetworkConnectivityCheckSpec struct { // +k8s:deepcopy-gen=true type PodNetworkConnectivityCheckStatus struct { - // Successes contains logs successful check actions + // successes contains logs successful check actions // +optional Successes []LogEntry `json:"successes,omitempty"` - // Failures contains logs of unsuccessful check actions + // failures contains logs of unsuccessful check actions // +optional Failures []LogEntry `json:"failures,omitempty"` - // Outages contains logs of time periods of outages + // outages contains logs of time periods of outages // +optional Outages []OutageEntry `json:"outages,omitempty"` - // Conditions summarize the status of the check + // conditions summarize the status of the check // +patchMergeKey=type // +patchStrategy=merge // +optional @@ -84,25 +81,23 @@ type PodNetworkConnectivityCheckStatus struct { // LogEntry records events type LogEntry struct { // Start time of check action. - // +kubebuilder:validation:Required // +required // +nullable Start metav1.Time `json:"time"` - // Success indicates if the log entry indicates a success or failure. - // +kubebuilder:validation:Required + // success indicates if the log entry indicates a success or failure. // +required Success bool `json:"success"` - // Reason for status in a machine readable format. 
+ // reason for status in a machine readable format. // +optional Reason string `json:"reason,omitempty"` - // Message explaining status in a human readable format. + // message explaining status in a human readable format. // +optional Message string `json:"message,omitempty"` - // Latency records how long the action mentioned in the entry took. + // latency records how long the action mentioned in the entry took. // +optional // +nullable Latency metav1.Duration `json:"latency,omitempty"` @@ -111,28 +106,27 @@ type LogEntry struct { // OutageEntry records time period of an outage type OutageEntry struct { - // Start of outage detected - // +kubebuilder:validation:Required + // start of outage detected // +required // +nullable Start metav1.Time `json:"start"` - // End of outage detected + // end of outage detected // +optional // +nullable End metav1.Time `json:"end,omitempty"` - // StartLogs contains log entries related to the start of this outage. Should contain + // startLogs contains log entries related to the start of this outage. Should contain // the original failure, any entries where the failure mode changed. // +optional StartLogs []LogEntry `json:"startLogs,omitempty"` - // EndLogs contains log entries related to the end of this outage. Should contain the success + // endLogs contains log entries related to the end of this outage. Should contain the success // entry that resolved the outage and possibly a few of the failure log entries that preceded it. // +optional EndLogs []LogEntry `json:"endLogs,omitempty"` - // Message summarizes outage details in a human readable format. + // message summarizes outage details in a human readable format. 
// +optional Message string `json:"message,omitempty"` } @@ -141,26 +135,23 @@ type OutageEntry struct { // +k8s:deepcopy-gen=true type PodNetworkConnectivityCheckCondition struct { - // Type of the condition - // +kubebuilder:validation:Required + // type of the condition // +required Type PodNetworkConnectivityCheckConditionType `json:"type"` - // Status of the condition - // +kubebuilder:validation:Required + // status of the condition // +required Status metav1.ConditionStatus `json:"status"` - // Reason for the condition's last status transition in a machine readable format. + // reason for the condition's last status transition in a machine readable format. // +optional Reason string `json:"reason,omitempty"` - // Message indicating details about last transition in a human readable format. + // message indicating details about last transition in a human readable format. // +optional Message string `json:"message,omitempty"` // Last time the condition transitioned from one status to another. 
- // +kubebuilder:validation:Required // +required // +nullable LastTransitionTime metav1.Time `json:"lastTransitionTime"` @@ -193,6 +184,6 @@ type PodNetworkConnectivityCheckList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - // Items contains the items + // items contains the items Items []PodNetworkConnectivityCheck `json:"items"` } diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go index 5ecc5e48af..f6cd1975de 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go @@ -14,10 +14,10 @@ package v1alpha1 var map_LogEntry = map[string]string{ "": "LogEntry records events", "time": "Start time of check action.", - "success": "Success indicates if the log entry indicates a success or failure.", - "reason": "Reason for status in a machine readable format.", - "message": "Message explaining status in a human readable format.", - "latency": "Latency records how long the action mentioned in the entry took.", + "success": "success indicates if the log entry indicates a success or failure.", + "reason": "reason for status in a machine readable format.", + "message": "message explaining status in a human readable format.", + "latency": "latency records how long the action mentioned in the entry took.", } func (LogEntry) SwaggerDoc() map[string]string { @@ -26,11 +26,11 @@ func (LogEntry) SwaggerDoc() map[string]string { var map_OutageEntry = map[string]string{ "": "OutageEntry records time period of an outage", - "start": "Start of outage detected", - "end": "End of outage detected", - "startLogs": "StartLogs contains log entries related to the start 
of this outage. Should contain the original failure, any entries where the failure mode changed.", - "endLogs": "EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", - "message": "Message summarizes outage details in a human readable format.", + "start": "start of outage detected", + "end": "end of outage detected", + "startLogs": "startLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed.", + "endLogs": "endLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", + "message": "message summarizes outage details in a human readable format.", } func (OutageEntry) SwaggerDoc() map[string]string { @@ -40,8 +40,8 @@ func (OutageEntry) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheck = map[string]string{ "": "PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the source and target of the connectivity check", - "status": "Status contains the observed status of the connectivity check", + "spec": "spec defines the source and target of the connectivity check", + "status": "status contains the observed status of the connectivity check", } func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string { @@ -50,10 +50,10 @@ func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheckCondition = map[string]string{ "": "PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity.", - "type": "Type of the condition", - "status": "Status of the condition", - "reason": "Reason for the condition's last status transition in a machine readable format.", - "message": "Message indicating details about last transition in a human readable format.", + "type": "type of the condition", + "status": "status of the condition", + "reason": "reason for the condition's last status transition in a machine readable format.", + "message": "message indicating details about last transition in a human readable format.", "lastTransitionTime": "Last time the condition transitioned from one status to another.", } @@ -64,7 +64,7 @@ func (PodNetworkConnectivityCheckCondition) SwaggerDoc() map[string]string { var map_PodNetworkConnectivityCheckList = map[string]string{ "": "PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string { @@ -72,7 +72,7 @@ func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string { } var map_PodNetworkConnectivityCheckSpec = map[string]string{ - "sourcePod": "SourcePod names the pod from which the condition will be checked", + "sourcePod": "sourcePod names the pod from which the condition will be checked", "targetEndpoint": "EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup.", "tlsClientCert": "TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully preform the scan without causing excessive logging in the endpoint process. 
The secret must exist in the same namespace as this resource.", } @@ -82,10 +82,10 @@ func (PodNetworkConnectivityCheckSpec) SwaggerDoc() map[string]string { } var map_PodNetworkConnectivityCheckStatus = map[string]string{ - "successes": "Successes contains logs successful check actions", - "failures": "Failures contains logs of unsuccessful check actions", - "outages": "Outages contains logs of time periods of outages", - "conditions": "Conditions summarize the status of the check", + "successes": "successes contains logs successful check actions", + "failures": "failures contains logs of unsuccessful check actions", + "outages": "outages contains logs of time periods of outages", + "conditions": "conditions summarize the status of the check", } func (PodNetworkConnectivityCheckStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go index 0ea4be1ba0..35eb3ee8b0 100644 --- a/vendor/github.com/openshift/api/osin/v1/types.go +++ b/vendor/github.com/openshift/api/osin/v1/types.go @@ -473,7 +473,7 @@ type TokenConfig struct { type SessionSecrets struct { metav1.TypeMeta `json:",inline"` - // Secrets is a list of secrets + // secrets is a list of secrets // New sessions are signed and encrypted using the first secret. // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. Secrets []SessionSecret `json:"secrets"` @@ -481,8 +481,8 @@ type SessionSecrets struct { // SessionSecret is a secret used to authenticate/decrypt cookie-based sessions type SessionSecret struct { - // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + // authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. Authentication string `json:"authentication"` - // Encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES- + // encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES- Encryption string `json:"encryption"` } diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go index 0bffa8265b..890928a7a4 100644 --- a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go @@ -248,8 +248,8 @@ func (SessionConfig) SwaggerDoc() map[string]string { var map_SessionSecret = map[string]string{ "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", - "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", - "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", + "authentication": "authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-", } func (SessionSecret) SwaggerDoc() map[string]string { @@ -258,7 +258,7 @@ func (SessionSecret) SwaggerDoc() map[string]string { var map_SessionSecrets = map[string]string{ "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. 
This allows rotating secrets.", + "secrets": "secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", } func (SessionSecrets) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto index 762dc99c65..d1ffbc341b 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.proto +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -32,10 +32,10 @@ message Project { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the behavior of the Namespace. + // spec defines the behavior of the Namespace. optional ProjectSpec spec = 2; - // Status describes the current status of a Namespace + // status describes the current status of a Namespace // +optional optional ProjectStatus status = 3; } @@ -49,7 +49,7 @@ message ProjectList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of projects + // items is the list of projects repeated Project items = 2; } @@ -62,22 +62,22 @@ message ProjectRequest { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // DisplayName is the display name to apply to a project + // displayName is the display name to apply to a project optional string displayName = 2; - // Description is the description to apply to a project + // description is the description to apply to a project optional string description = 3; } // ProjectSpec describes the attributes on 
a Project message ProjectSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + // finalizers is an opaque list of values that must be empty to permanently remove object from storage repeated string finalizers = 1; } // ProjectStatus is information about the current status of a Project message ProjectStatus { - // Phase is the current lifecycle phase of the project + // phase is the current lifecycle phase of the project // +optional optional string phase = 1; diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go index 9c17a5deab..5e69b775b5 100644 --- a/vendor/github.com/openshift/api/project/v1/types.go +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -18,7 +18,7 @@ type ProjectList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of projects + // items is the list of projects Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -36,13 +36,13 @@ const ( // ProjectSpec describes the attributes on a Project type ProjectSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + // finalizers is an opaque list of values that must be empty to permanently remove object from storage Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"` } // ProjectStatus is information about the current status of a Project type ProjectStatus struct { - // Phase is the current lifecycle phase of the project + // phase is the current lifecycle phase of the project // +optional Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` @@ -79,10 +79,10 
@@ type Project struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the behavior of the Namespace. + // spec defines the behavior of the Namespace. Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Namespace + // status describes the current status of a Namespace // +optional Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -104,8 +104,8 @@ type ProjectRequest struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // DisplayName is the display name to apply to a project + // displayName is the display name to apply to a project DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` - // Description is the description to apply to a project + // description is the description to apply to a project Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` } diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go index 890e651d73..b764eaface 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -14,8 +14,8 @@ package v1 var map_Project = map[string]string{ "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. 
Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace.", - "status": "Status describes the current status of a Namespace", + "spec": "spec defines the behavior of the Namespace.", + "status": "status describes the current status of a Namespace", } func (Project) SwaggerDoc() map[string]string { @@ -25,7 +25,7 @@ func (Project) SwaggerDoc() map[string]string { var map_ProjectList = map[string]string{ "": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of projects", + "items": "items is the list of projects", } func (ProjectList) SwaggerDoc() map[string]string { @@ -35,8 +35,8 @@ func (ProjectList) SwaggerDoc() map[string]string { var map_ProjectRequest = map[string]string{ "": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "displayName": "DisplayName is the display name to apply to a project", - "description": "Description is the description to apply to a project", + "displayName": "displayName is the display name to apply to a project", + "description": "description is the description to apply to a project", } func (ProjectRequest) SwaggerDoc() map[string]string { @@ -45,7 +45,7 @@ func (ProjectRequest) SwaggerDoc() map[string]string { var map_ProjectSpec = map[string]string{ "": "ProjectSpec describes the attributes on a Project", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", + "finalizers": "finalizers is an opaque list of values that must be empty to permanently remove object from storage", } func (ProjectSpec) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (ProjectSpec) SwaggerDoc() map[string]string { var map_ProjectStatus = map[string]string{ "": "ProjectStatus is information about the current status of a Project", - "phase": "Phase is the current lifecycle phase of the project", + "phase": "phase is the current lifecycle phase of the project", "conditions": "Represents the latest available observations of the project current state.", } diff --git 
a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto index d08e8f0f9a..fb7fed242a 100644 --- a/vendor/github.com/openshift/api/quota/v1/generated.proto +++ b/vendor/github.com/openshift/api/quota/v1/generated.proto @@ -23,10 +23,10 @@ message AppliedClusterResourceQuota { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired quota + // spec defines the desired quota optional ClusterResourceQuotaSpec spec = 2; - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage optional ClusterResourceQuotaStatus status = 3; } @@ -39,7 +39,7 @@ message AppliedClusterResourceQuotaList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of AppliedClusterResourceQuota + // items is a list of AppliedClusterResourceQuota repeated AppliedClusterResourceQuota items = 2; } @@ -59,10 +59,10 @@ message ClusterResourceQuota { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Spec defines the desired quota + // spec defines the desired quota optional ClusterResourceQuotaSpec spec = 2; - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage optional ClusterResourceQuotaStatus status = 3; } @@ -75,7 +75,7 @@ message ClusterResourceQuotaList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of 
ClusterResourceQuotas + // items is a list of ClusterResourceQuotas repeated ClusterResourceQuota items = 2; } @@ -96,22 +96,22 @@ message ClusterResourceQuotaSelector { // ClusterResourceQuotaSpec defines the desired quota restrictions message ClusterResourceQuotaSpec { - // Selector is the selector used to match projects. + // selector is the selector used to match projects. // It should only select active projects on the scale of dozens (though it can select // many more less active projects). These projects will contend on object creation through // this resource. optional ClusterResourceQuotaSelector selector = 1; - // Quota defines the desired quota + // quota defines the desired quota optional .k8s.io.api.core.v1.ResourceQuotaSpec quota = 2; } // ClusterResourceQuotaStatus defines the actual enforced quota and its current usage message ClusterResourceQuotaStatus { - // Total defines the actual enforced quota and its current usage across all projects + // total defines the actual enforced quota and its current usage across all projects optional .k8s.io.api.core.v1.ResourceQuotaStatus total = 1; - // Namespaces slices the usage by project. This division allows for quick resolution of + // namespaces slices the usage by project. This division allows for quick resolution of // deletion reconciliation inside of a single project without requiring a recalculation // across all projects. This can be used to pull the deltas for a given project. 
// +optional @@ -121,10 +121,10 @@ message ClusterResourceQuotaStatus { // ResourceQuotaStatusByNamespace gives status for a particular project message ResourceQuotaStatusByNamespace { - // Namespace the project this status applies to + // namespace the project this status applies to optional string namespace = 1; - // Status indicates how many resources have been consumed by this project + // status indicates how many resources have been consumed by this project optional .k8s.io.api.core.v1.ResourceQuotaStatus status = 2; } diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go index 7a29ddcd4b..0cfb85f87e 100644 --- a/vendor/github.com/openshift/api/quota/v1/types.go +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -27,22 +27,22 @@ type ClusterResourceQuota struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired quota + // spec defines the desired quota Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // ClusterResourceQuotaSpec defines the desired quota restrictions type ClusterResourceQuotaSpec struct { - // Selector is the selector used to match projects. + // selector is the selector used to match projects. // It should only select active projects on the scale of dozens (though it can select // many more less active projects). These projects will contend on object creation through // this resource. 
Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` - // Quota defines the desired quota + // quota defines the desired quota Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` } @@ -63,10 +63,10 @@ type ClusterResourceQuotaSelector struct { // ClusterResourceQuotaStatus defines the actual enforced quota and its current usage type ClusterResourceQuotaStatus struct { - // Total defines the actual enforced quota and its current usage across all projects + // total defines the actual enforced quota and its current usage across all projects Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` - // Namespaces slices the usage by project. This division allows for quick resolution of + // namespaces slices the usage by project. This division allows for quick resolution of // deletion reconciliation inside of a single project without requiring a recalculation // across all projects. This can be used to pull the deltas for a given project. 
// +optional @@ -87,7 +87,7 @@ type ClusterResourceQuotaList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of ClusterResourceQuotas + // items is a list of ClusterResourceQuotas Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -96,10 +96,10 @@ type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace // ResourceQuotaStatusByNamespace gives status for a particular project type ResourceQuotaStatusByNamespace struct { - // Namespace the project this status applies to + // namespace the project this status applies to Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Status indicates how many resources have been consumed by this project + // status indicates how many resources have been consumed by this project Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"` } @@ -120,10 +120,10 @@ type AppliedClusterResourceQuota struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // Spec defines the desired quota + // spec defines the desired quota Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status defines the actual enforced quota and its current usage + // status defines the actual enforced quota and its current usage Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -140,6 +140,6 @@ type AppliedClusterResourceQuotaList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of AppliedClusterResourceQuota + // items is a 
list of AppliedClusterResourceQuota Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go index 3072671c53..1bb84b8176 100644 --- a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go @@ -14,8 +14,8 @@ package v1 var map_AppliedClusterResourceQuota = map[string]string{ "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired quota", - "status": "Status defines the actual enforced quota and its current usage", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", } func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { @@ -25,7 +25,7 @@ func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { var map_AppliedClusterResourceQuotaList = map[string]string{ "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of AppliedClusterResourceQuota", + "items": "items is a list of AppliedClusterResourceQuota", } func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { @@ -35,8 +35,8 @@ func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { var map_ClusterResourceQuota = map[string]string{ "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired quota", - "status": "Status defines the actual enforced quota and its current usage", + "spec": "spec defines the desired quota", + "status": "status defines the actual enforced quota and its current usage", } func (ClusterResourceQuota) SwaggerDoc() map[string]string { @@ -46,7 +46,7 @@ func (ClusterResourceQuota) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaList = map[string]string{ "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of ClusterResourceQuotas", + "items": "items is a list of ClusterResourceQuotas", } func (ClusterResourceQuotaList) SwaggerDoc() map[string]string { @@ -65,8 +65,8 @@ func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaSpec = map[string]string{ "": "ClusterResourceQuotaSpec defines the desired quota restrictions", - "selector": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", - "quota": "Quota defines the desired quota", + "selector": "selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", + "quota": "quota defines the desired quota", } func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { @@ -75,8 +75,8 @@ func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { var map_ClusterResourceQuotaStatus = map[string]string{ "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage", - "total": "Total defines the actual enforced quota and its current usage across all projects", - "namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", + "total": "total defines the actual enforced quota and its current usage across all projects", + "namespaces": "namespaces slices the usage by project. 
This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", } func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { var map_ResourceQuotaStatusByNamespace = map[string]string{ "": "ResourceQuotaStatusByNamespace gives status for a particular project", - "namespace": "Namespace the project this status applies to", - "status": "Status indicates how many resources have been consumed by this project", + "namespace": "namespace the project this status applies to", + "status": "status indicates how many resources have been consumed by this project", } func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index 2a79b9a5a0..d6e1845b40 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -74,7 +74,7 @@ message RouteHTTPHeader { // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -85,7 +85,7 @@ message RouteHTTPHeader { optional string name = 1; // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required optional RouteHTTPHeaderActionUnion action = 2; } @@ -99,7 +99,7 @@ message RouteHTTPHeaderActionUnion { // Delete allows you to delete HTTP request and response headers. 
// +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required optional string type = 1; // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. @@ -192,13 +192,13 @@ message RouteHTTPHeaders { // RouteIngress holds information about the places where a route is exposed. message RouteIngress { - // Host is the host string under which the route is exposed; this value is required + // host is the host string under which the route is exposed; this value is required optional string host = 1; // Name is a name chosen by the router to identify itself; this value is required optional string routerName = 2; - // Conditions is the state of the route, may be empty. + // conditions is the state of the route, may be empty. // +listType=map // +listMapKey=type repeated RouteIngressCondition conditions = 3; @@ -214,11 +214,11 @@ message RouteIngress { // RouteIngressCondition contains details for the current condition of this route on a particular // router. message RouteIngressCondition { - // Type is the type of the condition. + // type is the type of the condition. // Currently only Admitted or UnservableInFutureVersions. optional string type = 1; - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. optional string status = 2; @@ -267,7 +267,7 @@ message RouteSetHTTPHeader { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 optional string value = 1; diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 9416199946..8fc2508773 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -251,7 +251,7 @@ type RouteHTTPHeader struct { // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. // It must be no more than 255 characters in length. // Header name must be unique. - // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" @@ -262,7 +262,7 @@ type RouteHTTPHeader struct { Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // action specifies actions to perform on headers, such as setting or deleting headers. - // +kubebuilder:validation:Required + // +required Action RouteHTTPHeaderActionUnion `json:"action" protobuf:"bytes,2,opt,name=action"` } @@ -276,7 +276,7 @@ type RouteHTTPHeaderActionUnion struct { // Delete allows you to delete HTTP request and response headers. // +unionDiscriminator // +kubebuilder:validation:Enum:=Set;Delete - // +kubebuilder:validation:Required + // +required Type RouteHTTPHeaderActionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteHTTPHeaderActionType"` // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. @@ -299,7 +299,7 @@ type RouteSetHTTPHeader struct { // + --- // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. // + See . 
- // +kubebuilder:validation:Required + // +required // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=16384 Value string `json:"value" protobuf:"bytes,1,opt,name=value"` @@ -359,11 +359,11 @@ type RouteStatus struct { // RouteIngress holds information about the places where a route is exposed. type RouteIngress struct { - // Host is the host string under which the route is exposed; this value is required + // host is the host string under which the route is exposed; this value is required Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // Name is a name chosen by the router to identify itself; this value is required RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"` - // Conditions is the state of the route, may be empty. + // conditions is the state of the route, may be empty. // +listType=map // +listMapKey=type Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` @@ -389,10 +389,10 @@ const ( // RouteIngressCondition contains details for the current condition of this route on a particular // router. type RouteIngressCondition struct { - // Type is the type of the condition. + // type is the type of the condition. // Currently only Admitted or UnservableInFutureVersions. Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"` - // Status is the status of the condition. + // status is the status of the condition. // Can be True, False, Unknown. 
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` // (brief) reason for the condition's last transition, and is usually a machine and human diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index 56a4e23e3d..1d59f10335 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -72,9 +72,9 @@ func (RouteHTTPHeaders) SwaggerDoc() map[string]string { var map_RouteIngress = map[string]string{ "": "RouteIngress holds information about the places where a route is exposed.", - "host": "Host is the host string under which the route is exposed; this value is required", + "host": "host is the host string under which the route is exposed; this value is required", "routerName": "Name is a name chosen by the router to identify itself; this value is required", - "conditions": "Conditions is the state of the route, may be empty.", + "conditions": "conditions is the state of the route, may be empty.", "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", } @@ -85,8 +85,8 @@ func (RouteIngress) SwaggerDoc() map[string]string { var map_RouteIngressCondition = map[string]string{ "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", - "type": "Type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", - "status": "Status is the status of the condition. 
Can be True, False, Unknown.", + "type": "type is the type of the condition. Currently only Admitted or UnservableInFutureVersions.", + "status": "status is the status of the condition. Can be True, False, Unknown.", "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", "message": "Human readable message indicating details about last transition.", "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto index be97c467db..eeda6835ae 100644 --- a/vendor/github.com/openshift/api/samples/v1/generated.proto +++ b/vendor/github.com/openshift/api/samples/v1/generated.proto @@ -30,7 +30,6 @@ message Config { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // +kubebuilder:validation:Required // +required optional ConfigSpec spec = 2; diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go index c4bf380439..320500b0ce 100644 --- a/vendor/github.com/openshift/api/samples/v1/types_config.go +++ b/vendor/github.com/openshift/api/samples/v1/types_config.go @@ -29,7 +29,6 @@ type Config struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - // +kubebuilder:validation:Required // +required Spec ConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // +optional diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index fdb879ce0d..0e6bb094fb 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ 
b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -14,16 +14,16 @@ option go_package = "github.com/openshift/api/security/v1"; // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. message AllowedFlexVolume { - // Driver is the name of the Flexvolume driver. + // driver is the name of the Flexvolume driver. optional string driver = 1; } // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. message FSGroupStrategyOptions { - // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. optional string type = 1; - // Ranges are the allowed ranges of fs groups. If you would like to force a single + // ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. // +listType=atomic repeated IDRange ranges = 2; @@ -32,10 +32,10 @@ message FSGroupStrategyOptions { // IDRange provides a min/max of an allowed range of IDs. // TODO: this could be reused for UIDs. message IDRange { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. optional int64 min = 1; - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. optional int64 max = 2; } @@ -177,23 +177,23 @@ message RangeAllocationList { // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. message RunAsUserStrategyOptions { - // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. optional string type = 1; - // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using + // uid is the user id that containers must run as. 
Required for the MustRunAs strategy if not using // namespace/service account allocated uids. optional int64 uid = 2; - // UIDRangeMin defines the min value for a strategy that allocates by range. + // uidRangeMin defines the min value for a strategy that allocates by range. optional int64 uidRangeMin = 3; - // UIDRangeMax defines the max value for a strategy that allocates by range. + // uidRangeMax defines the max value for a strategy that allocates by range. optional int64 uidRangeMax = 4; } // SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxContextStrategyOptions { - // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. optional string type = 1; // seLinuxOptions required to run as; required for MustRunAs @@ -229,7 +229,7 @@ message SecurityContextConstraints { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Priority influences the sort order of SCCs when evaluating which SCCs to try first for + // priority influences the sort order of SCCs when evaluating which SCCs to try first for // a given pod request based on access in the Users and Groups fields. The higher the int, the // higher priority. An unset value is considered a 0 priority. If scores // for multiple SCCs are equal they will be sorted from most restrictive to @@ -238,23 +238,23 @@ message SecurityContextConstraints { // +nullable optional int32 priority = 2; - // AllowPrivilegedContainer determines if a container can request to be run as privileged. + // allowPrivilegedContainer determines if a container can request to be run as privileged. 
optional bool allowPrivilegedContainer = 3; - // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // defaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable // +listType=atomic repeated string defaultAddCapabilities = 4; - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable // +listType=atomic repeated string requiredDropCapabilities = 5; - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. @@ -262,18 +262,18 @@ message SecurityContextConstraints { // +listType=atomic repeated string allowedCapabilities = 6; - // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + // allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false optional bool allowHostDirVolumePlugin = 7; - // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. 
// +nullable // +listType=atomic repeated string volumes = 8; - // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional @@ -281,16 +281,16 @@ message SecurityContextConstraints { // +listType=atomic repeated AllowedFlexVolume allowedFlexVolumes = 21; - // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. optional bool allowHostNetwork = 9; - // AllowHostPorts determines if the policy allows host ports in the containers. + // allowHostPorts determines if the policy allows host ports in the containers. optional bool allowHostPorts = 10; - // AllowHostPID determines if the policy allows host pid in the containers. + // allowHostPID determines if the policy allows host pid in the containers. optional bool allowHostPID = 11; - // AllowHostIPC determines if the policy allows host ipc in the containers. + // allowHostIPC determines if the policy allows host ipc in the containers. optional bool allowHostIPC = 12; // userNamespaceLevel determines if the policy allows host users in containers. @@ -305,35 +305,35 @@ message SecurityContextConstraints { // +optional optional string userNamespaceLevel = 26; - // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // defaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional // +nullable optional bool defaultAllowPrivilegeEscalation = 22; - // AllowPrivilegeEscalation determines if a pod can request to allow + // allowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. 
If unspecified, defaults to true. // +optional // +nullable optional bool allowPrivilegeEscalation = 23; - // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. // +nullable optional SELinuxContextStrategyOptions seLinuxContext = 13; - // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. // +nullable optional RunAsUserStrategyOptions runAsUser = 14; - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. // +nullable optional SupplementalGroupsStrategyOptions supplementalGroups = 15; - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. // +nullable optional FSGroupStrategyOptions fsGroup = 16; - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the SCC should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it @@ -352,7 +352,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string groups = 19; - // SeccompProfiles lists the allowed profiles that may be set for the pod or + // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. 
An unset (nil) or empty value means that no profiles may // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as @@ -361,7 +361,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string seccompProfiles = 20; - // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. @@ -374,7 +374,7 @@ message SecurityContextConstraints { // +listType=atomic repeated string allowedUnsafeSysctls = 24; - // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. // @@ -410,10 +410,10 @@ message ServiceAccountPodSecurityPolicyReviewStatus { // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. message SupplementalGroupsStrategyOptions { - // Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // type is the strategy that will dictate what supplemental groups is used in the SecurityContext. optional string type = 1; - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. 
// +listType=atomic repeated IDRange ranges = 2; diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 9d0af5c8dd..18585e97c0 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -45,7 +45,7 @@ type SecurityContextConstraints struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Priority influences the sort order of SCCs when evaluating which SCCs to try first for + // priority influences the sort order of SCCs when evaluating which SCCs to try first for // a given pod request based on access in the Users and Groups fields. The higher the int, the // higher priority. An unset value is considered a 0 priority. If scores // for multiple SCCs are equal they will be sorted from most restrictive to @@ -54,49 +54,49 @@ type SecurityContextConstraints struct { // +nullable Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"` - // AllowPrivilegedContainer determines if a container can request to be run as privileged. + // allowPrivilegedContainer determines if a container can request to be run as privileged. AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"` - // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // defaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. 
// +nullable // +listType=atomic DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // requiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. // +nullable // +listType=atomic RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // allowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. // +nullable // +listType=atomic AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` - // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin + // allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"` - // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. 
// +nullable // +listType=atomic Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` - // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional // +nullable // +listType=atomic AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` - // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` - // AllowHostPorts determines if the policy allows host ports in the containers. + // allowHostPorts determines if the policy allows host ports in the containers. AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"` - // AllowHostPID determines if the policy allows host pid in the containers. + // allowHostPID determines if the policy allows host pid in the containers. AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"` - // AllowHostIPC determines if the policy allows host ipc in the containers. + // allowHostIPC determines if the policy allows host ipc in the containers. AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"` // userNamespaceLevel determines if the policy allows host users in containers. // Valid values are "AllowHostLevel", "RequirePodLevel", and omitted. 
@@ -109,29 +109,29 @@ type SecurityContextConstraints struct { // +default="AllowHostLevel" // +optional UserNamespaceLevel NamespaceLevelType `json:"userNamespaceLevel,omitempty" protobuf:"bytes,26,opt,name=userNamespaceLevel"` - // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // defaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional // +nullable DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` - // AllowPrivilegeEscalation determines if a pod can request to allow + // allowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional // +nullable AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` - // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. // +nullable SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` - // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. // +nullable RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"` - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
// +nullable SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"` - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. // +nullable FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"` - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system // the SCC should deny the pod. // If set to false the container may run with a read only root file system if it wishes but it @@ -149,7 +149,7 @@ type SecurityContextConstraints struct { // +listType=atomic Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` - // SeccompProfiles lists the allowed profiles that may be set for the pod or + // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. An unset (nil) or empty value means that no profiles may // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as @@ -158,7 +158,7 @@ type SecurityContextConstraints struct { // +listType=atomic SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` - // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. 
// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. @@ -170,7 +170,7 @@ type SecurityContextConstraints struct { // +nullable // +listType=atomic AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` - // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. // @@ -223,13 +223,13 @@ var ( // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. type AllowedFlexVolume struct { - // Driver is the name of the Flexvolume driver. + // driver is the name of the Flexvolume driver. Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } // SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxContextStrategyOptions struct { - // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + // type is the strategy that will dictate what SELinux context is used in the SecurityContext. Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"` // seLinuxOptions required to run as; required for MustRunAs SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` @@ -237,22 +237,22 @@ type SELinuxContextStrategyOptions struct { // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. type RunAsUserStrategyOptions struct { - // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // type is the strategy that will dictate what RunAsUser is used in the SecurityContext. 
Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"` - // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using + // uid is the user id that containers must run as. Required for the MustRunAs strategy if not using // namespace/service account allocated uids. UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"` - // UIDRangeMin defines the min value for a strategy that allocates by range. + // uidRangeMin defines the min value for a strategy that allocates by range. UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"` - // UIDRangeMax defines the max value for a strategy that allocates by range. + // uidRangeMax defines the max value for a strategy that allocates by range. UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"` } // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { - // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + // type is the strategy that will dictate what FSGroup is used in the SecurityContext. Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"` - // Ranges are the allowed ranges of fs groups. If you would like to force a single + // ranges are the allowed ranges of fs groups. If you would like to force a single // fs group then supply a single range with the same start and end. // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` @@ -260,9 +260,9 @@ type FSGroupStrategyOptions struct { // SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. type SupplementalGroupsStrategyOptions struct { - // Type is the strategy that will dictate what supplemental groups is used in the SecurityContext. 
+ // type is the strategy that will dictate what supplemental groups is used in the SecurityContext. Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"` - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // ranges are the allowed ranges of supplemental groups. If you would like to force a single // supplemental group then supply a single range with the same start and end. // +listType=atomic Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` @@ -271,9 +271,9 @@ type SupplementalGroupsStrategyOptions struct { // IDRange provides a min/max of an allowed range of IDs. // TODO: this could be reused for UIDs. type IDRange struct { - // Min is the start of the range, inclusive. + // min is the start of the range, inclusive. Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"` - // Max is the end of the range, inclusive. + // max is the end of the range, inclusive. 
Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"` } diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go index 2f242366a8..29cddf7e64 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -13,7 +13,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "Driver is the name of the Flexvolume driver.", + "driver": "driver is the name of the Flexvolume driver.", } func (AllowedFlexVolume) SwaggerDoc() map[string]string { @@ -22,8 +22,8 @@ func (AllowedFlexVolume) SwaggerDoc() map[string]string { var map_FSGroupStrategyOptions = map[string]string{ "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "type": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", + "type": "type is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. 
If you would like to force a single fs group then supply a single range with the same start and end.", } func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { @@ -32,8 +32,8 @@ func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { var map_IDRange = map[string]string{ "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "Min is the start of the range, inclusive.", - "max": "Max is the end of the range, inclusive.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", } func (IDRange) SwaggerDoc() map[string]string { @@ -146,10 +146,10 @@ func (RangeAllocationList) SwaggerDoc() map[string]string { var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "type": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", - "uid": "UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.", - "uidRangeMin": "UIDRangeMin defines the min value for a strategy that allocates by range.", - "uidRangeMax": "UIDRangeMax defines the max value for a strategy that allocates by range.", + "type": "type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "uid": "uid is the user id that containers must run as. 
Required for the MustRunAs strategy if not using namespace/service account allocated uids.", + "uidRangeMin": "uidRangeMin defines the min value for a strategy that allocates by range.", + "uidRangeMax": "uidRangeMax defines the max value for a strategy that allocates by range.", } func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { @@ -158,7 +158,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { var map_SELinuxContextStrategyOptions = map[string]string{ "": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.", - "type": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.", + "type": "type is the strategy that will dictate what SELinux context is used in the SecurityContext.", "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs", } @@ -169,31 +169,31 @@ func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string { var map_SecurityContextConstraints = map[string]string{ "": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "priority": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. 
If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", - "allowPrivilegedContainer": "AllowPrivilegedContainer determines if a container can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", - "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", - "allowHostDirVolumePlugin": "AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", - "volumes": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", - "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", - "allowHostNetwork": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.", - "allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.", - "allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.", + "priority": "priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", + "allowPrivilegedContainer": "allowPrivilegedContainer determines if a container can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. 
To allow all capabilities you may use '*'.", + "allowHostDirVolumePlugin": "allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", + "volumes": "volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + "allowHostNetwork": "allowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "allowHostPorts": "allowHostPorts determines if the policy allows host ports in the containers.", + "allowHostPID": "allowHostPID determines if the policy allows host pid in the containers.", + "allowHostIPC": "allowHostIPC determines if the policy allows host ipc in the containers.", "userNamespaceLevel": "userNamespaceLevel determines if the policy allows host users in containers. Valid values are \"AllowHostLevel\", \"RequirePodLevel\", and omitted. When \"AllowHostLevel\" is set, a pod author may set `hostUsers` to either `true` or `false`. When \"RequirePodLevel\" is set, a pod author must set `hostUsers` to `false`. When omitted, the default value is \"AllowHostLevel\".", - "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. 
If unspecified, defaults to true.", - "seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", - "runAsUser": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", - "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "seLinuxContext": "seLinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", + "runAsUser": "runAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. 
If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "users": "The users who have permissions to use this security context constraints", "groups": "The groups that have permission to use this security context constraints", - "seccompProfiles": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", - "allowedUnsafeSysctls": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "seccompProfiles": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. 
When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", } func (SecurityContextConstraints) SwaggerDoc() map[string]string { @@ -221,8 +221,8 @@ func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]strin var map_SupplementalGroupsStrategyOptions = map[string]string{ "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "type": "Type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", + "type": "type is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. 
If you would like to force a single supplemental group then supply a single range with the same start and end.", } func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go index ebd8d75efc..3ad1c560fd 100644 --- a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go @@ -48,6 +48,6 @@ type ServiceCertSignerOperatorConfigList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - // Items contains the items + // items contains the items Items []ServiceCertSignerOperatorConfig `json:"items"` } diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go index 13b3b73644..5e341b1dad 100644 --- a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go @@ -23,7 +23,7 @@ func (ServiceCertSignerOperatorConfig) SwaggerDoc() map[string]string { var map_ServiceCertSignerOperatorConfigList = map[string]string{ "": "ServiceCertSignerOperatorConfigList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items contains the items", + "items": "items contains the items", } func (ServiceCertSignerOperatorConfigList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go index 1eea47d02d..3b6e6be374 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go @@ -11,19 +11,20 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedConfigMap in the volume attributes: // // spec: -// volumes: -// - name: shared-configmap -// csi: -// driver: csi.sharedresource.openshift.io -// volumeAttributes: -// sharedConfigMap: my-share +// +// volumes: +// - name: shared-configmap +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedConfigMap: my-share // // For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object // within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating // such Role and RoleBinding objects. 
// -// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` -// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` // // Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users. // @@ -45,7 +46,7 @@ type SharedConfigMap struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired shared configmap - // +kubebuilder:validation:Required + // +required Spec SharedConfigMapSpec `json:"spec,omitempty"` // status is the observed status of the shared configmap @@ -72,10 +73,10 @@ type SharedConfigMapList struct { // SharedConfigMapReference contains information about which ConfigMap to share type SharedConfigMapReference struct { // name represents the name of the ConfigMap that is being referenced. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace represents the namespace where the referenced ConfigMap is located. - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` } @@ -83,7 +84,7 @@ type SharedConfigMapReference struct { // +k8s:openapi-gen=true type SharedConfigMapSpec struct { //configMapRef is a reference to the ConfigMap to share - // +kubebuilder:validation:Required + // +required ConfigMapRef SharedConfigMapReference `json:"configMapRef"` // description is a user readable explanation of what the backing resource provides. 
Description string `json:"description,omitempty"` diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go index 654106bce6..3ea9260f0c 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go @@ -11,19 +11,20 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedSecret in the volume attributes: // // spec: -// volumes: -// - name: shared-secret -// csi: -// driver: csi.sharedresource.openshift.io -// volumeAttributes: -// sharedSecret: my-share +// +// volumes: +// - name: shared-secret +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedSecret: my-share // // For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object // within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating // such Role and RoleBinding objects. // -// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` -// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` // // Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users. 
// @@ -44,7 +45,7 @@ type SharedSecret struct { metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired shared secret - // +kubebuilder:validation:Required + // +required Spec SharedSecretSpec `json:"spec,omitempty"` // status is the observed status of the shared secret @@ -71,10 +72,10 @@ type SharedSecretList struct { // SharedSecretReference contains information about which Secret to share type SharedSecretReference struct { // name represents the name of the Secret that is being referenced. - // +kubebuilder:validation:Required + // +required Name string `json:"name"` // namespace represents the namespace where the referenced Secret is located. - // +kubebuilder:validation:Required + // +required Namespace string `json:"namespace"` } @@ -82,7 +83,7 @@ type SharedSecretReference struct { // +k8s:openapi-gen=true type SharedSecretSpec struct { // secretRef is a reference to the Secret to share - // +kubebuilder:validation:Required + // +required SecretRef SharedSecretReference `json:"secretRef"` // description is a user readable explanation of what the backing resource provides. Description string `json:"description,omitempty"` diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go index f432d63f77..ea6334d147 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go @@ -12,7 +12,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE var map_SharedConfigMap = map[string]string{ - "": "SharedConfigMap allows a ConfigMap to be shared across namespaces. 
Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n volumes:\n - name: shared-configmap\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "": "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-configmap\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. 
For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the specification of the desired shared configmap", "status": "status is the observed status of the shared configmap", @@ -61,7 +61,7 @@ func (SharedConfigMapStatus) SwaggerDoc() map[string]string { } var map_SharedSecret = map[string]string{ - "": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n volumes:\n - name: shared-secret\n csi:\n driver: csi.sharedresource.openshift.io\n volumeAttributes:\n sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. 
For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", + "": "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n\n\tvolumes:\n\t- name: shared-secret\n\t csi:\n\t driver: csi.sharedresource.openshift.io\n\t volumeAttributes:\n\t sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n\t`oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n\t`oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec is the specification of the desired shared secret", "status": "status is the observed status of the shared secret", diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto index 5ff4d7b1d6..8f27eb48a0 100644 --- a/vendor/github.com/openshift/api/template/v1/generated.proto +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -42,7 +42,7 @@ message BrokerTemplateInstanceList { // BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. message BrokerTemplateInstanceSpec { - // templateinstance is a reference to a TemplateInstance object residing + // templateInstance is a reference to a TemplateInstance object residing // in a namespace. optional .k8s.io.api.core.v1.ObjectReference templateInstance = 1; @@ -50,7 +50,7 @@ message BrokerTemplateInstanceSpec { // containing the necessary template parameters. optional .k8s.io.api.core.v1.ObjectReference secret = 2; - // bindingids is a list of 'binding_id's provided during successive bind + // bindingIDs is a list of 'binding_id's provided during successive bind // calls to the template service broker. repeated string bindingIDs = 3; } @@ -67,17 +67,17 @@ message ExtraValue { // Parameter defines a name/value variable that is to be processed during // the Template to Config transformation. message Parameter { - // Name must be set and it can be referenced in Template + // name must be set and it can be referenced in Template // Items using ${PARAMETER_NAME}. Required. 
optional string name = 1; // Optional: The name that will show in UI instead of parameter 'Name' optional string displayName = 2; - // Description of a parameter. Optional. + // description of a parameter. Optional. optional string description = 3; - // Value holds the Parameter data. If specified, the generator will be + // value holds the Parameter data. If specified, the generator will be // ignored. The value replaces all occurrences of the Parameter ${Name} // expression during the Template to Config transformation. Optional. optional string value = 4; @@ -101,7 +101,7 @@ message Parameter { // "[a-zA-Z0-9]{8}" | "hW4yQU5i" optional string generate = 5; - // From is an input value for the generator. Optional. + // from is an input value for the generator. Optional. optional string from = 6; // Optional: Indicates the parameter must have a value. Defaults to false. @@ -164,21 +164,21 @@ message TemplateInstance { // TemplateInstanceCondition contains condition information for a // TemplateInstance. message TemplateInstanceCondition { - // Type of the condition, currently Ready or InstantiateFailure. + // type of the condition, currently Ready or InstantiateFailure. optional string type = 1; - // Status of the condition, one of True, False or Unknown. + // status of the condition, one of True, False or Unknown. optional string status = 2; - // LastTransitionTime is the last time a condition status transitioned from + // lastTransitionTime is the last time a condition status transitioned from // one state to another. optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - // Reason is a brief machine readable explanation for the condition's last + // reason is a brief machine readable explanation for the condition's last // transition. optional string reason = 4; - // Message is a human readable description of the details of the last + // message is a human readable description of the details of the last // transition, complementing reason. 
optional string message = 5; } @@ -243,7 +243,7 @@ message TemplateInstanceStatus { // TemplateInstance's current state. repeated TemplateInstanceCondition conditions = 1; - // Objects references the objects created by the TemplateInstance. + // objects references the objects created by the TemplateInstance. repeated TemplateInstanceObject objects = 2; } @@ -256,7 +256,7 @@ message TemplateList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is a list of templates + // items is a list of templates repeated Template items = 2; } diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go index 9d95912b28..5510b0f90b 100644 --- a/vendor/github.com/openshift/api/template/v1/types.go +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -61,24 +61,24 @@ type TemplateList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is a list of templates + // items is a list of templates Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` } // Parameter defines a name/value variable that is to be processed during // the Template to Config transformation. type Parameter struct { - // Name must be set and it can be referenced in Template + // name must be set and it can be referenced in Template // Items using ${PARAMETER_NAME}. Required. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Optional: The name that will show in UI instead of parameter 'Name' DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` - // Description of a parameter. Optional. + // description of a parameter. Optional. 
Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` - // Value holds the Parameter data. If specified, the generator will be + // value holds the Parameter data. If specified, the generator will be // ignored. The value replaces all occurrences of the Parameter ${Name} // expression during the Template to Config transformation. Optional. Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` @@ -103,7 +103,7 @@ type Parameter struct { // Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` - // From is an input value for the generator. Optional. + // from is an input value for the generator. Optional. From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` // Optional: Indicates the parameter must have a value. Defaults to false. @@ -181,24 +181,24 @@ type TemplateInstanceStatus struct { // TemplateInstance's current state. Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Objects references the objects created by the TemplateInstance. + // objects references the objects created by the TemplateInstance. Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` } // TemplateInstanceCondition contains condition information for a // TemplateInstance. type TemplateInstanceCondition struct { - // Type of the condition, currently Ready or InstantiateFailure. + // type of the condition, currently Ready or InstantiateFailure. Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"` - // Status of the condition, one of True, False or Unknown. + // status of the condition, one of True, False or Unknown. 
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` - // LastTransitionTime is the last time a condition status transitioned from + // lastTransitionTime is the last time a condition status transitioned from // one state to another. LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is a brief machine readable explanation for the condition's last + // reason is a brief machine readable explanation for the condition's last // transition. Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` - // Message is a human readable description of the details of the last + // message is a human readable description of the details of the last // transition, complementing reason. Message string `json:"message" protobuf:"bytes,5,opt,name=message"` } @@ -263,7 +263,7 @@ type BrokerTemplateInstance struct { // BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. type BrokerTemplateInstanceSpec struct { - // templateinstance is a reference to a TemplateInstance object residing + // templateInstance is a reference to a TemplateInstance object residing // in a namespace. TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"` @@ -271,7 +271,7 @@ type BrokerTemplateInstanceSpec struct { // containing the necessary template parameters. Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"` - // bindingids is a list of 'binding_id's provided during successive bind + // bindingIDs is a list of 'binding_id's provided during successive bind // calls to the template service broker. 
BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"` } diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go index 8ed3822c8d..761390d02f 100644 --- a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go @@ -33,9 +33,9 @@ func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string { var map_BrokerTemplateInstanceSpec = map[string]string{ "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.", - "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", + "templateInstance": "templateInstance is a reference to a TemplateInstance object residing in a namespace.", "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.", - "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", + "bindingIDs": "bindingIDs is a list of 'binding_id's provided during successive bind calls to the template service broker.", } func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { @@ -44,12 +44,12 @@ func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { var map_Parameter = map[string]string{ "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.", - "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", + "name": "name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", "displayName": "Optional: The name that will show in UI instead of parameter 'Name'", - "description": "Description of a parameter. 
Optional.", - "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", + "description": "description of a parameter. Optional.", + "value": "value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value", - "from": "From is an input value for the generator. Optional.", + "from": "from is an input value for the generator. Optional.", "required": "Optional: Indicates the parameter must have a value. 
Defaults to false.", } @@ -83,11 +83,11 @@ func (TemplateInstance) SwaggerDoc() map[string]string { var map_TemplateInstanceCondition = map[string]string{ "": "TemplateInstanceCondition contains condition information for a TemplateInstance.", - "type": "Type of the condition, currently Ready or InstantiateFailure.", - "status": "Status of the condition, one of True, False or Unknown.", - "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.", - "reason": "Reason is a brief machine readable explanation for the condition's last transition.", - "message": "Message is a human readable description of the details of the last transition, complementing reason.", + "type": "type of the condition, currently Ready or InstantiateFailure.", + "status": "status of the condition, one of True, False or Unknown.", + "lastTransitionTime": "lastTransitionTime is the last time a condition status transitioned from one state to another.", + "reason": "reason is a brief machine readable explanation for the condition's last transition.", + "message": "message is a human readable description of the details of the last transition, complementing reason.", } func (TemplateInstanceCondition) SwaggerDoc() map[string]string { @@ -139,7 +139,7 @@ func (TemplateInstanceSpec) SwaggerDoc() map[string]string { var map_TemplateInstanceStatus = map[string]string{ "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", - "objects": "Objects references the objects created by the TemplateInstance.", + "objects": "objects references the objects created by the TemplateInstance.", } func (TemplateInstanceStatus) SwaggerDoc() map[string]string { @@ -149,7 +149,7 @@ func (TemplateInstanceStatus) SwaggerDoc() map[string]string { var map_TemplateList = map[string]string{ "": "TemplateList is a list of Template 
objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of templates", + "items": "items is a list of templates", } func (TemplateList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto index c1c2b8156b..f07b446ad4 100644 --- a/vendor/github.com/openshift/api/user/v1/generated.proto +++ b/vendor/github.com/openshift/api/user/v1/generated.proto @@ -21,7 +21,7 @@ message Group { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Users is the list of users in this group. + // users is the list of users in this group. 
optional OptionalNames users = 2; } @@ -34,7 +34,7 @@ message GroupList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of groups + // items is the list of groups repeated Group items = 2; } @@ -51,17 +51,17 @@ message Identity { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // ProviderName is the source of identity information + // providerName is the source of identity information optional string providerName = 2; - // ProviderUserName uniquely represents this identity in the scope of the provider + // providerUserName uniquely represents this identity in the scope of the provider optional string providerUserName = 3; - // User is a reference to the user this identity is associated with + // user is a reference to the user this identity is associated with // Both Name and UID must be set optional .k8s.io.api.core.v1.ObjectReference user = 4; - // Extra holds extra information about this identity + // extra holds extra information about this identity map extra = 5; } @@ -74,7 +74,7 @@ message IdentityList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of identities + // items is the list of identities repeated Identity items = 2; } @@ -100,14 +100,14 @@ message User { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // FullName is the full name of user + // fullName is the full name of user optional string fullName = 2; - // Identities are the identities associated with this user + // identities are 
the identities associated with this user // +optional repeated string identities = 3; - // Groups specifies group names this user is a member of. + // groups specifies group names this user is a member of. // This field is deprecated and will be removed in a future release. // Instead, create a Group object containing the name of this User. repeated string groups = 4; @@ -122,10 +122,10 @@ message UserIdentityMapping { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Identity is a reference to an identity + // identity is a reference to an identity optional .k8s.io.api.core.v1.ObjectReference identity = 2; - // User is a reference to a user + // user is a reference to a user optional .k8s.io.api.core.v1.ObjectReference user = 3; } @@ -138,7 +138,7 @@ message UserList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - // Items is the list of users + // items is the list of users repeated User items = 2; } diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go index 7014bbfac7..64ae8c8300 100644 --- a/vendor/github.com/openshift/api/user/v1/types.go +++ b/vendor/github.com/openshift/api/user/v1/types.go @@ -26,14 +26,14 @@ type User struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // FullName is the full name of user + // fullName is the full name of user FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` - // Identities are the identities associated with this user + // identities are the identities associated with this user // +optional Identities []string 
`json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"` - // Groups specifies group names this user is a member of. + // groups specifies group names this user is a member of. // This field is deprecated and will be removed in a future release. // Instead, create a Group object containing the name of this User. Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` @@ -52,7 +52,7 @@ type UserList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of users + // items is the list of users Items []User `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -75,17 +75,17 @@ type Identity struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ProviderName is the source of identity information + // providerName is the source of identity information ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"` - // ProviderUserName uniquely represents this identity in the scope of the provider + // providerUserName uniquely represents this identity in the scope of the provider ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"` - // User is a reference to the user this identity is associated with + // user is a reference to the user this identity is associated with // Both Name and UID must be set User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"` - // Extra holds extra information about this identity + // extra holds extra information about this identity Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` } @@ -102,7 +102,7 @@ type IdentityList struct { // More info: 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of identities + // items is the list of identities Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -122,9 +122,9 @@ type UserIdentityMapping struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Identity is a reference to an identity + // identity is a reference to an identity Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"` - // User is a reference to a user + // user is a reference to a user User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` } @@ -152,7 +152,7 @@ type Group struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Users is the list of users in this group. + // users is the list of users in this group. 
Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"` } @@ -169,6 +169,6 @@ type GroupList struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of groups + // items is the list of groups Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go index 5844723a72..d85e7dfc58 100644 --- a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go @@ -14,7 +14,7 @@ package v1 var map_Group = map[string]string{ "": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "users": "Users is the list of users in this group.", + "users": "users is the list of users in this group.", } func (Group) SwaggerDoc() map[string]string { @@ -24,7 +24,7 @@ func (Group) SwaggerDoc() map[string]string { var map_GroupList = map[string]string{ "": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of groups", + "items": "items is the list of groups", } func (GroupList) SwaggerDoc() map[string]string { @@ -34,10 +34,10 @@ func (GroupList) SwaggerDoc() map[string]string { var map_Identity = map[string]string{ "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "providerName": "ProviderName is the source of identity information", - "providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider", - "user": "User is a reference to the user this identity is associated with Both Name and UID must be set", - "extra": "Extra holds extra information about this identity", + "providerName": "providerName is the source of identity information", + "providerUserName": "providerUserName uniquely represents this identity in the scope of the provider", + "user": "user is a reference to the user this identity is associated with Both Name and UID must be set", + "extra": "extra holds extra information about this identity", } func (Identity) SwaggerDoc() map[string]string { @@ -47,7 +47,7 @@ func (Identity) SwaggerDoc() map[string]string { var map_IdentityList = map[string]string{ "": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major 
release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of identities", + "items": "items is the list of identities", } func (IdentityList) SwaggerDoc() map[string]string { @@ -57,9 +57,9 @@ func (IdentityList) SwaggerDoc() map[string]string { var map_User = map[string]string{ "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "fullName": "FullName is the full name of user", - "identities": "Identities are the identities associated with this user", - "groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", + "fullName": "fullName is the full name of user", + "identities": "identities are the identities associated with this user", + "groups": "groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. 
Instead, create a Group object containing the name of this User.", } func (User) SwaggerDoc() map[string]string { @@ -69,8 +69,8 @@ func (User) SwaggerDoc() map[string]string { var map_UserIdentityMapping = map[string]string{ "": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "identity": "Identity is a reference to an identity", - "user": "User is a reference to a user", + "identity": "identity is a reference to an identity", + "user": "user is a reference to a user", } func (UserIdentityMapping) SwaggerDoc() map[string]string { @@ -80,7 +80,7 @@ func (UserIdentityMapping) SwaggerDoc() map[string]string { var map_UserList = map[string]string{ "": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of users", + "items": "items is the list of users", } func (UserList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go deleted file mode 100644 index 2a9a4fa686..0000000000 --- a/vendor/github.com/openshift/client-go/build/listers/build/v1/build.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/build/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" -) - -// BuildLister helps list Builds. 
-// All objects returned here must be treated as read-only. -type BuildLister interface { - // List lists all Builds in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Build, err error) - // Builds returns an object that can list and get Builds. - Builds(namespace string) BuildNamespaceLister - BuildListerExpansion -} - -// buildLister implements the BuildLister interface. -type buildLister struct { - listers.ResourceIndexer[*v1.Build] -} - -// NewBuildLister returns a new BuildLister. -func NewBuildLister(indexer cache.Indexer) BuildLister { - return &buildLister{listers.New[*v1.Build](indexer, v1.Resource("build"))} -} - -// Builds returns an object that can list and get Builds. -func (s *buildLister) Builds(namespace string) BuildNamespaceLister { - return buildNamespaceLister{listers.NewNamespaced[*v1.Build](s.ResourceIndexer, namespace)} -} - -// BuildNamespaceLister helps list and get Builds. -// All objects returned here must be treated as read-only. -type BuildNamespaceLister interface { - // List lists all Builds in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Build, err error) - // Get retrieves the Build from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.Build, error) - BuildNamespaceListerExpansion -} - -// buildNamespaceLister implements the BuildNamespaceLister -// interface. -type buildNamespaceLister struct { - listers.ResourceIndexer[*v1.Build] -} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go deleted file mode 100644 index b4366077f9..0000000000 --- a/vendor/github.com/openshift/client-go/build/listers/build/v1/buildconfig.go +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated by lister-gen. 
DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/build/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/listers" - "k8s.io/client-go/tools/cache" -) - -// BuildConfigLister helps list BuildConfigs. -// All objects returned here must be treated as read-only. -type BuildConfigLister interface { - // List lists all BuildConfigs in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.BuildConfig, err error) - // BuildConfigs returns an object that can list and get BuildConfigs. - BuildConfigs(namespace string) BuildConfigNamespaceLister - BuildConfigListerExpansion -} - -// buildConfigLister implements the BuildConfigLister interface. -type buildConfigLister struct { - listers.ResourceIndexer[*v1.BuildConfig] -} - -// NewBuildConfigLister returns a new BuildConfigLister. -func NewBuildConfigLister(indexer cache.Indexer) BuildConfigLister { - return &buildConfigLister{listers.New[*v1.BuildConfig](indexer, v1.Resource("buildconfig"))} -} - -// BuildConfigs returns an object that can list and get BuildConfigs. -func (s *buildConfigLister) BuildConfigs(namespace string) BuildConfigNamespaceLister { - return buildConfigNamespaceLister{listers.NewNamespaced[*v1.BuildConfig](s.ResourceIndexer, namespace)} -} - -// BuildConfigNamespaceLister helps list and get BuildConfigs. -// All objects returned here must be treated as read-only. -type BuildConfigNamespaceLister interface { - // List lists all BuildConfigs in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.BuildConfig, err error) - // Get retrieves the BuildConfig from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1.BuildConfig, error) - BuildConfigNamespaceListerExpansion -} - -// buildConfigNamespaceLister implements the BuildConfigNamespaceLister -// interface. 
-type buildConfigNamespaceLister struct { - listers.ResourceIndexer[*v1.BuildConfig] -} diff --git a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go deleted file mode 100644 index 1fc9faecdd..0000000000 --- a/vendor/github.com/openshift/client-go/build/listers/build/v1/expansion_generated.go +++ /dev/null @@ -1,19 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -// BuildListerExpansion allows custom methods to be added to -// BuildLister. -type BuildListerExpansion interface{} - -// BuildNamespaceListerExpansion allows custom methods to be added to -// BuildNamespaceLister. -type BuildNamespaceListerExpansion interface{} - -// BuildConfigListerExpansion allows custom methods to be added to -// BuildConfigLister. -type BuildConfigListerExpansion interface{} - -// BuildConfigNamespaceListerExpansion allows custom methods to be added to -// BuildConfigNamespaceLister. -type BuildConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go index e5b1b74eac..b217e5bdcd 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsplatformstatus.go @@ -5,9 +5,10 @@ package v1 // AWSPlatformStatusApplyConfiguration represents a declarative configuration of the AWSPlatformStatus type for use // with apply. 
type AWSPlatformStatusApplyConfiguration struct { - Region *string `json:"region,omitempty"` - ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` - ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + Region *string `json:"region,omitempty"` + ServiceEndpoints []AWSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` + ResourceTags []AWSResourceTagApplyConfiguration `json:"resourceTags,omitempty"` + CloudLoadBalancerConfig *CloudLoadBalancerConfigApplyConfiguration `json:"cloudLoadBalancerConfig,omitempty"` } // AWSPlatformStatusApplyConfiguration constructs a declarative configuration of the AWSPlatformStatus type for use with @@ -49,3 +50,11 @@ func (b *AWSPlatformStatusApplyConfiguration) WithResourceTags(values ...*AWSRes } return b } + +// WithCloudLoadBalancerConfig sets the CloudLoadBalancerConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CloudLoadBalancerConfig field is set to the value of the last call. +func (b *AWSPlatformStatusApplyConfiguration) WithCloudLoadBalancerConfig(value *CloudLoadBalancerConfigApplyConfiguration) *AWSPlatformStatusApplyConfiguration { + b.CloudLoadBalancerConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoring.go new file mode 100644 index 0000000000..10f8ff8d93 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoring.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + apiconfigv1 "github.com/openshift/api/config/v1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterMonitoringApplyConfiguration represents a declarative configuration of the ClusterMonitoring type for use +// with apply. +type ClusterMonitoringApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterMonitoringSpecApplyConfiguration `json:"spec,omitempty"` + Status *apiconfigv1.ClusterMonitoringStatus `json:"status,omitempty"` +} + +// ClusterMonitoring constructs a declarative configuration of the ClusterMonitoring type for use with +// apply. +func ClusterMonitoring(name string) *ClusterMonitoringApplyConfiguration { + b := &ClusterMonitoringApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterMonitoring") + b.WithAPIVersion("config.openshift.io/v1") + return b +} + +// ExtractClusterMonitoring extracts the applied configuration owned by fieldManager from +// clusterMonitoring. If no managedFields are found in clusterMonitoring for fieldManager, a +// ClusterMonitoringApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterMonitoring must be a unmodified ClusterMonitoring API object that was retrieved from the Kubernetes API. +// ExtractClusterMonitoring provides a way to perform a extract/modify-in-place/apply workflow. 
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterMonitoring(clusterMonitoring *apiconfigv1.ClusterMonitoring, fieldManager string) (*ClusterMonitoringApplyConfiguration, error) { + return extractClusterMonitoring(clusterMonitoring, fieldManager, "") +} + +// ExtractClusterMonitoringStatus is the same as ExtractClusterMonitoring except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterMonitoringStatus(clusterMonitoring *apiconfigv1.ClusterMonitoring, fieldManager string) (*ClusterMonitoringApplyConfiguration, error) { + return extractClusterMonitoring(clusterMonitoring, fieldManager, "status") +} + +func extractClusterMonitoring(clusterMonitoring *apiconfigv1.ClusterMonitoring, fieldManager string, subresource string) (*ClusterMonitoringApplyConfiguration, error) { + b := &ClusterMonitoringApplyConfiguration{} + err := managedfields.ExtractInto(clusterMonitoring, internal.Parser().Type("com.github.openshift.api.config.v1.ClusterMonitoring"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterMonitoring.Name) + + b.WithKind("ClusterMonitoring") + b.WithAPIVersion("config.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ClusterMonitoringApplyConfiguration) WithKind(value string) *ClusterMonitoringApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithAPIVersion(value string) *ClusterMonitoringApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithName(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithGenerateName(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ClusterMonitoringApplyConfiguration) WithNamespace(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithUID(value types.UID) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithResourceVersion(value string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithGeneration(value int64) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ClusterMonitoringApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterMonitoringApplyConfiguration) WithLabels(entries map[string]string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterMonitoringApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterMonitoringApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterMonitoringApplyConfiguration) WithFinalizers(values ...string) *ClusterMonitoringApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterMonitoringApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithSpec(value *ClusterMonitoringSpecApplyConfiguration) *ClusterMonitoringApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterMonitoringApplyConfiguration) WithStatus(value apiconfigv1.ClusterMonitoringStatus) *ClusterMonitoringApplyConfiguration { + b.Status = &value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterMonitoringApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoringspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoringspec.go new file mode 100644 index 0000000000..33d08de2da --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/clustermonitoringspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ClusterMonitoringSpecApplyConfiguration represents a declarative configuration of the ClusterMonitoringSpec type for use +// with apply. +type ClusterMonitoringSpecApplyConfiguration struct { + UserDefined *UserDefinedMonitoringApplyConfiguration `json:"userDefined,omitempty"` +} + +// ClusterMonitoringSpecApplyConfiguration constructs a declarative configuration of the ClusterMonitoringSpec type for use with +// apply. +func ClusterMonitoringSpec() *ClusterMonitoringSpecApplyConfiguration { + return &ClusterMonitoringSpecApplyConfiguration{} +} + +// WithUserDefined sets the UserDefined field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserDefined field is set to the value of the last call. 
+func (b *ClusterMonitoringSpecApplyConfiguration) WithUserDefined(value *UserDefinedMonitoringApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.UserDefined = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go index 37965a1384..f1c513f4cb 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nodespec.go @@ -9,8 +9,9 @@ import ( // NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. type NodeSpecApplyConfiguration struct { - CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` - WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + CgroupMode *v1.CgroupMode `json:"cgroupMode,omitempty"` + WorkerLatencyProfile *v1.WorkerLatencyProfileType `json:"workerLatencyProfile,omitempty"` + MinimumKubeletVersion *string `json:"minimumKubeletVersion,omitempty"` } // NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with @@ -34,3 +35,11 @@ func (b *NodeSpecApplyConfiguration) WithWorkerLatencyProfile(value v1.WorkerLat b.WorkerLatencyProfile = &value return b } + +// WithMinimumKubeletVersion sets the MinimumKubeletVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MinimumKubeletVersion field is set to the value of the last call. 
+func (b *NodeSpecApplyConfiguration) WithMinimumKubeletVersion(value string) *NodeSpecApplyConfiguration { + b.MinimumKubeletVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go index 4ffecd9266..9247e46a05 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/release.go @@ -9,10 +9,11 @@ import ( // ReleaseApplyConfiguration represents a declarative configuration of the Release type for use // with apply. type ReleaseApplyConfiguration struct { - Version *string `json:"version,omitempty"` - Image *string `json:"image,omitempty"` - URL *v1.URL `json:"url,omitempty"` - Channels []string `json:"channels,omitempty"` + Architecture *v1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + URL *v1.URL `json:"url,omitempty"` + Channels []string `json:"channels,omitempty"` } // ReleaseApplyConfiguration constructs a declarative configuration of the Release type for use with @@ -21,6 +22,14 @@ func Release() *ReleaseApplyConfiguration { return &ReleaseApplyConfiguration{} } +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. 
+func (b *ReleaseApplyConfiguration) WithArchitecture(value v1.ClusterVersionArchitecture) *ReleaseApplyConfiguration { + b.Architecture = &value + return b +} + // WithVersion sets the Version field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/userdefinedmonitoring.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/userdefinedmonitoring.go new file mode 100644 index 0000000000..6765555282 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/userdefinedmonitoring.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// UserDefinedMonitoringApplyConfiguration represents a declarative configuration of the UserDefinedMonitoring type for use +// with apply. +type UserDefinedMonitoringApplyConfiguration struct { + Mode *v1.UserDefinedMode `json:"mode,omitempty"` +} + +// UserDefinedMonitoringApplyConfiguration constructs a declarative configuration of the UserDefinedMonitoring type for use with +// apply. +func UserDefinedMonitoring() *UserDefinedMonitoringApplyConfiguration { + return &UserDefinedMonitoringApplyConfiguration{} +} + +// WithMode sets the Mode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Mode field is set to the value of the last call. 
+func (b *UserDefinedMonitoringApplyConfiguration) WithMode(value v1.UserDefinedMode) *UserDefinedMonitoringApplyConfiguration { + b.Mode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go new file mode 100644 index 0000000000..f590263a1f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainhostgroup.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSphereFailureDomainHostGroupApplyConfiguration represents a declarative configuration of the VSphereFailureDomainHostGroup type for use +// with apply. +type VSphereFailureDomainHostGroupApplyConfiguration struct { + VMGroup *string `json:"vmGroup,omitempty"` + HostGroup *string `json:"hostGroup,omitempty"` + VMHostRule *string `json:"vmHostRule,omitempty"` +} + +// VSphereFailureDomainHostGroupApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainHostGroup type for use with +// apply. +func VSphereFailureDomainHostGroup() *VSphereFailureDomainHostGroupApplyConfiguration { + return &VSphereFailureDomainHostGroupApplyConfiguration{} +} + +// WithVMGroup sets the VMGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMGroup = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the HostGroup field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithHostGroup(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.HostGroup = &value + return b +} + +// WithVMHostRule sets the VMHostRule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VMHostRule field is set to the value of the last call. +func (b *VSphereFailureDomainHostGroupApplyConfiguration) WithVMHostRule(value string) *VSphereFailureDomainHostGroupApplyConfiguration { + b.VMHostRule = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go new file mode 100644 index 0000000000..316e8e4d62 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainregionaffinity.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainRegionAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainRegionAffinity type for use +// with apply. +type VSphereFailureDomainRegionAffinityApplyConfiguration struct { + Type *v1.VSphereFailureDomainRegionType `json:"type,omitempty"` +} + +// VSphereFailureDomainRegionAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainRegionAffinity type for use with +// apply. 
+func VSphereFailureDomainRegionAffinity() *VSphereFailureDomainRegionAffinityApplyConfiguration { + return &VSphereFailureDomainRegionAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VSphereFailureDomainRegionAffinityApplyConfiguration) WithType(value v1.VSphereFailureDomainRegionType) *VSphereFailureDomainRegionAffinityApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go new file mode 100644 index 0000000000..a00c370f13 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vspherefailuredomainzoneaffinity.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// VSphereFailureDomainZoneAffinityApplyConfiguration represents a declarative configuration of the VSphereFailureDomainZoneAffinity type for use +// with apply. +type VSphereFailureDomainZoneAffinityApplyConfiguration struct { + Type *v1.VSphereFailureDomainZoneType `json:"type,omitempty"` + HostGroup *VSphereFailureDomainHostGroupApplyConfiguration `json:"hostGroup,omitempty"` +} + +// VSphereFailureDomainZoneAffinityApplyConfiguration constructs a declarative configuration of the VSphereFailureDomainZoneAffinity type for use with +// apply. 
+func VSphereFailureDomainZoneAffinity() *VSphereFailureDomainZoneAffinityApplyConfiguration { + return &VSphereFailureDomainZoneAffinityApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithType(value v1.VSphereFailureDomainZoneType) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.Type = &value + return b +} + +// WithHostGroup sets the HostGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostGroup field is set to the value of the last call. +func (b *VSphereFailureDomainZoneAffinityApplyConfiguration) WithHostGroup(value *VSphereFailureDomainHostGroupApplyConfiguration) *VSphereFailureDomainZoneAffinityApplyConfiguration { + b.HostGroup = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go index 1c5ec2cf10..aeb2388825 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go @@ -5,11 +5,13 @@ package v1 // VSpherePlatformFailureDomainSpecApplyConfiguration represents a declarative configuration of the VSpherePlatformFailureDomainSpec type for use // with apply. 
type VSpherePlatformFailureDomainSpecApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Region *string `json:"region,omitempty"` - Zone *string `json:"zone,omitempty"` - Server *string `json:"server,omitempty"` - Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` + Name *string `json:"name,omitempty"` + Region *string `json:"region,omitempty"` + Zone *string `json:"zone,omitempty"` + RegionAffinity *VSphereFailureDomainRegionAffinityApplyConfiguration `json:"regionAffinity,omitempty"` + ZoneAffinity *VSphereFailureDomainZoneAffinityApplyConfiguration `json:"zoneAffinity,omitempty"` + Server *string `json:"server,omitempty"` + Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` } // VSpherePlatformFailureDomainSpecApplyConfiguration constructs a declarative configuration of the VSpherePlatformFailureDomainSpec type for use with @@ -42,6 +44,22 @@ func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZone(value stri return b } +// WithRegionAffinity sets the RegionAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RegionAffinity field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegionAffinity(value *VSphereFailureDomainRegionAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.RegionAffinity = value + return b +} + +// WithZoneAffinity sets the ZoneAffinity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ZoneAffinity field is set to the value of the last call. 
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZoneAffinity(value *VSphereFailureDomainZoneAffinityApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.ZoneAffinity = value + return b +} + // WithServer sets the Server field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Server field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 003e4d2eff..611454559c 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -139,6 +139,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.AWSPlatformStatus map: fields: + - name: cloudLoadBalancerConfig + type: + namedType: com.github.openshift.api.config.v1.CloudLoadBalancerConfig + default: + dnsType: PlatformDefault - name: region type: scalar: string @@ -573,6 +578,46 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.ClusterMonitoring + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1.ClusterMonitoringSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1.ClusterMonitoringStatus + default: {} +- name: com.github.openshift.api.config.v1.ClusterMonitoringSpec + map: + fields: + - name: userDefined + type: + namedType: com.github.openshift.api.config.v1.UserDefinedMonitoring 
+ default: {} +- name: com.github.openshift.api.config.v1.ClusterMonitoringStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable - name: com.github.openshift.api.config.v1.ClusterNetworkEntry map: fields: @@ -2167,6 +2212,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: cgroupMode type: scalar: string + - name: minimumKubeletVersion + type: + scalar: string + default: "" - name: workerLatencyProfile type: scalar: string @@ -2197,9 +2246,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: namedType: com.github.openshift.api.config.v1.NutanixResourceIdentifier - elementRelationship: associative - keys: - - type + elementRelationship: atomic - name: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer map: fields: @@ -3044,6 +3091,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.Release map: fields: + - name: architecture + type: + scalar: string - name: channels type: list: @@ -3361,6 +3411,13 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.UserDefinedMonitoring + map: + fields: + - name: mode + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.UsernameClaimMapping map: fields: @@ -3382,6 +3439,45 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup + map: + fields: + - name: hostGroup + type: + scalar: string + default: "" + - name: vmGroup + type: + scalar: string + default: "" + - name: vmHostRule + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity + map: + fields: + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type +- name: 
com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity + map: + fields: + - name: hostGroup + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainHostGroup + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: hostGroup + discriminatorValue: HostGroup - name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec map: fields: @@ -3393,6 +3489,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: regionAffinity + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainRegionAffinity - name: server type: scalar: string @@ -3405,6 +3504,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: zoneAffinity + type: + namedType: com.github.openshift.api.config.v1.VSphereFailureDomainZoneAffinity - name: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer map: fields: diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go index a470354475..fa2ad5c5ec 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go @@ -82,6 +82,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.CloudLoadBalancerIPsApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ClusterCondition"): return &configv1.ClusterConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterMonitoring"): + return &configv1.ClusterMonitoringApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ClusterMonitoringSpec"): + return &configv1.ClusterMonitoringSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ClusterNetworkEntry"): return &configv1.ClusterNetworkEntryApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ClusterOperator"): @@ 
-380,10 +384,18 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.UpdateApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UpdateHistory"): return &configv1.UpdateHistoryApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("UserDefinedMonitoring"): + return &configv1.UserDefinedMonitoringApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UsernameClaimMapping"): return &configv1.UsernameClaimMappingApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UsernamePrefix"): return &configv1.UsernamePrefixApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainHostGroup"): + return &configv1.VSphereFailureDomainHostGroupApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainRegionAffinity"): + return &configv1.VSphereFailureDomainRegionAffinityApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainZoneAffinity"): + return &configv1.VSphereFailureDomainZoneAffinityApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("VSpherePlatformFailureDomainSpec"): return &configv1.VSpherePlatformFailureDomainSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("VSpherePlatformLoadBalancer"): diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clustermonitoring.go new file mode 100644 index 0000000000..c8ecde7bc9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/clustermonitoring.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterMonitoringsGetter has a method to return a ClusterMonitoringInterface. +// A group's client should implement this interface. +type ClusterMonitoringsGetter interface { + ClusterMonitorings() ClusterMonitoringInterface +} + +// ClusterMonitoringInterface has methods to work with ClusterMonitoring resources. +type ClusterMonitoringInterface interface { + Create(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.CreateOptions) (*v1.ClusterMonitoring, error) + Update(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.UpdateOptions) (*v1.ClusterMonitoring, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.UpdateOptions) (*v1.ClusterMonitoring, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterMonitoring, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterMonitoringList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterMonitoring, err error) + Apply(ctx context.Context, clusterMonitoring *configv1.ClusterMonitoringApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterMonitoring, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, clusterMonitoring *configv1.ClusterMonitoringApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterMonitoring, err error) + ClusterMonitoringExpansion +} + +// clusterMonitorings implements ClusterMonitoringInterface +type clusterMonitorings struct { + *gentype.ClientWithListAndApply[*v1.ClusterMonitoring, *v1.ClusterMonitoringList, *configv1.ClusterMonitoringApplyConfiguration] +} + +// newClusterMonitorings returns a ClusterMonitorings +func newClusterMonitorings(c *ConfigV1Client) *clusterMonitorings { + return &clusterMonitorings{ + gentype.NewClientWithListAndApply[*v1.ClusterMonitoring, *v1.ClusterMonitoringList, *configv1.ClusterMonitoringApplyConfiguration]( + "clustermonitorings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.ClusterMonitoring { return &v1.ClusterMonitoring{} }, + func() *v1.ClusterMonitoringList { return &v1.ClusterMonitoringList{} }), + } +} diff --git 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index de4f2fa32a..5cd48daf71 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -15,6 +15,7 @@ type ConfigV1Interface interface { APIServersGetter AuthenticationsGetter BuildsGetter + ClusterMonitoringsGetter ClusterOperatorsGetter ClusterVersionsGetter ConsolesGetter @@ -52,6 +53,10 @@ func (c *ConfigV1Client) Builds() BuildInterface { return newBuilds(c) } +func (c *ConfigV1Client) ClusterMonitorings() ClusterMonitoringInterface { + return newClusterMonitorings(c) +} + func (c *ConfigV1Client) ClusterOperators() ClusterOperatorInterface { return newClusterOperators(c) } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clustermonitoring.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clustermonitoring.go new file mode 100644 index 0000000000..fc9f60e181 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clustermonitoring.go @@ -0,0 +1,170 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterMonitorings implements ClusterMonitoringInterface +type FakeClusterMonitorings struct { + Fake *FakeConfigV1 +} + +var clustermonitoringsResource = v1.SchemeGroupVersion.WithResource("clustermonitorings") + +var clustermonitoringsKind = v1.SchemeGroupVersion.WithKind("ClusterMonitoring") + +// Get takes name of the clusterMonitoring, and returns the corresponding clusterMonitoring object, and an error if there is any. +func (c *FakeClusterMonitorings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterMonitoring, err error) { + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(clustermonitoringsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// List takes label and field selectors, and returns the list of ClusterMonitorings that match those selectors. +func (c *FakeClusterMonitorings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterMonitoringList, err error) { + emptyResult := &v1.ClusterMonitoringList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(clustermonitoringsResource, clustermonitoringsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ClusterMonitoringList{ListMeta: obj.(*v1.ClusterMonitoringList).ListMeta} + for _, item := range obj.(*v1.ClusterMonitoringList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterMonitorings. +func (c *FakeClusterMonitorings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(clustermonitoringsResource, opts)) +} + +// Create takes the representation of a clusterMonitoring and creates it. Returns the server's representation of the clusterMonitoring, and an error, if there is any. +func (c *FakeClusterMonitorings) Create(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.CreateOptions) (result *v1.ClusterMonitoring, err error) { + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(clustermonitoringsResource, clusterMonitoring, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// Update takes the representation of a clusterMonitoring and updates it. Returns the server's representation of the clusterMonitoring, and an error, if there is any. +func (c *FakeClusterMonitorings) Update(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.UpdateOptions) (result *v1.ClusterMonitoring, err error) { + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateActionWithOptions(clustermonitoringsResource, clusterMonitoring, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterMonitorings) UpdateStatus(ctx context.Context, clusterMonitoring *v1.ClusterMonitoring, opts metav1.UpdateOptions) (result *v1.ClusterMonitoring, err error) { + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(clustermonitoringsResource, "status", clusterMonitoring, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// Delete takes name of the clusterMonitoring and deletes it. Returns an error if one occurs. +func (c *FakeClusterMonitorings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clustermonitoringsResource, name, opts), &v1.ClusterMonitoring{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterMonitorings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(clustermonitoringsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ClusterMonitoringList{}) + return err +} + +// Patch applies the patch and returns the patched clusterMonitoring. +func (c *FakeClusterMonitorings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterMonitoring, err error) { + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustermonitoringsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterMonitoring. +func (c *FakeClusterMonitorings) Apply(ctx context.Context, clusterMonitoring *configv1.ClusterMonitoringApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterMonitoring, err error) { + if clusterMonitoring == nil { + return nil, fmt.Errorf("clusterMonitoring provided to Apply must not be nil") + } + data, err := json.Marshal(clusterMonitoring) + if err != nil { + return nil, err + } + name := clusterMonitoring.Name + if name == nil { + return nil, fmt.Errorf("clusterMonitoring.Name must be provided to Apply") + } + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustermonitoringsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeClusterMonitorings) ApplyStatus(ctx context.Context, clusterMonitoring *configv1.ClusterMonitoringApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterMonitoring, err error) { + if clusterMonitoring == nil { + return nil, fmt.Errorf("clusterMonitoring provided to Apply must not be nil") + } + data, err := json.Marshal(clusterMonitoring) + if err != nil { + return nil, err + } + name := clusterMonitoring.Name + if name == nil { + return nil, fmt.Errorf("clusterMonitoring.Name must be provided to Apply") + } + emptyResult := &v1.ClusterMonitoring{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(clustermonitoringsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ClusterMonitoring), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go index b105e491cf..e33c005d7b 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -24,6 +24,10 @@ func (c *FakeConfigV1) Builds() v1.BuildInterface { return &FakeBuilds{c} } +func (c *FakeConfigV1) ClusterMonitorings() v1.ClusterMonitoringInterface { + return &FakeClusterMonitorings{c} +} + func (c *FakeConfigV1) ClusterOperators() v1.ClusterOperatorInterface { return &FakeClusterOperators{c} } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index a56721ba9d..acaec6b2c5 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -8,6 +8,8 @@ type AuthenticationExpansion interface{} type BuildExpansion interface{} +type ClusterMonitoringExpansion interface{} + type ClusterOperatorExpansion interface{} type ClusterVersionExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clustermonitoring.go new 
file mode 100644 index 0000000000..d8314ac11c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clustermonitoring.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterMonitoringInformer provides access to a shared informer and lister for +// ClusterMonitorings. +type ClusterMonitoringInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterMonitoringLister +} + +type clusterMonitoringInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterMonitoringInformer constructs a new informer for ClusterMonitoring type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterMonitoringInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterMonitoringInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterMonitoringInformer constructs a new informer for ClusterMonitoring type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterMonitoringInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterMonitorings().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterMonitorings().Watch(context.TODO(), options) + }, + }, + &configv1.ClusterMonitoring{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterMonitoringInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterMonitoringInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterMonitoringInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.ClusterMonitoring{}, f.defaultInformer) +} + +func (f *clusterMonitoringInformer) Lister() v1.ClusterMonitoringLister { + return v1.NewClusterMonitoringLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go index f49b1d2287..1a027516a8 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -14,6 +14,8 @@ type Interface interface { Authentications() AuthenticationInformer // Builds returns a BuildInformer. 
Builds() BuildInformer + // ClusterMonitorings returns a ClusterMonitoringInformer. + ClusterMonitorings() ClusterMonitoringInformer // ClusterOperators returns a ClusterOperatorInformer. ClusterOperators() ClusterOperatorInformer // ClusterVersions returns a ClusterVersionInformer. @@ -78,6 +80,11 @@ func (v *version) Builds() BuildInformer { return &buildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// ClusterMonitorings returns a ClusterMonitoringInformer. +func (v *version) ClusterMonitorings() ClusterMonitoringInformer { + return &clusterMonitoringInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ClusterOperators returns a ClusterOperatorInformer. func (v *version) ClusterOperators() ClusterOperatorInformer { return &clusterOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 20b3b4e606..aeb6540056 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -44,6 +44,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Authentications().Informer()}, nil case v1.SchemeGroupVersion.WithResource("builds"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clustermonitorings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterMonitorings().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusteroperators"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Config().V1().ClusterOperators().Informer()}, nil case v1.SchemeGroupVersion.WithResource("clusterversions"): diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/clustermonitoring.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/clustermonitoring.go new file mode 100644 index 0000000000..e9ed085c16 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/clustermonitoring.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ClusterMonitoringLister helps list ClusterMonitorings. +// All objects returned here must be treated as read-only. +type ClusterMonitoringLister interface { + // List lists all ClusterMonitorings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ClusterMonitoring, err error) + // Get retrieves the ClusterMonitoring from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ClusterMonitoring, error) + ClusterMonitoringListerExpansion +} + +// clusterMonitoringLister implements the ClusterMonitoringLister interface. +type clusterMonitoringLister struct { + listers.ResourceIndexer[*v1.ClusterMonitoring] +} + +// NewClusterMonitoringLister returns a new ClusterMonitoringLister. 
+func NewClusterMonitoringLister(indexer cache.Indexer) ClusterMonitoringLister { + return &clusterMonitoringLister{listers.New[*v1.ClusterMonitoring](indexer, v1.Resource("clustermonitoring"))} +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go index b5d6fc088b..58b7a032d7 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -14,6 +14,10 @@ type AuthenticationListerExpansion interface{} // BuildLister. type BuildListerExpansion interface{} +// ClusterMonitoringListerExpansion allows custom methods to be added to +// ClusterMonitoringLister. +type ClusterMonitoringListerExpansion interface{} + // ClusterOperatorListerExpansion allows custom methods to be added to // ClusterOperatorLister. type ClusterOperatorListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/utils.go new file mode 100644 index 0000000000..185a4f267d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/utils.go @@ -0,0 +1,64 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfigurations + +import ( + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + internal "github.com/openshift/client-go/image/applyconfigurations/internal" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. 
+func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=image.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("Image"): + return &imagev1.ImageApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageLayer"): + return &imagev1.ImageLayerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageLookupPolicy"): + return &imagev1.ImageLookupPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageManifest"): + return &imagev1.ImageManifestApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageSignature"): + return &imagev1.ImageSignatureApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageStream"): + return &imagev1.ImageStreamApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageStreamMapping"): + return &imagev1.ImageStreamMappingApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageStreamSpec"): + return &imagev1.ImageStreamSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageStreamStatus"): + return &imagev1.ImageStreamStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("NamedTagEventList"): + return &imagev1.NamedTagEventListApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SignatureCondition"): + return &imagev1.SignatureConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SignatureGenericEntity"): + return &imagev1.SignatureGenericEntityApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SignatureIssuer"): + return &imagev1.SignatureIssuerApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("SignatureSubject"): + return &imagev1.SignatureSubjectApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TagEvent"): + return &imagev1.TagEventApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TagEventCondition"): + return &imagev1.TagEventConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TagImportPolicy"): + return 
&imagev1.TagImportPolicyApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TagReference"): + return &imagev1.TagReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TagReferencePolicy"): + return &imagev1.TagReferencePolicyApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go similarity index 77% rename from vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/clientset_generated.go rename to vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go index ae11348709..92f7207bad 100644 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/clientset_generated.go @@ -3,12 +3,10 @@ package fake import ( - applyconfigurations "github.com/openshift/client-go/machine/applyconfigurations" - clientset "github.com/openshift/client-go/machine/clientset/versioned" - machinev1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1" - fakemachinev1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake" - machinev1beta1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1" - fakemachinev1beta1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake" + applyconfigurations "github.com/openshift/client-go/image/applyconfigurations" + clientset "github.com/openshift/client-go/image/clientset/versioned" + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + fakeimagev1 
"github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -102,12 +100,7 @@ var ( _ testing.FakeClient = &Clientset{} ) -// MachineV1 retrieves the MachineV1Client -func (c *Clientset) MachineV1() machinev1.MachineV1Interface { - return &fakemachinev1.FakeMachineV1{Fake: &c.Fake} -} - -// MachineV1beta1 retrieves the MachineV1beta1Client -func (c *Clientset) MachineV1beta1() machinev1beta1.MachineV1beta1Interface { - return &fakemachinev1beta1.FakeMachineV1beta1{Fake: &c.Fake} +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return &fakeimagev1.FakeImageV1{Fake: &c.Fake} } diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go similarity index 100% rename from vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/doc.go rename to vendor/github.com/openshift/client-go/image/clientset/versioned/fake/doc.go diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go similarity index 88% rename from vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/register.go rename to vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go index f9ffa0cc98..d68424bc4e 100644 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/fake/register.go +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/fake/register.go @@ -3,8 +3,7 @@ package fake import ( - machinev1 "github.com/openshift/api/machine/v1" - machinev1beta1 "github.com/openshift/api/machine/v1beta1" + imagev1 "github.com/openshift/api/image/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema 
"k8s.io/apimachinery/pkg/runtime/schema" @@ -16,8 +15,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - machinev1.AddToScheme, - machinev1beta1.AddToScheme, + imagev1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go similarity index 100% rename from vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/doc.go rename to vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/doc.go diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go new file mode 100644 index 0000000000..e4da2a3e96 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image.go @@ -0,0 +1,135 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeImageV1 +} + +var imagesResource = v1.SchemeGroupVersion.WithResource("images") + +var imagesKind = v1.SchemeGroupVersion.WithKind("Image") + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. 
+func (c *FakeImages) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Image, err error) { + emptyResult := &v1.Image{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(imagesResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageList, err error) { + emptyResult := &v1.ImageList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(imagesResource, imagesKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ImageList{ListMeta: obj.(*v1.ImageList).ListMeta} + for _, item := range obj.(*v1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(imagesResource, opts)) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (result *v1.Image, err error) { + emptyResult := &v1.Image{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(imagesResource, image, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Image), err +} + +// Update takes the representation of a image and updates it. 
Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) { + emptyResult := &v1.Image{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(imagesResource, image, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesResource, name, opts), &v1.Image{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImages) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(imagesResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) { + emptyResult := &v1.Image{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(imagesResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Image), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. 
+func (c *FakeImages) Apply(ctx context.Context, image *imagev1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + emptyResult := &v1.Image{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(imagesResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Image), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go new file mode 100644 index 0000000000..c135a79bb6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_image_client.go @@ -0,0 +1,52 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeImageV1 struct { + *testing.Fake +} + +func (c *FakeImageV1) Images() v1.ImageInterface { + return &FakeImages{c} +} + +func (c *FakeImageV1) ImageSignatures() v1.ImageSignatureInterface { + return &FakeImageSignatures{c} +} + +func (c *FakeImageV1) ImageStreams(namespace string) v1.ImageStreamInterface { + return &FakeImageStreams{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImages(namespace string) v1.ImageStreamImageInterface { + return &FakeImageStreamImages{c, namespace} +} + +func (c *FakeImageV1) ImageStreamImports(namespace string) v1.ImageStreamImportInterface { + return &FakeImageStreamImports{c, namespace} +} + +func (c *FakeImageV1) ImageStreamMappings(namespace string) v1.ImageStreamMappingInterface { + return &FakeImageStreamMappings{c, namespace} +} + +func (c *FakeImageV1) ImageStreamTags(namespace string) v1.ImageStreamTagInterface { + return &FakeImageStreamTags{c, namespace} +} + +func (c *FakeImageV1) ImageTags(namespace string) v1.ImageTagInterface { + return &FakeImageTags{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeImageV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go new file mode 100644 index 0000000000..0a186b2ff9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagesignature.go @@ -0,0 +1,38 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testing "k8s.io/client-go/testing" +) + +// FakeImageSignatures implements ImageSignatureInterface +type FakeImageSignatures struct { + Fake *FakeImageV1 +} + +var imagesignaturesResource = v1.SchemeGroupVersion.WithResource("imagesignatures") + +var imagesignaturesKind = v1.SchemeGroupVersion.WithKind("ImageSignature") + +// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. +func (c *FakeImageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { + emptyResult := &v1.ImageSignature{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(imagesignaturesResource, imageSignature, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageSignature), err +} + +// Delete takes name of the imageSignature and deletes it. Returns an error if one occurs. +func (c *FakeImageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesignaturesResource, name, opts), &v1.ImageSignature{}) + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go new file mode 100644 index 0000000000..3ba173567e --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestream.go @@ -0,0 +1,205 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreams implements ImageStreamInterface +type FakeImageStreams struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamsResource = v1.SchemeGroupVersion.WithResource("imagestreams") + +var imagestreamsKind = v1.SchemeGroupVersion.WithKind("ImageStream") + +// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. +func (c *FakeImageStreams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStream, err error) { + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(imagestreamsResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. +func (c *FakeImageStreams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamList, err error) { + emptyResult := &v1.ImageStreamList{} + obj, err := c.Fake. 
+ Invokes(testing.NewListActionWithOptions(imagestreamsResource, imagestreamsKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ImageStreamList{ListMeta: obj.(*v1.ImageStreamList).ListMeta} + for _, item := range obj.(*v1.ImageStreamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageStreams. +func (c *FakeImageStreams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(imagestreamsResource, c.ns, opts)) + +} + +// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Create(ctx context.Context, imageStream *v1.ImageStream, opts metav1.CreateOptions) (result *v1.ImageStream, err error) { + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(imagestreamsResource, c.ns, imageStream, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// Update takes the representation of a imageStream and updates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *FakeImageStreams) Update(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateActionWithOptions(imagestreamsResource, c.ns, imageStream, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageStreams) UpdateStatus(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceActionWithOptions(imagestreamsResource, "status", c.ns, imageStream, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. +func (c *FakeImageStreams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamsResource, c.ns, name, opts), &v1.ImageStream{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageStreams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionActionWithOptions(imagestreamsResource, c.ns, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.ImageStreamList{}) + return err +} + +// Patch applies the patch and returns the patched imageStream. +func (c *FakeImageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageStream, err error) { + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(imagestreamsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. +func (c *FakeImageStreams) Apply(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImageStreams) ApplyStatus(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + emptyResult := &v1.ImageStream{} + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(imagestreamsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStream), err +} + +// Secrets takes name of the imageStream, and returns the corresponding secretList object, and an error if there is any. +func (c *FakeImageStreams) Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.SecretList, err error) { + emptyResult := &v1.SecretList{} + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceActionWithOptions(imagestreamsResource, c.ns, "secrets", imageStreamName, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.SecretList), err +} + +// Layers takes name of the imageStream, and returns the corresponding imageStreamLayers object, and an error if there is any. +func (c *FakeImageStreams) Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.ImageStreamLayers, err error) { + emptyResult := &v1.ImageStreamLayers{} + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceActionWithOptions(imagestreamsResource, c.ns, "layers", imageStreamName, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamLayers), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go new file mode 100644 index 0000000000..4b3949eb50 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimage.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImages implements ImageStreamImageInterface +type FakeImageStreamImages struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimagesResource = v1.SchemeGroupVersion.WithResource("imagestreamimages") + +var imagestreamimagesKind = v1.SchemeGroupVersion.WithKind("ImageStreamImage") + +// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. +func (c *FakeImageStreamImages) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStreamImage, err error) { + emptyResult := &v1.ImageStreamImage{} + obj, err := c.Fake. + Invokes(testing.NewGetActionWithOptions(imagestreamimagesResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamImage), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go new file mode 100644 index 0000000000..07fea75630 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamimport.go @@ -0,0 +1,33 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamImports implements ImageStreamImportInterface +type FakeImageStreamImports struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamimportsResource = v1.SchemeGroupVersion.WithResource("imagestreamimports") + +var imagestreamimportsKind = v1.SchemeGroupVersion.WithKind("ImageStreamImport") + +// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. +func (c *FakeImageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { + emptyResult := &v1.ImageStreamImport{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(imagestreamimportsResource, c.ns, imageStreamImport, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamImport), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go new file mode 100644 index 0000000000..74c405d76f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreammapping.go @@ -0,0 +1,60 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamMappings implements ImageStreamMappingInterface +type FakeImageStreamMappings struct { + Fake *FakeImageV1 + ns string +} + +var imagestreammappingsResource = v1.SchemeGroupVersion.WithResource("imagestreammappings") + +var imagestreammappingsKind = v1.SchemeGroupVersion.WithKind("ImageStreamMapping") + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. +func (c *FakeImageStreamMappings) Apply(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStreamMapping, err error) { + if imageStreamMapping == nil { + return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") + } + data, err := json.Marshal(imageStreamMapping) + if err != nil { + return nil, err + } + name := imageStreamMapping.Name + if name == nil { + return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") + } + emptyResult := &v1.ImageStreamMapping{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(imagestreammappingsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamMapping), err +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. 
+func (c *FakeImageStreamMappings) Create(ctx context.Context, imageStreamMapping *v1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + emptyResult := &metav1.Status{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(imagestreammappingsResource, c.ns, imageStreamMapping, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*metav1.Status), err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go new file mode 100644 index 0000000000..ddabe4bfd3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagestreamtag.go @@ -0,0 +1,89 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + testing "k8s.io/client-go/testing" +) + +// FakeImageStreamTags implements ImageStreamTagInterface +type FakeImageStreamTags struct { + Fake *FakeImageV1 + ns string +} + +var imagestreamtagsResource = v1.SchemeGroupVersion.WithResource("imagestreamtags") + +var imagestreamtagsKind = v1.SchemeGroupVersion.WithKind("ImageStreamTag") + +// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. +func (c *FakeImageStreamTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStreamTag, err error) { + emptyResult := &v1.ImageStreamTag{} + obj, err := c.Fake. 
+ Invokes(testing.NewGetActionWithOptions(imagestreamtagsResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamTag), err +} + +// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. +func (c *FakeImageStreamTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamTagList, err error) { + emptyResult := &v1.ImageStreamTagList{} + obj, err := c.Fake. + Invokes(testing.NewListActionWithOptions(imagestreamtagsResource, imagestreamtagsKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ImageStreamTagList{ListMeta: obj.(*v1.ImageStreamTagList).ListMeta} + for _, item := range obj.(*v1.ImageStreamTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *FakeImageStreamTags) Create(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.CreateOptions) (result *v1.ImageStreamTag, err error) { + emptyResult := &v1.ImageStreamTag{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(imagestreamtagsResource, c.ns, imageStreamTag, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamTag), err +} + +// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. 
+func (c *FakeImageStreamTags) Update(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.UpdateOptions) (result *v1.ImageStreamTag, err error) { + emptyResult := &v1.ImageStreamTag{} + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(imagestreamtagsResource, c.ns, imageStreamTag, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageStreamTag), err +} + +// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. +func (c *FakeImageStreamTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagestreamtagsResource, c.ns, name, opts), &v1.ImageStreamTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go new file mode 100644 index 0000000000..06bd9b02ca --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake/fake_imagetag.go @@ -0,0 +1,89 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + testing "k8s.io/client-go/testing" +) + +// FakeImageTags implements ImageTagInterface +type FakeImageTags struct { + Fake *FakeImageV1 + ns string +} + +var imagetagsResource = v1.SchemeGroupVersion.WithResource("imagetags") + +var imagetagsKind = v1.SchemeGroupVersion.WithKind("ImageTag") + +// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. +func (c *FakeImageTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageTag, err error) { + emptyResult := &v1.ImageTag{} + obj, err := c.Fake. 
+ Invokes(testing.NewGetActionWithOptions(imagetagsResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageTag), err +} + +// List takes label and field selectors, and returns the list of ImageTags that match those selectors. +func (c *FakeImageTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageTagList, err error) { + emptyResult := &v1.ImageTagList{} + obj, err := c.Fake. + Invokes(testing.NewListActionWithOptions(imagetagsResource, imagetagsKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.ImageTagList{ListMeta: obj.(*v1.ImageTagList).ListMeta} + for _, item := range obj.(*v1.ImageTagList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Create(ctx context.Context, imageTag *v1.ImageTag, opts metav1.CreateOptions) (result *v1.ImageTag, err error) { + emptyResult := &v1.ImageTag{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(imagetagsResource, c.ns, imageTag, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageTag), err +} + +// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *FakeImageTags) Update(ctx context.Context, imageTag *v1.ImageTag, opts metav1.UpdateOptions) (result *v1.ImageTag, err error) { + emptyResult := &v1.ImageTag{} + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateActionWithOptions(imagetagsResource, c.ns, imageTag, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.ImageTag), err +} + +// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. +func (c *FakeImageTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(imagetagsResource, c.ns, name, opts), &v1.ImageTag{}) + + return err +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go similarity index 96% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go index 0d3e90f337..ecb2d2f958 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/factory.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/factory.go @@ -7,9 +7,9 @@ import ( sync "sync" time "time" - versioned "github.com/openshift/client-go/build/clientset/versioned" - build "github.com/openshift/client-go/build/informers/externalversions/build" - internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + versioned "github.com/openshift/client-go/image/clientset/versioned" + image "github.com/openshift/client-go/image/informers/externalversions/image" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -238,9 +238,9 @@ type SharedInformerFactory interface { // client. 
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer - Build() build.Interface + Image() image.Interface } -func (f *sharedInformerFactory) Build() build.Interface { - return build.New(f, f.namespace, f.tweakListOptions) +func (f *sharedInformerFactory) Image() image.Interface { + return image.New(f, f.namespace, f.tweakListOptions) } diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go similarity index 81% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go index e8b2035b70..55f59dedef 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/generic.go @@ -5,7 +5,7 @@ package externalversions import ( "fmt" - v1 "github.com/openshift/api/build/v1" + v1 "github.com/openshift/api/image/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -36,11 +36,11 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=build.openshift.io, Version=v1 - case v1.SchemeGroupVersion.WithResource("builds"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().Builds().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("buildconfigs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Build().V1().BuildConfigs().Informer()}, nil + // Group=image.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: 
resource.GroupResource(), informer: f.Image().V1().Images().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("imagestreams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Image().V1().ImageStreams().Informer()}, nil } diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go similarity index 84% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go index 01a651928a..092550ed3d 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/interface.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/interface.go @@ -1,10 +1,10 @@ // Code generated by informer-gen. DO NOT EDIT. -package build +package image import ( - v1 "github.com/openshift/client-go/build/informers/externalversions/build/v1" - internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/informers/externalversions/image/v1" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. diff --git a/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go new file mode 100644 index 0000000000..ee2d0a7067 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/image.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ImageV1().Images().Watch(context.TODO(), options) + }, + }, + &imagev1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1.ImageLister { + return v1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go similarity index 57% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go index 28012f8c6d..4a94cc5c7d 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/buildconfig.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/imagestream.go @@ -6,69 +6,69 @@ import ( "context" time "time" - buildv1 
"github.com/openshift/api/build/v1" - versioned "github.com/openshift/client-go/build/clientset/versioned" - internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/build/listers/build/v1" + imagev1 "github.com/openshift/api/image/v1" + versioned "github.com/openshift/client-go/image/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/image/listers/image/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" ) -// BuildConfigInformer provides access to a shared informer and lister for -// BuildConfigs. -type BuildConfigInformer interface { +// ImageStreamInformer provides access to a shared informer and lister for +// ImageStreams. +type ImageStreamInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.BuildConfigLister + Lister() v1.ImageStreamLister } -type buildConfigInformer struct { +type imageStreamInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewBuildConfigInformer constructs a new informer for BuildConfig type. +// NewImageStreamInformer constructs a new informer for ImageStream type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredBuildConfigInformer(client, namespace, resyncPeriod, indexers, nil) +func NewImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredBuildConfigInformer constructs a new informer for BuildConfig type. +// NewFilteredImageStreamInformer constructs a new informer for ImageStream type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredBuildConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredImageStreamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BuildV1().BuildConfigs(namespace).List(context.TODO(), options) + return client.ImageV1().ImageStreams(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BuildV1().BuildConfigs(namespace).Watch(context.TODO(), options) + return client.ImageV1().ImageStreams(namespace).Watch(context.TODO(), options) }, }, - &buildv1.BuildConfig{}, + &imagev1.ImageStream{}, resyncPeriod, indexers, 
) } -func (f *buildConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredBuildConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *imageStreamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageStreamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *buildConfigInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&buildv1.BuildConfig{}, f.defaultInformer) +func (f *imageStreamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&imagev1.ImageStream{}, f.defaultInformer) } -func (f *buildConfigInformer) Lister() v1.BuildConfigLister { - return v1.NewBuildConfigLister(f.Informer().GetIndexer()) +func (f *imageStreamInformer) Lister() v1.ImageStreamLister { + return v1.NewImageStreamLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go similarity index 55% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go index da69fc9bb6..fd35c4df1a 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/interface.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/image/v1/interface.go @@ -3,15 +3,15 @@ package v1 import ( - internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" + internalinterfaces 
"github.com/openshift/client-go/image/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. type Interface interface { - // Builds returns a BuildInformer. - Builds() BuildInformer - // BuildConfigs returns a BuildConfigInformer. - BuildConfigs() BuildConfigInformer + // Images returns a ImageInformer. + Images() ImageInformer + // ImageStreams returns a ImageStreamInformer. + ImageStreams() ImageStreamInformer } type version struct { @@ -25,12 +25,12 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// Builds returns a BuildInformer. -func (v *version) Builds() BuildInformer { - return &buildInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } -// BuildConfigs returns a BuildConfigInformer. -func (v *version) BuildConfigs() BuildConfigInformer { - return &buildConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// ImageStreams returns a ImageStreamInformer. 
+func (v *version) ImageStreams() ImageStreamInformer { + return &imageStreamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go similarity index 92% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go rename to vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go index 1bcbd5975a..c35dcbfa44 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/github.com/openshift/client-go/image/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -5,7 +5,7 @@ package internalinterfaces import ( time "time" - versioned "github.com/openshift/client-go/build/clientset/versioned" + versioned "github.com/openshift/client-go/image/clientset/versioned" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go new file mode 100644 index 0000000000..308b6db702 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/expansion_generated.go @@ -0,0 +1,31 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// ImageListerExpansion allows custom methods to be added to +// ImageLister. +type ImageListerExpansion interface{} + +// ImageStreamListerExpansion allows custom methods to be added to +// ImageStreamLister. 
+type ImageStreamListerExpansion interface{} + +// ImageStreamNamespaceListerExpansion allows custom methods to be added to +// ImageStreamNamespaceLister. +type ImageStreamNamespaceListerExpansion interface{} + +// ImageStreamTagListerExpansion allows custom methods to be added to +// ImageStreamTagLister. +type ImageStreamTagListerExpansion interface{} + +// ImageStreamTagNamespaceListerExpansion allows custom methods to be added to +// ImageStreamTagNamespaceLister. +type ImageStreamTagNamespaceListerExpansion interface{} + +// ImageTagListerExpansion allows custom methods to be added to +// ImageTagLister. +type ImageTagListerExpansion interface{} + +// ImageTagNamespaceListerExpansion allows custom methods to be added to +// ImageTagNamespaceLister. +type ImageTagNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go new file mode 100644 index 0000000000..1a4cb748a7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/image.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ImageLister helps list Images. +// All objects returned here must be treated as read-only. +type ImageLister interface { + // List lists all Images in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Image, err error) + // Get retrieves the Image from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Image, error) + ImageListerExpansion +} + +// imageLister implements the ImageLister interface. +type imageLister struct { + listers.ResourceIndexer[*v1.Image] +} + +// NewImageLister returns a new ImageLister. 
+func NewImageLister(indexer cache.Indexer) ImageLister { + return &imageLister{listers.New[*v1.Image](indexer, v1.Resource("image"))} +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go new file mode 100644 index 0000000000..b2a567c556 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestream.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamLister helps list ImageStreams. +// All objects returned here must be treated as read-only. +type ImageStreamLister interface { + // List lists all ImageStreams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // ImageStreams returns an object that can list and get ImageStreams. + ImageStreams(namespace string) ImageStreamNamespaceLister + ImageStreamListerExpansion +} + +// imageStreamLister implements the ImageStreamLister interface. +type imageStreamLister struct { + listers.ResourceIndexer[*v1.ImageStream] +} + +// NewImageStreamLister returns a new ImageStreamLister. +func NewImageStreamLister(indexer cache.Indexer) ImageStreamLister { + return &imageStreamLister{listers.New[*v1.ImageStream](indexer, v1.Resource("imagestream"))} +} + +// ImageStreams returns an object that can list and get ImageStreams. +func (s *imageStreamLister) ImageStreams(namespace string) ImageStreamNamespaceLister { + return imageStreamNamespaceLister{listers.NewNamespaced[*v1.ImageStream](s.ResourceIndexer, namespace)} +} + +// ImageStreamNamespaceLister helps list and get ImageStreams. +// All objects returned here must be treated as read-only. 
+type ImageStreamNamespaceLister interface { + // List lists all ImageStreams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStream, err error) + // Get retrieves the ImageStream from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStream, error) + ImageStreamNamespaceListerExpansion +} + +// imageStreamNamespaceLister implements the ImageStreamNamespaceLister +// interface. +type imageStreamNamespaceLister struct { + listers.ResourceIndexer[*v1.ImageStream] +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go new file mode 100644 index 0000000000..1e227c2200 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagestreamtag.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ImageStreamTagLister helps list ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagLister interface { + // List lists all ImageStreamTags in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // ImageStreamTags returns an object that can list and get ImageStreamTags. + ImageStreamTags(namespace string) ImageStreamTagNamespaceLister + ImageStreamTagListerExpansion +} + +// imageStreamTagLister implements the ImageStreamTagLister interface. +type imageStreamTagLister struct { + listers.ResourceIndexer[*v1.ImageStreamTag] +} + +// NewImageStreamTagLister returns a new ImageStreamTagLister. 
+func NewImageStreamTagLister(indexer cache.Indexer) ImageStreamTagLister { + return &imageStreamTagLister{listers.New[*v1.ImageStreamTag](indexer, v1.Resource("imagestreamtag"))} +} + +// ImageStreamTags returns an object that can list and get ImageStreamTags. +func (s *imageStreamTagLister) ImageStreamTags(namespace string) ImageStreamTagNamespaceLister { + return imageStreamTagNamespaceLister{listers.NewNamespaced[*v1.ImageStreamTag](s.ResourceIndexer, namespace)} +} + +// ImageStreamTagNamespaceLister helps list and get ImageStreamTags. +// All objects returned here must be treated as read-only. +type ImageStreamTagNamespaceLister interface { + // List lists all ImageStreamTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageStreamTag, err error) + // Get retrieves the ImageStreamTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageStreamTag, error) + ImageStreamTagNamespaceListerExpansion +} + +// imageStreamTagNamespaceLister implements the ImageStreamTagNamespaceLister +// interface. +type imageStreamTagNamespaceLister struct { + listers.ResourceIndexer[*v1.ImageStreamTag] +} diff --git a/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go new file mode 100644 index 0000000000..8cb9dc0d9f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/listers/image/v1/imagetag.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ImageTagLister helps list ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagLister interface { + // List lists all ImageTags in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // ImageTags returns an object that can list and get ImageTags. + ImageTags(namespace string) ImageTagNamespaceLister + ImageTagListerExpansion +} + +// imageTagLister implements the ImageTagLister interface. +type imageTagLister struct { + listers.ResourceIndexer[*v1.ImageTag] +} + +// NewImageTagLister returns a new ImageTagLister. +func NewImageTagLister(indexer cache.Indexer) ImageTagLister { + return &imageTagLister{listers.New[*v1.ImageTag](indexer, v1.Resource("imagetag"))} +} + +// ImageTags returns an object that can list and get ImageTags. +func (s *imageTagLister) ImageTags(namespace string) ImageTagNamespaceLister { + return imageTagNamespaceLister{listers.NewNamespaced[*v1.ImageTag](s.ResourceIndexer, namespace)} +} + +// ImageTagNamespaceLister helps list and get ImageTags. +// All objects returned here must be treated as read-only. +type ImageTagNamespaceLister interface { + // List lists all ImageTags in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.ImageTag, err error) + // Get retrieves the ImageTag from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.ImageTag, error) + ImageTagNamespaceListerExpansion +} + +// imageTagNamespaceLister implements the ImageTagNamespaceLister +// interface. 
+type imageTagNamespaceLister struct { + listers.ResourceIndexer[*v1.ImageTag] +} diff --git a/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go index d478ac7feb..2141bcab25 100644 --- a/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go @@ -115,6 +115,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.machine.v1.ControlPlaneMachineSetSpec map: fields: + - name: machineNamePrefix + type: + scalar: string - name: replicas type: scalar: numeric diff --git a/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1/controlplanemachinesetspec.go b/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1/controlplanemachinesetspec.go index 30aa1d0523..fe2e2159b9 100644 --- a/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1/controlplanemachinesetspec.go +++ b/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1/controlplanemachinesetspec.go @@ -10,11 +10,12 @@ import ( // ControlPlaneMachineSetSpecApplyConfiguration represents a declarative configuration of the ControlPlaneMachineSetSpec type for use // with apply. 
type ControlPlaneMachineSetSpecApplyConfiguration struct { - State *v1.ControlPlaneMachineSetState `json:"state,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - Strategy *ControlPlaneMachineSetStrategyApplyConfiguration `json:"strategy,omitempty"` - Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Template *ControlPlaneMachineSetTemplateApplyConfiguration `json:"template,omitempty"` + MachineNamePrefix *string `json:"machineNamePrefix,omitempty"` + State *v1.ControlPlaneMachineSetState `json:"state,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + Strategy *ControlPlaneMachineSetStrategyApplyConfiguration `json:"strategy,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Template *ControlPlaneMachineSetTemplateApplyConfiguration `json:"template,omitempty"` } // ControlPlaneMachineSetSpecApplyConfiguration constructs a declarative configuration of the ControlPlaneMachineSetSpec type for use with @@ -23,6 +24,14 @@ func ControlPlaneMachineSetSpec() *ControlPlaneMachineSetSpecApplyConfiguration return &ControlPlaneMachineSetSpecApplyConfiguration{} } +// WithMachineNamePrefix sets the MachineNamePrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineNamePrefix field is set to the value of the last call. +func (b *ControlPlaneMachineSetSpecApplyConfiguration) WithMachineNamePrefix(value string) *ControlPlaneMachineSetSpecApplyConfiguration { + b.MachineNamePrefix = &value + return b +} + // WithState sets the State field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the State field is set to the value of the last call. 
diff --git a/vendor/github.com/openshift/client-go/machine/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/machine/applyconfigurations/utils.go deleted file mode 100644 index e99d1bcba6..0000000000 --- a/vendor/github.com/openshift/client-go/machine/applyconfigurations/utils.go +++ /dev/null @@ -1,100 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package applyconfigurations - -import ( - v1 "github.com/openshift/api/machine/v1" - v1beta1 "github.com/openshift/api/machine/v1beta1" - internal "github.com/openshift/client-go/machine/applyconfigurations/internal" - machinev1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1" - machinev1beta1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - testing "k8s.io/client-go/testing" -) - -// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no -// apply configuration type exists for the given GroupVersionKind. 
-func ForKind(kind schema.GroupVersionKind) interface{} { - switch kind { - // Group=machine.openshift.io, Version=v1 - case v1.SchemeGroupVersion.WithKind("AWSFailureDomain"): - return &machinev1.AWSFailureDomainApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("AWSFailureDomainPlacement"): - return &machinev1.AWSFailureDomainPlacementApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("AWSResourceFilter"): - return &machinev1.AWSResourceFilterApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("AWSResourceReference"): - return &machinev1.AWSResourceReferenceApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("AzureFailureDomain"): - return &machinev1.AzureFailureDomainApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSet"): - return &machinev1.ControlPlaneMachineSetApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSetSpec"): - return &machinev1.ControlPlaneMachineSetSpecApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSetStatus"): - return &machinev1.ControlPlaneMachineSetStatusApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSetStrategy"): - return &machinev1.ControlPlaneMachineSetStrategyApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSetTemplate"): - return &machinev1.ControlPlaneMachineSetTemplateApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSetTemplateObjectMeta"): - return &machinev1.ControlPlaneMachineSetTemplateObjectMetaApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("FailureDomains"): - return &machinev1.FailureDomainsApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("GCPFailureDomain"): - return &machinev1.GCPFailureDomainApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("NutanixFailureDomainReference"): - return &machinev1.NutanixFailureDomainReferenceApplyConfiguration{} - case 
v1.SchemeGroupVersion.WithKind("OpenShiftMachineV1Beta1MachineTemplate"): - return &machinev1.OpenShiftMachineV1Beta1MachineTemplateApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("OpenStackFailureDomain"): - return &machinev1.OpenStackFailureDomainApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("RootVolume"): - return &machinev1.RootVolumeApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("VSphereFailureDomain"): - return &machinev1.VSphereFailureDomainApplyConfiguration{} - - // Group=machine.openshift.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithKind("Condition"): - return &machinev1beta1.ConditionApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("LastOperation"): - return &machinev1beta1.LastOperationApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("LifecycleHook"): - return &machinev1beta1.LifecycleHookApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("LifecycleHooks"): - return &machinev1beta1.LifecycleHooksApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("Machine"): - return &machinev1beta1.MachineApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineHealthCheck"): - return &machinev1beta1.MachineHealthCheckApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineHealthCheckSpec"): - return &machinev1beta1.MachineHealthCheckSpecApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineHealthCheckStatus"): - return &machinev1beta1.MachineHealthCheckStatusApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineSet"): - return &machinev1beta1.MachineSetApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineSetSpec"): - return &machinev1beta1.MachineSetSpecApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineSetStatus"): - return &machinev1beta1.MachineSetStatusApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineSpec"): - return 
&machinev1beta1.MachineSpecApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineStatus"): - return &machinev1beta1.MachineStatusApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("MachineTemplateSpec"): - return &machinev1beta1.MachineTemplateSpecApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("ObjectMeta"): - return &machinev1beta1.ObjectMetaApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("ProviderSpec"): - return &machinev1beta1.ProviderSpecApplyConfiguration{} - case v1beta1.SchemeGroupVersion.WithKind("UnhealthyCondition"): - return &machinev1beta1.UnhealthyConditionApplyConfiguration{} - - } - return nil -} - -func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { - return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_controlplanemachineset.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_controlplanemachineset.go deleted file mode 100644 index 710b87b0f5..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_controlplanemachineset.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "github.com/openshift/api/machine/v1" - machinev1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeControlPlaneMachineSets implements ControlPlaneMachineSetInterface -type FakeControlPlaneMachineSets struct { - Fake *FakeMachineV1 - ns string -} - -var controlplanemachinesetsResource = v1.SchemeGroupVersion.WithResource("controlplanemachinesets") - -var controlplanemachinesetsKind = v1.SchemeGroupVersion.WithKind("ControlPlaneMachineSet") - -// Get takes name of the controlPlaneMachineSet, and returns the corresponding controlPlaneMachineSet object, and an error if there is any. -func (c *FakeControlPlaneMachineSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControlPlaneMachineSet, err error) { - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(controlplanemachinesetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// List takes label and field selectors, and returns the list of ControlPlaneMachineSets that match those selectors. -func (c *FakeControlPlaneMachineSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControlPlaneMachineSetList, err error) { - emptyResult := &v1.ControlPlaneMachineSetList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(controlplanemachinesetsResource, controlplanemachinesetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ControlPlaneMachineSetList{ListMeta: obj.(*v1.ControlPlaneMachineSetList).ListMeta} - for _, item := range obj.(*v1.ControlPlaneMachineSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controlPlaneMachineSets. -func (c *FakeControlPlaneMachineSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(controlplanemachinesetsResource, c.ns, opts)) - -} - -// Create takes the representation of a controlPlaneMachineSet and creates it. Returns the server's representation of the controlPlaneMachineSet, and an error, if there is any. -func (c *FakeControlPlaneMachineSets) Create(ctx context.Context, controlPlaneMachineSet *v1.ControlPlaneMachineSet, opts metav1.CreateOptions) (result *v1.ControlPlaneMachineSet, err error) { - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(controlplanemachinesetsResource, c.ns, controlPlaneMachineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// Update takes the representation of a controlPlaneMachineSet and updates it. Returns the server's representation of the controlPlaneMachineSet, and an error, if there is any. 
-func (c *FakeControlPlaneMachineSets) Update(ctx context.Context, controlPlaneMachineSet *v1.ControlPlaneMachineSet, opts metav1.UpdateOptions) (result *v1.ControlPlaneMachineSet, err error) { - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(controlplanemachinesetsResource, c.ns, controlPlaneMachineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeControlPlaneMachineSets) UpdateStatus(ctx context.Context, controlPlaneMachineSet *v1.ControlPlaneMachineSet, opts metav1.UpdateOptions) (result *v1.ControlPlaneMachineSet, err error) { - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(controlplanemachinesetsResource, "status", c.ns, controlPlaneMachineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// Delete takes name of the controlPlaneMachineSet and deletes it. Returns an error if one occurs. -func (c *FakeControlPlaneMachineSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controlplanemachinesetsResource, c.ns, name, opts), &v1.ControlPlaneMachineSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeControlPlaneMachineSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(controlplanemachinesetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ControlPlaneMachineSetList{}) - return err -} - -// Patch applies the patch and returns the patched controlPlaneMachineSet. -func (c *FakeControlPlaneMachineSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControlPlaneMachineSet, err error) { - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(controlplanemachinesetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controlPlaneMachineSet. -func (c *FakeControlPlaneMachineSets) Apply(ctx context.Context, controlPlaneMachineSet *machinev1.ControlPlaneMachineSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControlPlaneMachineSet, err error) { - if controlPlaneMachineSet == nil { - return nil, fmt.Errorf("controlPlaneMachineSet provided to Apply must not be nil") - } - data, err := json.Marshal(controlPlaneMachineSet) - if err != nil { - return nil, err - } - name := controlPlaneMachineSet.Name - if name == nil { - return nil, fmt.Errorf("controlPlaneMachineSet.Name must be provided to Apply") - } - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(controlplanemachinesetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeControlPlaneMachineSets) ApplyStatus(ctx context.Context, controlPlaneMachineSet *machinev1.ControlPlaneMachineSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControlPlaneMachineSet, err error) { - if controlPlaneMachineSet == nil { - return nil, fmt.Errorf("controlPlaneMachineSet provided to Apply must not be nil") - } - data, err := json.Marshal(controlPlaneMachineSet) - if err != nil { - return nil, err - } - name := controlPlaneMachineSet.Name - if name == nil { - return nil, fmt.Errorf("controlPlaneMachineSet.Name must be provided to Apply") - } - emptyResult := &v1.ControlPlaneMachineSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(controlplanemachinesetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1.ControlPlaneMachineSet), err -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_machine_client.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_machine_client.go deleted file mode 100644 index 8e45935458..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake/fake_machine_client.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeMachineV1 struct { - *testing.Fake -} - -func (c *FakeMachineV1) ControlPlaneMachineSets(namespace string) v1.ControlPlaneMachineSetInterface { - return &FakeControlPlaneMachineSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeMachineV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine.go deleted file mode 100644 index cbc5cfaea0..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "github.com/openshift/api/machine/v1beta1" - machinev1beta1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachines implements MachineInterface -type FakeMachines struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinesResource = v1beta1.SchemeGroupVersion.WithResource("machines") - -var machinesKind = v1beta1.SchemeGroupVersion.WithKind("Machine") - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. 
-func (c *FakeMachines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Machine, err error) { - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(machinesResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *FakeMachines) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MachineList, err error) { - emptyResult := &v1beta1.MachineList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(machinesResource, machinesKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineList{ListMeta: obj.(*v1beta1.MachineList).ListMeta} - for _, item := range obj.(*v1beta1.MachineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *FakeMachines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(machinesResource, c.ns, opts)) - -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Create(ctx context.Context, machine *v1beta1.Machine, opts v1.CreateOptions) (result *v1beta1.Machine, err error) { - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(machinesResource, c.ns, machine, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Update(ctx context.Context, machine *v1beta1.Machine, opts v1.UpdateOptions) (result *v1beta1.Machine, err error) { - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(machinesResource, c.ns, machine, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachines) UpdateStatus(ctx context.Context, machine *v1beta1.Machine, opts v1.UpdateOptions) (result *v1beta1.Machine, err error) { - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(machinesResource, "status", c.ns, machine, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. -func (c *FakeMachines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(machinesResource, c.ns, name, opts), &v1beta1.Machine{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeMachines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(machinesResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineList{}) - return err -} - -// Patch applies the patch and returns the patched machine. -func (c *FakeMachines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Machine, err error) { - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied machine. -func (c *FakeMachines) Apply(ctx context.Context, machine *machinev1beta1.MachineApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Machine, err error) { - if machine == nil { - return nil, fmt.Errorf("machine provided to Apply must not be nil") - } - data, err := json.Marshal(machine) - if err != nil { - return nil, err - } - name := machine.Name - if name == nil { - return nil, fmt.Errorf("machine.Name must be provided to Apply") - } - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeMachines) ApplyStatus(ctx context.Context, machine *machinev1beta1.MachineApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Machine, err error) { - if machine == nil { - return nil, fmt.Errorf("machine provided to Apply must not be nil") - } - data, err := json.Marshal(machine) - if err != nil { - return nil, err - } - name := machine.Name - if name == nil { - return nil, fmt.Errorf("machine.Name must be provided to Apply") - } - emptyResult := &v1beta1.Machine{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.Machine), err -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine_client.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine_client.go deleted file mode 100644 index f6dd022d40..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machine_client.go +++ /dev/null @@ -1,32 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1beta1 "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeMachineV1beta1 struct { - *testing.Fake -} - -func (c *FakeMachineV1beta1) Machines(namespace string) v1beta1.MachineInterface { - return &FakeMachines{c, namespace} -} - -func (c *FakeMachineV1beta1) MachineHealthChecks(namespace string) v1beta1.MachineHealthCheckInterface { - return &FakeMachineHealthChecks{c, namespace} -} - -func (c *FakeMachineV1beta1) MachineSets(namespace string) v1beta1.MachineSetInterface { - return &FakeMachineSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeMachineV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machinehealthcheck.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machinehealthcheck.go deleted file mode 100644 index e8394abacf..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machinehealthcheck.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "github.com/openshift/api/machine/v1beta1" - machinev1beta1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineHealthChecks implements MachineHealthCheckInterface -type FakeMachineHealthChecks struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinehealthchecksResource = v1beta1.SchemeGroupVersion.WithResource("machinehealthchecks") - -var machinehealthchecksKind = v1beta1.SchemeGroupVersion.WithKind("MachineHealthCheck") - -// Get takes name of the machineHealthCheck, and returns the corresponding machineHealthCheck object, and an error if there is any. -func (c *FakeMachineHealthChecks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MachineHealthCheck, err error) { - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(machinehealthchecksResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// List takes label and field selectors, and returns the list of MachineHealthChecks that match those selectors. -func (c *FakeMachineHealthChecks) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MachineHealthCheckList, err error) { - emptyResult := &v1beta1.MachineHealthCheckList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(machinehealthchecksResource, machinehealthchecksKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineHealthCheckList{ListMeta: obj.(*v1beta1.MachineHealthCheckList).ListMeta} - for _, item := range obj.(*v1beta1.MachineHealthCheckList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineHealthChecks. -func (c *FakeMachineHealthChecks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(machinehealthchecksResource, c.ns, opts)) - -} - -// Create takes the representation of a machineHealthCheck and creates it. Returns the server's representation of the machineHealthCheck, and an error, if there is any. -func (c *FakeMachineHealthChecks) Create(ctx context.Context, machineHealthCheck *v1beta1.MachineHealthCheck, opts v1.CreateOptions) (result *v1beta1.MachineHealthCheck, err error) { - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(machinehealthchecksResource, c.ns, machineHealthCheck, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// Update takes the representation of a machineHealthCheck and updates it. Returns the server's representation of the machineHealthCheck, and an error, if there is any. -func (c *FakeMachineHealthChecks) Update(ctx context.Context, machineHealthCheck *v1beta1.MachineHealthCheck, opts v1.UpdateOptions) (result *v1beta1.MachineHealthCheck, err error) { - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(machinehealthchecksResource, c.ns, machineHealthCheck, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineHealthChecks) UpdateStatus(ctx context.Context, machineHealthCheck *v1beta1.MachineHealthCheck, opts v1.UpdateOptions) (result *v1beta1.MachineHealthCheck, err error) { - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(machinehealthchecksResource, "status", c.ns, machineHealthCheck, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// Delete takes name of the machineHealthCheck and deletes it. Returns an error if one occurs. -func (c *FakeMachineHealthChecks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(machinehealthchecksResource, c.ns, name, opts), &v1beta1.MachineHealthCheck{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineHealthChecks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(machinehealthchecksResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineHealthCheckList{}) - return err -} - -// Patch applies the patch and returns the patched machineHealthCheck. -func (c *FakeMachineHealthChecks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MachineHealthCheck, err error) { - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(machinehealthchecksResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied machineHealthCheck. -func (c *FakeMachineHealthChecks) Apply(ctx context.Context, machineHealthCheck *machinev1beta1.MachineHealthCheckApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MachineHealthCheck, err error) { - if machineHealthCheck == nil { - return nil, fmt.Errorf("machineHealthCheck provided to Apply must not be nil") - } - data, err := json.Marshal(machineHealthCheck) - if err != nil { - return nil, err - } - name := machineHealthCheck.Name - if name == nil { - return nil, fmt.Errorf("machineHealthCheck.Name must be provided to Apply") - } - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinehealthchecksResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeMachineHealthChecks) ApplyStatus(ctx context.Context, machineHealthCheck *machinev1beta1.MachineHealthCheckApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MachineHealthCheck, err error) { - if machineHealthCheck == nil { - return nil, fmt.Errorf("machineHealthCheck provided to Apply must not be nil") - } - data, err := json.Marshal(machineHealthCheck) - if err != nil { - return nil, err - } - name := machineHealthCheck.Name - if name == nil { - return nil, fmt.Errorf("machineHealthCheck.Name must be provided to Apply") - } - emptyResult := &v1beta1.MachineHealthCheck{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinehealthchecksResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineHealthCheck), err -} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machineset.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machineset.go deleted file mode 100644 index 829683c5a5..0000000000 --- a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/fake_machineset.go +++ /dev/null @@ -1,181 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "github.com/openshift/api/machine/v1beta1" - machinev1beta1 "github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeMachineSets implements MachineSetInterface -type FakeMachineSets struct { - Fake *FakeMachineV1beta1 - ns string -} - -var machinesetsResource = v1beta1.SchemeGroupVersion.WithResource("machinesets") - -var machinesetsKind = v1beta1.SchemeGroupVersion.WithKind("MachineSet") - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *FakeMachineSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MachineSet, err error) { - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(machinesetsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *FakeMachineSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MachineSetList, err error) { - emptyResult := &v1beta1.MachineSetList{} - obj, err := c.Fake. 
- Invokes(testing.NewListActionWithOptions(machinesetsResource, machinesetsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MachineSetList{ListMeta: obj.(*v1beta1.MachineSetList).ListMeta} - for _, item := range obj.(*v1beta1.MachineSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *FakeMachineSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(machinesetsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Create(ctx context.Context, machineSet *v1beta1.MachineSet, opts v1.CreateOptions) (result *v1beta1.MachineSet, err error) { - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. - Invokes(testing.NewCreateActionWithOptions(machinesetsResource, c.ns, machineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Update(ctx context.Context, machineSet *v1beta1.MachineSet, opts v1.UpdateOptions) (result *v1beta1.MachineSet, err error) { - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. 
- Invokes(testing.NewUpdateActionWithOptions(machinesetsResource, c.ns, machineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineSets) UpdateStatus(ctx context.Context, machineSet *v1beta1.MachineSet, opts v1.UpdateOptions) (result *v1beta1.MachineSet, err error) { - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(machinesetsResource, "status", c.ns, machineSet, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// Delete takes name of the machineSet and deletes it. Returns an error if one occurs. -func (c *FakeMachineSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(machinesetsResource, c.ns, name, opts), &v1beta1.MachineSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(machinesetsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.MachineSetList{}) - return err -} - -// Patch applies the patch and returns the patched machineSet. -func (c *FakeMachineSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MachineSet, err error) { - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(machinesetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied machineSet. -func (c *FakeMachineSets) Apply(ctx context.Context, machineSet *machinev1beta1.MachineSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MachineSet, err error) { - if machineSet == nil { - return nil, fmt.Errorf("machineSet provided to Apply must not be nil") - } - data, err := json.Marshal(machineSet) - if err != nil { - return nil, err - } - name := machineSet.Name - if name == nil { - return nil, fmt.Errorf("machineSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(machinesetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeMachineSets) ApplyStatus(ctx context.Context, machineSet *machinev1beta1.MachineSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MachineSet, err error) { - if machineSet == nil { - return nil, fmt.Errorf("machineSet provided to Apply must not be nil") - } - data, err := json.Marshal(machineSet) - if err != nil { - return nil, err - } - name := machineSet.Name - if name == nil { - return nil, fmt.Errorf("machineSet.Name must be provided to Apply") - } - emptyResult := &v1beta1.MachineSet{} - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceActionWithOptions(machinesetsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1beta1.MachineSet), err -} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go index 4d678f4d6b..da0071b083 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal/internal.go @@ -73,6 +73,26 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1.MachineOSBuild + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: com.github.openshift.api.machineconfiguration.v1.MachineOSConfig + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable - name: com.github.openshift.api.machineconfiguration.v1alpha1.BuildInputs map: fields: diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/imagesecretobjectreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/imagesecretobjectreference.go new file mode 100644 index 0000000000..5c10080173 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/imagesecretobjectreference.go @@ -0,0 +1,23 @@ +// Code generated by 
applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageSecretObjectReferenceApplyConfiguration represents a declarative configuration of the ImageSecretObjectReference type for use +// with apply. +type ImageSecretObjectReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// ImageSecretObjectReferenceApplyConfiguration constructs a declarative configuration of the ImageSecretObjectReference type for use with +// apply. +func ImageSecretObjectReference() *ImageSecretObjectReferenceApplyConfiguration { + return &ImageSecretObjectReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageSecretObjectReferenceApplyConfiguration) WithName(value string) *ImageSecretObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolreference.go new file mode 100644 index 0000000000..fa0207b339 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigPoolReferenceApplyConfiguration represents a declarative configuration of the MachineConfigPoolReference type for use +// with apply. 
+type MachineConfigPoolReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// MachineConfigPoolReferenceApplyConfiguration constructs a declarative configuration of the MachineConfigPoolReference type for use with +// apply. +func MachineConfigPoolReference() *MachineConfigPoolReferenceApplyConfiguration { + return &MachineConfigPoolReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigPoolReferenceApplyConfiguration) WithName(value string) *MachineConfigPoolReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigreference.go new file mode 100644 index 0000000000..45c3c741f1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineConfigReferenceApplyConfiguration represents a declarative configuration of the MachineConfigReference type for use +// with apply. +type MachineConfigReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// MachineConfigReferenceApplyConfiguration constructs a declarative configuration of the MachineConfigReference type for use with +// apply. 
+func MachineConfigReference() *MachineConfigReferenceApplyConfiguration { + return &MachineConfigReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MachineConfigReferenceApplyConfiguration) WithName(value string) *MachineConfigReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuild.go new file mode 100644 index 0000000000..632e7c526d --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuild.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apimachineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineOSBuildApplyConfiguration represents a declarative configuration of the MachineOSBuild type for use +// with apply. 
+type MachineOSBuildApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineOSBuildSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineOSBuildStatusApplyConfiguration `json:"status,omitempty"` +} + +// MachineOSBuild constructs a declarative configuration of the MachineOSBuild type for use with +// apply. +func MachineOSBuild(name string) *MachineOSBuildApplyConfiguration { + b := &MachineOSBuildApplyConfiguration{} + b.WithName(name) + b.WithKind("MachineOSBuild") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b +} + +// ExtractMachineOSBuild extracts the applied configuration owned by fieldManager from +// machineOSBuild. If no managedFields are found in machineOSBuild for fieldManager, a +// MachineOSBuildApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// machineOSBuild must be a unmodified MachineOSBuild API object that was retrieved from the Kubernetes API. +// ExtractMachineOSBuild provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMachineOSBuild(machineOSBuild *apimachineconfigurationv1.MachineOSBuild, fieldManager string) (*MachineOSBuildApplyConfiguration, error) { + return extractMachineOSBuild(machineOSBuild, fieldManager, "") +} + +// ExtractMachineOSBuildStatus is the same as ExtractMachineOSBuild except +// that it extracts the status subresource applied configuration. 
+// Experimental! +func ExtractMachineOSBuildStatus(machineOSBuild *apimachineconfigurationv1.MachineOSBuild, fieldManager string) (*MachineOSBuildApplyConfiguration, error) { + return extractMachineOSBuild(machineOSBuild, fieldManager, "status") +} + +func extractMachineOSBuild(machineOSBuild *apimachineconfigurationv1.MachineOSBuild, fieldManager string, subresource string) (*MachineOSBuildApplyConfiguration, error) { + b := &MachineOSBuildApplyConfiguration{} + err := managedfields.ExtractInto(machineOSBuild, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1.MachineOSBuild"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(machineOSBuild.Name) + + b.WithKind("MachineOSBuild") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithKind(value string) *MachineOSBuildApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithAPIVersion(value string) *MachineOSBuildApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MachineOSBuildApplyConfiguration) WithName(value string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithGenerateName(value string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithNamespace(value string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithUID(value types.UID) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *MachineOSBuildApplyConfiguration) WithResourceVersion(value string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithGeneration(value int64) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MachineOSBuildApplyConfiguration) WithLabels(entries map[string]string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MachineOSBuildApplyConfiguration) WithAnnotations(entries map[string]string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MachineOSBuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MachineOSBuildApplyConfiguration) WithFinalizers(values ...string) *MachineOSBuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *MachineOSBuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MachineOSBuildApplyConfiguration) WithSpec(value *MachineOSBuildSpecApplyConfiguration) *MachineOSBuildApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *MachineOSBuildApplyConfiguration) WithStatus(value *MachineOSBuildStatusApplyConfiguration) *MachineOSBuildApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MachineOSBuildApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuilderreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuilderreference.go new file mode 100644 index 0000000000..91a14fed96 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuilderreference.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" +) + +// MachineOSBuilderReferenceApplyConfiguration represents a declarative configuration of the MachineOSBuilderReference type for use +// with apply. +type MachineOSBuilderReferenceApplyConfiguration struct { + ImageBuilderType *v1.MachineOSImageBuilderType `json:"imageBuilderType,omitempty"` + Job *ObjectReferenceApplyConfiguration `json:"job,omitempty"` +} + +// MachineOSBuilderReferenceApplyConfiguration constructs a declarative configuration of the MachineOSBuilderReference type for use with +// apply. +func MachineOSBuilderReference() *MachineOSBuilderReferenceApplyConfiguration { + return &MachineOSBuilderReferenceApplyConfiguration{} +} + +// WithImageBuilderType sets the ImageBuilderType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageBuilderType field is set to the value of the last call. 
+func (b *MachineOSBuilderReferenceApplyConfiguration) WithImageBuilderType(value v1.MachineOSImageBuilderType) *MachineOSBuilderReferenceApplyConfiguration { + b.ImageBuilderType = &value + return b +} + +// WithJob sets the Job field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Job field is set to the value of the last call. +func (b *MachineOSBuilderReferenceApplyConfiguration) WithJob(value *ObjectReferenceApplyConfiguration) *MachineOSBuilderReferenceApplyConfiguration { + b.Job = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildspec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildspec.go new file mode 100644 index 0000000000..1f39ab14b4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildspec.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" +) + +// MachineOSBuildSpecApplyConfiguration represents a declarative configuration of the MachineOSBuildSpec type for use +// with apply. +type MachineOSBuildSpecApplyConfiguration struct { + MachineConfig *MachineConfigReferenceApplyConfiguration `json:"machineConfig,omitempty"` + MachineOSConfig *MachineOSConfigReferenceApplyConfiguration `json:"machineOSConfig,omitempty"` + RenderedImagePushSpec *machineconfigurationv1.ImageTagFormat `json:"renderedImagePushSpec,omitempty"` +} + +// MachineOSBuildSpecApplyConfiguration constructs a declarative configuration of the MachineOSBuildSpec type for use with +// apply. 
+func MachineOSBuildSpec() *MachineOSBuildSpecApplyConfiguration { + return &MachineOSBuildSpecApplyConfiguration{} +} + +// WithMachineConfig sets the MachineConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineConfig field is set to the value of the last call. +func (b *MachineOSBuildSpecApplyConfiguration) WithMachineConfig(value *MachineConfigReferenceApplyConfiguration) *MachineOSBuildSpecApplyConfiguration { + b.MachineConfig = value + return b +} + +// WithMachineOSConfig sets the MachineOSConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineOSConfig field is set to the value of the last call. +func (b *MachineOSBuildSpecApplyConfiguration) WithMachineOSConfig(value *MachineOSConfigReferenceApplyConfiguration) *MachineOSBuildSpecApplyConfiguration { + b.MachineOSConfig = value + return b +} + +// WithRenderedImagePushSpec sets the RenderedImagePushSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenderedImagePushSpec field is set to the value of the last call. 
+func (b *MachineOSBuildSpecApplyConfiguration) WithRenderedImagePushSpec(value machineconfigurationv1.ImageTagFormat) *MachineOSBuildSpecApplyConfiguration { + b.RenderedImagePushSpec = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildstatus.go new file mode 100644 index 0000000000..2ef3c5140c --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosbuildstatus.go @@ -0,0 +1,84 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apimachineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineOSBuildStatusApplyConfiguration represents a declarative configuration of the MachineOSBuildStatus type for use +// with apply. +type MachineOSBuildStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Builder *MachineOSBuilderReferenceApplyConfiguration `json:"builder,omitempty"` + RelatedObjects []ObjectReferenceApplyConfiguration `json:"relatedObjects,omitempty"` + BuildStart *metav1.Time `json:"buildStart,omitempty"` + BuildEnd *metav1.Time `json:"buildEnd,omitempty"` + DigestedImagePushSpec *apimachineconfigurationv1.ImageDigestFormat `json:"digestedImagePushSpec,omitempty"` +} + +// MachineOSBuildStatusApplyConfiguration constructs a declarative configuration of the MachineOSBuildStatus type for use with +// apply. 
+func MachineOSBuildStatus() *MachineOSBuildStatusApplyConfiguration { + return &MachineOSBuildStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MachineOSBuildStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *MachineOSBuildStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithBuilder sets the Builder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Builder field is set to the value of the last call. +func (b *MachineOSBuildStatusApplyConfiguration) WithBuilder(value *MachineOSBuilderReferenceApplyConfiguration) *MachineOSBuildStatusApplyConfiguration { + b.Builder = value + return b +} + +// WithRelatedObjects adds the given value to the RelatedObjects field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the RelatedObjects field. 
+func (b *MachineOSBuildStatusApplyConfiguration) WithRelatedObjects(values ...*ObjectReferenceApplyConfiguration) *MachineOSBuildStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRelatedObjects") + } + b.RelatedObjects = append(b.RelatedObjects, *values[i]) + } + return b +} + +// WithBuildStart sets the BuildStart field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuildStart field is set to the value of the last call. +func (b *MachineOSBuildStatusApplyConfiguration) WithBuildStart(value metav1.Time) *MachineOSBuildStatusApplyConfiguration { + b.BuildStart = &value + return b +} + +// WithBuildEnd sets the BuildEnd field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuildEnd field is set to the value of the last call. +func (b *MachineOSBuildStatusApplyConfiguration) WithBuildEnd(value metav1.Time) *MachineOSBuildStatusApplyConfiguration { + b.BuildEnd = &value + return b +} + +// WithDigestedImagePushSpec sets the DigestedImagePushSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DigestedImagePushSpec field is set to the value of the last call. 
+func (b *MachineOSBuildStatusApplyConfiguration) WithDigestedImagePushSpec(value apimachineconfigurationv1.ImageDigestFormat) *MachineOSBuildStatusApplyConfiguration { + b.DigestedImagePushSpec = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfig.go new file mode 100644 index 0000000000..15cc867113 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfig.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apimachineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + internal "github.com/openshift/client-go/machineconfiguration/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineOSConfigApplyConfiguration represents a declarative configuration of the MachineOSConfig type for use +// with apply. +type MachineOSConfigApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MachineOSConfigSpecApplyConfiguration `json:"spec,omitempty"` + Status *MachineOSConfigStatusApplyConfiguration `json:"status,omitempty"` +} + +// MachineOSConfig constructs a declarative configuration of the MachineOSConfig type for use with +// apply. 
+func MachineOSConfig(name string) *MachineOSConfigApplyConfiguration { + b := &MachineOSConfigApplyConfiguration{} + b.WithName(name) + b.WithKind("MachineOSConfig") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b +} + +// ExtractMachineOSConfig extracts the applied configuration owned by fieldManager from +// machineOSConfig. If no managedFields are found in machineOSConfig for fieldManager, a +// MachineOSConfigApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// machineOSConfig must be a unmodified MachineOSConfig API object that was retrieved from the Kubernetes API. +// ExtractMachineOSConfig provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMachineOSConfig(machineOSConfig *apimachineconfigurationv1.MachineOSConfig, fieldManager string) (*MachineOSConfigApplyConfiguration, error) { + return extractMachineOSConfig(machineOSConfig, fieldManager, "") +} + +// ExtractMachineOSConfigStatus is the same as ExtractMachineOSConfig except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractMachineOSConfigStatus(machineOSConfig *apimachineconfigurationv1.MachineOSConfig, fieldManager string) (*MachineOSConfigApplyConfiguration, error) { + return extractMachineOSConfig(machineOSConfig, fieldManager, "status") +} + +func extractMachineOSConfig(machineOSConfig *apimachineconfigurationv1.MachineOSConfig, fieldManager string, subresource string) (*MachineOSConfigApplyConfiguration, error) { + b := &MachineOSConfigApplyConfiguration{} + err := managedfields.ExtractInto(machineOSConfig, internal.Parser().Type("com.github.openshift.api.machineconfiguration.v1.MachineOSConfig"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(machineOSConfig.Name) + + b.WithKind("MachineOSConfig") + b.WithAPIVersion("machineconfiguration.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithKind(value string) *MachineOSConfigApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithAPIVersion(value string) *MachineOSConfigApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MachineOSConfigApplyConfiguration) WithName(value string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithGenerateName(value string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithNamespace(value string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithUID(value types.UID) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *MachineOSConfigApplyConfiguration) WithResourceVersion(value string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithGeneration(value int64) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MachineOSConfigApplyConfiguration) WithLabels(entries map[string]string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MachineOSConfigApplyConfiguration) WithAnnotations(entries map[string]string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MachineOSConfigApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MachineOSConfigApplyConfiguration) WithFinalizers(values ...string) *MachineOSConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *MachineOSConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MachineOSConfigApplyConfiguration) WithSpec(value *MachineOSConfigSpecApplyConfiguration) *MachineOSConfigApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *MachineOSConfigApplyConfiguration) WithStatus(value *MachineOSConfigStatusApplyConfiguration) *MachineOSConfigApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MachineOSConfigApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigreference.go new file mode 100644 index 0000000000..13afe3f088 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// MachineOSConfigReferenceApplyConfiguration represents a declarative configuration of the MachineOSConfigReference type for use +// with apply. +type MachineOSConfigReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// MachineOSConfigReferenceApplyConfiguration constructs a declarative configuration of the MachineOSConfigReference type for use with +// apply. +func MachineOSConfigReference() *MachineOSConfigReferenceApplyConfiguration { + return &MachineOSConfigReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *MachineOSConfigReferenceApplyConfiguration) WithName(value string) *MachineOSConfigReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigspec.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigspec.go new file mode 100644 index 0000000000..6a9be7eded --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigspec.go @@ -0,0 +1,77 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" +) + +// MachineOSConfigSpecApplyConfiguration represents a declarative configuration of the MachineOSConfigSpec type for use +// with apply. +type MachineOSConfigSpecApplyConfiguration struct { + MachineConfigPool *MachineConfigPoolReferenceApplyConfiguration `json:"machineConfigPool,omitempty"` + ImageBuilder *MachineOSImageBuilderApplyConfiguration `json:"imageBuilder,omitempty"` + BaseImagePullSecret *ImageSecretObjectReferenceApplyConfiguration `json:"baseImagePullSecret,omitempty"` + RenderedImagePushSecret *ImageSecretObjectReferenceApplyConfiguration `json:"renderedImagePushSecret,omitempty"` + RenderedImagePushSpec *machineconfigurationv1.ImageTagFormat `json:"renderedImagePushSpec,omitempty"` + Containerfile []MachineOSContainerfileApplyConfiguration `json:"containerFile,omitempty"` +} + +// MachineOSConfigSpecApplyConfiguration constructs a declarative configuration of the MachineOSConfigSpec type for use with +// apply. 
+func MachineOSConfigSpec() *MachineOSConfigSpecApplyConfiguration { + return &MachineOSConfigSpecApplyConfiguration{} +} + +// WithMachineConfigPool sets the MachineConfigPool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineConfigPool field is set to the value of the last call. +func (b *MachineOSConfigSpecApplyConfiguration) WithMachineConfigPool(value *MachineConfigPoolReferenceApplyConfiguration) *MachineOSConfigSpecApplyConfiguration { + b.MachineConfigPool = value + return b +} + +// WithImageBuilder sets the ImageBuilder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageBuilder field is set to the value of the last call. +func (b *MachineOSConfigSpecApplyConfiguration) WithImageBuilder(value *MachineOSImageBuilderApplyConfiguration) *MachineOSConfigSpecApplyConfiguration { + b.ImageBuilder = value + return b +} + +// WithBaseImagePullSecret sets the BaseImagePullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BaseImagePullSecret field is set to the value of the last call. +func (b *MachineOSConfigSpecApplyConfiguration) WithBaseImagePullSecret(value *ImageSecretObjectReferenceApplyConfiguration) *MachineOSConfigSpecApplyConfiguration { + b.BaseImagePullSecret = value + return b +} + +// WithRenderedImagePushSecret sets the RenderedImagePushSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenderedImagePushSecret field is set to the value of the last call. 
+func (b *MachineOSConfigSpecApplyConfiguration) WithRenderedImagePushSecret(value *ImageSecretObjectReferenceApplyConfiguration) *MachineOSConfigSpecApplyConfiguration { + b.RenderedImagePushSecret = value + return b +} + +// WithRenderedImagePushSpec sets the RenderedImagePushSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenderedImagePushSpec field is set to the value of the last call. +func (b *MachineOSConfigSpecApplyConfiguration) WithRenderedImagePushSpec(value machineconfigurationv1.ImageTagFormat) *MachineOSConfigSpecApplyConfiguration { + b.RenderedImagePushSpec = &value + return b +} + +// WithContainerfile adds the given value to the Containerfile field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Containerfile field. +func (b *MachineOSConfigSpecApplyConfiguration) WithContainerfile(values ...*MachineOSContainerfileApplyConfiguration) *MachineOSConfigSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithContainerfile") + } + b.Containerfile = append(b.Containerfile, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigstatus.go new file mode 100644 index 0000000000..185381e8fd --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosconfigstatus.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MachineOSConfigStatusApplyConfiguration represents a declarative configuration of the MachineOSConfigStatus type for use +// with apply. +type MachineOSConfigStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + CurrentImagePullSpec *machineconfigurationv1.ImageDigestFormat `json:"currentImagePullSpec,omitempty"` + MachineOSBuild *ObjectReferenceApplyConfiguration `json:"machineOSBuild,omitempty"` +} + +// MachineOSConfigStatusApplyConfiguration constructs a declarative configuration of the MachineOSConfigStatus type for use with +// apply. +func MachineOSConfigStatus() *MachineOSConfigStatusApplyConfiguration { + return &MachineOSConfigStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *MachineOSConfigStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *MachineOSConfigStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *MachineOSConfigStatusApplyConfiguration) WithObservedGeneration(value int64) *MachineOSConfigStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithCurrentImagePullSpec sets the CurrentImagePullSpec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CurrentImagePullSpec field is set to the value of the last call. +func (b *MachineOSConfigStatusApplyConfiguration) WithCurrentImagePullSpec(value machineconfigurationv1.ImageDigestFormat) *MachineOSConfigStatusApplyConfiguration { + b.CurrentImagePullSpec = &value + return b +} + +// WithMachineOSBuild sets the MachineOSBuild field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineOSBuild field is set to the value of the last call. +func (b *MachineOSConfigStatusApplyConfiguration) WithMachineOSBuild(value *ObjectReferenceApplyConfiguration) *MachineOSConfigStatusApplyConfiguration { + b.MachineOSBuild = value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineoscontainerfile.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineoscontainerfile.go new file mode 100644 index 0000000000..a39d174086 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineoscontainerfile.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" +) + +// MachineOSContainerfileApplyConfiguration represents a declarative configuration of the MachineOSContainerfile type for use +// with apply. 
+type MachineOSContainerfileApplyConfiguration struct { + ContainerfileArch *v1.ContainerfileArch `json:"containerfileArch,omitempty"` + Content *string `json:"content,omitempty"` +} + +// MachineOSContainerfileApplyConfiguration constructs a declarative configuration of the MachineOSContainerfile type for use with +// apply. +func MachineOSContainerfile() *MachineOSContainerfileApplyConfiguration { + return &MachineOSContainerfileApplyConfiguration{} +} + +// WithContainerfileArch sets the ContainerfileArch field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ContainerfileArch field is set to the value of the last call. +func (b *MachineOSContainerfileApplyConfiguration) WithContainerfileArch(value v1.ContainerfileArch) *MachineOSContainerfileApplyConfiguration { + b.ContainerfileArch = &value + return b +} + +// WithContent sets the Content field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Content field is set to the value of the last call. +func (b *MachineOSContainerfileApplyConfiguration) WithContent(value string) *MachineOSContainerfileApplyConfiguration { + b.Content = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosimagebuilder.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosimagebuilder.go new file mode 100644 index 0000000000..6edf70fe85 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineosimagebuilder.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" +) + +// MachineOSImageBuilderApplyConfiguration represents a declarative configuration of the MachineOSImageBuilder type for use +// with apply. +type MachineOSImageBuilderApplyConfiguration struct { + ImageBuilderType *v1.MachineOSImageBuilderType `json:"imageBuilderType,omitempty"` +} + +// MachineOSImageBuilderApplyConfiguration constructs a declarative configuration of the MachineOSImageBuilder type for use with +// apply. +func MachineOSImageBuilder() *MachineOSImageBuilderApplyConfiguration { + return &MachineOSImageBuilderApplyConfiguration{} +} + +// WithImageBuilderType sets the ImageBuilderType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageBuilderType field is set to the value of the last call. +func (b *MachineOSImageBuilderApplyConfiguration) WithImageBuilderType(value v1.MachineOSImageBuilderType) *MachineOSImageBuilderApplyConfiguration { + b.ImageBuilderType = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/objectreference.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/objectreference.go new file mode 100644 index 0000000000..dfbc465e71 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/objectreference.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use +// with apply. 
+type ObjectReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with +// apply. +func ObjectReference() *ObjectReferenceApplyConfiguration { + return &ObjectReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithGroup(value string) *ObjectReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithResource(value string) *ObjectReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithNamespace(value string) *ObjectReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *ObjectReferenceApplyConfiguration) WithName(value string) *ObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/utils.go index 3798c8996b..0d93d1223f 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/utils.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/utils.go @@ -42,6 +42,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &machineconfigurationv1.ControllerConfigStatusConditionApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("ImageRegistryBundle"): return &machineconfigurationv1.ImageRegistryBundleApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ImageSecretObjectReference"): + return &machineconfigurationv1.ImageSecretObjectReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("KubeletConfig"): return &machineconfigurationv1.KubeletConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("KubeletConfigCondition"): @@ -56,16 +58,42 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &machineconfigurationv1.MachineConfigPoolApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("MachineConfigPoolCondition"): return &machineconfigurationv1.MachineConfigPoolConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineConfigPoolReference"): + return &machineconfigurationv1.MachineConfigPoolReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("MachineConfigPoolSpec"): return &machineconfigurationv1.MachineConfigPoolSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("MachineConfigPoolStatus"): return &machineconfigurationv1.MachineConfigPoolStatusApplyConfiguration{} case 
v1.SchemeGroupVersion.WithKind("MachineConfigPoolStatusConfiguration"): return &machineconfigurationv1.MachineConfigPoolStatusConfigurationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineConfigReference"): + return &machineconfigurationv1.MachineConfigReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("MachineConfigSpec"): return &machineconfigurationv1.MachineConfigSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSBuild"): + return &machineconfigurationv1.MachineOSBuildApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSBuilderReference"): + return &machineconfigurationv1.MachineOSBuilderReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSBuildSpec"): + return &machineconfigurationv1.MachineOSBuildSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSBuildStatus"): + return &machineconfigurationv1.MachineOSBuildStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSConfig"): + return &machineconfigurationv1.MachineOSConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSConfigReference"): + return &machineconfigurationv1.MachineOSConfigReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSConfigSpec"): + return &machineconfigurationv1.MachineOSConfigSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSConfigStatus"): + return &machineconfigurationv1.MachineOSConfigStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSContainerfile"): + return &machineconfigurationv1.MachineOSContainerfileApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("MachineOSImageBuilder"): + return &machineconfigurationv1.MachineOSImageBuilderApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("NetworkInfo"): return &machineconfigurationv1.NetworkInfoApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("ObjectReference"): + return 
&machineconfigurationv1.ObjectReferenceApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PinnedImageSetRef"): return &machineconfigurationv1.PinnedImageSetRefApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("PoolSynchronizerStatus"): diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineconfiguration_client.go index c9ef0eeb40..8983b09104 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineconfiguration_client.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineconfiguration_client.go @@ -32,6 +32,14 @@ func (c *FakeMachineconfigurationV1) MachineConfigPools() v1.MachineConfigPoolIn return &FakeMachineConfigPools{c} } +func (c *FakeMachineconfigurationV1) MachineOSBuilds() v1.MachineOSBuildInterface { + return &FakeMachineOSBuilds{c} +} + +func (c *FakeMachineconfigurationV1) MachineOSConfigs() v1.MachineOSConfigInterface { + return &FakeMachineOSConfigs{c} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
func (c *FakeMachineconfigurationV1) RESTClient() rest.Interface { diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosbuild.go new file mode 100644 index 0000000000..40cc28a633 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosbuild.go @@ -0,0 +1,170 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMachineOSBuilds implements MachineOSBuildInterface +type FakeMachineOSBuilds struct { + Fake *FakeMachineconfigurationV1 +} + +var machineosbuildsResource = v1.SchemeGroupVersion.WithResource("machineosbuilds") + +var machineosbuildsKind = v1.SchemeGroupVersion.WithKind("MachineOSBuild") + +// Get takes name of the machineOSBuild, and returns the corresponding machineOSBuild object, and an error if there is any. +func (c *FakeMachineOSBuilds) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MachineOSBuild, err error) { + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetActionWithOptions(machineosbuildsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// List takes label and field selectors, and returns the list of MachineOSBuilds that match those selectors. +func (c *FakeMachineOSBuilds) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MachineOSBuildList, err error) { + emptyResult := &v1.MachineOSBuildList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(machineosbuildsResource, machineosbuildsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.MachineOSBuildList{ListMeta: obj.(*v1.MachineOSBuildList).ListMeta} + for _, item := range obj.(*v1.MachineOSBuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested machineOSBuilds. +func (c *FakeMachineOSBuilds) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(machineosbuildsResource, opts)) +} + +// Create takes the representation of a machineOSBuild and creates it. Returns the server's representation of the machineOSBuild, and an error, if there is any. +func (c *FakeMachineOSBuilds) Create(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.CreateOptions) (result *v1.MachineOSBuild, err error) { + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(machineosbuildsResource, machineOSBuild, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// Update takes the representation of a machineOSBuild and updates it. 
Returns the server's representation of the machineOSBuild, and an error, if there is any. +func (c *FakeMachineOSBuilds) Update(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.UpdateOptions) (result *v1.MachineOSBuild, err error) { + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(machineosbuildsResource, machineOSBuild, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMachineOSBuilds) UpdateStatus(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.UpdateOptions) (result *v1.MachineOSBuild, err error) { + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(machineosbuildsResource, "status", machineOSBuild, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// Delete takes name of the machineOSBuild and deletes it. Returns an error if one occurs. +func (c *FakeMachineOSBuilds) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(machineosbuildsResource, name, opts), &v1.MachineOSBuild{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMachineOSBuilds) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(machineosbuildsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.MachineOSBuildList{}) + return err +} + +// Patch applies the patch and returns the patched machineOSBuild. 
+func (c *FakeMachineOSBuilds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MachineOSBuild, err error) { + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosbuildsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied machineOSBuild. +func (c *FakeMachineOSBuilds) Apply(ctx context.Context, machineOSBuild *machineconfigurationv1.MachineOSBuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSBuild, err error) { + if machineOSBuild == nil { + return nil, fmt.Errorf("machineOSBuild provided to Apply must not be nil") + } + data, err := json.Marshal(machineOSBuild) + if err != nil { + return nil, err + } + name := machineOSBuild.Name + if name == nil { + return nil, fmt.Errorf("machineOSBuild.Name must be provided to Apply") + } + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosbuildsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeMachineOSBuilds) ApplyStatus(ctx context.Context, machineOSBuild *machineconfigurationv1.MachineOSBuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSBuild, err error) { + if machineOSBuild == nil { + return nil, fmt.Errorf("machineOSBuild provided to Apply must not be nil") + } + data, err := json.Marshal(machineOSBuild) + if err != nil { + return nil, err + } + name := machineOSBuild.Name + if name == nil { + return nil, fmt.Errorf("machineOSBuild.Name must be provided to Apply") + } + emptyResult := &v1.MachineOSBuild{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosbuildsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSBuild), err +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosconfig.go new file mode 100644 index 0000000000..aa07b72b9c --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/fake/fake_machineosconfig.go @@ -0,0 +1,170 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeMachineOSConfigs implements MachineOSConfigInterface +type FakeMachineOSConfigs struct { + Fake *FakeMachineconfigurationV1 +} + +var machineosconfigsResource = v1.SchemeGroupVersion.WithResource("machineosconfigs") + +var machineosconfigsKind = v1.SchemeGroupVersion.WithKind("MachineOSConfig") + +// Get takes name of the machineOSConfig, and returns the corresponding machineOSConfig object, and an error if there is any. +func (c *FakeMachineOSConfigs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MachineOSConfig, err error) { + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(machineosconfigsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// List takes label and field selectors, and returns the list of MachineOSConfigs that match those selectors. +func (c *FakeMachineOSConfigs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MachineOSConfigList, err error) { + emptyResult := &v1.MachineOSConfigList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(machineosconfigsResource, machineosconfigsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.MachineOSConfigList{ListMeta: obj.(*v1.MachineOSConfigList).ListMeta} + for _, item := range obj.(*v1.MachineOSConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested machineOSConfigs. +func (c *FakeMachineOSConfigs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(machineosconfigsResource, opts)) +} + +// Create takes the representation of a machineOSConfig and creates it. Returns the server's representation of the machineOSConfig, and an error, if there is any. +func (c *FakeMachineOSConfigs) Create(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.CreateOptions) (result *v1.MachineOSConfig, err error) { + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(machineosconfigsResource, machineOSConfig, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// Update takes the representation of a machineOSConfig and updates it. Returns the server's representation of the machineOSConfig, and an error, if there is any. +func (c *FakeMachineOSConfigs) Update(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.UpdateOptions) (result *v1.MachineOSConfig, err error) { + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateActionWithOptions(machineosconfigsResource, machineOSConfig, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeMachineOSConfigs) UpdateStatus(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.UpdateOptions) (result *v1.MachineOSConfig, err error) { + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(machineosconfigsResource, "status", machineOSConfig, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// Delete takes name of the machineOSConfig and deletes it. Returns an error if one occurs. +func (c *FakeMachineOSConfigs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(machineosconfigsResource, name, opts), &v1.MachineOSConfig{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeMachineOSConfigs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(machineosconfigsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.MachineOSConfigList{}) + return err +} + +// Patch applies the patch and returns the patched machineOSConfig. +func (c *FakeMachineOSConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MachineOSConfig, err error) { + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosconfigsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied machineOSConfig. +func (c *FakeMachineOSConfigs) Apply(ctx context.Context, machineOSConfig *machineconfigurationv1.MachineOSConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSConfig, err error) { + if machineOSConfig == nil { + return nil, fmt.Errorf("machineOSConfig provided to Apply must not be nil") + } + data, err := json.Marshal(machineOSConfig) + if err != nil { + return nil, err + } + name := machineOSConfig.Name + if name == nil { + return nil, fmt.Errorf("machineOSConfig.Name must be provided to Apply") + } + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosconfigsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeMachineOSConfigs) ApplyStatus(ctx context.Context, machineOSConfig *machineconfigurationv1.MachineOSConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSConfig, err error) { + if machineOSConfig == nil { + return nil, fmt.Errorf("machineOSConfig provided to Apply must not be nil") + } + data, err := json.Marshal(machineOSConfig) + if err != nil { + return nil, err + } + name := machineOSConfig.Name + if name == nil { + return nil, fmt.Errorf("machineOSConfig.Name must be provided to Apply") + } + emptyResult := &v1.MachineOSConfig{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(machineosconfigsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.MachineOSConfig), err +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go index cce54d166a..a277355544 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/generated_expansion.go @@ -11,3 +11,7 @@ type KubeletConfigExpansion interface{} type MachineConfigExpansion interface{} type MachineConfigPoolExpansion interface{} + +type MachineOSBuildExpansion interface{} + +type MachineOSConfigExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go index 6bc99dd14a..c595fc4467 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineconfiguration_client.go @@ -17,6 +17,8 @@ type MachineconfigurationV1Interface interface { KubeletConfigsGetter MachineConfigsGetter MachineConfigPoolsGetter + MachineOSBuildsGetter + MachineOSConfigsGetter } // MachineconfigurationV1Client is used to interact with features provided by the 
machineconfiguration.openshift.io group. @@ -44,6 +46,14 @@ func (c *MachineconfigurationV1Client) MachineConfigPools() MachineConfigPoolInt return newMachineConfigPools(c) } +func (c *MachineconfigurationV1Client) MachineOSBuilds() MachineOSBuildInterface { + return newMachineOSBuilds(c) +} + +func (c *MachineconfigurationV1Client) MachineOSConfigs() MachineOSConfigInterface { + return newMachineOSConfigs(c) +} + // NewForConfig creates a new MachineconfigurationV1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosbuild.go new file mode 100644 index 0000000000..41ead8964e --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosbuild.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// MachineOSBuildsGetter has a method to return a MachineOSBuildInterface. +// A group's client should implement this interface. +type MachineOSBuildsGetter interface { + MachineOSBuilds() MachineOSBuildInterface +} + +// MachineOSBuildInterface has methods to work with MachineOSBuild resources. 
+type MachineOSBuildInterface interface { + Create(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.CreateOptions) (*v1.MachineOSBuild, error) + Update(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.UpdateOptions) (*v1.MachineOSBuild, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, machineOSBuild *v1.MachineOSBuild, opts metav1.UpdateOptions) (*v1.MachineOSBuild, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MachineOSBuild, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.MachineOSBuildList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MachineOSBuild, err error) + Apply(ctx context.Context, machineOSBuild *machineconfigurationv1.MachineOSBuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSBuild, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, machineOSBuild *machineconfigurationv1.MachineOSBuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSBuild, err error) + MachineOSBuildExpansion +} + +// machineOSBuilds implements MachineOSBuildInterface +type machineOSBuilds struct { + *gentype.ClientWithListAndApply[*v1.MachineOSBuild, *v1.MachineOSBuildList, *machineconfigurationv1.MachineOSBuildApplyConfiguration] +} + +// newMachineOSBuilds returns a MachineOSBuilds +func newMachineOSBuilds(c *MachineconfigurationV1Client) *machineOSBuilds { + return &machineOSBuilds{ + gentype.NewClientWithListAndApply[*v1.MachineOSBuild, *v1.MachineOSBuildList, *machineconfigurationv1.MachineOSBuildApplyConfiguration]( + "machineosbuilds", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.MachineOSBuild { return &v1.MachineOSBuild{} }, + func() *v1.MachineOSBuildList { return &v1.MachineOSBuildList{} }), + } +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosconfig.go new file mode 100644 index 0000000000..0bc3bf4645 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/clientset/versioned/typed/machineconfiguration/v1/machineosconfig.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/machineconfiguration/v1" + machineconfigurationv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1" + scheme "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// MachineOSConfigsGetter has a method to return a MachineOSConfigInterface. +// A group's client should implement this interface. +type MachineOSConfigsGetter interface { + MachineOSConfigs() MachineOSConfigInterface +} + +// MachineOSConfigInterface has methods to work with MachineOSConfig resources. +type MachineOSConfigInterface interface { + Create(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.CreateOptions) (*v1.MachineOSConfig, error) + Update(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.UpdateOptions) (*v1.MachineOSConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, machineOSConfig *v1.MachineOSConfig, opts metav1.UpdateOptions) (*v1.MachineOSConfig, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MachineOSConfig, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.MachineOSConfigList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MachineOSConfig, err error) + Apply(ctx context.Context, machineOSConfig *machineconfigurationv1.MachineOSConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSConfig, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, machineOSConfig *machineconfigurationv1.MachineOSConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MachineOSConfig, err error) + MachineOSConfigExpansion +} + +// machineOSConfigs implements MachineOSConfigInterface +type machineOSConfigs struct { + *gentype.ClientWithListAndApply[*v1.MachineOSConfig, *v1.MachineOSConfigList, *machineconfigurationv1.MachineOSConfigApplyConfiguration] +} + +// newMachineOSConfigs returns a MachineOSConfigs +func newMachineOSConfigs(c *MachineconfigurationV1Client) *machineOSConfigs { + return &machineOSConfigs{ + gentype.NewClientWithListAndApply[*v1.MachineOSConfig, *v1.MachineOSConfigList, *machineconfigurationv1.MachineOSConfigApplyConfiguration]( + "machineosconfigs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.MachineOSConfig { return &v1.MachineOSConfig{} }, + func() *v1.MachineOSConfigList { return &v1.MachineOSConfigList{} }), + } +} diff --git 
a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go index 2c95cc84ef..6f4c64ba3d 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/generic.go @@ -48,6 +48,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("machineconfigpools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineConfigPools().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("machineosbuilds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineOSBuilds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("machineosconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Machineconfiguration().V1().MachineOSConfigs().Informer()}, nil // Group=machineconfiguration.openshift.io, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("machineconfignodes"): diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go index f0613fa8a8..abf342f5cd 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/interface.go @@ -18,6 +18,10 @@ type Interface 
interface { MachineConfigs() MachineConfigInformer // MachineConfigPools returns a MachineConfigPoolInformer. MachineConfigPools() MachineConfigPoolInformer + // MachineOSBuilds returns a MachineOSBuildInformer. + MachineOSBuilds() MachineOSBuildInformer + // MachineOSConfigs returns a MachineOSConfigInformer. + MachineOSConfigs() MachineOSConfigInformer } type version struct { @@ -55,3 +59,13 @@ func (v *version) MachineConfigs() MachineConfigInformer { func (v *version) MachineConfigPools() MachineConfigPoolInformer { return &machineConfigPoolInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// MachineOSBuilds returns a MachineOSBuildInformer. +func (v *version) MachineOSBuilds() MachineOSBuildInformer { + return &machineOSBuildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// MachineOSConfigs returns a MachineOSConfigInformer. +func (v *version) MachineOSConfigs() MachineOSConfigInformer { + return &machineOSConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go new file mode 100644 index 0000000000..5013801c2c --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosbuild.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + versioned "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// MachineOSBuildInformer provides access to a shared informer and lister for +// MachineOSBuilds. +type MachineOSBuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.MachineOSBuildLister +} + +type machineOSBuildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMachineOSBuildInformer constructs a new informer for MachineOSBuild type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMachineOSBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMachineOSBuildInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMachineOSBuildInformer constructs a new informer for MachineOSBuild type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMachineOSBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSBuilds().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSBuilds().Watch(context.TODO(), options) + }, + }, + &machineconfigurationv1.MachineOSBuild{}, + resyncPeriod, + indexers, + ) +} + +func (f *machineOSBuildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMachineOSBuildInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *machineOSBuildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&machineconfigurationv1.MachineOSBuild{}, f.defaultInformer) +} + +func (f *machineOSBuildInformer) Lister() v1.MachineOSBuildLister { + return v1.NewMachineOSBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go new file mode 100644 index 0000000000..8fddfd8228 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1/machineosconfig.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + machineconfigurationv1 "github.com/openshift/api/machineconfiguration/v1" + versioned "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/machineconfiguration/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// MachineOSConfigInformer provides access to a shared informer and lister for +// MachineOSConfigs. +type MachineOSConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.MachineOSConfigLister +} + +type machineOSConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMachineOSConfigInformer constructs a new informer for MachineOSConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMachineOSConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMachineOSConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMachineOSConfigInformer constructs a new informer for MachineOSConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMachineOSConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.MachineconfigurationV1().MachineOSConfigs().Watch(context.TODO(), options) + }, + }, + &machineconfigurationv1.MachineOSConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *machineOSConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMachineOSConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *machineOSConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&machineconfigurationv1.MachineOSConfig{}, f.defaultInformer) +} + +func (f *machineOSConfigInformer) Lister() v1.MachineOSConfigLister { + return v1.NewMachineOSConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go index 8882e1ec19..5ed9d8f07c 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/expansion_generated.go @@ -21,3 +21,11 @@ type MachineConfigListerExpansion 
interface{} // MachineConfigPoolListerExpansion allows custom methods to be added to // MachineConfigPoolLister. type MachineConfigPoolListerExpansion interface{} + +// MachineOSBuildListerExpansion allows custom methods to be added to +// MachineOSBuildLister. +type MachineOSBuildListerExpansion interface{} + +// MachineOSConfigListerExpansion allows custom methods to be added to +// MachineOSConfigLister. +type MachineOSConfigListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosbuild.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosbuild.go new file mode 100644 index 0000000000..dca053fac1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosbuild.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// MachineOSBuildLister helps list MachineOSBuilds. +// All objects returned here must be treated as read-only. +type MachineOSBuildLister interface { + // List lists all MachineOSBuilds in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.MachineOSBuild, err error) + // Get retrieves the MachineOSBuild from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.MachineOSBuild, error) + MachineOSBuildListerExpansion +} + +// machineOSBuildLister implements the MachineOSBuildLister interface. +type machineOSBuildLister struct { + listers.ResourceIndexer[*v1.MachineOSBuild] +} + +// NewMachineOSBuildLister returns a new MachineOSBuildLister. 
+func NewMachineOSBuildLister(indexer cache.Indexer) MachineOSBuildLister { + return &machineOSBuildLister{listers.New[*v1.MachineOSBuild](indexer, v1.Resource("machineosbuild"))} +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosconfig.go b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosconfig.go new file mode 100644 index 0000000000..219e169aa8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1/machineosconfig.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// MachineOSConfigLister helps list MachineOSConfigs. +// All objects returned here must be treated as read-only. +type MachineOSConfigLister interface { + // List lists all MachineOSConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.MachineOSConfig, err error) + // Get retrieves the MachineOSConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.MachineOSConfig, error) + MachineOSConfigListerExpansion +} + +// machineOSConfigLister implements the MachineOSConfigLister interface. +type machineOSConfigLister struct { + listers.ResourceIndexer[*v1.MachineOSConfig] +} + +// NewMachineOSConfigLister returns a new MachineOSConfigLister. 
+func NewMachineOSConfigLister(indexer cache.Indexer) MachineOSConfigLister { + return &machineOSConfigLister{listers.New[*v1.MachineOSConfig](indexer, v1.Resource("machineosconfig"))} +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index 60be55fbb0..3232279642 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -1542,9 +1542,23 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.IPsecConfig map: fields: + - name: full + type: + namedType: com.github.openshift.api.operator.v1.IPsecFullModeConfig - name: mode type: scalar: string + unions: + - discriminator: mode + fields: + - fieldName: full + discriminatorValue: Full +- name: com.github.openshift.api.operator.v1.IPsecFullModeConfig + map: + fields: + - name: encapsulation + type: + scalar: string - name: com.github.openshift.api.operator.v1.IPv4GatewayConfig map: fields: @@ -1775,6 +1789,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpHeaders type: namedType: com.github.openshift.api.operator.v1.IngressControllerHTTPHeaders + - name: idleConnectionTerminationPolicy + type: + scalar: string + default: Immediate - name: logging type: namedType: com.github.openshift.api.operator.v1.IngressControllerLogging @@ -2897,6 +2915,81 @@ var schemaYAML = typed.YAMLObject(`types: - name: latestAvailableRevision type: scalar: numeric +- name: com.github.openshift.api.operator.v1.OLM + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1.OLMSpec + default: {} + - name: 
status + type: + namedType: com.github.openshift.api.operator.v1.OLMStatus + default: {} +- name: com.github.openshift.api.operator.v1.OLMSpec + map: + fields: + - name: logLevel + type: + scalar: string + - name: managementState + type: + scalar: string + default: "" + - name: observedConfig + type: + namedType: __untyped_atomic_ + - name: operatorLogLevel + type: + scalar: string + - name: unsupportedConfigOverrides + type: + namedType: __untyped_atomic_ +- name: com.github.openshift.api.operator.v1.OLMStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.OperatorCondition + elementRelationship: associative + keys: + - type + - name: generations + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.GenerationStatus + elementRelationship: associative + keys: + - group + - resource + - namespace + - name + - name: latestAvailableRevision + type: + scalar: numeric + - name: observedGeneration + type: + scalar: numeric + - name: readyReplicas + type: + scalar: numeric + default: 0 + - name: version + type: + scalar: string - name: com.github.openshift.api.operator.v1.OVNKubernetesConfig map: fields: @@ -3111,7 +3204,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.OpenStackLoadBalancerParameters map: fields: - - name: loadBalancerIP + - name: floatingIP type: scalar: string - name: com.github.openshift.api.operator.v1.OperatorCondition @@ -3811,6 +3904,39 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + default: {} + - 
name: status + type: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + default: {} +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorSpec + map: + fields: + - name: operatorLogLevel + type: + scalar: string +- name: com.github.openshift.api.operator.v1alpha1.ClusterVersionOperatorStatus + map: + fields: + - name: observedGeneration + type: + scalar: numeric - name: com.github.openshift.api.operator.v1alpha1.EtcdBackup map: fields: diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go index fc57030c73..e8ed1738a4 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollerspec.go @@ -13,23 +13,24 @@ import ( // IngressControllerSpecApplyConfiguration represents a declarative configuration of the IngressControllerSpec type for use // with apply. 
type IngressControllerSpecApplyConfiguration struct { - Domain *string `json:"domain,omitempty"` - HttpErrorCodePages *v1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` - Replicas *int32 `json:"replicas,omitempty"` - EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` - DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` - NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` - NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` - TLSSecurityProfile *v1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` - ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` - RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` - Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` - HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` - HTTPEmptyRequestsPolicy *apioperatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` - TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` - UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` - HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + Domain *string `json:"domain,omitempty"` + HttpErrorCodePages *v1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` + Replicas *int32 `json:"replicas,omitempty"` + EndpointPublishingStrategy *EndpointPublishingStrategyApplyConfiguration `json:"endpointPublishingStrategy,omitempty"` + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration 
`json:"namespaceSelector,omitempty"` + RouteSelector *metav1.LabelSelectorApplyConfiguration `json:"routeSelector,omitempty"` + NodePlacement *NodePlacementApplyConfiguration `json:"nodePlacement,omitempty"` + TLSSecurityProfile *v1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + ClientTLS *ClientTLSApplyConfiguration `json:"clientTLS,omitempty"` + RouteAdmission *RouteAdmissionPolicyApplyConfiguration `json:"routeAdmission,omitempty"` + Logging *IngressControllerLoggingApplyConfiguration `json:"logging,omitempty"` + HTTPHeaders *IngressControllerHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` + HTTPEmptyRequestsPolicy *apioperatorv1.HTTPEmptyRequestsPolicy `json:"httpEmptyRequestsPolicy,omitempty"` + TuningOptions *IngressControllerTuningOptionsApplyConfiguration `json:"tuningOptions,omitempty"` + UnsupportedConfigOverrides *runtime.RawExtension `json:"unsupportedConfigOverrides,omitempty"` + HTTPCompression *HTTPCompressionPolicyApplyConfiguration `json:"httpCompression,omitempty"` + IdleConnectionTerminationPolicy *apioperatorv1.IngressControllerConnectionTerminationPolicy `json:"idleConnectionTerminationPolicy,omitempty"` } // IngressControllerSpecApplyConfiguration constructs a declarative configuration of the IngressControllerSpec type for use with @@ -173,3 +174,11 @@ func (b *IngressControllerSpecApplyConfiguration) WithHTTPCompression(value *HTT b.HTTPCompression = value return b } + +// WithIdleConnectionTerminationPolicy sets the IdleConnectionTerminationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IdleConnectionTerminationPolicy field is set to the value of the last call. 
+func (b *IngressControllerSpecApplyConfiguration) WithIdleConnectionTerminationPolicy(value apioperatorv1.IngressControllerConnectionTerminationPolicy) *IngressControllerSpecApplyConfiguration { + b.IdleConnectionTerminationPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go index ebe738b9f9..e6e7317fe8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecconfig.go @@ -9,7 +9,8 @@ import ( // IPsecConfigApplyConfiguration represents a declarative configuration of the IPsecConfig type for use // with apply. type IPsecConfigApplyConfiguration struct { - Mode *v1.IPsecMode `json:"mode,omitempty"` + Mode *v1.IPsecMode `json:"mode,omitempty"` + Full *IPsecFullModeConfigApplyConfiguration `json:"full,omitempty"` } // IPsecConfigApplyConfiguration constructs a declarative configuration of the IPsecConfig type for use with @@ -25,3 +26,11 @@ func (b *IPsecConfigApplyConfiguration) WithMode(value v1.IPsecMode) *IPsecConfi b.Mode = &value return b } + +// WithFull sets the Full field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Full field is set to the value of the last call. 
+func (b *IPsecConfigApplyConfiguration) WithFull(value *IPsecFullModeConfigApplyConfiguration) *IPsecConfigApplyConfiguration { + b.Full = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go new file mode 100644 index 0000000000..afdb76c288 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ipsecfullmodeconfig.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// IPsecFullModeConfigApplyConfiguration represents a declarative configuration of the IPsecFullModeConfig type for use +// with apply. +type IPsecFullModeConfigApplyConfiguration struct { + Encapsulation *v1.Encapsulation `json:"encapsulation,omitempty"` +} + +// IPsecFullModeConfigApplyConfiguration constructs a declarative configuration of the IPsecFullModeConfig type for use with +// apply. +func IPsecFullModeConfig() *IPsecFullModeConfigApplyConfiguration { + return &IPsecFullModeConfigApplyConfiguration{} +} + +// WithEncapsulation sets the Encapsulation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Encapsulation field is set to the value of the last call. 
+func (b *IPsecFullModeConfigApplyConfiguration) WithEncapsulation(value v1.Encapsulation) *IPsecFullModeConfigApplyConfiguration { + b.Encapsulation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go new file mode 100644 index 0000000000..d1fb739538 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olm.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apioperatorv1 "github.com/openshift/api/operator/v1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// OLMApplyConfiguration represents a declarative configuration of the OLM type for use +// with apply. +type OLMApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *OLMSpecApplyConfiguration `json:"spec,omitempty"` + Status *OLMStatusApplyConfiguration `json:"status,omitempty"` +} + +// OLM constructs a declarative configuration of the OLM type for use with +// apply. +func OLM(name string) *OLMApplyConfiguration { + b := &OLMApplyConfiguration{} + b.WithName(name) + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b +} + +// ExtractOLM extracts the applied configuration owned by fieldManager from +// oLM. If no managedFields are found in oLM for fieldManager, a +// OLMApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// oLM must be a unmodified OLM API object that was retrieved from the Kubernetes API. +// ExtractOLM provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractOLM(oLM *apioperatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "") +} + +// ExtractOLMStatus is the same as ExtractOLM except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractOLMStatus(oLM *apioperatorv1.OLM, fieldManager string) (*OLMApplyConfiguration, error) { + return extractOLM(oLM, fieldManager, "status") +} + +func extractOLM(oLM *apioperatorv1.OLM, fieldManager string, subresource string) (*OLMApplyConfiguration, error) { + b := &OLMApplyConfiguration{} + err := managedfields.ExtractInto(oLM, internal.Parser().Type("com.github.openshift.api.operator.v1.OLM"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(oLM.Name) + + b.WithKind("OLM") + b.WithAPIVersion("operator.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithKind(value string) *OLMApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithAPIVersion(value string) *OLMApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGenerateName(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithNamespace(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithUID(value types.UID) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithResourceVersion(value string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithGeneration(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *OLMApplyConfiguration) WithCreationTimestamp(value metav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *OLMApplyConfiguration) WithLabels(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *OLMApplyConfiguration) WithAnnotations(entries map[string]string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *OLMApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *OLMApplyConfiguration) WithFinalizers(values ...string) *OLMApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *OLMApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithSpec(value *OLMSpecApplyConfiguration) *OLMApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *OLMApplyConfiguration) WithStatus(value *OLMStatusApplyConfiguration) *OLMApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *OLMApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go new file mode 100644 index 0000000000..6639c0c46c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmspec.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OLMSpecApplyConfiguration represents a declarative configuration of the OLMSpec type for use +// with apply. +type OLMSpecApplyConfiguration struct { + OperatorSpecApplyConfiguration `json:",inline"` +} + +// OLMSpecApplyConfiguration constructs a declarative configuration of the OLMSpec type for use with +// apply. +func OLMSpec() *OLMSpecApplyConfiguration { + return &OLMSpecApplyConfiguration{} +} + +// WithManagementState sets the ManagementState field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManagementState field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithManagementState(value operatorv1.ManagementState) *OLMSpecApplyConfiguration { + b.ManagementState = &value + return b +} + +// WithLogLevel sets the LogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LogLevel field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.LogLevel = &value + return b +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
+func (b *OLMSpecApplyConfiguration) WithOperatorLogLevel(value operatorv1.LogLevel) *OLMSpecApplyConfiguration { + b.OperatorLogLevel = &value + return b +} + +// WithUnsupportedConfigOverrides sets the UnsupportedConfigOverrides field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UnsupportedConfigOverrides field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithUnsupportedConfigOverrides(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.UnsupportedConfigOverrides = &value + return b +} + +// WithObservedConfig sets the ObservedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedConfig field is set to the value of the last call. +func (b *OLMSpecApplyConfiguration) WithObservedConfig(value runtime.RawExtension) *OLMSpecApplyConfiguration { + b.ObservedConfig = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go new file mode 100644 index 0000000000..be8e647f3a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/olmstatus.go @@ -0,0 +1,73 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// OLMStatusApplyConfiguration represents a declarative configuration of the OLMStatus type for use +// with apply. +type OLMStatusApplyConfiguration struct { + OperatorStatusApplyConfiguration `json:",inline"` +} + +// OLMStatusApplyConfiguration constructs a declarative configuration of the OLMStatus type for use with +// apply. 
+func OLMStatus() *OLMStatusApplyConfiguration { + return &OLMStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithObservedGeneration(value int64) *OLMStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *OLMStatusApplyConfiguration) WithConditions(values ...*OperatorConditionApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithVersion(value string) *OLMStatusApplyConfiguration { + b.Version = &value + return b +} + +// WithReadyReplicas sets the ReadyReplicas field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyReplicas field is set to the value of the last call. 
+func (b *OLMStatusApplyConfiguration) WithReadyReplicas(value int32) *OLMStatusApplyConfiguration { + b.ReadyReplicas = &value + return b +} + +// WithLatestAvailableRevision sets the LatestAvailableRevision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LatestAvailableRevision field is set to the value of the last call. +func (b *OLMStatusApplyConfiguration) WithLatestAvailableRevision(value int32) *OLMStatusApplyConfiguration { + b.LatestAvailableRevision = &value + return b +} + +// WithGenerations adds the given value to the Generations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Generations field. +func (b *OLMStatusApplyConfiguration) WithGenerations(values ...*GenerationStatusApplyConfiguration) *OLMStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithGenerations") + } + b.Generations = append(b.Generations, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go index 07d523555b..811b2330b7 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/openstackloadbalancerparameters.go @@ -5,7 +5,7 @@ package v1 // OpenStackLoadBalancerParametersApplyConfiguration represents a declarative configuration of the OpenStackLoadBalancerParameters type for use // with apply. 
type OpenStackLoadBalancerParametersApplyConfiguration struct { - LoadBalancerIP *string `json:"loadBalancerIP,omitempty"` + FloatingIP *string `json:"floatingIP,omitempty"` } // OpenStackLoadBalancerParametersApplyConfiguration constructs a declarative configuration of the OpenStackLoadBalancerParameters type for use with @@ -14,10 +14,10 @@ func OpenStackLoadBalancerParameters() *OpenStackLoadBalancerParametersApplyConf return &OpenStackLoadBalancerParametersApplyConfiguration{} } -// WithLoadBalancerIP sets the LoadBalancerIP field in the declarative configuration to the given value +// WithFloatingIP sets the FloatingIP field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LoadBalancerIP field is set to the value of the last call. -func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithLoadBalancerIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { - b.LoadBalancerIP = &value +// If called multiple times, the FloatingIP field is set to the value of the last call. +func (b *OpenStackLoadBalancerParametersApplyConfiguration) WithFloatingIP(value string) *OpenStackLoadBalancerParametersApplyConfiguration { + b.FloatingIP = &value return b } diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperator.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperator.go new file mode 100644 index 0000000000..b6d9afcf43 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperator.go @@ -0,0 +1,246 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + internal "github.com/openshift/client-go/operator/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterVersionOperatorApplyConfiguration represents a declarative configuration of the ClusterVersionOperator type for use +// with apply. +type ClusterVersionOperatorApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterVersionOperatorSpecApplyConfiguration `json:"spec,omitempty"` + Status *ClusterVersionOperatorStatusApplyConfiguration `json:"status,omitempty"` +} + +// ClusterVersionOperator constructs a declarative configuration of the ClusterVersionOperator type for use with +// apply. +func ClusterVersionOperator(name string) *ClusterVersionOperatorApplyConfiguration { + b := &ClusterVersionOperatorApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterVersionOperator") + b.WithAPIVersion("operator.openshift.io/v1alpha1") + return b +} + +// ExtractClusterVersionOperator extracts the applied configuration owned by fieldManager from +// clusterVersionOperator. If no managedFields are found in clusterVersionOperator for fieldManager, a +// ClusterVersionOperatorApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterVersionOperator must be a unmodified ClusterVersionOperator API object that was retrieved from the Kubernetes API. 
+// ExtractClusterVersionOperator provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterVersionOperator(clusterVersionOperator *operatorv1alpha1.ClusterVersionOperator, fieldManager string) (*ClusterVersionOperatorApplyConfiguration, error) { + return extractClusterVersionOperator(clusterVersionOperator, fieldManager, "") +} + +// ExtractClusterVersionOperatorStatus is the same as ExtractClusterVersionOperator except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterVersionOperatorStatus(clusterVersionOperator *operatorv1alpha1.ClusterVersionOperator, fieldManager string) (*ClusterVersionOperatorApplyConfiguration, error) { + return extractClusterVersionOperator(clusterVersionOperator, fieldManager, "status") +} + +func extractClusterVersionOperator(clusterVersionOperator *operatorv1alpha1.ClusterVersionOperator, fieldManager string, subresource string) (*ClusterVersionOperatorApplyConfiguration, error) { + b := &ClusterVersionOperatorApplyConfiguration{} + err := managedfields.ExtractInto(clusterVersionOperator, internal.Parser().Type("com.github.openshift.api.operator.v1alpha1.ClusterVersionOperator"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterVersionOperator.Name) + + b.WithKind("ClusterVersionOperator") + b.WithAPIVersion("operator.openshift.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ClusterVersionOperatorApplyConfiguration) WithKind(value string) *ClusterVersionOperatorApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithAPIVersion(value string) *ClusterVersionOperatorApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithName(value string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithGenerateName(value string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ClusterVersionOperatorApplyConfiguration) WithNamespace(value string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithUID(value types.UID) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithResourceVersion(value string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithGeneration(value int64) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ClusterVersionOperatorApplyConfiguration) WithLabels(entries map[string]string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterVersionOperatorApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ClusterVersionOperatorApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterVersionOperatorApplyConfiguration) WithFinalizers(values ...string) *ClusterVersionOperatorApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterVersionOperatorApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithSpec(value *ClusterVersionOperatorSpecApplyConfiguration) *ClusterVersionOperatorApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ClusterVersionOperatorApplyConfiguration) WithStatus(value *ClusterVersionOperatorStatusApplyConfiguration) *ClusterVersionOperatorApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterVersionOperatorApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorspec.go new file mode 100644 index 0000000000..61a64b1ed7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorspec.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// ClusterVersionOperatorSpecApplyConfiguration represents a declarative configuration of the ClusterVersionOperatorSpec type for use +// with apply. +type ClusterVersionOperatorSpecApplyConfiguration struct { + OperatorLogLevel *v1.LogLevel `json:"operatorLogLevel,omitempty"` +} + +// ClusterVersionOperatorSpecApplyConfiguration constructs a declarative configuration of the ClusterVersionOperatorSpec type for use with +// apply. +func ClusterVersionOperatorSpec() *ClusterVersionOperatorSpecApplyConfiguration { + return &ClusterVersionOperatorSpecApplyConfiguration{} +} + +// WithOperatorLogLevel sets the OperatorLogLevel field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorLogLevel field is set to the value of the last call. 
+func (b *ClusterVersionOperatorSpecApplyConfiguration) WithOperatorLogLevel(value v1.LogLevel) *ClusterVersionOperatorSpecApplyConfiguration { + b.OperatorLogLevel = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorstatus.go new file mode 100644 index 0000000000..cad0232efc --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1/clusterversionoperatorstatus.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterVersionOperatorStatusApplyConfiguration represents a declarative configuration of the ClusterVersionOperatorStatus type for use +// with apply. +type ClusterVersionOperatorStatusApplyConfiguration struct { + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` +} + +// ClusterVersionOperatorStatusApplyConfiguration constructs a declarative configuration of the ClusterVersionOperatorStatus type for use with +// apply. +func ClusterVersionOperatorStatus() *ClusterVersionOperatorStatusApplyConfiguration { + return &ClusterVersionOperatorStatusApplyConfiguration{} +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. 
+func (b *ClusterVersionOperatorStatusApplyConfiguration) WithObservedGeneration(value int64) *ClusterVersionOperatorStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/utils.go index 21e0148a1a..82203a6d6c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/utils.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/utils.go @@ -210,6 +210,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &operatorv1.IPFIXConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("IPsecConfig"): return &operatorv1.IPsecConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("IPsecFullModeConfig"): + return &operatorv1.IPsecFullModeConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("IPv4GatewayConfig"): return &operatorv1.IPv4GatewayConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("IPv4OVNKubernetesConfig"): @@ -302,6 +304,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &operatorv1.NodeStatusApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("OAuthAPIServerStatus"): return &operatorv1.OAuthAPIServerStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OLM"): + return &operatorv1.OLMApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OLMSpec"): + return &operatorv1.OLMSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("OLMStatus"): + return &operatorv1.OLMStatusApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("OpenShiftAPIServer"): return &operatorv1.OpenShiftAPIServerApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("OpenShiftAPIServerSpec"): @@ -412,6 +420,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { // Group=operator.openshift.io, Version=v1alpha1 case 
v1alpha1.SchemeGroupVersion.WithKind("BackupJobReference"): return &operatorv1alpha1.BackupJobReferenceApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterVersionOperator"): + return &operatorv1alpha1.ClusterVersionOperatorApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterVersionOperatorSpec"): + return &operatorv1alpha1.ClusterVersionOperatorSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ClusterVersionOperatorStatus"): + return &operatorv1alpha1.ClusterVersionOperatorStatusApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("EtcdBackup"): return &operatorv1alpha1.EtcdBackupApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("EtcdBackupSpec"): diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_olm.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_olm.go new file mode 100644 index 0000000000..2b5f3806cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_olm.go @@ -0,0 +1,170 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOLMs implements OLMInterface +type FakeOLMs struct { + Fake *FakeOperatorV1 +} + +var olmsResource = v1.SchemeGroupVersion.WithResource("olms") + +var olmsKind = v1.SchemeGroupVersion.WithKind("OLM") + +// Get takes name of the oLM, and returns the corresponding oLM object, and an error if there is any. 
+func (c *FakeOLMs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OLM, err error) { + emptyResult := &v1.OLM{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(olmsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// List takes label and field selectors, and returns the list of OLMs that match those selectors. +func (c *FakeOLMs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OLMList, err error) { + emptyResult := &v1.OLMList{} + obj, err := c.Fake. + Invokes(testing.NewRootListActionWithOptions(olmsResource, olmsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.OLMList{ListMeta: obj.(*v1.OLMList).ListMeta} + for _, item := range obj.(*v1.OLMList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested oLMs. +func (c *FakeOLMs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(olmsResource, opts)) +} + +// Create takes the representation of a oLM and creates it. Returns the server's representation of the oLM, and an error, if there is any. +func (c *FakeOLMs) Create(ctx context.Context, oLM *v1.OLM, opts metav1.CreateOptions) (result *v1.OLM, err error) { + emptyResult := &v1.OLM{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(olmsResource, oLM, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// Update takes the representation of a oLM and updates it. Returns the server's representation of the oLM, and an error, if there is any. 
+func (c *FakeOLMs) Update(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (result *v1.OLM, err error) { + emptyResult := &v1.OLM{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(olmsResource, oLM, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOLMs) UpdateStatus(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (result *v1.OLM, err error) { + emptyResult := &v1.OLM{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(olmsResource, "status", oLM, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// Delete takes name of the oLM and deletes it. Returns an error if one occurs. +func (c *FakeOLMs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(olmsResource, name, opts), &v1.OLM{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOLMs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(olmsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.OLMList{}) + return err +} + +// Patch applies the patch and returns the patched oLM. +func (c *FakeOLMs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OLM, err error) { + emptyResult := &v1.OLM{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(olmsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied oLM. +func (c *FakeOLMs) Apply(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) { + if oLM == nil { + return nil, fmt.Errorf("oLM provided to Apply must not be nil") + } + data, err := json.Marshal(oLM) + if err != nil { + return nil, err + } + name := oLM.Name + if name == nil { + return nil, fmt.Errorf("oLM.Name must be provided to Apply") + } + emptyResult := &v1.OLM{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(olmsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeOLMs) ApplyStatus(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) { + if oLM == nil { + return nil, fmt.Errorf("oLM provided to Apply must not be nil") + } + data, err := json.Marshal(oLM) + if err != nil { + return nil, err + } + name := oLM.Name + if name == nil { + return nil, fmt.Errorf("oLM.Name must be provided to Apply") + } + emptyResult := &v1.OLM{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(olmsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1.OLM), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go index 45d39303ed..c852656105 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go @@ -76,6 +76,10 @@ func (c *FakeOperatorV1) Networks() v1.NetworkInterface { return &FakeNetworks{c} } +func (c *FakeOperatorV1) OLMs() v1.OLMInterface { + return &FakeOLMs{c} +} + func (c *FakeOperatorV1) OpenShiftAPIServers() v1.OpenShiftAPIServerInterface { return &FakeOpenShiftAPIServers{c} } diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go index dd8a5efd39..67d774a2a7 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go @@ -34,6 +34,8 @@ type MachineConfigurationExpansion interface{} type NetworkExpansion interface{} +type OLMExpansion interface{} + type OpenShiftAPIServerExpansion interface{} type OpenShiftControllerManagerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go new file mode 100644 
index 0000000000..6adb68c89a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/olm.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/operator/v1" + operatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// OLMsGetter has a method to return a OLMInterface. +// A group's client should implement this interface. +type OLMsGetter interface { + OLMs() OLMInterface +} + +// OLMInterface has methods to work with OLM resources. +type OLMInterface interface { + Create(ctx context.Context, oLM *v1.OLM, opts metav1.CreateOptions) (*v1.OLM, error) + Update(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (*v1.OLM, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, oLM *v1.OLM, opts metav1.UpdateOptions) (*v1.OLM, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OLM, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.OLMList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OLM, err error) + Apply(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, oLM *operatorv1.OLMApplyConfiguration, opts metav1.ApplyOptions) (result *v1.OLM, err error) + OLMExpansion +} + +// oLMs implements OLMInterface +type oLMs struct { + *gentype.ClientWithListAndApply[*v1.OLM, *v1.OLMList, *operatorv1.OLMApplyConfiguration] +} + +// newOLMs returns a OLMs +func newOLMs(c *OperatorV1Client) *oLMs { + return &oLMs{ + gentype.NewClientWithListAndApply[*v1.OLM, *v1.OLMList, *operatorv1.OLMApplyConfiguration]( + "olms", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1.OLM { return &v1.OLM{} }, + func() *v1.OLMList { return &v1.OLMList{} }), + } +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go index 996fad76d7..9ba66e07ce 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go @@ -28,6 +28,7 @@ 
type OperatorV1Interface interface { KubeStorageVersionMigratorsGetter MachineConfigurationsGetter NetworksGetter + OLMsGetter OpenShiftAPIServersGetter OpenShiftControllerManagersGetter ServiceCAsGetter @@ -105,6 +106,10 @@ func (c *OperatorV1Client) Networks() NetworkInterface { return newNetworks(c) } +func (c *OperatorV1Client) OLMs() OLMInterface { + return newOLMs(c) +} + func (c *OperatorV1Client) OpenShiftAPIServers() OpenShiftAPIServerInterface { return newOpenShiftAPIServers(c) } diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/clusterversionoperator.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/clusterversionoperator.go new file mode 100644 index 0000000000..cb105fe549 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/clusterversionoperator.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// ClusterVersionOperatorsGetter has a method to return a ClusterVersionOperatorInterface. +// A group's client should implement this interface. +type ClusterVersionOperatorsGetter interface { + ClusterVersionOperators() ClusterVersionOperatorInterface +} + +// ClusterVersionOperatorInterface has methods to work with ClusterVersionOperator resources. 
+type ClusterVersionOperatorInterface interface { + Create(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.CreateOptions) (*v1alpha1.ClusterVersionOperator, error) + Update(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.UpdateOptions) (*v1alpha1.ClusterVersionOperator, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.UpdateOptions) (*v1alpha1.ClusterVersionOperator, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterVersionOperator, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterVersionOperatorList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterVersionOperator, err error) + Apply(ctx context.Context, clusterVersionOperator *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterVersionOperator, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, clusterVersionOperator *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterVersionOperator, err error) + ClusterVersionOperatorExpansion +} + +// clusterVersionOperators implements ClusterVersionOperatorInterface +type clusterVersionOperators struct { + *gentype.ClientWithListAndApply[*v1alpha1.ClusterVersionOperator, *v1alpha1.ClusterVersionOperatorList, *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration] +} + +// newClusterVersionOperators returns a ClusterVersionOperators +func newClusterVersionOperators(c *OperatorV1alpha1Client) *clusterVersionOperators { + return &clusterVersionOperators{ + gentype.NewClientWithListAndApply[*v1alpha1.ClusterVersionOperator, *v1alpha1.ClusterVersionOperatorList, *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration]( + "clusterversionoperators", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *v1alpha1.ClusterVersionOperator { return &v1alpha1.ClusterVersionOperator{} }, + func() *v1alpha1.ClusterVersionOperatorList { return &v1alpha1.ClusterVersionOperatorList{} }), + } +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_clusterversionoperator.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_clusterversionoperator.go new file mode 100644 index 0000000000..da8d9ea5b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_clusterversionoperator.go @@ -0,0 +1,170 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + operatorv1alpha1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterVersionOperators implements ClusterVersionOperatorInterface +type FakeClusterVersionOperators struct { + Fake *FakeOperatorV1alpha1 +} + +var clusterversionoperatorsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterversionoperators") + +var clusterversionoperatorsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterVersionOperator") + +// Get takes name of the clusterVersionOperator, and returns the corresponding clusterVersionOperator object, and an error if there is any. +func (c *FakeClusterVersionOperators) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. + Invokes(testing.NewRootGetActionWithOptions(clusterversionoperatorsResource, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// List takes label and field selectors, and returns the list of ClusterVersionOperators that match those selectors. +func (c *FakeClusterVersionOperators) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterVersionOperatorList, err error) { + emptyResult := &v1alpha1.ClusterVersionOperatorList{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootListActionWithOptions(clusterversionoperatorsResource, clusterversionoperatorsKind, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterVersionOperatorList{ListMeta: obj.(*v1alpha1.ClusterVersionOperatorList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterVersionOperatorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterVersionOperators. +func (c *FakeClusterVersionOperators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchActionWithOptions(clusterversionoperatorsResource, opts)) +} + +// Create takes the representation of a clusterVersionOperator and creates it. Returns the server's representation of the clusterVersionOperator, and an error, if there is any. +func (c *FakeClusterVersionOperators) Create(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.CreateOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. + Invokes(testing.NewRootCreateActionWithOptions(clusterversionoperatorsResource, clusterVersionOperator, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// Update takes the representation of a clusterVersionOperator and updates it. Returns the server's representation of the clusterVersionOperator, and an error, if there is any. 
+func (c *FakeClusterVersionOperators) Update(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.UpdateOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateActionWithOptions(clusterversionoperatorsResource, clusterVersionOperator, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterVersionOperators) UpdateStatus(ctx context.Context, clusterVersionOperator *v1alpha1.ClusterVersionOperator, opts v1.UpdateOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceActionWithOptions(clusterversionoperatorsResource, "status", clusterVersionOperator, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// Delete takes name of the clusterVersionOperator and deletes it. Returns an error if one occurs. +func (c *FakeClusterVersionOperators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusterversionoperatorsResource, name, opts), &v1alpha1.ClusterVersionOperator{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeClusterVersionOperators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionActionWithOptions(clusterversionoperatorsResource, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterVersionOperatorList{}) + return err +} + +// Patch applies the patch and returns the patched clusterVersionOperator. +func (c *FakeClusterVersionOperators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterVersionOperator, err error) { + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterversionoperatorsResource, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterVersionOperator. +func (c *FakeClusterVersionOperators) Apply(ctx context.Context, clusterVersionOperator *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + if clusterVersionOperator == nil { + return nil, fmt.Errorf("clusterVersionOperator provided to Apply must not be nil") + } + data, err := json.Marshal(clusterVersionOperator) + if err != nil { + return nil, err + } + name := clusterVersionOperator.Name + if name == nil { + return nil, fmt.Errorf("clusterVersionOperator.Name must be provided to Apply") + } + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterversionoperatorsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeClusterVersionOperators) ApplyStatus(ctx context.Context, clusterVersionOperator *operatorv1alpha1.ClusterVersionOperatorApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterVersionOperator, err error) { + if clusterVersionOperator == nil { + return nil, fmt.Errorf("clusterVersionOperator provided to Apply must not be nil") + } + data, err := json.Marshal(clusterVersionOperator) + if err != nil { + return nil, err + } + name := clusterVersionOperator.Name + if name == nil { + return nil, fmt.Errorf("clusterVersionOperator.Name must be provided to Apply") + } + emptyResult := &v1alpha1.ClusterVersionOperator{} + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceActionWithOptions(clusterversionoperatorsResource, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(*v1alpha1.ClusterVersionOperator), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go index a9da578ff6..44aa969193 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go @@ -12,6 +12,10 @@ type FakeOperatorV1alpha1 struct { *testing.Fake } +func (c *FakeOperatorV1alpha1) ClusterVersionOperators() v1alpha1.ClusterVersionOperatorInterface { + return &FakeClusterVersionOperators{c} +} + func (c *FakeOperatorV1alpha1) EtcdBackups() v1alpha1.EtcdBackupInterface { return &FakeEtcdBackups{c} } diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go index 95bf40eb56..33de3f9657 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go @@ -2,6 +2,8 @@ package v1alpha1 +type ClusterVersionOperatorExpansion interface{} + type EtcdBackupExpansion interface{} type ImageContentSourcePolicyExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go 
b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go index 999166e956..07a9fbcf7c 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go @@ -12,6 +12,7 @@ import ( type OperatorV1alpha1Interface interface { RESTClient() rest.Interface + ClusterVersionOperatorsGetter EtcdBackupsGetter ImageContentSourcePoliciesGetter OLMsGetter @@ -22,6 +23,10 @@ type OperatorV1alpha1Client struct { restClient rest.Interface } +func (c *OperatorV1alpha1Client) ClusterVersionOperators() ClusterVersionOperatorInterface { + return newClusterVersionOperators(c) +} + func (c *OperatorV1alpha1Client) EtcdBackups() EtcdBackupInterface { return newEtcdBackups(c) } diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go index f255f0f371..fbc83e5ef5 100644 --- a/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go @@ -70,6 +70,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().MachineConfigurations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("networks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().Networks().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("olms"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().OLMs().Informer()}, nil case v1.SchemeGroupVersion.WithResource("openshiftapiservers"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Operator().V1().OpenShiftAPIServers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("openshiftcontrollermanagers"): @@ -84,6 +86,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().Storages().Informer()}, nil // Group=operator.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("clusterversionoperators"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1alpha1().ClusterVersionOperators().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("etcdbackups"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1alpha1().EtcdBackups().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("imagecontentsourcepolicies"): diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go index a6331e803d..c5169b9fb9 100644 --- a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go @@ -40,6 +40,8 @@ type Interface interface { MachineConfigurations() MachineConfigurationInformer // Networks returns a NetworkInformer. Networks() NetworkInformer + // OLMs returns a OLMInformer. + OLMs() OLMInformer // OpenShiftAPIServers returns a OpenShiftAPIServerInformer. OpenShiftAPIServers() OpenShiftAPIServerInformer // OpenShiftControllerManagers returns a OpenShiftControllerManagerInformer. @@ -145,6 +147,11 @@ func (v *version) Networks() NetworkInformer { return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// OLMs returns a OLMInformer. 
+func (v *version) OLMs() OLMInformer { + return &oLMInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // OpenShiftAPIServers returns a OpenShiftAPIServerInformer. func (v *version) OpenShiftAPIServers() OpenShiftAPIServerInformer { return &openShiftAPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/olm.go new file mode 100644 index 0000000000..eafc07f47b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/olm.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + operatorv1 "github.com/openshift/api/operator/v1" + versioned "github.com/openshift/client-go/operator/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/operator/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/operator/listers/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OLMInformer provides access to a shared informer and lister for +// OLMs. +type OLMInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.OLMLister +} + +type oLMInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOLMInformer constructs a new informer for OLM type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewOLMInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOLMInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOLMInformer constructs a new informer for OLM type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredOLMInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().OLMs().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().OLMs().Watch(context.TODO(), options) + }, + }, + &operatorv1.OLM{}, + resyncPeriod, + indexers, + ) +} + +func (f *oLMInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOLMInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oLMInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&operatorv1.OLM{}, f.defaultInformer) +} + +func (f *oLMInformer) Lister() v1.OLMLister { + return v1.NewOLMLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/clusterversionoperator.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/clusterversionoperator.go new file mode 100644 index 0000000000..998622c33e --- 
/dev/null +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/clusterversionoperator.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + versioned "github.com/openshift/client-go/operator/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/operator/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openshift/client-go/operator/listers/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterVersionOperatorInformer provides access to a shared informer and lister for +// ClusterVersionOperators. +type ClusterVersionOperatorInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterVersionOperatorLister +} + +type clusterVersionOperatorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterVersionOperatorInformer constructs a new informer for ClusterVersionOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterVersionOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterVersionOperatorInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterVersionOperatorInformer constructs a new informer for ClusterVersionOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterVersionOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1alpha1().ClusterVersionOperators().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1alpha1().ClusterVersionOperators().Watch(context.TODO(), options) + }, + }, + &operatorv1alpha1.ClusterVersionOperator{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterVersionOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterVersionOperatorInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterVersionOperatorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&operatorv1alpha1.ClusterVersionOperator{}, f.defaultInformer) +} + +func (f *clusterVersionOperatorInformer) Lister() v1alpha1.ClusterVersionOperatorLister { + return v1alpha1.NewClusterVersionOperatorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/interface.go index f3e8998f50..3b52c11824 100644 --- a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/interface.go +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1/interface.go @@ -8,6 +8,8 @@ import ( // Interface 
provides access to all the informers in this group version. type Interface interface { + // ClusterVersionOperators returns a ClusterVersionOperatorInformer. + ClusterVersionOperators() ClusterVersionOperatorInformer // EtcdBackups returns a EtcdBackupInformer. EtcdBackups() EtcdBackupInformer // ImageContentSourcePolicies returns a ImageContentSourcePolicyInformer. @@ -27,6 +29,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ClusterVersionOperators returns a ClusterVersionOperatorInformer. +func (v *version) ClusterVersionOperators() ClusterVersionOperatorInformer { + return &clusterVersionOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // EtcdBackups returns a EtcdBackupInformer. func (v *version) EtcdBackups() EtcdBackupInformer { return &etcdBackupInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go index d2e75d0c36..9690ac240a 100644 --- a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go @@ -70,6 +70,10 @@ type MachineConfigurationListerExpansion interface{} // NetworkLister. type NetworkListerExpansion interface{} +// OLMListerExpansion allows custom methods to be added to +// OLMLister. +type OLMListerExpansion interface{} + // OpenShiftAPIServerListerExpansion allows custom methods to be added to // OpenShiftAPIServerLister. 
type OpenShiftAPIServerListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/olm.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/olm.go new file mode 100644 index 0000000000..c1ce009556 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/olm.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// OLMLister helps list OLMs. +// All objects returned here must be treated as read-only. +type OLMLister interface { + // List lists all OLMs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.OLM, err error) + // Get retrieves the OLM from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.OLM, error) + OLMListerExpansion +} + +// oLMLister implements the OLMLister interface. +type oLMLister struct { + listers.ResourceIndexer[*v1.OLM] +} + +// NewOLMLister returns a new OLMLister. +func NewOLMLister(indexer cache.Indexer) OLMLister { + return &oLMLister{listers.New[*v1.OLM](indexer, v1.Resource("olm"))} +} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/clusterversionoperator.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/clusterversionoperator.go new file mode 100644 index 0000000000..ad504829b6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/clusterversionoperator.go @@ -0,0 +1,32 @@ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// ClusterVersionOperatorLister helps list ClusterVersionOperators. +// All objects returned here must be treated as read-only. +type ClusterVersionOperatorLister interface { + // List lists all ClusterVersionOperators in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ClusterVersionOperator, err error) + // Get retrieves the ClusterVersionOperator from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterVersionOperator, error) + ClusterVersionOperatorListerExpansion +} + +// clusterVersionOperatorLister implements the ClusterVersionOperatorLister interface. +type clusterVersionOperatorLister struct { + listers.ResourceIndexer[*v1alpha1.ClusterVersionOperator] +} + +// NewClusterVersionOperatorLister returns a new ClusterVersionOperatorLister. +func NewClusterVersionOperatorLister(indexer cache.Indexer) ClusterVersionOperatorLister { + return &clusterVersionOperatorLister{listers.New[*v1alpha1.ClusterVersionOperator](indexer, v1alpha1.Resource("clusterversionoperator"))} +} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/expansion_generated.go index 91415d5933..03d9d98683 100644 --- a/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1alpha1/expansion_generated.go @@ -2,6 +2,10 @@ package v1alpha1 +// ClusterVersionOperatorListerExpansion allows custom methods to be added to +// ClusterVersionOperatorLister. 
+type ClusterVersionOperatorListerExpansion interface{} + // EtcdBackupListerExpansion allows custom methods to be added to // EtcdBackupLister. type EtcdBackupListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go new file mode 100644 index 0000000000..3775c9b7a1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/internal/internal.go @@ -0,0 +1,396 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + "fmt" + "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.route.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.route.v1.Route + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.route.v1.RouteSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.route.v1.RouteStatus + default: {} +- name: com.github.openshift.api.route.v1.RouteHTTPHeader + map: + fields: + - name: action + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaderActionUnion + default: {} + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RouteHTTPHeaderActionUnion + map: + fields: + - name: set + type: + namedType: 
com.github.openshift.api.route.v1.RouteSetHTTPHeader + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: set + discriminatorValue: Set +- name: com.github.openshift.api.route.v1.RouteHTTPHeaderActions + map: + fields: + - name: request + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeader + elementRelationship: associative + keys: + - name + - name: response + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeader + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.route.v1.RouteHTTPHeaders + map: + fields: + - name: actions + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaderActions + default: {} +- name: com.github.openshift.api.route.v1.RouteIngress + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteIngressCondition + elementRelationship: associative + keys: + - type + - name: host + type: + scalar: string + - name: routerCanonicalHostname + type: + scalar: string + - name: routerName + type: + scalar: string + - name: wildcardPolicy + type: + scalar: string +- name: com.github.openshift.api.route.v1.RouteIngressCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RoutePort + map: + fields: + - name: targetPort + type: + namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString +- name: com.github.openshift.api.route.v1.RouteSetHTTPHeader + map: + fields: + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.route.v1.RouteSpec + map: + fields: + - name: alternateBackends + 
type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteTargetReference + elementRelationship: associative + keys: + - name + - kind + - name: host + type: + scalar: string + - name: httpHeaders + type: + namedType: com.github.openshift.api.route.v1.RouteHTTPHeaders + - name: path + type: + scalar: string + - name: port + type: + namedType: com.github.openshift.api.route.v1.RoutePort + - name: subdomain + type: + scalar: string + - name: tls + type: + namedType: com.github.openshift.api.route.v1.TLSConfig + - name: to + type: + namedType: com.github.openshift.api.route.v1.RouteTargetReference + default: {} + - name: wildcardPolicy + type: + scalar: string +- name: com.github.openshift.api.route.v1.RouteStatus + map: + fields: + - name: ingress + type: + list: + elementType: + namedType: com.github.openshift.api.route.v1.RouteIngress + elementRelationship: atomic +- name: com.github.openshift.api.route.v1.RouteTargetReference + map: + fields: + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: weight + type: + scalar: numeric +- name: com.github.openshift.api.route.v1.TLSConfig + map: + fields: + - name: caCertificate + type: + scalar: string + - name: certificate + type: + scalar: string + - name: destinationCACertificate + type: + scalar: string + - name: externalCertificate + type: + namedType: com.github.openshift.api.route.v1.LocalObjectReference + - name: insecureEdgeTerminationPolicy + type: + scalar: string + - name: key + type: + scalar: string + - name: termination + type: + scalar: string + default: "" +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + 
type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- 
name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.util.intstr.IntOrString + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go new file mode 100644 index 0000000000..c0b6f455e9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/localobjectreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use +// with apply. +type LocalObjectReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with +// apply. +func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { + return &LocalObjectReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *LocalObjectReferenceApplyConfiguration) WithName(value string) *LocalObjectReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go new file mode 100644 index 0000000000..c08d46b9b5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/route.go @@ -0,0 +1,248 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiroutev1 "github.com/openshift/api/route/v1" + internal "github.com/openshift/client-go/route/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// RouteApplyConfiguration represents a declarative configuration of the Route type for use +// with apply. +type RouteApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *RouteSpecApplyConfiguration `json:"spec,omitempty"` + Status *RouteStatusApplyConfiguration `json:"status,omitempty"` +} + +// Route constructs a declarative configuration of the Route type for use with +// apply. +func Route(name, namespace string) *RouteApplyConfiguration { + b := &RouteApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Route") + b.WithAPIVersion("route.openshift.io/v1") + return b +} + +// ExtractRoute extracts the applied configuration owned by fieldManager from +// route. If no managedFields are found in route for fieldManager, a +// RouteApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// route must be a unmodified Route API object that was retrieved from the Kubernetes API. +// ExtractRoute provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractRoute(route *apiroutev1.Route, fieldManager string) (*RouteApplyConfiguration, error) { + return extractRoute(route, fieldManager, "") +} + +// ExtractRouteStatus is the same as ExtractRoute except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractRouteStatus(route *apiroutev1.Route, fieldManager string) (*RouteApplyConfiguration, error) { + return extractRoute(route, fieldManager, "status") +} + +func extractRoute(route *apiroutev1.Route, fieldManager string, subresource string) (*RouteApplyConfiguration, error) { + b := &RouteApplyConfiguration{} + err := managedfields.ExtractInto(route, internal.Parser().Type("com.github.openshift.api.route.v1.Route"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(route.Name) + b.WithNamespace(route.Namespace) + + b.WithKind("Route") + b.WithAPIVersion("route.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *RouteApplyConfiguration) WithKind(value string) *RouteApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithAPIVersion(value string) *RouteApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithName(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithGenerateName(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *RouteApplyConfiguration) WithNamespace(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithUID(value types.UID) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithResourceVersion(value string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithGeneration(value int64) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *RouteApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *RouteApplyConfiguration) WithLabels(entries map[string]string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *RouteApplyConfiguration) WithAnnotations(entries map[string]string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *RouteApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *RouteApplyConfiguration) WithFinalizers(values ...string) *RouteApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *RouteApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithSpec(value *RouteSpecApplyConfiguration) *RouteApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *RouteApplyConfiguration) WithStatus(value *RouteStatusApplyConfiguration) *RouteApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RouteApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.Name +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go new file mode 100644 index 0000000000..6223a38a14 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheader.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// RouteHTTPHeaderApplyConfiguration represents a declarative configuration of the RouteHTTPHeader type for use +// with apply. +type RouteHTTPHeaderApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Action *RouteHTTPHeaderActionUnionApplyConfiguration `json:"action,omitempty"` +} + +// RouteHTTPHeaderApplyConfiguration constructs a declarative configuration of the RouteHTTPHeader type for use with +// apply. +func RouteHTTPHeader() *RouteHTTPHeaderApplyConfiguration { + return &RouteHTTPHeaderApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouteHTTPHeaderApplyConfiguration) WithName(value string) *RouteHTTPHeaderApplyConfiguration { + b.Name = &value + return b +} + +// WithAction sets the Action field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Action field is set to the value of the last call. +func (b *RouteHTTPHeaderApplyConfiguration) WithAction(value *RouteHTTPHeaderActionUnionApplyConfiguration) *RouteHTTPHeaderApplyConfiguration { + b.Action = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go new file mode 100644 index 0000000000..2a9f4af162 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactions.go @@ -0,0 +1,42 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// RouteHTTPHeaderActionsApplyConfiguration represents a declarative configuration of the RouteHTTPHeaderActions type for use +// with apply. +type RouteHTTPHeaderActionsApplyConfiguration struct { + Response []RouteHTTPHeaderApplyConfiguration `json:"response,omitempty"` + Request []RouteHTTPHeaderApplyConfiguration `json:"request,omitempty"` +} + +// RouteHTTPHeaderActionsApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaderActions type for use with +// apply. +func RouteHTTPHeaderActions() *RouteHTTPHeaderActionsApplyConfiguration { + return &RouteHTTPHeaderActionsApplyConfiguration{} +} + +// WithResponse adds the given value to the Response field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Response field. +func (b *RouteHTTPHeaderActionsApplyConfiguration) WithResponse(values ...*RouteHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResponse") + } + b.Response = append(b.Response, *values[i]) + } + return b +} + +// WithRequest adds the given value to the Request field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Request field. 
+func (b *RouteHTTPHeaderActionsApplyConfiguration) WithRequest(values ...*RouteHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequest") + } + b.Request = append(b.Request, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go new file mode 100644 index 0000000000..6dfe8916b2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaderactionunion.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/route/v1" +) + +// RouteHTTPHeaderActionUnionApplyConfiguration represents a declarative configuration of the RouteHTTPHeaderActionUnion type for use +// with apply. +type RouteHTTPHeaderActionUnionApplyConfiguration struct { + Type *v1.RouteHTTPHeaderActionType `json:"type,omitempty"` + Set *RouteSetHTTPHeaderApplyConfiguration `json:"set,omitempty"` +} + +// RouteHTTPHeaderActionUnionApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaderActionUnion type for use with +// apply. +func RouteHTTPHeaderActionUnion() *RouteHTTPHeaderActionUnionApplyConfiguration { + return &RouteHTTPHeaderActionUnionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *RouteHTTPHeaderActionUnionApplyConfiguration) WithType(value v1.RouteHTTPHeaderActionType) *RouteHTTPHeaderActionUnionApplyConfiguration { + b.Type = &value + return b +} + +// WithSet sets the Set field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Set field is set to the value of the last call. +func (b *RouteHTTPHeaderActionUnionApplyConfiguration) WithSet(value *RouteSetHTTPHeaderApplyConfiguration) *RouteHTTPHeaderActionUnionApplyConfiguration { + b.Set = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go new file mode 100644 index 0000000000..0dd34776a5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routehttpheaders.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteHTTPHeadersApplyConfiguration represents a declarative configuration of the RouteHTTPHeaders type for use +// with apply. +type RouteHTTPHeadersApplyConfiguration struct { + Actions *RouteHTTPHeaderActionsApplyConfiguration `json:"actions,omitempty"` +} + +// RouteHTTPHeadersApplyConfiguration constructs a declarative configuration of the RouteHTTPHeaders type for use with +// apply. +func RouteHTTPHeaders() *RouteHTTPHeadersApplyConfiguration { + return &RouteHTTPHeadersApplyConfiguration{} +} + +// WithActions sets the Actions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Actions field is set to the value of the last call. 
+func (b *RouteHTTPHeadersApplyConfiguration) WithActions(value *RouteHTTPHeaderActionsApplyConfiguration) *RouteHTTPHeadersApplyConfiguration { + b.Actions = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go new file mode 100644 index 0000000000..2468d1dd51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingress.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" +) + +// RouteIngressApplyConfiguration represents a declarative configuration of the RouteIngress type for use +// with apply. +type RouteIngressApplyConfiguration struct { + Host *string `json:"host,omitempty"` + RouterName *string `json:"routerName,omitempty"` + Conditions []RouteIngressConditionApplyConfiguration `json:"conditions,omitempty"` + WildcardPolicy *routev1.WildcardPolicyType `json:"wildcardPolicy,omitempty"` + RouterCanonicalHostname *string `json:"routerCanonicalHostname,omitempty"` +} + +// RouteIngressApplyConfiguration constructs a declarative configuration of the RouteIngress type for use with +// apply. +func RouteIngress() *RouteIngressApplyConfiguration { + return &RouteIngressApplyConfiguration{} +} + +// WithHost sets the Host field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Host field is set to the value of the last call. 
+func (b *RouteIngressApplyConfiguration) WithHost(value string) *RouteIngressApplyConfiguration { + b.Host = &value + return b +} + +// WithRouterName sets the RouterName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouterName field is set to the value of the last call. +func (b *RouteIngressApplyConfiguration) WithRouterName(value string) *RouteIngressApplyConfiguration { + b.RouterName = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *RouteIngressApplyConfiguration) WithConditions(values ...*RouteIngressConditionApplyConfiguration) *RouteIngressApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WildcardPolicy field is set to the value of the last call. +func (b *RouteIngressApplyConfiguration) WithWildcardPolicy(value routev1.WildcardPolicyType) *RouteIngressApplyConfiguration { + b.WildcardPolicy = &value + return b +} + +// WithRouterCanonicalHostname sets the RouterCanonicalHostname field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RouterCanonicalHostname field is set to the value of the last call. 
+func (b *RouteIngressApplyConfiguration) WithRouterCanonicalHostname(value string) *RouteIngressApplyConfiguration { + b.RouterCanonicalHostname = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go new file mode 100644 index 0000000000..191ce55757 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeingresscondition.go @@ -0,0 +1,65 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RouteIngressConditionApplyConfiguration represents a declarative configuration of the RouteIngressCondition type for use +// with apply. +type RouteIngressConditionApplyConfiguration struct { + Type *v1.RouteIngressConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` +} + +// RouteIngressConditionApplyConfiguration constructs a declarative configuration of the RouteIngressCondition type for use with +// apply. +func RouteIngressCondition() *RouteIngressConditionApplyConfiguration { + return &RouteIngressConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *RouteIngressConditionApplyConfiguration) WithType(value v1.RouteIngressConditionType) *RouteIngressConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *RouteIngressConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithReason(value string) *RouteIngressConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *RouteIngressConditionApplyConfiguration) WithMessage(value string) *RouteIngressConditionApplyConfiguration { + b.Message = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *RouteIngressConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *RouteIngressConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go new file mode 100644 index 0000000000..d26e4564cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routeport.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// RoutePortApplyConfiguration represents a declarative configuration of the RoutePort type for use +// with apply. +type RoutePortApplyConfiguration struct { + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` +} + +// RoutePortApplyConfiguration constructs a declarative configuration of the RoutePort type for use with +// apply. +func RoutePort() *RoutePortApplyConfiguration { + return &RoutePortApplyConfiguration{} +} + +// WithTargetPort sets the TargetPort field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TargetPort field is set to the value of the last call. +func (b *RoutePortApplyConfiguration) WithTargetPort(value intstr.IntOrString) *RoutePortApplyConfiguration { + b.TargetPort = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go new file mode 100644 index 0000000000..cc1438e9ed --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routesethttpheader.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// RouteSetHTTPHeaderApplyConfiguration represents a declarative configuration of the RouteSetHTTPHeader type for use +// with apply. +type RouteSetHTTPHeaderApplyConfiguration struct { + Value *string `json:"value,omitempty"` +} + +// RouteSetHTTPHeaderApplyConfiguration constructs a declarative configuration of the RouteSetHTTPHeader type for use with +// apply. +func RouteSetHTTPHeader() *RouteSetHTTPHeaderApplyConfiguration { + return &RouteSetHTTPHeaderApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *RouteSetHTTPHeaderApplyConfiguration) WithValue(value string) *RouteSetHTTPHeaderApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go new file mode 100644 index 0000000000..09b6fd421f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routespec.go @@ -0,0 +1,104 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + routev1 "github.com/openshift/api/route/v1" +) + +// RouteSpecApplyConfiguration represents a declarative configuration of the RouteSpec type for use +// with apply. 
+type RouteSpecApplyConfiguration struct { + Host *string `json:"host,omitempty"` + Subdomain *string `json:"subdomain,omitempty"` + Path *string `json:"path,omitempty"` + To *RouteTargetReferenceApplyConfiguration `json:"to,omitempty"` + AlternateBackends []RouteTargetReferenceApplyConfiguration `json:"alternateBackends,omitempty"` + Port *RoutePortApplyConfiguration `json:"port,omitempty"` + TLS *TLSConfigApplyConfiguration `json:"tls,omitempty"` + WildcardPolicy *routev1.WildcardPolicyType `json:"wildcardPolicy,omitempty"` + HTTPHeaders *RouteHTTPHeadersApplyConfiguration `json:"httpHeaders,omitempty"` +} + +// RouteSpecApplyConfiguration constructs a declarative configuration of the RouteSpec type for use with +// apply. +func RouteSpec() *RouteSpecApplyConfiguration { + return &RouteSpecApplyConfiguration{} +} + +// WithHost sets the Host field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Host field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithHost(value string) *RouteSpecApplyConfiguration { + b.Host = &value + return b +} + +// WithSubdomain sets the Subdomain field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Subdomain field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithSubdomain(value string) *RouteSpecApplyConfiguration { + b.Subdomain = &value + return b +} + +// WithPath sets the Path field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Path field is set to the value of the last call. 
+func (b *RouteSpecApplyConfiguration) WithPath(value string) *RouteSpecApplyConfiguration { + b.Path = &value + return b +} + +// WithTo sets the To field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the To field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithTo(value *RouteTargetReferenceApplyConfiguration) *RouteSpecApplyConfiguration { + b.To = value + return b +} + +// WithAlternateBackends adds the given value to the AlternateBackends field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AlternateBackends field. +func (b *RouteSpecApplyConfiguration) WithAlternateBackends(values ...*RouteTargetReferenceApplyConfiguration) *RouteSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAlternateBackends") + } + b.AlternateBackends = append(b.AlternateBackends, *values[i]) + } + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithPort(value *RoutePortApplyConfiguration) *RouteSpecApplyConfiguration { + b.Port = value + return b +} + +// WithTLS sets the TLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TLS field is set to the value of the last call. 
+func (b *RouteSpecApplyConfiguration) WithTLS(value *TLSConfigApplyConfiguration) *RouteSpecApplyConfiguration { + b.TLS = value + return b +} + +// WithWildcardPolicy sets the WildcardPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the WildcardPolicy field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithWildcardPolicy(value routev1.WildcardPolicyType) *RouteSpecApplyConfiguration { + b.WildcardPolicy = &value + return b +} + +// WithHTTPHeaders sets the HTTPHeaders field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPHeaders field is set to the value of the last call. +func (b *RouteSpecApplyConfiguration) WithHTTPHeaders(value *RouteHTTPHeadersApplyConfiguration) *RouteSpecApplyConfiguration { + b.HTTPHeaders = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go new file mode 100644 index 0000000000..c4f5881c3b --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routestatus.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteStatusApplyConfiguration represents a declarative configuration of the RouteStatus type for use +// with apply. +type RouteStatusApplyConfiguration struct { + Ingress []RouteIngressApplyConfiguration `json:"ingress,omitempty"` +} + +// RouteStatusApplyConfiguration constructs a declarative configuration of the RouteStatus type for use with +// apply. 
+func RouteStatus() *RouteStatusApplyConfiguration { + return &RouteStatusApplyConfiguration{} +} + +// WithIngress adds the given value to the Ingress field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ingress field. +func (b *RouteStatusApplyConfiguration) WithIngress(values ...*RouteIngressApplyConfiguration) *RouteStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithIngress") + } + b.Ingress = append(b.Ingress, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go new file mode 100644 index 0000000000..3521a17e23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/routetargetreference.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// RouteTargetReferenceApplyConfiguration represents a declarative configuration of the RouteTargetReference type for use +// with apply. +type RouteTargetReferenceApplyConfiguration struct { + Kind *string `json:"kind,omitempty"` + Name *string `json:"name,omitempty"` + Weight *int32 `json:"weight,omitempty"` +} + +// RouteTargetReferenceApplyConfiguration constructs a declarative configuration of the RouteTargetReference type for use with +// apply. +func RouteTargetReference() *RouteTargetReferenceApplyConfiguration { + return &RouteTargetReferenceApplyConfiguration{} +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *RouteTargetReferenceApplyConfiguration) WithKind(value string) *RouteTargetReferenceApplyConfiguration { + b.Kind = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *RouteTargetReferenceApplyConfiguration) WithName(value string) *RouteTargetReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithWeight sets the Weight field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Weight field is set to the value of the last call. +func (b *RouteTargetReferenceApplyConfiguration) WithWeight(value int32) *RouteTargetReferenceApplyConfiguration { + b.Weight = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go new file mode 100644 index 0000000000..e76edd2b86 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/route/v1/tlsconfig.go @@ -0,0 +1,81 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/route/v1" +) + +// TLSConfigApplyConfiguration represents a declarative configuration of the TLSConfig type for use +// with apply. 
+type TLSConfigApplyConfiguration struct { + Termination *v1.TLSTerminationType `json:"termination,omitempty"` + Certificate *string `json:"certificate,omitempty"` + Key *string `json:"key,omitempty"` + CACertificate *string `json:"caCertificate,omitempty"` + DestinationCACertificate *string `json:"destinationCACertificate,omitempty"` + InsecureEdgeTerminationPolicy *v1.InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty"` + ExternalCertificate *LocalObjectReferenceApplyConfiguration `json:"externalCertificate,omitempty"` +} + +// TLSConfigApplyConfiguration constructs a declarative configuration of the TLSConfig type for use with +// apply. +func TLSConfig() *TLSConfigApplyConfiguration { + return &TLSConfigApplyConfiguration{} +} + +// WithTermination sets the Termination field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Termination field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithTermination(value v1.TLSTerminationType) *TLSConfigApplyConfiguration { + b.Termination = &value + return b +} + +// WithCertificate sets the Certificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Certificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCertificate(value string) *TLSConfigApplyConfiguration { + b.Certificate = &value + return b +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. 
+func (b *TLSConfigApplyConfiguration) WithKey(value string) *TLSConfigApplyConfiguration { + b.Key = &value + return b +} + +// WithCACertificate sets the CACertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CACertificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithCACertificate(value string) *TLSConfigApplyConfiguration { + b.CACertificate = &value + return b +} + +// WithDestinationCACertificate sets the DestinationCACertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationCACertificate field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithDestinationCACertificate(value string) *TLSConfigApplyConfiguration { + b.DestinationCACertificate = &value + return b +} + +// WithInsecureEdgeTerminationPolicy sets the InsecureEdgeTerminationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InsecureEdgeTerminationPolicy field is set to the value of the last call. +func (b *TLSConfigApplyConfiguration) WithInsecureEdgeTerminationPolicy(value v1.InsecureEdgeTerminationPolicyType) *TLSConfigApplyConfiguration { + b.InsecureEdgeTerminationPolicy = &value + return b +} + +// WithExternalCertificate sets the ExternalCertificate field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExternalCertificate field is set to the value of the last call. 
+func (b *TLSConfigApplyConfiguration) WithExternalCertificate(value *LocalObjectReferenceApplyConfiguration) *TLSConfigApplyConfiguration { + b.ExternalCertificate = value + return b +} diff --git a/vendor/github.com/openshift/client-go/route/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/route/applyconfigurations/utils.go new file mode 100644 index 0000000000..50b3409ea4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/applyconfigurations/utils.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package applyconfigurations + +import ( + v1 "github.com/openshift/api/route/v1" + internal "github.com/openshift/client-go/route/applyconfigurations/internal" + routev1 "github.com/openshift/client-go/route/applyconfigurations/route/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. 
+func ForKind(kind schema.GroupVersionKind) interface{} { + switch kind { + // Group=route.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithKind("LocalObjectReference"): + return &routev1.LocalObjectReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("Route"): + return &routev1.RouteApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteHTTPHeader"): + return &routev1.RouteHTTPHeaderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteHTTPHeaderActions"): + return &routev1.RouteHTTPHeaderActionsApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteHTTPHeaderActionUnion"): + return &routev1.RouteHTTPHeaderActionUnionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteHTTPHeaders"): + return &routev1.RouteHTTPHeadersApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteIngress"): + return &routev1.RouteIngressApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteIngressCondition"): + return &routev1.RouteIngressConditionApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RoutePort"): + return &routev1.RoutePortApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteSetHTTPHeader"): + return &routev1.RouteSetHTTPHeaderApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteSpec"): + return &routev1.RouteSpecApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteStatus"): + return &routev1.RouteStatusApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("RouteTargetReference"): + return &routev1.RouteTargetReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("TLSConfig"): + return &routev1.TLSConfigApplyConfiguration{} + + } + return nil +} + +func NewTypeConverter(scheme *runtime.Scheme) *testing.TypeConverter { + return &testing.TypeConverter{Scheme: scheme, TypeResolver: internal.Parser()} +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go 
b/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go new file mode 100644 index 0000000000..23c80b601f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + routev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + RouteV1() routev1.RouteV1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + routeV1 *routev1.RouteV1Client +} + +// RouteV1 retrieves the RouteV1Client +func (c *Clientset) RouteV1() routev1.RouteV1Interface { + return c.routeV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. 
+// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.routeV1, err = routev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.routeV1 = routev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..18bb8d3193 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + applyconfigurations "github.com/openshift/client-go/route/applyconfigurations" + clientset "github.com/openshift/client-go/route/clientset/versioned" + routev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" + fakeroutev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. 
This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + applyconfigurations.NewTypeConverter(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// RouteV1 retrieves the RouteV1Client +func (c *Clientset) RouteV1() routev1.RouteV1Interface { + return &fakeroutev1.FakeRouteV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// 
Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..21e116a50e --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/fake/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + routev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..14db57a58f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..53ac82ff5d --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + routev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go new file mode 100644 index 0000000000..225e6b2be3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/doc.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/doc.go similarity index 100% rename from vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake/doc.go rename to vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/doc.go diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route.go new file mode 100644 index 0000000000..acf7eb8c5a --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route.go @@ -0,0 +1,181 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1 "github.com/openshift/api/route/v1" + routev1 "github.com/openshift/client-go/route/applyconfigurations/route/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRoutes implements RouteInterface +type FakeRoutes struct { + Fake *FakeRouteV1 + ns string +} + +var routesResource = v1.SchemeGroupVersion.WithResource("routes") + +var routesKind = v1.SchemeGroupVersion.WithKind("Route") + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *FakeRoutes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Route, err error) { + emptyResult := &v1.Route{} + obj, err := c.Fake. 
+ Invokes(testing.NewGetActionWithOptions(routesResource, c.ns, name, options), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. +func (c *FakeRoutes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RouteList, err error) { + emptyResult := &v1.RouteList{} + obj, err := c.Fake. + Invokes(testing.NewListActionWithOptions(routesResource, routesKind, c.ns, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.RouteList{ListMeta: obj.(*v1.RouteList).ListMeta} + for _, item := range obj.(*v1.RouteList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *FakeRoutes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(routesResource, c.ns, opts)) + +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *FakeRoutes) Create(ctx context.Context, route *v1.Route, opts metav1.CreateOptions) (result *v1.Route, err error) { + emptyResult := &v1.Route{} + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(routesResource, c.ns, route, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. 
+func (c *FakeRoutes) Update(ctx context.Context, route *v1.Route, opts metav1.UpdateOptions) (result *v1.Route, err error) { + emptyResult := &v1.Route{} + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(routesResource, c.ns, route, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRoutes) UpdateStatus(ctx context.Context, route *v1.Route, opts metav1.UpdateOptions) (result *v1.Route, err error) { + emptyResult := &v1.Route{} + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceActionWithOptions(routesResource, "status", c.ns, route, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *FakeRoutes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(routesResource, c.ns, name, opts), &v1.Route{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRoutes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionActionWithOptions(routesResource, c.ns, opts, listOpts) + + _, err := c.Fake.Invokes(action, &v1.RouteList{}) + return err +} + +// Patch applies the patch and returns the patched route. +func (c *FakeRoutes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Route, err error) { + emptyResult := &v1.Route{} + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(routesResource, c.ns, name, pt, data, opts, subresources...), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied route. +func (c *FakeRoutes) Apply(ctx context.Context, route *routev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Route, err error) { + if route == nil { + return nil, fmt.Errorf("route provided to Apply must not be nil") + } + data, err := json.Marshal(route) + if err != nil { + return nil, err + } + name := route.Name + if name == nil { + return nil, fmt.Errorf("route.Name must be provided to Apply") + } + emptyResult := &v1.Route{} + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(routesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeRoutes) ApplyStatus(ctx context.Context, route *routev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Route, err error) { + if route == nil { + return nil, fmt.Errorf("route provided to Apply must not be nil") + } + data, err := json.Marshal(route) + if err != nil { + return nil, err + } + name := route.Name + if name == nil { + return nil, fmt.Errorf("route.Name must be provided to Apply") + } + emptyResult := &v1.Route{} + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceActionWithOptions(routesResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(*v1.Route), err +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route_client.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route_client.go new file mode 100644 index 0000000000..f942964366 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake/fake_route_client.go @@ -0,0 +1,24 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeRouteV1 struct { + *testing.Fake +} + +func (c *FakeRouteV1) Routes(namespace string) v1.RouteInterface { + return &FakeRoutes{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeRouteV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go new file mode 100644 index 0000000000..4f2173b6fc --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type RouteExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go new file mode 100644 index 0000000000..c37f331af8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route.go @@ -0,0 +1,57 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/route/v1" + routev1 "github.com/openshift/client-go/route/applyconfigurations/route/v1" + scheme "github.com/openshift/client-go/route/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// RoutesGetter has a method to return a RouteInterface. +// A group's client should implement this interface. +type RoutesGetter interface { + Routes(namespace string) RouteInterface +} + +// RouteInterface has methods to work with Route resources. +type RouteInterface interface { + Create(ctx context.Context, route *v1.Route, opts metav1.CreateOptions) (*v1.Route, error) + Update(ctx context.Context, route *v1.Route, opts metav1.UpdateOptions) (*v1.Route, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, route *v1.Route, opts metav1.UpdateOptions) (*v1.Route, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Route, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.RouteList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Route, err error) + Apply(ctx context.Context, route *routev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Route, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, route *routev1.RouteApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Route, err error) + RouteExpansion +} + +// routes implements RouteInterface +type routes struct { + *gentype.ClientWithListAndApply[*v1.Route, *v1.RouteList, *routev1.RouteApplyConfiguration] +} + +// newRoutes returns a Routes +func newRoutes(c *RouteV1Client, namespace string) *routes { + return &routes{ + gentype.NewClientWithListAndApply[*v1.Route, *v1.RouteList, *routev1.RouteApplyConfiguration]( + "routes", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1.Route { return &v1.Route{} }, + func() *v1.RouteList { return &v1.RouteList{} }), + } +} diff --git a/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go new file mode 100644 index 0000000000..e71d826c9f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/route_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. 
DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/openshift/api/route/v1" + "github.com/openshift/client-go/route/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type RouteV1Interface interface { + RESTClient() rest.Interface + RoutesGetter +} + +// RouteV1Client is used to interact with features provided by the route.openshift.io group. +type RouteV1Client struct { + restClient rest.Interface +} + +func (c *RouteV1Client) Routes(namespace string) RouteInterface { + return newRoutes(c, namespace) +} + +// NewForConfig creates a new RouteV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*RouteV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new RouteV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*RouteV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &RouteV1Client{client}, nil +} + +// NewForConfigOrDie creates a new RouteV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RouteV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RouteV1Client for the given RESTClient. 
+func New(c rest.Interface) *RouteV1Client { + return &RouteV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RouteV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go new file mode 100644 index 0000000000..8d23ff72b4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/factory.go @@ -0,0 +1,246 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/route/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces" + route "github.com/openshift/client-go/route/informers/externalversions/route" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. 
+func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. 
+ // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Route() route.Interface +} + +func (f *sharedInformerFactory) Route() route.Interface { + return route.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go new file mode 100644 index 0000000000..e0067144fd --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/generic.go @@ -0,0 +1,46 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/route/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=route.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("routes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Route().V1().Routes().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..1f807bab67 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,24 @@ +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/openshift/client-go/route/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go new file mode 100644 index 0000000000..69e1be333c --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package route + +import ( + internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/route/informers/externalversions/route/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go new file mode 100644 index 0000000000..63ee15aecf --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Routes returns a RouteInformer. + Routes() RouteInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Routes returns a RouteInformer. 
+func (v *version) Routes() RouteInformer { + return &routeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go similarity index 58% rename from vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go rename to vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go index 2055ed96f5..adb8459582 100644 --- a/vendor/github.com/openshift/client-go/build/informers/externalversions/build/v1/build.go +++ b/vendor/github.com/openshift/client-go/route/informers/externalversions/route/v1/route.go @@ -6,69 +6,69 @@ import ( "context" time "time" - buildv1 "github.com/openshift/api/build/v1" - versioned "github.com/openshift/client-go/build/clientset/versioned" - internalinterfaces "github.com/openshift/client-go/build/informers/externalversions/internalinterfaces" - v1 "github.com/openshift/client-go/build/listers/build/v1" + routev1 "github.com/openshift/api/route/v1" + versioned "github.com/openshift/client-go/route/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/route/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/route/listers/route/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" ) -// BuildInformer provides access to a shared informer and lister for -// Builds. -type BuildInformer interface { +// RouteInformer provides access to a shared informer and lister for +// Routes. 
+type RouteInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.BuildLister + Lister() v1.RouteLister } -type buildInformer struct { +type routeInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewBuildInformer constructs a new informer for Build type. +// NewRouteInformer constructs a new informer for Route type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredBuildInformer(client, namespace, resyncPeriod, indexers, nil) +func NewRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredBuildInformer constructs a new informer for Build type. +// NewFilteredRouteInformer constructs a new informer for Route type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredBuildInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredRouteInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BuildV1().Builds(namespace).List(context.TODO(), options) + return client.RouteV1().Routes(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.BuildV1().Builds(namespace).Watch(context.TODO(), options) + return client.RouteV1().Routes(namespace).Watch(context.TODO(), options) }, }, - &buildv1.Build{}, + &routev1.Route{}, resyncPeriod, indexers, ) } -func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredBuildInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *routeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRouteInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *buildInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&buildv1.Build{}, f.defaultInformer) +func (f *routeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&routev1.Route{}, f.defaultInformer) } -func (f *buildInformer) Lister() v1.BuildLister 
{ - return v1.NewBuildLister(f.Informer().GetIndexer()) +func (f *routeInformer) Lister() v1.RouteLister { + return v1.NewRouteLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go new file mode 100644 index 0000000000..74feb63800 --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/listers/route/v1/expansion_generated.go @@ -0,0 +1,11 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// RouteListerExpansion allows custom methods to be added to +// RouteLister. +type RouteListerExpansion interface{} + +// RouteNamespaceListerExpansion allows custom methods to be added to +// RouteNamespaceLister. +type RouteNamespaceListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go b/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go new file mode 100644 index 0000000000..f6dc2f852f --- /dev/null +++ b/vendor/github.com/openshift/client-go/route/listers/route/v1/route.go @@ -0,0 +1,54 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/route/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/listers" + "k8s.io/client-go/tools/cache" +) + +// RouteLister helps list Routes. +// All objects returned here must be treated as read-only. +type RouteLister interface { + // List lists all Routes in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Route, err error) + // Routes returns an object that can list and get Routes. + Routes(namespace string) RouteNamespaceLister + RouteListerExpansion +} + +// routeLister implements the RouteLister interface. +type routeLister struct { + listers.ResourceIndexer[*v1.Route] +} + +// NewRouteLister returns a new RouteLister. 
+func NewRouteLister(indexer cache.Indexer) RouteLister { + return &routeLister{listers.New[*v1.Route](indexer, v1.Resource("route"))} +} + +// Routes returns an object that can list and get Routes. +func (s *routeLister) Routes(namespace string) RouteNamespaceLister { + return routeNamespaceLister{listers.NewNamespaced[*v1.Route](s.ResourceIndexer, namespace)} +} + +// RouteNamespaceLister helps list and get Routes. +// All objects returned here must be treated as read-only. +type RouteNamespaceLister interface { + // List lists all Routes in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.Route, err error) + // Get retrieves the Route from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.Route, error) + RouteNamespaceListerExpansion +} + +// routeNamespaceLister implements the RouteNamespaceLister +// interface. +type routeNamespaceLister struct { + listers.ResourceIndexer[*v1.Route] +} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/modules.txt b/vendor/modules.txt index 3603b66adf..279a1ad8a8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -968,7 +968,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20241106222702-2429e35d6633 +# github.com/openshift/api v0.0.0-20250102185430-d6d8306a24ec ## explicit; go 1.22.0 github.com/openshift/api github.com/openshift/api/annotations @@ -1050,18 +1050,13 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 
github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20241001162912-da6d55e4611f +# github.com/openshift/client-go v0.0.0-20250106104058-89709a455e2a ## explicit; go 1.22.0 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal github.com/openshift/client-go/build/clientset/versioned github.com/openshift/client-go/build/clientset/versioned/scheme github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 -github.com/openshift/client-go/build/informers/externalversions -github.com/openshift/client-go/build/informers/externalversions/build -github.com/openshift/client-go/build/informers/externalversions/build/v1 -github.com/openshift/client-go/build/informers/externalversions/internalinterfaces -github.com/openshift/client-go/build/listers/build/v1 github.com/openshift/client-go/config/applyconfigurations github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 @@ -1080,22 +1075,26 @@ github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1 github.com/openshift/client-go/config/informers/externalversions/internalinterfaces github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 +github.com/openshift/client-go/image/applyconfigurations github.com/openshift/client-go/image/applyconfigurations/image/v1 github.com/openshift/client-go/image/applyconfigurations/internal github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/fake github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 -github.com/openshift/client-go/machine/applyconfigurations +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake 
+github.com/openshift/client-go/image/informers/externalversions +github.com/openshift/client-go/image/informers/externalversions/image +github.com/openshift/client-go/image/informers/externalversions/image/v1 +github.com/openshift/client-go/image/informers/externalversions/internalinterfaces +github.com/openshift/client-go/image/listers/image/v1 github.com/openshift/client-go/machine/applyconfigurations/internal github.com/openshift/client-go/machine/applyconfigurations/machine/v1 github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1 github.com/openshift/client-go/machine/clientset/versioned -github.com/openshift/client-go/machine/clientset/versioned/fake github.com/openshift/client-go/machine/clientset/versioned/scheme github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1 -github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1/fake github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1 -github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/fake github.com/openshift/client-go/machine/informers/externalversions github.com/openshift/client-go/machine/informers/externalversions/internalinterfaces github.com/openshift/client-go/machine/informers/externalversions/machine @@ -1139,6 +1138,19 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1 github.com/openshift/client-go/operator/listers/operator/v1 github.com/openshift/client-go/operator/listers/operator/v1alpha1 +github.com/openshift/client-go/route/applyconfigurations +github.com/openshift/client-go/route/applyconfigurations/internal +github.com/openshift/client-go/route/applyconfigurations/route/v1 +github.com/openshift/client-go/route/clientset/versioned +github.com/openshift/client-go/route/clientset/versioned/fake +github.com/openshift/client-go/route/clientset/versioned/scheme 
+github.com/openshift/client-go/route/clientset/versioned/typed/route/v1 +github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake +github.com/openshift/client-go/route/informers/externalversions +github.com/openshift/client-go/route/informers/externalversions/internalinterfaces +github.com/openshift/client-go/route/informers/externalversions/route +github.com/openshift/client-go/route/informers/externalversions/route/v1 +github.com/openshift/client-go/route/listers/route/v1 # github.com/openshift/library-go v0.0.0-20241022210936-abb8c75b88dc ## explicit; go 1.22.0 github.com/openshift/library-go/pkg/apiserver/jsonpatch @@ -1286,6 +1298,8 @@ github.com/securego/gosec/v2/analyzers github.com/securego/gosec/v2/cwe github.com/securego/gosec/v2/issue github.com/securego/gosec/v2/rules +# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 +## explicit; go 1.13 # github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c ## explicit github.com/shazow/go-diff/difflib