diff --git a/api/v1alpha1/workflow_types.go b/api/v1alpha1/workflow_types.go index 09a4d5a5b..bf216b1fb 100644 --- a/api/v1alpha1/workflow_types.go +++ b/api/v1alpha1/workflow_types.go @@ -13,15 +13,17 @@ type ( WorkflowState string WorkflowConditionType string TemplateRendering string + BootMode string ) const ( - WorkflowStateWaiting = WorkflowState("STATE_WAITING") - WorkflowStatePending = WorkflowState("STATE_PENDING") - WorkflowStateRunning = WorkflowState("STATE_RUNNING") - WorkflowStateSuccess = WorkflowState("STATE_SUCCESS") - WorkflowStateFailed = WorkflowState("STATE_FAILED") - WorkflowStateTimeout = WorkflowState("STATE_TIMEOUT") + WorkflowStatePreparing = WorkflowState("STATE_PREPARING") + WorkflowStatePending = WorkflowState("STATE_PENDING") + WorkflowStateRunning = WorkflowState("STATE_RUNNING") + WorkflowStatePost = WorkflowState("STATE_POST") + WorkflowStateSuccess = WorkflowState("STATE_SUCCESS") + WorkflowStateFailed = WorkflowState("STATE_FAILED") + WorkflowStateTimeout = WorkflowState("STATE_TIMEOUT") NetbootJobFailed WorkflowConditionType = "NetbootJobFailed" NetbootJobComplete WorkflowConditionType = "NetbootJobComplete" @@ -34,6 +36,9 @@ const ( TemplateRenderingSuccessful TemplateRendering = "successful" TemplateRenderingFailed TemplateRendering = "failed" + + BootModeNetboot BootMode = "netboot" + BootModeISO BootMode = "iso" ) // +kubebuilder:subresource:status @@ -86,18 +91,31 @@ type BootOptions struct { // A HardwareRef must be provided. // +optional ToggleAllowNetboot bool `json:"toggleAllowNetboot,omitempty"` - // OneTimeNetboot indicates whether the controller should create a job.bmc.tinkerbell.org object for getting the associated hardware - // into a netbooting state. + + // ISOURL is the URL of the ISO that will be one-time booted. When this field is set, the controller will create a job.bmc.tinkerbell.org object + // for getting the associated hardware into a CDROM booting state. 
// A HardwareRef that contains a spec.BmcRef must be provided. // +optional - OneTimeNetboot bool `json:"oneTimeNetboot,omitempty"` + // +kubebuilder:validation:Format=url + ISOURL string `json:"isoURL,omitempty"` + + // BootMode is the type of booting that will be done. + // +optional + // +kubebuilder:validation:Enum=netboot;iso + BootMode BootMode `json:"bootMode,omitempty"` } // BootOptionsStatus holds the state of any boot options. type BootOptionsStatus struct { - // OneTimeNetboot holds the state of a specific job.bmc.tinkerbell.org object created. - // Only used when BootOptions.OneTimeNetboot is true. - OneTimeNetboot OneTimeNetbootStatus `json:"netbootJob,omitempty"` + // AllowNetboot holds the state of the controller's interactions with the allowPXE field in a Hardware object. + AllowNetboot AllowNetbootStatus `json:"allowNetboot,omitempty"` + // Jobs holds the state of any job.bmc.tinkerbell.org objects created. + Jobs map[string]JobStatus `json:"jobs,omitempty"` +} + +type AllowNetbootStatus struct { + ToggledTrue bool `json:"toggledTrue,omitempty"` + ToggledFalse bool `json:"toggledFalse,omitempty"` } // WorkflowStatus defines the observed state of a Workflow. @@ -130,8 +148,8 @@ type WorkflowStatus struct { Conditions []WorkflowCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } -// OneTimeNetbootStatus holds the state of a specific job.bmc.tinkerbell.org object created. -type OneTimeNetbootStatus struct { +// JobStatus holds the state of a specific job.bmc.tinkerbell.org object created. +type JobStatus struct { // UID is the UID of the job.bmc.tinkerbell.org object associated with this workflow. // This is used to uniquely identify the job.bmc.tinkerbell.org object, as // all objects for a specific Hardware/Machine.bmc.tinkerbell.org are created with the same name. 
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c436f887f..aeda7acb1 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -62,6 +62,21 @@ func (in *Action) DeepCopy() *Action { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowNetbootStatus) DeepCopyInto(out *AllowNetbootStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowNetbootStatus. +func (in *AllowNetbootStatus) DeepCopy() *AllowNetbootStatus { + if in == nil { + return nil + } + out := new(AllowNetbootStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BootOptions) DeepCopyInto(out *BootOptions) { *out = *in @@ -80,7 +95,14 @@ func (in *BootOptions) DeepCopy() *BootOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BootOptionsStatus) DeepCopyInto(out *BootOptionsStatus) { *out = *in - out.OneTimeNetboot = in.OneTimeNetboot + out.AllowNetboot = in.AllowNetboot + if in.Jobs != nil { + in, out := &in.Jobs, &out.Jobs + *out = make(map[string]JobStatus, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootOptionsStatus. @@ -356,6 +378,21 @@ func (in *Interface) DeepCopy() *Interface { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. 
+func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetadataCustom) DeepCopyInto(out *MetadataCustom) { *out = *in @@ -732,21 +769,6 @@ func (in *OSIE) DeepCopy() *OSIE { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OneTimeNetbootStatus) DeepCopyInto(out *OneTimeNetbootStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OneTimeNetbootStatus. -func (in *OneTimeNetbootStatus) DeepCopy() *OneTimeNetbootStatus { - if in == nil { - return nil - } - out := new(OneTimeNetbootStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Task) DeepCopyInto(out *Task) { *out = *in @@ -979,7 +1001,7 @@ func (in *WorkflowSpec) DeepCopy() *WorkflowSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { *out = *in - out.BootOptions = in.BootOptions + in.BootOptions.DeepCopyInto(&out.BootOptions) if in.Tasks != nil { in, out := &in.Tasks, &out.Tasks *out = make([]Task, len(*in)) diff --git a/cmd/tink-controller/main.go b/cmd/tink-controller/main.go index 354990bdf..1f5fadfc6 100644 --- a/cmd/tink-controller/main.go +++ b/cmd/tink-controller/main.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/viper" "github.com/tinkerbell/tink/internal/deprecated/controller" "go.uber.org/zap" + "go.uber.org/zap/zapcore" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ctrl "sigs.k8s.io/controller-runtime" @@ -27,6 +28,7 @@ type Config struct { MetricsAddr string ProbeAddr string EnableLeaderElection bool + LogLevel int } func (c *Config) AddFlags(fs *pflag.FlagSet) { @@ -40,6 +42,7 @@ func (c *Config) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&c.EnableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") + fs.IntVar(&c.LogLevel, "log-level", 0, "Log level (0: info, 1: debug)") } func main() { @@ -52,15 +55,14 @@ func main() { func NewRootCommand() *cobra.Command { var config Config - zlog, err := zap.NewProduction() - if err != nil { - panic(err) - } - logger := zapr.NewLogger(zlog).WithName("github.com/tinkerbell/tink") - cmd := &cobra.Command{ Use: "tink-controller", PreRunE: func(cmd *cobra.Command, _ []string) error { + zlog, err := zap.NewProduction() + if err != nil { + panic(err) + } + logger := zapr.NewLogger(zlog).WithName("github.com/tinkerbell/tink") viper, err := createViper(logger) if err != nil { return fmt.Errorf("config init: %w", err) @@ -68,6 +70,19 @@ func NewRootCommand() *cobra.Command { return applyViper(viper, cmd) }, RunE: func(cmd *cobra.Command, _ []string) error { + zc := zap.NewProductionConfig() + switch config.LogLevel { + case 1: + zc.Level = zap.NewAtomicLevelAt(zapcore.Level(-1)) + default: + zc.Level = zap.NewAtomicLevelAt(zapcore.Level(0)) + } + zlog, err := zc.Build() + if err != nil { + panic(err) + } + + logger := zapr.NewLogger(zlog).WithName("github.com/tinkerbell/tink") logger.Info("Starting controller version " + version) ccfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( diff --git a/config/crd/bases/tinkerbell.org_workflows.yaml b/config/crd/bases/tinkerbell.org_workflows.yaml index 5eb69d478..d1693b833 100644 --- a/config/crd/bases/tinkerbell.org_workflows.yaml +++ b/config/crd/bases/tinkerbell.org_workflows.yaml @@ -58,12 +58,19 @@ spec: bootOptions: description: BootOptions are options that control the booting of Hardware. properties: - oneTimeNetboot: + bootMode: + description: BootMode is the type of booting that will be done. 
+ enum: + - netboot + - iso + type: string + isoURL: description: |- - OneTimeNetboot indicates whether the controller should create a job.bmc.tinkerbell.org object for getting the associated hardware - into a netbooting state. + ISOURL is the URL of the ISO that will be one-time booted. When this field is set, the controller will create a job.bmc.tinkerbell.org object + for getting the associated hardware into a CDROM booting state. A HardwareRef that contains a spec.BmcRef must be provided. - type: boolean + format: url + type: string toggleAllowNetboot: description: |- ToggleAllowNetboot indicates whether the controller should toggle the field in the associated hardware for allowing PXE booting. @@ -89,27 +96,36 @@ spec: bootOptions: description: BootOptions holds the state of any boot options. properties: - netbootJob: - description: |- - OneTimeNetboot holds the state of a specific job.bmc.tinkerbell.org object created. - Only used when BootOptions.OneTimeNetboot is true. + allowNetboot: + description: AllowNetboot holds the state of the controller's interactions with the allowPXE field in a Hardware object. properties: - complete: - description: Complete indicates whether the created job.bmc.tinkerbell.org has reported its conditions as complete. + toggledFalse: type: boolean - existingJobDeleted: - description: |- - ExistingJobDeleted indicates whether any existing job.bmc.tinkerbell.org was deleted. - The name of each job.bmc.tinkerbell.org object created by the controller is the same, so only one can exist at a time. - Using the same name was chosen so that there is only ever 1 job.bmc.tinkerbell.org per Hardware/Machine.bmc.tinkerbell.org. - This makes clean up easier and we dont just orphan jobs every time. + toggledTrue: type: boolean - uid: - description: |- - UID is the UID of the job.bmc.tinkerbell.org object associated with this workflow. 
- This is used to uniquely identify the job.bmc.tinkerbell.org object, as - all objects for a specific Hardware/Machine.bmc.tinkerbell.org are created with the same name. - type: string + type: object + jobs: + additionalProperties: + description: JobStatus holds the state of a specific job.bmc.tinkerbell.org object created. + properties: + complete: + description: Complete indicates whether the created job.bmc.tinkerbell.org has reported its conditions as complete. + type: boolean + existingJobDeleted: + description: |- + ExistingJobDeleted indicates whether any existing job.bmc.tinkerbell.org was deleted. + The name of each job.bmc.tinkerbell.org object created by the controller is the same, so only one can exist at a time. + Using the same name was chosen so that there is only ever 1 job.bmc.tinkerbell.org per Hardware/Machine.bmc.tinkerbell.org. + This makes clean up easier and we dont just orphan jobs every time. + type: boolean + uid: + description: |- + UID is the UID of the job.bmc.tinkerbell.org object associated with this workflow. + This is used to uniquely identify the job.bmc.tinkerbell.org object, as + all objects for a specific Hardware/Machine.bmc.tinkerbell.org are created with the same name. + type: string + type: object + description: Jobs holds the state of any job.bmc.tinkerbell.org objects created. 
type: object type: object conditions: diff --git a/go.mod b/go.mod index 1fe33b8a5..5eb3f3922 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ toolchain go1.22.2 require ( github.com/Masterminds/sprig/v3 v3.3.0 github.com/avast/retry-go v3.0.0+incompatible + github.com/cenkalti/backoff/v4 v4.3.0 github.com/distribution/reference v0.6.0 github.com/docker/docker v27.3.1+incompatible github.com/equinix-labs/otel-init-go v0.0.9 @@ -36,7 +37,6 @@ require ( k8s.io/api v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 knative.dev/pkg v0.0.0-20240917091217-aaab500c26c4 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 @@ -48,7 +48,6 @@ require ( github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.4.14 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -134,6 +133,7 @@ require ( k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/internal/deprecated/workflow/bootops.go b/internal/deprecated/workflow/bootops.go deleted file mode 100644 index 76e55a35d..000000000 --- a/internal/deprecated/workflow/bootops.go +++ /dev/null @@ -1,196 +0,0 @@ -package workflow - -import ( - "context" - "fmt" - "time" - - rufio "github.com/tinkerbell/rufio/api/v1alpha1" - "github.com/tinkerbell/tink/api/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - 
"sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const ( - bmcJobName = "tink-controller-%s-one-time-netboot" -) - -// handleExistingJob ensures that an existing job.bmc.tinkerbell.org is removed. -func handleExistingJob(ctx context.Context, cc client.Client, wf *v1alpha1.Workflow) (reconcile.Result, error) { - if wf.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - return reconcile.Result{}, nil - } - name := fmt.Sprintf(bmcJobName, wf.Spec.HardwareRef) - namespace := wf.Namespace - if err := cc.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, &rufio.Job{}); client.IgnoreNotFound(err) != nil || err == nil { - existingJob := &rufio.Job{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}} - opts := []client.DeleteOption{ - client.GracePeriodSeconds(0), - client.PropagationPolicy(metav1.DeletePropagationForeground), - } - if err := cc.Delete(ctx, existingJob, opts...); client.IgnoreNotFound(err) != nil { - return reconcile.Result{}, fmt.Errorf("error deleting job.bmc.tinkerbell.org object: %w", err) - } - return reconcile.Result{Requeue: true}, nil - } - wf.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted = true - - return reconcile.Result{Requeue: true}, nil -} - -func handleJobCreation(ctx context.Context, cc client.Client, wf *v1alpha1.Workflow) (reconcile.Result, error) { - if wf.Status.BootOptions.OneTimeNetboot.UID == "" && wf.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - existingJob := &rufio.Job{} - if err := cc.Get(ctx, client.ObjectKey{Name: fmt.Sprintf(bmcJobName, wf.Spec.HardwareRef), Namespace: wf.Namespace}, existingJob); err == nil { - wf.Status.BootOptions.OneTimeNetboot.UID = existingJob.GetUID() - return reconcile.Result{Requeue: true}, nil - } - hw := &v1alpha1.Hardware{ObjectMeta: metav1.ObjectMeta{Name: wf.Spec.HardwareRef, Namespace: wf.Namespace}} - if err := cc.Get(ctx, client.ObjectKey{Name: wf.Spec.HardwareRef, Namespace: wf.Namespace}, hw); err != nil { - return reconcile.Result{}, 
fmt.Errorf("error getting hardware %s: %w", wf.Spec.HardwareRef, err) - } - if hw.Spec.BMCRef == nil { - return reconcile.Result{}, fmt.Errorf("hardware %s does not have a BMC, cannot perform one time netboot", hw.Name) - } - if err := createNetbootJob(ctx, cc, hw, wf.Namespace); err != nil { - wf.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.NetbootJobSetupFailed, - Status: metav1.ConditionTrue, - Reason: "Error", - Message: fmt.Sprintf("error creating job: %v", err), - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - return reconcile.Result{}, fmt.Errorf("error creating job.bmc.tinkerbell.org object: %w", err) - } - wf.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.NetbootJobSetupComplete, - Status: metav1.ConditionTrue, - Reason: "Created", - Message: "job created", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - return reconcile.Result{Requeue: true}, nil - } - - return reconcile.Result{}, nil -} - -func handleJobComplete(ctx context.Context, cc client.Client, wf *v1alpha1.Workflow) (reconcile.Result, error) { - if !wf.Status.BootOptions.OneTimeNetboot.Complete && wf.Status.BootOptions.OneTimeNetboot.UID != "" && wf.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - existingJob := &rufio.Job{} - jobName := fmt.Sprintf(bmcJobName, wf.Spec.HardwareRef) - if err := cc.Get(ctx, client.ObjectKey{Name: jobName, Namespace: wf.Namespace}, existingJob); err != nil { - return reconcile.Result{}, fmt.Errorf("error getting one time netboot job: %w", err) - } - if existingJob.HasCondition(rufio.JobFailed, rufio.ConditionTrue) { - wf.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.NetbootJobFailed, - Status: metav1.ConditionTrue, - Reason: "Error", - Message: "one time netboot job failed", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - return reconcile.Result{}, fmt.Errorf("one time netboot job failed") - } - if existingJob.HasCondition(rufio.JobCompleted, rufio.ConditionTrue) { - 
wf.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.NetbootJobComplete, - Status: metav1.ConditionTrue, - Reason: "Complete", - Message: "one time netboot job completed", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - wf.Status.State = v1alpha1.WorkflowStatePending - wf.Status.BootOptions.OneTimeNetboot.Complete = true - return reconcile.Result{Requeue: true}, nil - } - if !wf.Status.HasCondition(v1alpha1.NetbootJobRunning, metav1.ConditionTrue) { - wf.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.NetbootJobRunning, - Status: metav1.ConditionTrue, - Reason: "Running", - Message: "one time netboot job running", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - } - return reconcile.Result{RequeueAfter: 5 * time.Second}, nil - } - - return reconcile.Result{}, nil -} - -func createNetbootJob(ctx context.Context, cc client.Client, hw *v1alpha1.Hardware, ns string) error { - name := fmt.Sprintf(bmcJobName, hw.Name) - efiBoot := func() bool { - for _, iface := range hw.Spec.Interfaces { - if iface.DHCP != nil && iface.DHCP.UEFI { - return true - } - } - return false - }() - job := &rufio.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - Annotations: map[string]string{ - "tink-controller-auto-created": "true", - }, - Labels: map[string]string{ - "tink-controller-auto-created": "true", - }, - }, - Spec: rufio.JobSpec{ - MachineRef: rufio.MachineRef{ - Name: hw.Spec.BMCRef.Name, - Namespace: ns, - }, - Tasks: []rufio.Action{ - { - PowerAction: rufio.PowerHardOff.Ptr(), - }, - { - OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{ - Devices: []rufio.BootDevice{ - rufio.PXE, - }, - EFIBoot: efiBoot, - }, - }, - { - PowerAction: rufio.PowerOn.Ptr(), - }, - }, - }, - } - if err := cc.Create(ctx, job); err != nil { - return fmt.Errorf("error creating job.bmc.tinkerbell.org object for netbooting machine: %w", err) - } - - return nil -} - -// handleHardwareAllowPXE sets the allowPXE field on the hardware 
interfaces to true before a workflow runs and false after a workflow completes successfully. -// If hardware is nil then it will be retrieved using the client. -func handleHardwareAllowPXE(ctx context.Context, cc client.Client, stored *v1alpha1.Workflow, hardware *v1alpha1.Hardware, allowPXE bool) error { - if hardware == nil && stored != nil { - hardware = &v1alpha1.Hardware{} - if err := cc.Get(ctx, client.ObjectKey{Name: stored.Spec.HardwareRef, Namespace: stored.Namespace}, hardware); err != nil { - return fmt.Errorf("hardware not found: name=%v; namespace=%v, error: %w", stored.Spec.HardwareRef, stored.Namespace, err) - } - } else if stored == nil { - return fmt.Errorf("workflow and hardware cannot both be nil") - } - - for _, iface := range hardware.Spec.Interfaces { - iface.Netboot.AllowPXE = ptr.Bool(allowPXE) - } - - if err := cc.Update(ctx, hardware); err != nil { - return fmt.Errorf("error updating allow pxe: %w", err) - } - - return nil -} diff --git a/internal/deprecated/workflow/bootops_test.go b/internal/deprecated/workflow/bootops_test.go deleted file mode 100644 index 19c4e3216..000000000 --- a/internal/deprecated/workflow/bootops_test.go +++ /dev/null @@ -1,520 +0,0 @@ -package workflow - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - rufio "github.com/tinkerbell/rufio/api/v1alpha1" - "github.com/tinkerbell/tink/api/v1alpha1" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func TestHandleExistingJob(t *testing.T) { - tests := map[string]struct { - workflow *v1alpha1.Workflow - wantWorkflow *v1alpha1.Workflow - wantResult reconcile.Result - job *rufio.Job - }{ - "existing job deleted": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: 
"workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: false, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: false, - }, - }, - }, - }, - wantResult: reconcile.Result{Requeue: true}, - job: &rufio.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: "tink-controller-machine1-one-time-netboot", - Namespace: "default", - }, - }, - }, - "no existing job": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{}, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - }, - }, - }, - }, - wantResult: reconcile.Result{Requeue: true}, - }, - "existing job already deleted": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - 
ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - }, - }, - }, - }, - wantResult: reconcile.Result{}, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - runtimescheme := runtime.NewScheme() - rufio.AddToScheme(runtimescheme) - v1alpha1.AddToScheme(runtimescheme) - clientBulider := GetFakeClientBuilder().WithScheme(runtimescheme) - if tc.job != nil { - clientBulider.WithRuntimeObjects(tc.job) - } - cc := clientBulider.Build() - - r, err := handleExistingJob(context.Background(), cc, tc.workflow) - if err != nil { - t.Fatalf("handleExistingJob() err = %v, want nil", err) - } - if diff := cmp.Diff(tc.wantResult, r); diff != "" { - t.Errorf("handleExistingJob() mismatch (-want +got):\n%s", diff) - } - if diff := cmp.Diff(tc.wantWorkflow, tc.workflow); diff != "" { - t.Errorf("handleExistingJob() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestHandleJobCreation(t *testing.T) { - uid := uuid.NewUUID() - tests := map[string]struct { - workflow *v1alpha1.Workflow - wantWorkflow *v1alpha1.Workflow - wantResult reconcile.Result - job *rufio.Job - }{ - "creation already done": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - UID: uid, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - UID: uid, - }, - }, - }, - }, - wantResult: 
reconcile.Result{}, - }, - "create new job": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{ - HardwareRef: "machine1", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - ExistingJobDeleted: true, - }, - }, - Conditions: []v1alpha1.WorkflowCondition{ - {Type: v1alpha1.NetbootJobSetupComplete, Status: v1.ConditionTrue, Reason: "Created", Message: "job created"}, - }, - }, - }, - wantResult: reconcile.Result{Requeue: true}, - job: &rufio.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: "tink-controller-machine1-one-time-netboot", - Namespace: "default", - ResourceVersion: "1", - Labels: map[string]string{ - "tink-controller-auto-created": "true", - }, - Annotations: map[string]string{ - "tink-controller-auto-created": "true", - }, - }, - Spec: rufio.JobSpec{ - MachineRef: rufio.MachineRef{ - Name: "machine1", - Namespace: "default", - }, - Tasks: []rufio.Action{ - {PowerAction: ptr.To(rufio.PowerHardOff)}, - {OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{Devices: []rufio.BootDevice{rufio.PXE}}}, - {PowerAction: ptr.To(rufio.PowerOn)}, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - runtimescheme := runtime.NewScheme() - rufio.AddToScheme(runtimescheme) - v1alpha1.AddToScheme(runtimescheme) - clientBulider := GetFakeClientBuilder().WithScheme(runtimescheme) - clientBulider.WithRuntimeObjects(&v1alpha1.Hardware{ - ObjectMeta: v1.ObjectMeta{ - Name: "machine1", - Namespace: "default", - }, - Spec: 
v1alpha1.HardwareSpec{ - BMCRef: &corev1.TypedLocalObjectReference{Name: "machine1"}, - }, - }) - cc := clientBulider.Build() - - r, err := handleJobCreation(context.Background(), cc, tc.workflow) - if err != nil { - t.Fatalf("handleJobCreation() err = %v, want nil", err) - } - if diff := cmp.Diff(tc.wantResult, r); diff != "" { - t.Errorf("handleJobCreation() mismatch (-want +got):\n%s", diff) - } - if diff := cmp.Diff(tc.wantWorkflow, tc.workflow, cmpopts.IgnoreFields(v1alpha1.WorkflowCondition{}, "Time")); diff != "" { - t.Errorf("handleJobCreation() mismatch (-want +got):\n%s", diff) - } - // check if the job is created - if tc.job != nil { - job := &rufio.Job{} - if err := cc.Get(context.Background(), client.ObjectKey{Name: tc.job.Name, Namespace: tc.job.Namespace}, job); err != nil { - t.Fatalf("handleJobCreation() job not created: %v", err) - } - if diff := cmp.Diff(tc.job, job); diff != "" { - t.Errorf("handleJobCreation() mismatch (-want +got):\n%s", diff) - } - } - }) - } -} - -func TestHandleJobComplete(t *testing.T) { - uid := uuid.NewUUID() - tests := map[string]struct { - workflow *v1alpha1.Workflow - wantWorkflow *v1alpha1.Workflow - wantResult reconcile.Result - job *rufio.Job - shouldError bool - }{ - "status for existing job complete": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: true, - }, - }, - }, - }, - wantResult: reconcile.Result{}, - }, - "existing job not complete": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: 
"default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: false, - UID: uid, - ExistingJobDeleted: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: false, - UID: uid, - ExistingJobDeleted: true, - }, - }, - Conditions: []v1alpha1.WorkflowCondition{ - {Type: v1alpha1.NetbootJobRunning, Status: v1.ConditionTrue, Reason: "Running", Message: "one time netboot job running"}, - }, - }, - }, - wantResult: reconcile.Result{RequeueAfter: 5 * time.Second}, - job: &rufio.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: "tink-controller-machine1-one-time-netboot", - Namespace: "default", - }, - Status: rufio.JobStatus{ - Conditions: []rufio.JobCondition{ - {Type: rufio.JobRunning, Status: rufio.ConditionTrue}, - }, - }, - }, - }, - "existing job failed": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: false, - UID: uid, - ExistingJobDeleted: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: false, - UID: uid, - ExistingJobDeleted: true, - }, - }, - Conditions: []v1alpha1.WorkflowCondition{ - {Type: 
v1alpha1.NetbootJobFailed, Status: v1.ConditionTrue, Reason: "Error", Message: "one time netboot job failed"}, - }, - }, - }, - wantResult: reconcile.Result{}, - job: &rufio.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: "tink-controller-machine1-one-time-netboot", - Namespace: "default", - }, - Status: rufio.JobStatus{ - Conditions: []rufio.JobCondition{ - {Type: rufio.JobFailed, Status: rufio.ConditionTrue}, - }, - }, - }, - shouldError: true, - }, - "existing job completed": { - workflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: false, - UID: uid, - ExistingJobDeleted: true, - }, - }, - }, - }, - wantWorkflow: &v1alpha1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: "workflow1", - Namespace: "default", - }, - Spec: v1alpha1.WorkflowSpec{HardwareRef: "machine1"}, - Status: v1alpha1.WorkflowStatus{ - State: v1alpha1.WorkflowStatePending, - BootOptions: v1alpha1.BootOptionsStatus{ - OneTimeNetboot: v1alpha1.OneTimeNetbootStatus{ - Complete: true, - UID: uid, - ExistingJobDeleted: true, - }, - }, - Conditions: []v1alpha1.WorkflowCondition{ - {Type: v1alpha1.NetbootJobComplete, Status: v1.ConditionTrue, Reason: "Complete", Message: "one time netboot job completed"}, - }, - }, - }, - wantResult: reconcile.Result{Requeue: true}, - job: &rufio.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: "tink-controller-machine1-one-time-netboot", - Namespace: "default", - }, - Status: rufio.JobStatus{ - Conditions: []rufio.JobCondition{ - {Type: rufio.JobCompleted, Status: rufio.ConditionTrue}, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - runtimescheme := runtime.NewScheme() - rufio.AddToScheme(runtimescheme) - v1alpha1.AddToScheme(runtimescheme) - clientBulider := 
GetFakeClientBuilder().WithScheme(runtimescheme) - if tc.job != nil { - clientBulider.WithRuntimeObjects(tc.job) - } - cc := clientBulider.Build() - - r, err := handleJobComplete(context.Background(), cc, tc.workflow) - if err != nil && !tc.shouldError { - t.Fatalf("handleJobComplete() err = %v, want nil", err) - } - if diff := cmp.Diff(tc.wantResult, r); diff != "" { - t.Errorf("result mismatch (-want +got):\n%s", diff) - } - if diff := cmp.Diff(tc.wantWorkflow, tc.workflow, cmpopts.IgnoreFields(v1alpha1.WorkflowCondition{}, "Time")); diff != "" { - t.Errorf("workflow mismatch (-want +got):\n%s", diff) - } - }) - } -} diff --git a/internal/deprecated/workflow/hardware.go b/internal/deprecated/workflow/hardware.go new file mode 100644 index 000000000..f31cf1bc7 --- /dev/null +++ b/internal/deprecated/workflow/hardware.go @@ -0,0 +1,122 @@ +package workflow + +import ( + "context" + "fmt" + + "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/ptr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// setAllowPXE sets the allowPXE field on the hardware network interfaces. +// If hardware is nil then it will be retrieved using the client. +// The hardware object will be updated in the cluster. 
+func setAllowPXE(ctx context.Context, cc client.Client, w *v1alpha1.Workflow, h *v1alpha1.Hardware, allowPXE bool) error { + if h == nil && w == nil { + return fmt.Errorf("both workflow and hardware cannot be nil") + } + if h == nil { + h = &v1alpha1.Hardware{} + if err := cc.Get(ctx, client.ObjectKey{Name: w.Spec.HardwareRef, Namespace: w.Namespace}, h); err != nil { + return fmt.Errorf("hardware not found: name=%v; namespace=%v, error: %w", w.Spec.HardwareRef, w.Namespace, err) + } + } + + for _, iface := range h.Spec.Interfaces { + iface.Netboot.AllowPXE = ptr.Bool(allowPXE) + } + + if err := cc.Update(ctx, h); err != nil { + return fmt.Errorf("error updating allow pxe: %w", err) + } + + return nil +} + +// hardwareFrom retrieves the in cluster hardware object defined in the given workflow. +func hardwareFrom(ctx context.Context, cc client.Client, w *v1alpha1.Workflow) (*v1alpha1.Hardware, error) { + if w == nil { + return nil, fmt.Errorf("workflow is nil") + } + if w.Spec.HardwareRef == "" { + return nil, fmt.Errorf("hardware ref is empty") + } + h := &v1alpha1.Hardware{} + if err := cc.Get(ctx, client.ObjectKey{Name: w.Spec.HardwareRef, Namespace: w.Namespace}, h); err != nil { + return nil, fmt.Errorf("hardware not found: name=%v; namespace=%v, error: %w", w.Spec.HardwareRef, w.Namespace, err) + } + + return h, nil +} + +// toggleHardware toggles the allowPXE field on the hardware network interfaces. +// It is idempotent and uses the Workflow.Status.BootOptionsStatus.AllowNetboot fields for idempotent checks. +// This function will update the Workflow status. +func (s *state) toggleHardware(ctx context.Context, allowPXE bool) error { + // 1. check if we've already set the allowPXE field to the desired value + // 2. if not, set the allowPXE field to the desired value + // 3. 
return a WorkflowCondition with the result of the operation + + hw, err := hardwareFrom(ctx, s.client, s.workflow) + if err != nil { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.ToggleAllowNetbootTrue, + Status: metav1.ConditionFalse, + Reason: "Error", + Message: fmt.Sprintf("error getting hardware: %v", err), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + + return err + } + + if allowPXE { + if s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue { + return nil + } + if err := setAllowPXE(ctx, s.client, s.workflow, hw, allowPXE); err != nil { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.ToggleAllowNetbootTrue, + Status: metav1.ConditionFalse, + Reason: "Error", + Message: fmt.Sprintf("error setting allowPXE to %v: %v", allowPXE, err), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return err + } + s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue = true + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.ToggleAllowNetbootTrue, + Status: metav1.ConditionTrue, + Reason: "Complete", + Message: fmt.Sprintf("set allowPXE to %v", allowPXE), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return nil + } + + if s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse { + return nil + } + if err := setAllowPXE(ctx, s.client, s.workflow, hw, allowPXE); err != nil { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.ToggleAllowNetbootFalse, + Status: metav1.ConditionFalse, + Reason: "Error", + Message: fmt.Sprintf("error setting allowPXE to %v: %v", allowPXE, err), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return err + } + s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse = true + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.ToggleAllowNetbootFalse, + Status: metav1.ConditionTrue, + Reason: "Complete", + Message: fmt.Sprintf("set allowPXE to %v", allowPXE), + Time: 
&metav1.Time{Time: metav1.Now().UTC()}, + }) + return nil +} diff --git a/internal/deprecated/workflow/job.go b/internal/deprecated/workflow/job.go new file mode 100644 index 000000000..181f85121 --- /dev/null +++ b/internal/deprecated/workflow/job.go @@ -0,0 +1,218 @@ +package workflow + +import ( + "context" + "fmt" + "time" + + rufio "github.com/tinkerbell/rufio/api/v1alpha1" + "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type jobName string + +const ( + jobNameNetboot jobName = "netboot" + jobNameISOMount jobName = "iso-mount" + jobNameISOEject jobName = "iso-eject" +) + +func (j jobName) String() string { + return string(j) +} + +// this function will update the Workflow status. +func (s *state) handleJob(ctx context.Context, actions []rufio.Action, name jobName) (reconcile.Result, error) { + // there are 3 phases. 1. Clean up existing 2. Create new 3. Track status + // 1. clean up existing job if it wasn't already deleted + if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted { + journal.Log(ctx, "deleting existing job", "name", name) + result, err := s.deleteExisting(ctx, name) + if err != nil { + return result, err + } + + return result, nil + } + + // 2. 
create a new job + if uid := s.workflow.Status.BootOptions.Jobs[name.String()].UID; uid == "" { + journal.Log(ctx, "no uid found for job", "name", name) + result, err := s.createJob(ctx, actions, name) + if err != nil { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.NetbootJobSetupFailed, + Status: metav1.ConditionTrue, + Reason: "Error", + Message: fmt.Sprintf("error creating job: %v", err), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return result, err + } + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.NetbootJobSetupComplete, + Status: metav1.ConditionTrue, + Reason: "Created", + Message: "job created", + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return result, nil + } + + // 3. track status + if !s.workflow.Status.BootOptions.Jobs[name.String()].Complete { + journal.Log(ctx, "tracking job", "name", name) + // track status + r, tState, err := s.trackRunningJob(ctx, name) + if err != nil { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.NetbootJobFailed, + Status: metav1.ConditionTrue, + Reason: "Error", + Message: err.Error(), + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + return r, err + } + if tState == trackedStateComplete { + s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ + Type: v1alpha1.NetbootJobComplete, + Status: metav1.ConditionTrue, + Reason: "Complete", + Message: "job completed", + Time: &metav1.Time{Time: metav1.Now().UTC()}, + }) + } + return r, nil + } + + return reconcile.Result{Requeue: true}, nil +} + +func (s *state) deleteExisting(ctx context.Context, name jobName) (reconcile.Result, error) { + existingJob := &rufio.Job{ObjectMeta: metav1.ObjectMeta{Name: name.String(), Namespace: s.workflow.Namespace}} + opts := []client.DeleteOption{ + client.GracePeriodSeconds(0), + client.PropagationPolicy(metav1.DeletePropagationForeground), + } + if err := s.client.Delete(ctx, existingJob, opts...); 
client.IgnoreNotFound(err) != nil { + return reconcile.Result{}, fmt.Errorf("error deleting job.bmc.tinkerbell.org object: %w", err) + } + + jStatus := s.workflow.Status.BootOptions.Jobs[name.String()] + jStatus.ExistingJobDeleted = true + // if we delete an existing job, we need to remove any uid that was set. + jStatus.UID = "" + jStatus.Complete = false + s.workflow.Status.BootOptions.Jobs[name.String()] = jStatus + + return reconcile.Result{Requeue: true}, nil +} + +// This function will update the Workflow status. +func (s *state) createJob(ctx context.Context, actions []rufio.Action, name jobName) (reconcile.Result, error) { + // create a new job + // The assumption is that the UID is not set. UID checking is not handled here. + // 1. look up if there's an existing job with the same name, if so update the status with the UID and return + // 2. if there's no existing job, create a new job, update the status with the UID, and return + + rj := &rufio.Job{} + if err := s.client.Get(ctx, client.ObjectKey{Name: name.String(), Namespace: s.workflow.Namespace}, rj); err == nil { + journal.Log(ctx, "job already exists", "name", name) + if !rj.DeletionTimestamp.IsZero() { + journal.Log(ctx, "job is being deleted", "name", name) + return reconcile.Result{Requeue: true}, nil + } + // TODO(jacobweinstock): job exists means that the job name and uid from the status are the same. 
+ // get the UID and update the status + jStatus := s.workflow.Status.BootOptions.Jobs[name.String()] + jStatus.UID = rj.GetUID() + s.workflow.Status.BootOptions.Jobs[name.String()] = jStatus + + return reconcile.Result{Requeue: true}, nil + } + + // create a new job + hw, err := hardwareFrom(ctx, s.client, s.workflow) + if err != nil { + return reconcile.Result{}, fmt.Errorf("error getting hardware: %w", err) + } + if hw.Spec.BMCRef == nil { + return reconcile.Result{}, fmt.Errorf("hardware %q does not have a BMC", hw.Name) + } + + if err := create(ctx, s.client, name.String(), hw, s.workflow.Namespace, actions); err != nil { + return reconcile.Result{}, fmt.Errorf("error creating job: %w", err) + } + journal.Log(ctx, "job created", "name", name) + + return reconcile.Result{Requeue: true}, nil +} + +type trackedState string + +var ( + trackedStateComplete trackedState = "complete" + trackedStateRunning trackedState = "running" + trackedStateError trackedState = "error" + trackedStateFailed trackedState = "failed" +) + +// This function will update the Workflow status. 
+func (s *state) trackRunningJob(ctx context.Context, name jobName) (reconcile.Result, trackedState, error) { + // track status + // get the job + rj := &rufio.Job{} + if err := s.client.Get(ctx, client.ObjectKey{Name: name.String(), Namespace: s.workflow.Namespace}, rj); err != nil { + return reconcile.Result{}, trackedStateError, fmt.Errorf("error getting job: %w", err) + } + if rj.HasCondition(rufio.JobFailed, rufio.ConditionTrue) { + journal.Log(ctx, "job failed", "name", name) + // job failed + return reconcile.Result{}, trackedStateFailed, fmt.Errorf("job failed") + } + if rj.HasCondition(rufio.JobCompleted, rufio.ConditionTrue) { + journal.Log(ctx, "job completed", "name", name) + // job completed + jStatus := s.workflow.Status.BootOptions.Jobs[name.String()] + jStatus.Complete = true + s.workflow.Status.BootOptions.Jobs[name.String()] = jStatus + + return reconcile.Result{}, trackedStateComplete, nil + } + // still running + journal.Log(ctx, "job still running", "name", name) + time.Sleep(s.backoff.NextBackOff()) + return reconcile.Result{Requeue: true}, trackedStateRunning, nil +} + +func create(ctx context.Context, cc client.Client, name string, hw *v1alpha1.Hardware, ns string, tasks []rufio.Action) error { + journal.Log(ctx, "creating job", "name", name) + if err := cc.Create(ctx, &rufio.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Annotations: map[string]string{ + "tink-controller-auto-created": "true", + }, + Labels: map[string]string{ + "tink-controller-auto-created": "true", + }, + }, + Spec: rufio.JobSpec{ + MachineRef: rufio.MachineRef{ + Name: hw.Spec.BMCRef.Name, + Namespace: ns, + }, + Tasks: tasks, + }, + }); err != nil { + return fmt.Errorf("error creating job.bmc.tinkerbell.org object for netbooting machine: %w", err) + } + + return nil +} diff --git a/internal/deprecated/workflow/job_test.go b/internal/deprecated/workflow/job_test.go new file mode 100644 index 000000000..f50294d5d --- /dev/null +++ 
b/internal/deprecated/workflow/job_test.go @@ -0,0 +1,235 @@ +package workflow + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + rufio "github.com/tinkerbell/rufio/api/v1alpha1" + "github.com/tinkerbell/tink/api/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func TestHandleJob(t *testing.T) { + tests := map[string]struct { + workflow *v1alpha1.Workflow + wantWorkflow *v1alpha1.WorkflowStatus + hardware *v1alpha1.Hardware + actions []rufio.Action + name jobName + wantError bool + wantResult reconcile.Result + job *rufio.Job + }{ + "existing job deleted, new job created and completed": { + workflow: &v1alpha1.Workflow{ + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + UID: types.UID("1234"), + Complete: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + UID: types.UID("1234"), + Complete: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + hardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + name: jobNameNetboot, + wantResult: reconcile.Result{Requeue: true}, + }, + "existing job not deleted": { + workflow: &v1alpha1.Workflow{ + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: 
map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): {}, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + name: jobNameNetboot, + hardware: new(v1alpha1.Hardware), + wantResult: reconcile.Result{Requeue: true}, + }, + "existing job deleted, create new job": { + workflow: &v1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: v1alpha1.WorkflowSpec{ + HardwareRef: "test-hardware", + }, + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.WorkflowStatus{ + Conditions: []v1alpha1.WorkflowCondition{ + { + Type: v1alpha1.NetbootJobSetupComplete, + Status: metav1.ConditionTrue, + Reason: "Created", + Message: "job created", + }, + }, + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + hardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + actions: []rufio.Action{}, + name: jobNameNetboot, + wantResult: reconcile.Result{Requeue: true}, + }, + "existing job deleted, new job created": { + workflow: &v1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + 
jobNameNetboot.String(): { + ExistingJobDeleted: true, + UID: types.UID("1234"), + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.WorkflowStatus{ + Conditions: []v1alpha1.WorkflowCondition{ + { + Type: v1alpha1.NetbootJobComplete, + Status: metav1.ConditionTrue, + Reason: "Complete", + Message: "job completed", + }, + }, + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + jobNameNetboot.String(): { + ExistingJobDeleted: true, + UID: types.UID("1234"), + Complete: true, + }, + }, + AllowNetboot: v1alpha1.AllowNetbootStatus{}, + }, + }, + hardware: new(v1alpha1.Hardware), + actions: []rufio.Action{}, + name: jobNameNetboot, + wantResult: reconcile.Result{}, + job: &rufio.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobNameNetboot.String(), + Namespace: "default", + UID: types.UID("1234"), + }, + Status: rufio.JobStatus{ + Conditions: []rufio.JobCondition{ + { + Type: rufio.JobCompleted, + Status: rufio.ConditionTrue, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + scheme := runtime.NewScheme() + rufio.AddToScheme(scheme) + v1alpha1.AddToScheme(scheme) + clientBuilder := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tc.hardware, tc.workflow) + if tc.job != nil { + clientBuilder.WithRuntimeObjects(tc.job) + } + s := &state{ + workflow: tc.workflow, + client: clientBuilder.Build(), + } + ctx := context.Background() + r, err := s.handleJob(ctx, tc.actions, tc.name) + if (err != nil) != tc.wantError { + t.Errorf("expected error: %v, got: %v", tc.wantError, err) + } + if diff := cmp.Diff(tc.wantResult, r); diff != "" { + t.Errorf("unexpected result (-want +got):\n%s", diff) + } + if diff := cmp.Diff(*tc.wantWorkflow, s.workflow.Status, cmpopts.IgnoreFields(v1alpha1.WorkflowCondition{}, "Time")); diff != "" { + t.Errorf("unexpected workflow status (-want +got):\n%s", diff) + } + }) + } +} diff --git 
a/internal/deprecated/workflow/journal/journal.go b/internal/deprecated/workflow/journal/journal.go new file mode 100644 index 000000000..0437ed7ae --- /dev/null +++ b/internal/deprecated/workflow/journal/journal.go @@ -0,0 +1,73 @@ +package journal + +import ( + "context" + "fmt" + "log/slog" + "path/filepath" + "runtime" + "strings" + "time" +) + +type CtxKey string + +const Name CtxKey = "journal" + +type Entry struct { + Msg string `json:"msg"` + Args map[string]any `json:"args,omitempty"` + Source slog.Source `json:"source"` + Time string `json:"time"` +} + +// New creates a slice of Entries in the provided context. +func New(ctx context.Context) context.Context { + e := &[]Entry{} + return context.WithValue(ctx, Name, e) +} + +// Log adds a new Entry to the journal in the provided context. +// Log is not thread-safe. +func Log(ctx context.Context, msg string, args ...any) { + t := time.Now().UTC().Format(time.RFC3339Nano) + m := make(map[string]any) + for i := 0; i < len(args); i += 2 { + k, ok := args[i].(string) + if !ok { + k = fmt.Sprintf("%v", args[i]) + } + m[k] = args[i+1] + } + e, ok := ctx.Value(Name).(*[]Entry) + if !ok { + return + } + *e = append(*e, Entry{Msg: msg, Args: m, Source: fileAndLine(), Time: t}) +} + +// Journal returns the journal from the provided context. 
+func Journal(ctx context.Context) []Entry { + e, ok := ctx.Value(Name).(*[]Entry) + if !ok { + return nil + } + return *e +} + +func fileAndLine() slog.Source { + pc, file, line, _ := runtime.Caller(2) + fn := runtime.FuncForPC(pc) + var fnName string + if fn == nil { + fnName = "?()" + } else { + fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()" + } + + return slog.Source{ + Function: fnName, + File: filepath.Base(file), + Line: line, + } +} diff --git a/internal/deprecated/workflow/journal/journal_test.go b/internal/deprecated/workflow/journal/journal_test.go new file mode 100644 index 000000000..175fcb65a --- /dev/null +++ b/internal/deprecated/workflow/journal/journal_test.go @@ -0,0 +1,68 @@ +package journal + +import ( + "context" + "log/slog" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +func TestJournal(t *testing.T) { + type input struct { + msg string + args []any + } + tests := map[string]struct { + want []Entry + inputs []input + }{ + "empty": { + want: []Entry{}, + }, + "single": { + want: []Entry{ + { + Msg: "one", + Args: map[string]any{"key": "value"}, + Source: slog.Source{ + File: "journal_test.go", + Function: "func1()", + }, + }, + }, + inputs: []input{ + {msg: "one", args: []any{"key", "value"}}, + }, + }, + "non normal key": { + want: []Entry{ + { + Msg: "msg", + Args: map[string]any{"1.1": "value"}, + Source: slog.Source{ + File: "journal_test.go", + Function: "func1()", + }, + }, + }, + inputs: []input{ + {msg: "msg", args: []any{1.1, "value"}}, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + ctx := New(context.Background()) + for _, input := range tc.inputs { + Log(ctx, input.msg, input.args...) 
+ } + got := Journal(ctx) + if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(Entry{}, "Time"), cmpopts.IgnoreFields(slog.Source{}, "Line")); diff != "" { + t.Errorf("unexpected journal (-want +got):\n%s", diff) + } + }) + } +} diff --git a/internal/deprecated/workflow/post.go b/internal/deprecated/workflow/post.go new file mode 100644 index 000000000..6166f47eb --- /dev/null +++ b/internal/deprecated/workflow/post.go @@ -0,0 +1,50 @@ +package workflow + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + rufio "github.com/tinkerbell/rufio/api/v1alpha1" + "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func (s *state) postActions(ctx context.Context) (reconcile.Result, error) { + // 1. Handle toggling allowPXE in a hardware object if toggleAllowNetboot is true. + if s.workflow.Spec.BootOptions.ToggleAllowNetboot && !s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse { + journal.Log(ctx, "toggling allowPXE false") + if err := s.toggleHardware(ctx, false); err != nil { + return reconcile.Result{}, err + } + } + + // 2. Handle ISO eject scenario. 
+ if s.workflow.Spec.BootOptions.BootMode == v1alpha1.BootModeISO { + name := jobName(fmt.Sprintf("%s-%s", jobNameISOEject, s.workflow.GetName())) + if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { + journal.Log(ctx, "boot mode iso") + if s.workflow.Spec.BootOptions.ISOURL == "" { + return reconcile.Result{}, errors.New("iso url must be a valid url") + } + actions := []rufio.Action{ + { + VirtualMediaAction: &rufio.VirtualMediaAction{ + MediaURL: "", // empty to unmount/eject the media + Kind: rufio.VirtualMediaCD, + }, + }, + } + + r, err := s.handleJob(ctx, actions, name) + if s.workflow.Status.BootOptions.Jobs[name.String()].Complete { + s.workflow.Status.State = v1alpha1.WorkflowStateSuccess + } + return r, err + } + } + + s.workflow.Status.State = v1alpha1.WorkflowStateSuccess + return reconcile.Result{}, nil +} diff --git a/internal/deprecated/workflow/pre.go b/internal/deprecated/workflow/pre.go new file mode 100644 index 000000000..54a837066 --- /dev/null +++ b/internal/deprecated/workflow/pre.go @@ -0,0 +1,126 @@ +package workflow + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + rufio "github.com/tinkerbell/rufio/api/v1alpha1" + "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// prepareWorkflow prepares the workflow for execution. +// The workflow (s.workflow) can be updated even if an error occurs. +// Any patching of the workflow object in a cluster is left up to the caller. +// At the moment prepareWorkflow requires the workflow have a hardwareRef and the object exists. +func (s *state) prepareWorkflow(ctx context.Context) (reconcile.Result, error) { + // handle bootoptions + // 1. Handle toggling allowPXE in a hardware object if toggleAllowNetboot is true. 
+ if s.workflow.Spec.BootOptions.ToggleAllowNetboot && !s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue { + journal.Log(ctx, "toggling allowPXE true") + if err := s.toggleHardware(ctx, true); err != nil { + return reconcile.Result{}, err + } + } + + // 2. Handle booting scenarios. + switch s.workflow.Spec.BootOptions.BootMode { + case v1alpha1.BootModeNetboot: + name := jobName(fmt.Sprintf("%s-%s", jobNameNetboot, s.workflow.GetName())) + if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { + journal.Log(ctx, "boot mode netboot") + hw, err := hardwareFrom(ctx, s.client, s.workflow) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get hardware") + } + efiBoot := func() bool { + for _, iface := range hw.Spec.Interfaces { + if iface.DHCP != nil && iface.DHCP.UEFI { + return true + } + } + return false + }() + actions := []rufio.Action{ + { + PowerAction: rufio.PowerHardOff.Ptr(), + }, + { + OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{ + Devices: []rufio.BootDevice{ + rufio.PXE, + }, + EFIBoot: efiBoot, + }, + }, + { + PowerAction: rufio.PowerOn.Ptr(), + }, + } + + r, err := s.handleJob(ctx, actions, name) + if s.workflow.Status.BootOptions.Jobs[name.String()].Complete && s.workflow.Status.State == v1alpha1.WorkflowStatePreparing { + s.workflow.Status.State = v1alpha1.WorkflowStatePending + } + return r, err + } + case v1alpha1.BootModeISO: + name := jobName(fmt.Sprintf("%s-%s", jobNameISOMount, s.workflow.GetName())) + if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { + journal.Log(ctx, "boot mode iso") + if s.workflow.Spec.BootOptions.ISOURL == "" { + return reconcile.Result{}, errors.New("iso url must be a valid url") + } + hw, err := hardwareFrom(ctx, s.client, s.workflow) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to get hardware") + } + efiBoot := func() bool { + for _, 
iface := range hw.Spec.Interfaces { + if iface.DHCP != nil && iface.DHCP.UEFI { + return true + } + } + return false + }() + actions := []rufio.Action{ + { + PowerAction: rufio.PowerHardOff.Ptr(), + }, + { + VirtualMediaAction: &rufio.VirtualMediaAction{ + MediaURL: "", // empty to unmount/eject the media + Kind: rufio.VirtualMediaCD, + }, + }, + { + VirtualMediaAction: &rufio.VirtualMediaAction{ + MediaURL: s.workflow.Spec.BootOptions.ISOURL, + Kind: rufio.VirtualMediaCD, + }, + }, + { + OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{ + Devices: []rufio.BootDevice{ + rufio.CDROM, + }, + EFIBoot: efiBoot, + }, + }, + { + PowerAction: rufio.PowerOn.Ptr(), + }, + } + + r, err := s.handleJob(ctx, actions, name) + if s.workflow.Status.BootOptions.Jobs[name.String()].Complete && s.workflow.Status.State == v1alpha1.WorkflowStatePreparing { + s.workflow.Status.State = v1alpha1.WorkflowStatePending + } + return r, err + } + } + + return reconcile.Result{}, nil +} diff --git a/internal/deprecated/workflow/pre_test.go b/internal/deprecated/workflow/pre_test.go new file mode 100644 index 000000000..3f2f7814a --- /dev/null +++ b/internal/deprecated/workflow/pre_test.go @@ -0,0 +1,261 @@ +package workflow + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + rufio "github.com/tinkerbell/rufio/api/v1alpha1" + "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" + "github.com/tinkerbell/tink/internal/ptr" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func TestPrepareWorkflow(t *testing.T) { + tests := map[string]struct { + wantResult reconcile.Result + wantError bool + hardware *v1alpha1.Hardware + wantHardware *v1alpha1.Hardware + workflow 
*v1alpha1.Workflow + wantWorkflow *v1alpha1.Workflow + job *rufio.Job + }{ + "nothing to do": { + wantResult: reconcile.Result{}, + hardware: &v1alpha1.Hardware{}, + wantHardware: &v1alpha1.Hardware{}, + workflow: &v1alpha1.Workflow{}, + wantWorkflow: &v1alpha1.Workflow{}, + }, + "toggle allowPXE": { + wantResult: reconcile.Result{}, + hardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + Interfaces: []v1alpha1.Interface{ + { + Netboot: &v1alpha1.Netboot{ + AllowPXE: ptr.Bool(false), + }, + }, + }, + }, + }, + wantHardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + Interfaces: []v1alpha1.Interface{ + { + Netboot: &v1alpha1.Netboot{ + AllowPXE: ptr.Bool(true), + }, + }, + }, + }, + }, + workflow: &v1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-workflow", + Namespace: "default", + }, + Spec: v1alpha1.WorkflowSpec{ + HardwareRef: "test-hardware", + BootOptions: v1alpha1.BootOptions{ + ToggleAllowNetboot: true, + }, + }, + }, + wantWorkflow: &v1alpha1.Workflow{ + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + AllowNetboot: v1alpha1.AllowNetbootStatus{ + ToggledTrue: true, + }, + }, + Conditions: []v1alpha1.WorkflowCondition{ + { + Type: v1alpha1.ToggleAllowNetbootTrue, + Status: metav1.ConditionTrue, + Reason: "Complete", + Message: "set allowPXE to true", + }, + }, + }, + }, + }, + "boot mode netboot": { + wantResult: reconcile.Result{Requeue: true}, + hardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + wantHardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + 
}, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + workflow: &v1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-workflow", + Namespace: "default", + }, + Spec: v1alpha1.WorkflowSpec{ + HardwareRef: "test-hardware", + BootOptions: v1alpha1.BootOptions{ + BootMode: "netboot", + }, + }, + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.Workflow{ + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + fmt.Sprintf("%s-test-workflow", jobNameNetboot): {ExistingJobDeleted: true}, + }, + }, + }, + }, + }, + "boot mode iso": { + wantResult: reconcile.Result{Requeue: true}, + hardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + wantHardware: &v1alpha1.Hardware{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hardware", + Namespace: "default", + }, + Spec: v1alpha1.HardwareSpec{ + BMCRef: &v1.TypedLocalObjectReference{ + Name: "test-bmc", + Kind: "machine.bmc.tinkerbell.org", + }, + }, + }, + workflow: &v1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-workflow", + Namespace: "default", + }, + Spec: v1alpha1.WorkflowSpec{ + HardwareRef: "test-hardware", + BootOptions: v1alpha1.BootOptions{ + BootMode: "iso", + ISOURL: "http://example.com", + }, + }, + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{}, + }, + }, + }, + wantWorkflow: &v1alpha1.Workflow{ + Status: v1alpha1.WorkflowStatus{ + BootOptions: v1alpha1.BootOptionsStatus{ + Jobs: map[string]v1alpha1.JobStatus{ + fmt.Sprintf("%s-test-workflow", 
jobNameISOMount): {ExistingJobDeleted: true}, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + scheme := runtime.NewScheme() + rufio.AddToScheme(scheme) + v1alpha1.AddToScheme(scheme) + ro := []runtime.Object{} + if tc.hardware != nil { + ro = append(ro, tc.hardware) + } + if tc.workflow != nil { + ro = append(ro, tc.workflow) + } + clientBuilder := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(ro...) + if tc.job != nil { + clientBuilder.WithRuntimeObjects(tc.job) + } + s := &state{ + workflow: tc.workflow, + client: clientBuilder.Build(), + } + ctx := context.Background() + ctx = journal.New(ctx) + result, err := s.prepareWorkflow(ctx) + if (err != nil) != tc.wantError { + t.Errorf("expected error: %v, got: %v", tc.wantError, err) + } + if diff := cmp.Diff(result, tc.wantResult); diff != "" { + t.Errorf("unexpected result (-want +got):\n%s", diff) + t.Logf("journal: %v", journal.Journal(ctx)) + } + + // get the Hardware object in cluster + gotHardware := &v1alpha1.Hardware{} + if err := s.client.Get(ctx, types.NamespacedName{Name: tc.hardware.Name, Namespace: tc.hardware.Namespace}, gotHardware); err != nil { + t.Fatalf("error getting hardware: %v", err) + } + if diff := cmp.Diff(gotHardware.Spec, tc.wantHardware.Spec); diff != "" { + t.Errorf("unexpected hardware (-want +got):\n%s", diff) + for _, entry := range journal.Journal(ctx) { + t.Logf("journal: %+v", entry) + } + } + + if diff := cmp.Diff(tc.workflow.Status, tc.wantWorkflow.Status, cmpopts.IgnoreFields(v1alpha1.WorkflowCondition{}, "Time")); diff != "" { + t.Errorf("unexpected workflow status (-want +got):\n%s", diff) + for _, entry := range journal.Journal(ctx) { + t.Logf("journal: %+v", entry) + } + } + }) + } +} diff --git a/internal/deprecated/workflow/reconciler.go b/internal/deprecated/workflow/reconciler.go index 52781dc9b..5922fa669 100644 --- a/internal/deprecated/workflow/reconciler.go +++ 
b/internal/deprecated/workflow/reconciler.go @@ -6,8 +6,10 @@ import ( "fmt" "time" + "github.com/cenkalti/backoff/v4" "github.com/go-logr/logr" "github.com/tinkerbell/tink/api/v1alpha1" + "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,12 +24,18 @@ import ( type Reconciler struct { client ctrlclient.Client nowFunc func() time.Time + backoff *backoff.ExponentialBackOff } +// TODO(jacobweinstock): add functional arguments to the signature. +// TODO(jacobweinstock): write functional argument for customizing the backoff. func NewReconciler(client ctrlclient.Client) *Reconciler { return &Reconciler{ client: client, nowFunc: time.Now, + backoff: backoff.NewExponentialBackOff([]backoff.ExponentialBackOffOpts{ + backoff.WithMaxInterval(5 * time.Second), // this should keep all NextBackOff's under 10 seconds + }...), } } @@ -38,6 +46,12 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Complete(r) } +type state struct { + client ctrlclient.Client + workflow *v1alpha1.Workflow + backoff *backoff.ExponentialBackOff +} + // +kubebuilder:rbac:groups=tinkerbell.org,resources=hardware;hardware/status,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=tinkerbell.org,resources=templates;templates/status,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=tinkerbell.org,resources=workflows;workflows/status,verbs=get;list;watch;update;patch;delete @@ -45,8 +59,13 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { // Reconcile handles Workflow objects. This includes Template rendering, optional Hardware allowPXE toggling, and optional Hardware one-time netbooting. 
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + ctx = journal.New(ctx) logger := ctrl.LoggerFrom(ctx) - logger.Info("Reconciling") + defer func() { + logger.V(1).Info("Reconcile code flow journal", "journal", journal.Journal(ctx)) + }() + logger.Info("Reconcile") + journal.Log(ctx, "starting reconcile") stored := &v1alpha1.Workflow{} if err := r.client.Get(ctx, req.NamespacedName, stored); err != nil { @@ -58,78 +77,56 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco if !stored.DeletionTimestamp.IsZero() { return reconcile.Result{}, nil } + if stored.Status.BootOptions.Jobs == nil { + stored.Status.BootOptions.Jobs = make(map[string]v1alpha1.JobStatus) + } wflow := stored.DeepCopy() switch wflow.Status.State { case "": + journal.Log(ctx, "new workflow") resp, err := r.processNewWorkflow(ctx, logger, wflow) - return resp, serrors.Join(err, mergePatchsStatus(ctx, r.client, stored, wflow)) - case v1alpha1.WorkflowStateWaiting: - // make sure any existing job is deleted - if !wflow.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - rc, err := handleExistingJob(ctx, r.client, wflow) - - return rc, serrors.Join(err, mergePatchsStatus(ctx, r.client, stored, wflow)) + return resp, serrors.Join(err, mergePatchStatus(ctx, r.client, stored, wflow)) + case v1alpha1.WorkflowStatePreparing: + journal.Log(ctx, "preparing workflow") + s := &state{ + client: r.client, + workflow: wflow, + backoff: r.backoff, } + resp, err := s.prepareWorkflow(ctx) - // create a new job - if wflow.Status.BootOptions.OneTimeNetboot.UID == "" && wflow.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - rc, err := handleJobCreation(ctx, r.client, wflow) - - return rc, serrors.Join(err, mergePatchsStatus(ctx, r.client, stored, wflow)) - } - - // check if the job is complete - if !wflow.Status.BootOptions.OneTimeNetboot.Complete && wflow.Status.BootOptions.OneTimeNetboot.UID != "" && 
wflow.Status.BootOptions.OneTimeNetboot.ExistingJobDeleted { - rc, err := handleJobComplete(ctx, r.client, wflow) - - return rc, serrors.Join(err, mergePatchsStatus(ctx, r.client, stored, wflow)) - } + return resp, serrors.Join(err, mergePatchStatus(ctx, r.client, stored, s.workflow)) case v1alpha1.WorkflowStateRunning: + journal.Log(ctx, "process running workflow") r.processRunningWorkflow(wflow) - // set the current action in the status - ca := runningAction(wflow) - if ca != "" && wflow.Status.CurrentAction != ca { - wflow.Status.CurrentAction = ca + + return reconcile.Result{}, mergePatchStatus(ctx, r.client, stored, wflow) + case v1alpha1.WorkflowStatePost: + journal.Log(ctx, "post actions") + s := &state{ + client: r.client, + workflow: wflow, + backoff: r.backoff, } + rc, err := s.postActions(ctx) - return reconcile.Result{}, mergePatchsStatus(ctx, r.client, stored, wflow) - case v1alpha1.WorkflowStatePending, v1alpha1.WorkflowStateTimeout, v1alpha1.WorkflowStateFailed: + return rc, serrors.Join(err, mergePatchStatus(ctx, r.client, stored, wflow)) + case v1alpha1.WorkflowStatePending, v1alpha1.WorkflowStateTimeout, v1alpha1.WorkflowStateFailed, v1alpha1.WorkflowStateSuccess: + journal.Log(ctx, "controller will not trigger another reconcile", "state", wflow.Status.State) return reconcile.Result{}, nil - case v1alpha1.WorkflowStateSuccess: - if wflow.Spec.BootOptions.ToggleAllowNetboot && !wflow.Status.HasCondition(v1alpha1.ToggleAllowNetbootFalse, metav1.ConditionTrue) { - // handle updating hardware allowPXE to false - if err := handleHardwareAllowPXE(ctx, r.client, wflow, nil, false); err != nil { - stored.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.ToggleAllowNetbootFalse, - Status: metav1.ConditionTrue, - Reason: "Error", - Message: fmt.Sprintf("error setting Allow PXE: %v", err), - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - return reconcile.Result{}, serrors.Join(err, mergePatchsStatus(ctx, r.client, stored, wflow)) - } - 
wflow.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.ToggleAllowNetbootFalse, - Status: metav1.ConditionTrue, - Reason: "Complete", - Message: "set allowPXE to false", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - - return reconcile.Result{}, mergePatchsStatus(ctx, r.client, stored, wflow) - } } return reconcile.Result{}, nil } -// mergePatchsStatus merges an updated Workflow with an original Workflow and patches the Status object via the client (cc). -func mergePatchsStatus(ctx context.Context, cc ctrlclient.Client, original, updated *v1alpha1.Workflow) error { +// mergePatchStatus merges an updated Workflow with an original Workflow and patches the Status object via the client (cc). +func mergePatchStatus(ctx context.Context, cc ctrlclient.Client, original, updated *v1alpha1.Workflow) error { // Patch any changes, regardless of errors - if !equality.Semantic.DeepEqual(updated, original) { + if !equality.Semantic.DeepEqual(updated.Status, original.Status) { + journal.Log(ctx, "patching status") if err := cc.Status().Patch(ctx, updated, ctrlclient.MergeFrom(original)); err != nil { return fmt.Errorf("error patching status of workflow: %s, error: %w", updated.Name, err) } @@ -137,24 +134,13 @@ func mergePatchsStatus(ctx context.Context, cc ctrlclient.Client, original, upda return nil } -func runningAction(wf *v1alpha1.Workflow) string { - for _, task := range wf.Status.Tasks { - for _, action := range task.Actions { - if action.Status == v1alpha1.WorkflowStateRunning { - return action.Name - } - } - } - - return "" -} - func (r *Reconciler) processNewWorkflow(ctx context.Context, logger logr.Logger, stored *v1alpha1.Workflow) (reconcile.Result, error) { tpl := &v1alpha1.Template{} if err := r.client.Get(ctx, ctrlclient.ObjectKey{Name: stored.Spec.TemplateRef, Namespace: stored.Namespace}, tpl); err != nil { if errors.IsNotFound(err) { // Throw an error to raise awareness and take advantage of immediate requeue. 
logger.Error(err, "error getting Template object in processNewWorkflow function") + journal.Log(ctx, "template not found") stored.Status.TemplateRendering = v1alpha1.TemplateRenderingFailed stored.Status.SetCondition(v1alpha1.WorkflowCondition{ Type: v1alpha1.TemplateRenderedSuccess, @@ -184,6 +170,7 @@ func (r *Reconciler) processNewWorkflow(ctx context.Context, logger logr.Logger, err := r.client.Get(ctx, ctrlclient.ObjectKey{Name: stored.Spec.HardwareRef, Namespace: stored.Namespace}, &hardware) if ctrlclient.IgnoreNotFound(err) != nil { logger.Error(err, "error getting Hardware object in processNewWorkflow function") + journal.Log(ctx, "hardware not found") stored.Status.TemplateRendering = v1alpha1.TemplateRenderingFailed stored.Status.SetCondition(v1alpha1.WorkflowCondition{ Type: v1alpha1.TemplateRenderedSuccess, @@ -197,6 +184,7 @@ func (r *Reconciler) processNewWorkflow(ctx context.Context, logger logr.Logger, if stored.Spec.HardwareRef != "" && errors.IsNotFound(err) { logger.Error(err, "hardware not found in processNewWorkflow function") + journal.Log(ctx, "hardware not found") stored.Status.TemplateRendering = v1alpha1.TemplateRenderingFailed stored.Status.SetCondition(v1alpha1.WorkflowCondition{ Type: v1alpha1.TemplateRenderedSuccess, @@ -244,30 +232,9 @@ func (r *Reconciler) processNewWorkflow(ctx context.Context, logger logr.Logger, }) // set hardware allowPXE if requested. 
- if stored.Spec.BootOptions.ToggleAllowNetboot { - if err := handleHardwareAllowPXE(ctx, r.client, stored, &hardware, true); err != nil { - stored.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.ToggleAllowNetbootTrue, - Status: metav1.ConditionFalse, - Reason: "Error", - Message: fmt.Sprintf("error setting allowPXE to true: %v", err), - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - return reconcile.Result{}, err - } - stored.Status.SetCondition(v1alpha1.WorkflowCondition{ - Type: v1alpha1.ToggleAllowNetbootTrue, - Status: metav1.ConditionTrue, - Reason: "Complete", - Message: "set allowPXE to true", - Time: &metav1.Time{Time: metav1.Now().UTC()}, - }) - } - - // netboot the hardware if requested - if stored.Spec.BootOptions.OneTimeNetboot { - stored.Status.State = v1alpha1.WorkflowStateWaiting - return reconcile.Result{Requeue: true}, err + if stored.Spec.BootOptions.ToggleAllowNetboot || stored.Spec.BootOptions.BootMode != "" { + stored.Status.State = v1alpha1.WorkflowStatePreparing + return reconcile.Result{Requeue: true}, nil } stored.Status.State = v1alpha1.WorkflowStatePending @@ -324,6 +291,10 @@ func (r *Reconciler) processRunningWorkflow(stored *v1alpha1.Workflow) { // Mark the workflow as timed out stored.Status.State = v1alpha1.WorkflowStateTimeout } + // Update the current action in the status + if action.Status == v1alpha1.WorkflowStateRunning && stored.Status.CurrentAction != action.Name { + stored.Status.CurrentAction = action.Name + } } } } diff --git a/internal/deprecated/workflow/reconciler_test.go b/internal/deprecated/workflow/reconciler_test.go index 884ce3d3c..12b534dc9 100644 --- a/internal/deprecated/workflow/reconciler_test.go +++ b/internal/deprecated/workflow/reconciler_test.go @@ -187,7 +187,7 @@ func TestHandleHardwareAllowPXE(t *testing.T) { HardwareRef: "machine1", }, } - err := handleHardwareAllowPXE(context.Background(), fakeClient, wf, nil, tt.AllowPXE) + err := setAllowPXE(context.Background(), fakeClient, 
wf, nil, tt.AllowPXE) got := &v1alpha1.Hardware{} if err := fakeClient.Get(context.Background(), client.ObjectKeyFromObject(tt.OriginalHardware), got); err != nil { @@ -686,6 +686,7 @@ tasks: }, Status: v1alpha1.WorkflowStatus{ State: v1alpha1.WorkflowStateTimeout, + CurrentAction: "stream-debian-image", GlobalTimeout: 600, Tasks: []v1alpha1.Task{ { diff --git a/internal/server/kubernetes_api_test.go b/internal/server/kubernetes_api_test.go index ce0c8803b..b53b3fcd4 100644 --- a/internal/server/kubernetes_api_test.go +++ b/internal/server/kubernetes_api_test.go @@ -386,7 +386,7 @@ func TestModifyWorkflowState(t *testing.T) { }, want: &v1alpha1.Workflow{ Status: v1alpha1.WorkflowStatus{ - State: "STATE_SUCCESS", + State: "STATE_POST", GlobalTimeout: 600, Tasks: []v1alpha1.Task{ { diff --git a/internal/server/kubernetes_api_workflow.go b/internal/server/kubernetes_api_workflow.go index 0dc557422..af38426c8 100644 --- a/internal/server/kubernetes_api_workflow.go +++ b/internal/server/kubernetes_api_workflow.go @@ -77,12 +77,10 @@ func (s *KubernetesBackedServer) GetWorkflowContexts(req *proto.WorkflowContextR return err } for _, wf := range wflows { - // Don't serve Actions when in a v1alpha1.WorkflowStateWaiting state. + // Don't serve Actions when in a v1alpha1.WorkflowStatePreparing state. // This is to prevent the worker from starting Actions before Workflow boot options are performed. 
- if wf.Spec.BootOptions.ToggleAllowNetboot || wf.Spec.BootOptions.OneTimeNetboot { - if wf.Status.State == v1alpha1.WorkflowStateWaiting { - continue - } + if wf.Spec.BootOptions.BootMode != "" && wf.Status.State == v1alpha1.WorkflowStatePreparing { + continue } if err := stream.Send(getWorkflowContext(wf)); err != nil { return err @@ -161,7 +159,8 @@ cont: } // Mark success on last action success if wfContext.CurrentActionIndex+1 == wfContext.TotalNumberOfActions { - wf.Status.State = v1alpha1.WorkflowState(proto.State_name[int32(wfContext.CurrentActionState)]) + // Set the state to POST instead of Success to allow any post tasks to run. + wf.Status.State = v1alpha1.WorkflowStatePost } case proto.State_STATE_PENDING: // This is probably a client bug?