Commit 3bf177b

test
haijianyang committed Nov 7, 2023
1 parent 1f2e5a4 commit 3bf177b
Showing 10 changed files with 134 additions and 76 deletions.
6 changes: 3 additions & 3 deletions api/v1beta1/elfmachine_types.go
@@ -318,11 +318,11 @@ func (m *ElfMachine) GetVMDisconnectionTimestamp() *metav1.Time {
 	return nil
 }
 
-func (m *ElfMachine) RequiresGPUOrVGPUDevices() bool {
-	return m.RequiresGPUDevices() || m.RequiresVGPUDevices()
+func (m *ElfMachine) RequiresGPUDevices() bool {
+	return m.RequiresPassThroughGPUDevices() || m.RequiresVGPUDevices()
 }
 
-func (m *ElfMachine) RequiresGPUDevices() bool {
+func (m *ElfMachine) RequiresPassThroughGPUDevices() bool {
 	return len(m.Spec.GPUDevices) > 0
 }

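The rename gives the predicates a clearer hierarchy: RequiresGPUDevices is now the umbrella check for any GPU request, and the pass-through-only check moves to RequiresPassThroughGPUDevices. A minimal sketch of the resulting call graph, with simplified stand-in types rather than the real CRD definitions:

package main

import "fmt"

// Simplified stand-ins for the real ElfMachine CRD types.
type ElfMachineSpec struct {
	GPUDevices  []string // pass-through GPU requests
	VGPUDevices []string // vGPU requests
}

type ElfMachine struct{ Spec ElfMachineSpec }

// Pass-through-only check (named RequiresGPUDevices before this commit).
func (m *ElfMachine) RequiresPassThroughGPUDevices() bool { return len(m.Spec.GPUDevices) > 0 }

func (m *ElfMachine) RequiresVGPUDevices() bool { return len(m.Spec.VGPUDevices) > 0 }

// Umbrella check (named RequiresGPUOrVGPUDevices before this commit).
func (m *ElfMachine) RequiresGPUDevices() bool {
	return m.RequiresPassThroughGPUDevices() || m.RequiresVGPUDevices()
}

func main() {
	m := &ElfMachine{Spec: ElfMachineSpec{VGPUDevices: []string{"some-vgpu-type"}}}
	fmt.Println(m.RequiresGPUDevices(), m.RequiresPassThroughGPUDevices()) // true false
}
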
15 changes: 7 additions & 8 deletions controllers/elfmachine_controller.go
@@ -308,7 +308,7 @@ func (r *ElfMachineReconciler) reconcileDelete(ctx *context.MachineContext) (rec
 		// locked by the virtual machine may not be unlocked.
 		// For example, the Cluster or ElfMachine was deleted during a pause.
 		if !ctrlutil.ContainsFinalizer(ctx.ElfMachine, infrav1.MachineFinalizer) &&
-			ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
+			ctx.ElfMachine.RequiresGPUDevices() {
 			unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
 		}
 	}()
@@ -561,7 +561,7 @@ func (r *ElfMachineReconciler) reconcileVM(ctx *context.MachineContext) (*models
 		ctx.ElfMachine.SetVM(util.GetVMRef(vm))
 	} else {
 		// Duplicate VM error does not require unlocking GPU devices.
-		if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
+		if ctx.ElfMachine.RequiresGPUDevices() {
 			unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
 		}
 
@@ -907,13 +907,12 @@ func (r *ElfMachineReconciler) reconcileVMTask(ctx *context.MachineContext, vm *
 			setVMDuplicate(ctx.ElfMachine.Name)
 		}
 
-		if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
-			unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
-		}
-	case service.IsPowerOnVMTask(task) || service.IsUpdateVMTask(task):
-		if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
+		if ctx.ElfMachine.RequiresGPUDevices() {
 			unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
 		}
+	case ctx.ElfMachine.RequiresGPUDevices() &&
+		(service.IsPowerOnVMTask(task) || service.IsUpdateVMTask(task) || service.IsVMColdMigrationTask(task)):
+		unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
 	case service.IsMemoryInsufficientError(errorMessage):
 		recordElfClusterMemoryInsufficient(ctx, true)
 		message := fmt.Sprintf("Insufficient memory detected for the ELF cluster %s", ctx.ElfCluster.Spec.Cluster)
@@ -932,7 +931,7 @@ func (r *ElfMachineReconciler) reconcileVMTask(ctx *context.MachineContext, vm *
 	case models.TaskStatusSUCCESSED:
 		ctx.Logger.Info("VM task succeeded", "vmRef", vmRef, "taskRef", taskRef, "taskDescription", service.GetTowerString(task.Description))
 
-		if ctx.ElfMachine.RequiresGPUOrVGPUDevices() &&
+		if ctx.ElfMachine.RequiresGPUDevices() &&
 			(service.IsCloneVMTask(task) || service.IsPowerOnVMTask(task) || service.IsUpdateVMTask(task)) {
 			unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
 		}
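
Taken together, these hunks collapse the two failure-path unlock sites for power-on and edit tasks into one case expression that also matches the new cold-migration task. A self-contained miniature of the resulting control flow; every name below is a stand-in for the controller and service pieces, not the real APIs:

package main

import "fmt"

type task struct{ desc string }

// Stand-ins for the service.Is*Task matchers.
func isCloneVMTask(t task) bool         { return t.desc == "Create a VM" }
func isPowerOnVMTask(t task) bool       { return t.desc == "Start VM" }
func isUpdateVMTask(t task) bool        { return t.desc == "Edit VM" }
func isVMColdMigrationTask(t task) bool { return t.desc == "performing a cold migration" }

// handleFailedTask mirrors the shape of the failed-task switch after this
// commit: clone failures keep their own branch, while power-on, edit, and
// cold-migration failures share one guarded case.
func handleFailedTask(t task, requiresGPU bool) {
	switch {
	case isCloneVMTask(t):
		if requiresGPU {
			fmt.Println("unlock GPU devices after failed clone")
		}
	case requiresGPU &&
		(isPowerOnVMTask(t) || isUpdateVMTask(t) || isVMColdMigrationTask(t)):
		fmt.Println("unlock GPU devices after failed task:", t.desc)
	}
}

func main() {
	handleFailedTask(task{desc: "performing a cold migration"}, true)
}
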
13 changes: 8 additions & 5 deletions controllers/elfmachine_controller_gpu.go
@@ -44,7 +44,7 @@ import (
 //
 // The return gpudevices: the GPU devices for virtual machine.
 func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContext, preferredHostID string) (rethost *string, gpudevices []*service.GPUDeviceInfo, reterr error) {
-	if !ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
+	if !ctx.ElfMachine.RequiresGPUDevices() {
 		return pointer.String(""), nil, nil
 	}

@@ -98,7 +98,7 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex
 		gpuDeviceIDs[i] = *gpuDevices[i].ID
 	}
 	// Get GPU devices with VMs and allocation details.
-	gpuDeviceInfos, err := ctx.VMService.FindGPUDeviceInfos(gpuDeviceIDs)
+	gpuDeviceInfos, err := ctx.VMService.GetGPUDevicesAllocationInfo(gpuDeviceIDs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -144,7 +144,7 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex
 	}
 
 	var selectedGPUDeviceInfos []*service.GPUDeviceInfo
-	if ctx.ElfMachine.RequiresGPUDevices() {
+	if ctx.ElfMachine.RequiresPassThroughGPUDevices() {
 		selectedGPUDeviceInfos = selectGPUDevicesForVM(hostGPUDeviceInfos, ctx.ElfMachine.Spec.GPUDevices)
 	} else {
 		selectedGPUDeviceInfos = selectVGPUDevicesForVM(hostGPUDeviceInfos, ctx.ElfMachine.Spec.VGPUDevices)
@@ -248,7 +248,7 @@ func selectVGPUDevicesForVM(hostGPUDeviceInfos service.GPUDeviceInfos, requiredV
 
 // reconcileGPUDevices ensures that the virtual machine has the expected GPU devices.
 func (r *ElfMachineReconciler) reconcileGPUDevices(ctx *context.MachineContext, vm *models.VM) (bool, error) {
-	if !ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
+	if !ctx.ElfMachine.RequiresGPUDevices() {
 		return true, nil
 	}

@@ -364,12 +364,15 @@ func (r *ElfMachineReconciler) checkGPUsCanBeUsedForVM(ctx *context.MachineConte
 		return false, err
 	}
 
-	gpuDeviceInfos, err := ctx.VMService.FindGPUDeviceInfos(gpuDeviceIDs)
+	gpuDeviceInfos, err := ctx.VMService.GetGPUDevicesAllocationInfo(gpuDeviceIDs)
 	if err != nil {
 		return false, err
 	}
 
 	service.AggregateUnusedGPUDevicesToGPUDeviceInfos(gpuDeviceInfos, gpuDevices)
+	if gpuDeviceInfos.Len() != len(gpuDeviceIDs) {
+		return false, err
+	}
 
 	if service.HasGPUsCanNotBeUsedForVM(gpuDeviceInfos, ctx.ElfMachine) {
 		return false, nil
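
The new length guard leans on the contract that GetGPUDevicesAllocationInfo reports only GPUs already used by VMs and that AggregateUnusedGPUDevicesToGPUDeviceInfos fills in the rest; if an entry is still missing for a requested ID afterwards, the check bails out. A hedged miniature of that accounting, using a plain map as a stand-in for the real GPUDeviceInfos collection:

package main

import "fmt"

type gpuDeviceInfo struct{ id string }
type gpuDevice struct{ id string }

// aggregateUnused mirrors AggregateUnusedGPUDevicesToGPUDeviceInfos:
// add an empty info for every device the allocation query did not report.
func aggregateUnused(infos map[string]*gpuDeviceInfo, devices []gpuDevice) {
	for _, d := range devices {
		if _, ok := infos[d.id]; !ok {
			infos[d.id] = &gpuDeviceInfo{id: d.id}
		}
	}
}

func main() {
	requestedIDs := []string{"gpu-1", "gpu-2"}
	devices := []gpuDevice{{id: "gpu-1"}, {id: "gpu-2"}} // found by ID lookup
	// The allocation query returned only the GPU that a VM already uses.
	infos := map[string]*gpuDeviceInfo{"gpu-1": {id: "gpu-1"}}

	aggregateUnused(infos, devices)
	// The new guard: every requested GPU must now be accounted for,
	// otherwise checkGPUsCanBeUsedForVM reports the GPUs as unusable.
	fmt.Println(len(infos) == len(requestedIDs)) // true -> safe to continue
}
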
30 changes: 18 additions & 12 deletions controllers/elfmachine_controller_gpu_test.go
@@ -121,7 +121,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs([]string{*host.ID}, models.GpuDeviceUsagePASSTHROUGH).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gpuIDs).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(gpusDeviceInfos, nil)
 
 			machineContext := newMachineContext(ctrlContext, elfCluster, cluster, elfMachine, machine, mockVMService)
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
@@ -133,7 +133,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			Expect(gpus[0].AllocatedCount).To(Equal(int32(1)))
 
 			mockVMService.EXPECT().FindGPUDevicesByIDs(gpuIDs).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gpuIDs).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(gpusDeviceInfos, nil)
 			hostID, gpus, err = reconciler.selectHostAndGPUsForVM(machineContext, "")
 			Expect(err).NotTo(HaveOccurred())
 			Expect(*hostID).To(Equal(*host.ID))
@@ -150,7 +150,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			gpusDeviceInfo.VMs = []service.GPUDeviceVM{{ID: "id", Name: "vm"}}
 			mockVMService.EXPECT().FindGPUDevicesByIDs(gpuIDs).Return(gpusDevices, nil)
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(nil, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gpuIDs).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(gpusDeviceInfos, nil)
 			hostID, gpus, err = reconciler.selectHostAndGPUsForVM(machineContext, "")
 			Expect(err).NotTo(HaveOccurred())
 			Expect(hostID).To(BeNil())
@@ -188,7 +188,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host, preferredHost), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs(gomock.InAnyOrder([]string{*host.ID, *preferredHost.ID}), models.GpuDeviceUsagePASSTHROUGH).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gomock.InAnyOrder([]string{*gpu.ID, *preferredGPU.ID})).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gomock.InAnyOrder([]string{*gpu.ID, *preferredGPU.ID})).Return(gpusDeviceInfos, nil)
 
 			machineContext := newMachineContext(ctrlContext, elfCluster, cluster, elfMachine, machine, mockVMService)
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
@@ -246,7 +246,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs([]string{*host.ID}, models.GpuDeviceUsageVGPU).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gomock.InAnyOrder([]string{*gpu1.ID, *gpu2.ID})).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gomock.InAnyOrder([]string{*gpu1.ID, *gpu2.ID})).Return(gpusDeviceInfos, nil)
 			hostID, gpus, err = reconciler.selectHostAndGPUsForVM(machineContext, "")
 			Expect(err).NotTo(HaveOccurred())
 			Expect(hostID).NotTo(BeNil())
@@ -273,7 +273,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			}})
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs([]string{*host.ID}, models.GpuDeviceUsageVGPU).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gomock.InAnyOrder([]string{*gpu1.ID, *gpu2.ID})).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gomock.InAnyOrder([]string{*gpu1.ID, *gpu2.ID})).Return(gpusDeviceInfos, nil)
 			hostID, gpus, err = reconciler.selectHostAndGPUsForVM(machineContext, "")
 			Expect(err).NotTo(HaveOccurred())
 			Expect(hostID).To(BeNil())
@@ -372,7 +372,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md)
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 			mockVMService.EXPECT().FindGPUDevicesByIDs([]string{*gpu.ID}).Times(2).Return([]*models.GpuDevice{gpu}, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos([]string{*gpu.ID}).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo([]string{*gpu.ID}).Return(gpusDeviceInfos, nil)
 			mockVMService.EXPECT().RemoveGPUDevices(elfMachine.Status.VMRef, gomock.Len(1)).Return(nil, unexpectedError)
 
 			machineContext := newMachineContext(ctrlContext, elfCluster, cluster, elfMachine, machine, mockVMService)
@@ -384,7 +384,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			Expect(logBuffer.String()).To(ContainSubstring("GPU devices of VM are already in use, so remove and reallocate"))
 
 			gpusDeviceInfos.Get(*gpu.ID).VMs = []service.GPUDeviceVM{{Name: *vm.Name, AllocatedCount: 1}}
-			mockVMService.EXPECT().FindGPUDeviceInfos([]string{*gpu.ID}).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo([]string{*gpu.ID}).Return(gpusDeviceInfos, nil)
 			ok, err = reconciler.reconcileGPUDevices(machineContext, vm)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(ok).To(BeTrue())
@@ -417,7 +417,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Times(2).Return(service.NewHosts(host), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs([]string{*host.ID}, models.GpuDeviceUsagePASSTHROUGH).Times(2).Return([]*models.GpuDevice{gpu}, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos([]string{*gpu.ID}).Times(2).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo([]string{*gpu.ID}).Times(2).Return(gpusDeviceInfos, nil)
 			mockVMService.EXPECT().Migrate(*vm.ID, *host.ID).Return(withTaskVM, nil)
 
 			machineContext := newMachineContext(ctrlContext, elfCluster, cluster, elfMachine, machine, mockVMService)
@@ -458,7 +458,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 			mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Times(2).Return(service.NewHosts(host), nil)
 			mockVMService.EXPECT().FindGPUDevicesByHostIDs([]string{*host.ID}, models.GpuDeviceUsagePASSTHROUGH).Times(2).Return([]*models.GpuDevice{gpu}, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos([]string{*gpu.ID}).Times(2).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo([]string{*gpu.ID}).Times(2).Return(gpusDeviceInfos, nil)
 			mockVMService.EXPECT().AddGPUDevices(elfMachine.Status.VMRef, gomock.Any()).Return(task, nil)
 
 			machineContext := newMachineContext(ctrlContext, elfCluster, cluster, elfMachine, machine, mockVMService)
@@ -590,7 +590,13 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 			Expect(ok).To(BeFalse())
 
 			mockVMService.EXPECT().FindGPUDevicesByIDs(gpuIDs).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gpuIDs).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(service.NewGPUDeviceInfos(), nil)
+			ok, err = reconciler.checkGPUsCanBeUsedForVM(machineContext, gpuIDs)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(ok).To(BeTrue())
+
+			mockVMService.EXPECT().FindGPUDevicesByIDs(gpuIDs).Return(gpusDevices, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(gpusDeviceInfos, nil)
 			ok, err = reconciler.checkGPUsCanBeUsedForVM(machineContext, gpuIDs)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(ok).To(BeTrue())
@@ -599,7 +605,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() {
 				VMs: []service.GPUDeviceVM{{ID: "vm1", Name: "vm1"}},
 			})
 			mockVMService.EXPECT().FindGPUDevicesByIDs(gpuIDs).Return(gpusDevices, nil)
-			mockVMService.EXPECT().FindGPUDeviceInfos(gpuIDs).Return(gpusDeviceInfos, nil)
+			mockVMService.EXPECT().GetGPUDevicesAllocationInfo(gpuIDs).Return(gpusDeviceInfos, nil)
 			ok, err = reconciler.checkGPUsCanBeUsedForVM(machineContext, gpuIDs)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(ok).To(BeFalse())
1 change: 1 addition & 0 deletions controllers/elfmachine_controller_test.go
@@ -3121,6 +3121,7 @@ var _ = Describe("ElfMachineReconciler", func() {
 			{"Create a VM", models.TaskStatusFAILED},
 			{"Start VM", models.TaskStatusFAILED},
 			{"Edit VM", models.TaskStatusFAILED},
+			{"performing a cold migration", models.TaskStatusFAILED},
 			{"Create a VM", models.TaskStatusSUCCESSED},
 			{"Start VM", models.TaskStatusSUCCESSED},
 			{"Edit VM", models.TaskStatusSUCCESSED},
30 changes: 15 additions & 15 deletions pkg/service/mock_services/vm_mock.go

Generated mock file; the diff is not rendered by default.

7 changes: 7 additions & 0 deletions pkg/service/types.go
@@ -45,6 +45,13 @@ func (g *GPUDeviceInfo) GetVMCount() int {
 	return len(g.VMs)
 }
 
+func (g *GPUDeviceInfo) FirstVMIs(vm string) bool {
+	if len(g.VMs) == 0 {
+		return false
+	}
+	return g.VMs[0].ID == vm || g.VMs[0].Name == vm
+}
+
 func (g *GPUDeviceInfo) ContainsVM(vm string) bool {
 	for i := 0; i < len(g.VMs); i++ {
 		if g.VMs[i].ID == vm || g.VMs[i].Name == vm {
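
FirstVMIs is stricter than the existing ContainsVM: it matches only when the given VM heads the allocation list, which is the condition the pass-through branch of HasGPUsCanNotBeUsedForVM now checks. A small self-contained illustration; the two methods are copied from the diff, everything around them is trimmed:

package main

import "fmt"

type GPUDeviceVM struct{ ID, Name string }

type GPUDeviceInfo struct{ VMs []GPUDeviceVM }

func (g *GPUDeviceInfo) FirstVMIs(vm string) bool {
	if len(g.VMs) == 0 {
		return false
	}
	return g.VMs[0].ID == vm || g.VMs[0].Name == vm
}

func (g *GPUDeviceInfo) ContainsVM(vm string) bool {
	for i := 0; i < len(g.VMs); i++ {
		if g.VMs[i].ID == vm || g.VMs[i].Name == vm {
			return true
		}
	}
	return false
}

func main() {
	g := &GPUDeviceInfo{VMs: []GPUDeviceVM{{ID: "vm-a", Name: "vm-a"}, {ID: "vm-b", Name: "vm-b"}}}
	fmt.Println(g.ContainsVM("vm-b")) // true: vm-b holds or awaits an allocation
	fmt.Println(g.FirstVMIs("vm-b"))  // false: vm-b is not first in line
}
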
20 changes: 14 additions & 6 deletions pkg/service/util.go
@@ -193,6 +193,10 @@ func IsUpdateVMTask(task *models.Task) bool {
 	return strings.Contains(GetTowerString(task.Description), "Edit VM")
 }
 
+func IsVMColdMigrationTask(task *models.Task) bool {
+	return strings.Contains(GetTowerString(task.Description), "performing a cold migration")
+}
+
 func IsVMMigrationTask(task *models.Task) bool {
 	return strings.Contains(GetTowerString(task.Description), "performing a live migration")
 }
@@ -204,17 +208,21 @@ func IsPlacementGroupTask(task *models.Task) bool {
 // HasGPUsCanNotBeUsedForVM returns whether the specified GPUs contains GPU
 // that cannot be used by the specified VM.
 func HasGPUsCanNotBeUsedForVM(gpuDeviceInfos GPUDeviceInfos, elfMachine *infrav1.ElfMachine) bool {
-	if elfMachine.RequiresGPUDevices() {
+	if elfMachine.RequiresPassThroughGPUDevices() {
 		for gpuID := range gpuDeviceInfos {
 			gpuInfo := gpuDeviceInfos[gpuID]
-			if gpuInfo.GetVMCount() > 1 || (gpuInfo.GetVMCount() == 1 && !gpuInfo.ContainsVM(elfMachine.Name)) {
+			if gpuInfo.GetVMCount() >= 1 && !gpuInfo.FirstVMIs(elfMachine.Name) {
 				return true
 			}
 		}
 
 		return false
 	}
 
+	if gpuDeviceInfos.Len() == 0 {
+		return false
+	}
+
 	gpuCountUsedByVM := 0
 	availableCountMap := make(map[string]int32)
 	for gpuID := range gpuDeviceInfos {
@@ -224,7 +232,7 @@ func HasGPUsCanNotBeUsedForVM(gpuDeviceInfos GPUDeviceInfos, elfMachine *infrav1
 			gpuCountUsedByVM += 1
 		}
 
-		if count, ok := availableCountMap[gpuInfo.ID]; ok {
+		if count, ok := availableCountMap[gpuInfo.VGPUType]; ok {
 			availableCountMap[gpuInfo.VGPUType] = count + gpuInfo.AvailableCount
 		} else {
 			availableCountMap[gpuInfo.VGPUType] = gpuInfo.AvailableCount
@@ -247,9 +255,9 @@
 
 // AggregateUnusedGPUDevicesToGPUDeviceInfos selects the GPU device
 // that gpuDeviceInfos does not have from the specified GPU devices and add to it.
-// It should be used in conjunction with FindGPUDeviceInfos.
+// It should be used in conjunction with GetGPUDevicesAllocationInfo.
 //
-// FindGPUDeviceInfos only returns the GPUs that has been used by the virtual machine,
+// GetGPUDevicesAllocationInfo only returns the GPUs that has been used by the virtual machine,
 // so need to aggregate the unused GPUs.
 func AggregateUnusedGPUDevicesToGPUDeviceInfos(gpuDeviceInfos GPUDeviceInfos, gpuDevices []*models.GpuDevice) {
 	for i := 0; i < len(gpuDevices); i++ {
@@ -275,7 +283,7 @@ func AggregateUnusedGPUDevicesToGPUDeviceInfos(gpuDeviceInfos GPUDeviceInfos, gp
 }
 
 // ConvertVMGpuInfosToGPUDeviceInfos Converts Tower's VMGpuInfo type to GPUDeviceInfos.
-// It should be used in conjunction with FindGPUDeviceInfos.
+// It should be used in conjunction with GetGPUDevicesAllocationInfo.
 //
 // Tower does not provide API to obtain the detailes of the VM allocated by the GPU Device.
 // So we need to get GPUDeviceInfos reversely through VMGpuInfo.
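
The one-line fix in HasGPUsCanNotBeUsedForVM above changes the map lookup key from the device ID to the vGPU type. Keyed by ID, the lookup in a map whose keys are vGPU types never hit, so each device overwrote the running total for its type instead of adding to it. A worked miniature of both behaviors; the struct is reduced to the fields involved and the vGPU type string is illustrative:

package main

import "fmt"

// Reduced stand-in for service.GPUDeviceInfo: only the fields the fix touches.
type GPUDeviceInfo struct {
	ID             string
	VGPUType       string
	AvailableCount int32
}

func sumAvailable(infos []GPUDeviceInfo, lookupByID bool) map[string]int32 {
	availableCountMap := make(map[string]int32)
	for _, gpuInfo := range infos {
		key := gpuInfo.VGPUType
		if lookupByID {
			key = gpuInfo.ID // the old, buggy lookup key
		}
		if count, ok := availableCountMap[key]; ok {
			availableCountMap[gpuInfo.VGPUType] = count + gpuInfo.AvailableCount
		} else {
			availableCountMap[gpuInfo.VGPUType] = gpuInfo.AvailableCount
		}
	}
	return availableCountMap
}

func main() {
	infos := []GPUDeviceInfo{
		{ID: "gpu-1", VGPUType: "V100-4C", AvailableCount: 2},
		{ID: "gpu-2", VGPUType: "V100-4C", AvailableCount: 3},
	}
	fmt.Println(sumAvailable(infos, true)["V100-4C"])  // 3: each device overwrote the total
	fmt.Println(sumAvailable(infos, false)["V100-4C"]) // 5: counts accumulate per vGPU type
}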