
Commit

update vm delete
kon-angelo committed Apr 18, 2023
1 parent eeca05c commit d853823
Showing 3 changed files with 17 additions and 50 deletions.
6 changes: 3 additions & 3 deletions pkg/azure/core.go
@@ -12,12 +12,13 @@ import (
"fmt"
"strings"

"github.com/gardener/machine-controller-manager-provider-azure/pkg/spi"
"github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/gardener/machine-controller-manager/pkg/util/provider/driver"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status"
"k8s.io/klog/v2"

"github.com/gardener/machine-controller-manager-provider-azure/pkg/spi"
)

const (
@@ -79,7 +80,6 @@ func NewAzureDriver(spi spi.SessionProviderInterface) *MachinePlugin {
// It is optionally expected by the safety controller to use an identification mechanism to map the VM created by a providerSpec.
// This could be done using tag(s)/resource-groups etc.
// This logic is used by the safety controller to delete orphan VMs which are not backed by any machine CRD.
//
func (d *MachinePlugin) CreateMachine(ctx context.Context, req *driver.CreateMachineRequest) (*driver.CreateMachineResponse, error) {
// Log messages to track request
klog.V(2).Infof("Machine creation request has been recieved for %q", req.Machine.Name)
@@ -218,8 +218,8 @@ func (d *MachinePlugin) GetMachineStatus(ctx context.Context, req *driver.GetMac
//
// RESPONSE PARAMETERS (driver.ListMachinesResponse)
// MachineList map<string,string> A map containing the keys as the MachineID and value as the MachineName
// for all machine's who where possibilly created by this ProviderSpec
//
// for all machine's who where possibilly created by this ProviderSpec
func (d *MachinePlugin) ListMachines(ctx context.Context, req *driver.ListMachinesRequest) (*driver.ListMachinesResponse, error) {
// Log messages to track start and end of request
klog.V(2).Infof("List machines request has been recieved for %q", req.MachineClass.Name)
4 changes: 2 additions & 2 deletions pkg/azure/mock/mockclient.go
@@ -99,7 +99,7 @@ func (clients *AzureDriverClients) GetClient() autorest.Client {
// return clients.deployments
// }

//PluginSPIImpl is the mock implementation of PluginSPIImpl
// PluginSPIImpl is the mock implementation of PluginSPIImpl
type PluginSPIImpl struct {
AzureProviderSpec *api.AzureProviderSpec
Secret *corev1.Secret
@@ -114,7 +114,7 @@ func NewMockPluginSPIImpl(controller *gomock.Controller) spi.SessionProviderInte
return &PluginSPIImpl{Controller: controller}
}

//Setup creates a compute service instance using the mock
// Setup creates a compute service instance using the mock
func (ms *PluginSPIImpl) Setup(secret *corev1.Secret) (spi.AzureDriverClientsInterface, error) {

if ms.azureDriverClients != nil {
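A rough usage sketch for this mock in a unit test, based only on the signatures visible in this diff (NewMockPluginSPIImpl and Setup); the package name, import paths, and test scaffolding are assumptions.

package mock

import (
	"testing"

	"github.com/golang/mock/gomock"
	corev1 "k8s.io/api/core/v1"
)

func TestSetupWithMockSPI(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()

	// NewMockPluginSPIImpl returns a spi.SessionProviderInterface backed by this mock.
	sessionProvider := NewMockPluginSPIImpl(controller)

	// Setup builds (or reuses) the mocked Azure driver clients from the secret.
	clients, err := sessionProvider.Setup(&corev1.Secret{})
	if err != nil {
		t.Fatalf("Setup returned an unexpected error: %v", err)
	}
	if clients == nil {
		t.Fatal("expected a non-nil AzureDriverClientsInterface from Setup")
	}
}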
57 changes: 12 additions & 45 deletions pkg/azure/utils.go
@@ -21,19 +21,20 @@ import (
"github.com/Azure/azure-sdk-for-go/profiles/latest/network/mgmt/network"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/to"
api "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/apis"
spi "github.com/gardener/machine-controller-manager-provider-azure/pkg/spi"
"github.com/gardener/machine-controller-manager/pkg/apis/machine"
"github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
backoff "github.com/gardener/machine-controller-manager/pkg/util/backoff"
"github.com/gardener/machine-controller-manager/pkg/util/backoff"
"github.com/gardener/machine-controller-manager/pkg/util/provider/driver"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/codes"
"github.com/gardener/machine-controller-manager/pkg/util/provider/machinecodes/status"
metrics "github.com/gardener/machine-controller-manager/pkg/util/provider/metrics"
"github.com/gardener/machine-controller-manager/pkg/util/provider/metrics"
"github.com/prometheus/client_golang/prometheus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"

api "github.com/gardener/machine-controller-manager-provider-azure/pkg/azure/apis"
"github.com/gardener/machine-controller-manager-provider-azure/pkg/spi"
)

// constant suffixes
@@ -447,7 +448,7 @@ func (d *MachinePlugin) createVMNicDisk(req *driver.CreateMachineRequest) (*comp
*imageReference.Version)

if err != nil {
//Since machine creation failed, delete any infra resources created
// Since machine creation failed, delete any infra resources created
deleteErr := d.deleteVMNicDisks(ctx, clients, resourceGroupName, vmName, nicName, diskName, dataDiskNames)
if deleteErr != nil {
klog.Errorf("Error occurred during resource clean up: %s", deleteErr)
@@ -467,7 +468,7 @@ func (d *MachinePlugin) createVMNicDisk(req *driver.CreateMachineRequest) (*comp
)

if err != nil {
//Since machine creation failed, delete any infra resources created
// Since machine creation failed, delete any infra resources created
deleteErr := d.deleteVMNicDisks(ctx, clients, resourceGroupName, vmName, nicName, diskName, dataDiskNames)
if deleteErr != nil {
klog.Errorf("Error occurred during resource clean up: %s", deleteErr)
@@ -490,7 +491,7 @@ func (d *MachinePlugin) createVMNicDisk(req *driver.CreateMachineRequest) (*comp
)

if err != nil {
//Since machine creation failed, delete any infra resources created
// Since machine creation failed, delete any infra resources created
deleteErr := d.deleteVMNicDisks(ctx, clients, resourceGroupName, vmName, nicName, diskName, dataDiskNames)
if deleteErr != nil {
klog.Errorf("Error occurred during resource clean up: %s", deleteErr)
@@ -510,7 +511,7 @@ func (d *MachinePlugin) createVMNicDisk(req *driver.CreateMachineRequest) (*comp
klog.V(3).Infof("VM creation began for %q", vmName)
VMFuture, err := clients.GetVM().CreateOrUpdate(ctx, resourceGroupName, *VMParameters.Name, VMParameters)
if err != nil {
//Since machine creation failed, delete any infra resources created
// Since machine creation failed, delete any infra resources created
deleteErr := d.deleteVMNicDisks(ctx, clients, resourceGroupName, vmName, nicName, diskName, dataDiskNames)
if deleteErr != nil {
klog.Errorf("Error occurred during resource clean up: %s", deleteErr)
@@ -553,21 +554,10 @@ func (d *MachinePlugin) createVMNicDisk(req *driver.CreateMachineRequest) (*comp
// deleteVMNicDisks deletes the VM and associated Disks and NIC
func (d *MachinePlugin) deleteVMNicDisks(ctx context.Context, clients spi.AzureDriverClientsInterface, resourceGroupName string, VMName string, nicName string, diskName string, dataDiskNames []string) error {

// We try to fetch the VM, detach its data disks and finally delete it
if vm, vmErr := clients.GetVM().Get(ctx, resourceGroupName, VMName, ""); vmErr == nil {

if detachmentErr := waitForDataDiskDetachment(ctx, clients, resourceGroupName, vm); detachmentErr != nil {
return detachmentErr
}
if deleteErr := DeleteVM(ctx, clients, resourceGroupName, VMName); deleteErr != nil {
return deleteErr
}

OnARMAPISuccess(prometheusServiceVM, "VM Get was successful for %s", *vm.Name)
} else if !NotFound(vmErr) {
// If some other error occurred, which is not 404 Not Found (the VM doesn't exist) then bubble up
return OnARMAPIErrorFail(prometheusServiceVM, vmErr, "vm.Get")
if deleteErr := DeleteVM(ctx, clients, resourceGroupName, VMName); deleteErr != nil && !NotFound(deleteErr) {
return deleteErr
}
OnARMAPISuccess(prometheusServiceVM, "VM Delete was successful for %s", VMName)

// Fetch the NIC and delete it
nicDeleter := func() error {
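The change above replaces the fetch-detach-delete sequence with a single delete call that tolerates a missing VM. A minimal sketch of that pattern, reusing the DeleteVM and NotFound helpers already present in this file (the wrapper function itself is illustrative, not part of this commit):

// deleteVMIgnoringNotFound sketches the flow introduced above: delete the VM
// directly and treat a 404 (VM already gone) as success, rather than first
// fetching the VM and detaching its data disks.
func deleteVMIgnoringNotFound(ctx context.Context, clients spi.AzureDriverClientsInterface, resourceGroupName, vmName string) error {
	if err := DeleteVM(ctx, clients, resourceGroupName, vmName); err != nil && !NotFound(err) {
		return err
	}
	// Either the VM was deleted or it never existed; both count as a clean state.
	return nil
}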
@@ -648,29 +638,6 @@ func fillUpMachineClass(azureMachineClass *v1alpha1.AzureMachineClass, machineCl
return err
}

// waitForDataDiskDetachment is a function that ensures all the data disks are detached from the VM
func waitForDataDiskDetachment(ctx context.Context, clients spi.AzureDriverClientsInterface, resourceGroupName string, vm compute.VirtualMachine) error {
klog.V(2).Infof("Data disk detachment began for %q", *vm.Name)
defer klog.V(2).Infof("Data disk detached for %q", *vm.Name)

if len(*vm.StorageProfile.DataDisks) > 0 {
// There are disks attached hence need to detach them
vm.StorageProfile.DataDisks = &[]compute.DataDisk{}

future, err := clients.GetVM().CreateOrUpdate(ctx, resourceGroupName, *vm.Name, vm)
if err != nil {
return OnARMAPIErrorFail(prometheusServiceVM, err, "Failed to CreateOrUpdate. Error Message - %s", err)
}
err = future.WaitForCompletionRef(ctx, clients.GetClient())
if err != nil {
return OnARMAPIErrorFail(prometheusServiceVM, err, "Failed to CreateOrUpdate. Error Message - %s", err)
}
OnARMAPISuccess(prometheusServiceVM, "VM CreateOrUpdate was successful for %s", *vm.Name)
}

return nil
}

// FetchAttachedVMfromNIC is a helper function to fetch the attached VM for a particular NIC
func FetchAttachedVMfromNIC(ctx context.Context, clients spi.AzureDriverClientsInterface, resourceGroupName, nicName string) (string, error) {
nic, err := clients.GetNic().Get(ctx, resourceGroupName, nicName, "")
