diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 8f2e1873afb..07b4b6b3bdb 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -7882,6 +7882,7 @@ def test_init_instance_for_partial_deletion(self):
         admin_context = context.get_admin_context()
         instance = objects.Instance(admin_context)
         instance.id = 1
+        instance.uuid = uuids.fake
         instance.vm_state = vm_states.DELETED
         instance.deleted = False
         instance.host = self.compute.host
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 67b3f544ffa..e5b5e94d831 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -79,6 +79,23 @@
 fake_host_list = [mock.sentinel.host1]
 
 
+class _PropertiesChanged(object):
+    def __init__(self, obj, properties):
+        self._obj = obj
+        self._properties = properties
+
+    def __eq__(self, other):
+        if self._obj is not other:
+            return False
+        try:
+            for key, value in six.iteritems(self._properties):
+                if getattr(other, key) != value:
+                    return False
+            return True
+        except AttributeError:
+            return False
+
+
 @ddt.ddt
 class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def setUp(self):
@@ -1193,7 +1210,7 @@ def test_init_instance_failed_resume_sets_error(self, mock_set_inst,
                                   power_state.SHUTDOWN)
         mock_get_inst.return_value = 'fake-bdm'
         mock_resume.side_effect = test.TestingException
-        self.compute._init_instance('fake-context', instance)
+        self.compute._init_instance(self.context, instance)
         mock_get_power.assert_has_calls([mock.call(mock.ANY, instance),
                                          mock.call(mock.ANY, instance)])
         mock_plug.assert_called_once_with(instance, mock.ANY)
@@ -1271,7 +1288,7 @@ def test_init_instance_complete_partial_deletion_raises_exception(
         with mock.patch.object(self.compute,
                                '_complete_partial_deletion') as mock_deletion:
             mock_deletion.side_effect = test.TestingException()
-            self.compute._init_instance(self, instance)
+            self.compute._init_instance(self.context, instance)
             msg = u'Failed to complete a deletion'
             mock_log.exception.assert_called_once_with(msg, instance=instance)
@@ -2148,8 +2165,10 @@ def save(self):
                 pass
 
         def _fake_get(ctx, filter, expected_attrs, use_slave):
+            changed_ctx = _PropertiesChanged(ctx, {'read_deleted': 'yes'})
+
             mock_get.assert_called_once_with(
-                {'read_deleted': 'yes'},
+                changed_ctx,
                 {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
                  'cleaned': False},
                 expected_attrs=['system_metadata'],
@@ -2163,7 +2182,7 @@ def _fake_get(ctx, filter, expected_attrs, use_slave):
 
         mock_get.side_effect = _fake_get
         mock_delete.side_effect = [True, False]
-        self.compute._run_pending_deletes({})
+        self.compute._run_pending_deletes(self.context)
         self.assertFalse(a.cleaned)
         self.assertEqual('100', a.system_metadata['clean_attempts'])
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index d8f65563720..9567258ad3d 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -324,7 +324,7 @@ def get_console_output(self, context, instance):
         path = os.path.join(CONF.vmware.serial_log_dir, fname)
         if not os.path.exists(path):
             LOG.warning('The console log is missing. Check your VSPC '
-                        'configuration', instance=instance)
+                        'configuration')
             return b""
         read_log_data, remaining = nova.privsep.path.last_bytes(
             path, MAX_CONSOLE_BYTES)
@@ -526,8 +526,7 @@ def detach_volume(self, context, connection_info, instance, mountpoint,
         """Detach volume storage to VM instance."""
         if not self._vmops.is_instance_in_resource_pool(instance):
             LOG.debug("Not detaching %s, vm is in different cluster",
-                      connection_info["volume_id"],
-                      instance=instance)
+                      connection_info["volume_id"])
             return True
         # NOTE(claudiub): if context parameter is to be used in the future,
         # the _detach_instance_volumes method will have to be updated as well.
@@ -570,15 +569,13 @@ def _detach_instance_volumes(self, instance, block_device_info):
                                                   disk.get('device_name'))
                 except exception.DiskNotFound:
                     LOG.warning('The volume %s does not exist!',
-                                disk.get('device_name'),
-                                instance=instance)
+                                disk.get('device_name'))
                 except Exception as e:
                     with excutils.save_and_reraise_exception():
                         LOG.error("Failed to detach %(device_name)s. "
                                   "Exception: %(exc)s",
                                   {'device_name': disk.get('device_name'),
-                                   'exc': e},
-                                  instance=instance)
+                                   'exc': e})
 
     def destroy(self, context, instance, network_info, block_device_info=None,
                 destroy_disks=True):
@@ -602,8 +599,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
         except (vexc.ManagedObjectNotFoundException,
                 exception.InstanceNotFound):
-            LOG.warning('Instance does not exists. Proceeding to '
-                        'delete instance properties on datastore',
-                        instance=instance)
+            LOG.warning('Instance does not exist. Proceeding to '
+                        'delete instance properties on datastore')
         self._vmops.destroy(context, instance, destroy_disks)
 
     def pause(self, instance):
@@ -849,7 +845,7 @@ def _pre_live_migration(self, context, instance, block_device_info,
         if hasattr(result, 'drsFault'):
             LOG.error("Placement Error: %s",
                       vim_util.serialize_object(
-                          result.drsFault), instance=instance)
+                          result.drsFault))
 
         if (not hasattr(result, 'recommendations')
                 or not result.recommendations):
@@ -904,12 +900,12 @@ def live_migration(self, context, instance, dest,
         """Live migration of an instance to another host."""
         if not migrate_data:
             LOG.error("live_migration() called without migration_data"
-                      " - cannot continue operations", instance=instance)
+                      " - cannot continue operations")
             recover_method(context, instance, dest, migrate_data)
             raise ValueError("Missing migrate_data")
 
         if migrate_data.instance_already_migrated:
-            LOG.info("Recovering migration", instance=instance)
+            LOG.info("Recovering migration")
             post_method(context, instance, dest, block_migration,
                         migrate_data)
             return
@@ -931,16 +927,15 @@
                 required_volume_attributes)
             self._set_vif_infos(migrate_data, dest_session)
             self._vmops.live_migration(instance, migrate_data, volumes)
-            LOG.info("Migration operation completed", instance=instance)
+            LOG.info("Migration operation completed")
             post_method(context, instance, dest, block_migration, migrate_data)
         except Exception:
-            LOG.exception("Failed due to an exception", instance=instance)
+            LOG.exception("Failed due to an exception")
             with excutils.save_and_reraise_exception():
                 # We are still in the task-state migrating, so cannot
                 # recover the DRS settings. We rely on the sync to do that
                 LOG.debug("Calling live migration recover_method "
-                          "for instance: %s", instance["name"],
-                          instance=instance)
+                          "for instance: %s", instance["name"])
                 recover_method(context, instance, dest, migrate_data)
 
     def _get_volume_mappings(self, context, instance):
@@ -965,7 +960,7 @@
                 message = ("Could not parse connection_info for volume {}."
                            " Reason: {}"
                            ).format(bdm.volume_id, e)
-                LOG.warning(message, instance=instance)
+                LOG.warning(message)
 
         # Normalize the datastore reference
         # As it depends on the caller, if actually need the
@@ -1017,7 +1012,7 @@ def rollback_live_migration_at_destination(self, context, instance,
                                                migrate_data=None):
         """Clean up destination node after a failed live migration."""
         LOG.info("rollback_live_migration_at_destination %s",
-                 block_device_info, instance=instance)
+                 block_device_info)
 
         if not migrate_data.is_same_vcenter:
             self._volumeops.delete_shadow_vms(block_device_info, instance)
@@ -1027,8 +1022,7 @@ def _error_out_instance_on_exception(self, instance, message):
             yield
         except Exception as error:
             LOG.exception("Failed to %s, setting to ERROR state",
-                          message,
-                          instance=instance, error=error)
+                          message, error=error)
             instance.vm_state = vm_states.ERROR
             instance.save()
 
@@ -1065,7 +1059,7 @@ def post_live_migration_at_destination(self, context, instance,
         with self._error_out_instance_on_exception(instance,
                                                    "fixup shadow vms"):
             volumes = self._get_volume_mappings(context, instance)
-            LOG.debug("Fixing shadow vms %s", volumes, instance=instance)
+            LOG.debug("Fixing shadow vms %s", volumes)
             self._volumeops.fixup_shadow_vms(instance, volumes)
 
     def ensure_filtering_rules_for_instance(self, instance, network_info):
diff --git a/nova/virt/vmwareapi/images.py b/nova/virt/vmwareapi/images.py
index dac18c3c5fe..df13b9b7bcc 100644
--- a/nova/virt/vmwareapi/images.py
+++ b/nova/virt/vmwareapi/images.py
@@ -227,8 +227,7 @@ def image_transfer(read_handle, write_handle):
 
 
 def upload_iso_to_datastore(iso_path, instance, **kwargs):
-    LOG.debug("Uploading iso %s to datastore", iso_path,
-              instance=instance)
+    LOG.debug("Uploading iso %s to datastore", iso_path)
     with open(iso_path, 'r') as iso_file:
         write_file_handle = rw_handles.FileWriteHandle(
             kwargs.get("host"),
@@ -248,8 +247,7 @@ def upload_iso_to_datastore(iso_path, instance, **kwargs):
             data = iso_file.read(block_size)
         write_file_handle.close()
 
-    LOG.debug("Uploaded iso %s to datastore", iso_path,
-              instance=instance)
+    LOG.debug("Uploaded iso %s to datastore", iso_path)
 
 
 def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
@@ -259,8 +257,7 @@ def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
     LOG.debug("Downloading image file data %(image_ref)s to the "
               "data store %(data_store_name)s",
               {'image_ref': image_ref,
-               'data_store_name': ds_name},
-              instance=instance)
+               'data_store_name': ds_name})
 
     metadata = IMAGE_API.get(context, image_ref)
     file_size = int(metadata['size'])
@@ -274,8 +271,7 @@ def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
               "%(data_store_name)s",
               {'image_ref': image_ref,
                'upload_name': 'n/a' if file_path is None else file_path,
-               'data_store_name': 'n/a' if ds_name is None else ds_name},
-              instance=instance)
+               'data_store_name': 'n/a' if ds_name is None else ds_name})
 
 
 def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
@@ -350,8 +346,7 @@ def fetch_image_stream_optimized(context, instance, session, vm_name,
     image_ref = image_id if image_id else instance.image_ref
     LOG.debug("Downloading image file data %(image_ref)s to the ESX "
               "as VM named '%(vm_name)s'",
-              {'image_ref': image_ref, 'vm_name': vm_name},
-              instance=instance)
+              {'image_ref': image_ref, 'vm_name': vm_name})
 
     metadata = IMAGE_API.get(context, image_ref)
     file_size = int(metadata['size'])
@@ -367,7 +362,7 @@ def fetch_image_stream_optimized(context, instance, session, vm_name,
                                                   file_size)
 
     LOG.info("Downloaded image file data %(image_ref)s",
-             {'image_ref': instance.image_ref}, instance=instance)
+             {'image_ref': instance.image_ref})
     vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
     vm_util.mark_vm_as_template(session, instance, imported_vm_ref)
     return vmdk.capacity_in_bytes, vmdk.path
@@ -487,8 +482,7 @@ def fetch_image_ova(context, instance, session, vm_name, ds_name,
         image_ref = instance.image_ref
         LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
                   "as VM named '%(vm_name)s'",
-                  {'image_ref': image_ref, 'vm_name': vm_name},
-                  instance=instance)
+                  {'image_ref': image_ref, 'vm_name': vm_name})
 
         metadata = IMAGE_API.get(context, image_ref)
         file_size = int(metadata['size'])
@@ -515,7 +509,7 @@ def fetch_image_ova(context, instance, session, vm_name, ds_name,
                                                       file_size)
 
             LOG.info("Downloaded OVA image file %(image_ref)s",
-                     {'image_ref': instance.image_ref}, instance=instance)
+                     {'image_ref': instance.image_ref})
 
             vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
 
@@ -529,7 +523,7 @@
 def upload_image_stream_optimized(context, image_id, instance, session,
                                   vm, vmdk_size):
     """Upload the snapshotted vm disk file to Glance image server."""
-    LOG.debug("Uploading image %s", image_id, instance=instance)
+    LOG.debug("Uploading image %s", image_id)
     metadata = IMAGE_API.get(context, image_id)
 
     read_handle = rw_handles.VmdkReadHandle(session,
@@ -560,5 +554,4 @@ def upload_image_stream_optimized(context, image_id, instance, session,
         updater.stop()
         read_handle.close()
 
-    LOG.debug("Uploaded image %s to the Glance image server", image_id,
-              instance=instance)
+    LOG.debug("Uploaded image %s to the Glance image server", image_id)
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 620d4597310..9909bb414c5 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -1822,7 +1822,7 @@ def get_vmdk_adapter_type(adapter_type):
 
 def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
     """Create VM on ESX host."""
-    LOG.debug("Creating VM on the ESX host", instance=instance)
+    LOG.debug("Creating VM on the ESX host")
     vm_create_task = session._call_method(
         session.vim,
         "CreateVM_Task", vm_folder,
@@ -1844,7 +1844,7 @@ def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
                       '\'%(ostype)s\'. An invalid os type may be '
                       'one cause of this instance creation failure',
                       {'ostype': config_spec.guestId})
-    LOG.debug("Created VM on the ESX host", instance=instance)
+    LOG.debug("Created VM on the ESX host")
     return task_info.result
 
 
@@ -1852,11 +1852,11 @@ def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
 def _destroy_vm(session, instance, vm_ref=None):
     if not vm_ref:
         vm_ref = get_vm_ref(session, instance)
-    LOG.debug("Destroying the VM", instance=instance)
+    LOG.debug("Destroying the VM")
     destroy_task = session._call_method(session.vim, "Destroy_Task",
                                         vm_ref)
     session._wait_for_task(destroy_task)
-    LOG.info("Destroyed the VM", instance=instance)
+    LOG.info("Destroyed the VM")
 
 
 def destroy_vm(session, instance, vm_ref=None):
@@ -1865,13 +1865,13 @@ def destroy_vm(session, instance, vm_ref=None):
         return _destroy_vm(session, instance, vm_ref=vm_ref)
     except vexc.VimFaultException as e:
         with excutils.save_and_reraise_exception() as ctx:
-            LOG.exception(_('Destroy VM failed'), instance=instance)
+            LOG.exception(_('Destroy VM failed'))
             # we need the `InvalidArgument` fault to bubble out of this
             # function so it can be acted upon on higher levels
            if 'InvalidArgument' not in e.fault_list:
                 ctx.reraise = False
     except Exception:
-        LOG.exception(_('Destroy VM failed'), instance=instance)
+        LOG.exception(_('Destroy VM failed'))
 
 
 def mark_vm_as_template(session, instance, vm_ref=None):
@@ -1879,11 +1879,11 @@
     try:
         if not vm_ref:
             vm_ref = get_vm_ref(session, instance)
-        LOG.debug("Marking the VM as template", instance=instance)
+        LOG.debug("Marking the VM as template")
         session._call_method(session.vim, "MarkAsTemplate", vm_ref)
-        LOG.info("Marked the VM as template", instance=instance)
+        LOG.info("Marked the VM as template")
     except Exception:
-        LOG.exception(_('Mark VM as template failed'), instance=instance)
+        LOG.exception(_('Mark VM as template failed'))
 
 
 def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
@@ -1962,15 +1962,15 @@ def power_on_instance(session, instance, vm_ref=None):
     if vm_ref is None:
         vm_ref = get_vm_ref(session, instance)
 
-    LOG.debug("Powering on the VM", instance=instance)
+    LOG.debug("Powering on the VM")
     try:
         poweron_task = session._call_method(
                                     session.vim,
                                     "PowerOnVM_Task", vm_ref)
         session._wait_for_task(poweron_task)
-        LOG.debug("Powered on the VM", instance=instance)
+        LOG.debug("Powered on the VM")
     except vexc.InvalidPowerStateException:
-        LOG.debug("VM already powered on", instance=instance)
+        LOG.debug("VM already powered on")
 
 
 def _get_vm_port_indices(session, vm_ref):
@@ -2023,14 +2023,14 @@ def power_off_instance(session, instance, vm_ref=None):
     if vm_ref is None:
         vm_ref = get_vm_ref(session, instance)
 
-    LOG.debug("Powering off the VM", instance=instance)
+    LOG.debug("Powering off the VM")
     try:
         poweroff_task = session._call_method(session.vim,
                                              "PowerOffVM_Task", vm_ref)
         session._wait_for_task(poweroff_task)
-        LOG.debug("Powered off the VM", instance=instance)
+        LOG.debug("Powered off the VM")
     except vexc.InvalidPowerStateException:
-        LOG.debug("VM already powered off", instance=instance)
+        LOG.debug("VM already powered off")
 
 
 def find_rescue_device(hardware_devices, instance):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index c88917a0a54..9f684c711f5 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -49,7 +49,7 @@
 from nova.console import type as ctype
 from nova import context as nova_context
 from nova import exception
-from nova.i18n import _, _LI
+from nova.i18n import _
 from nova import network
 from nova import objects
 from nova.objects import fields
@@ -178,8 +178,7 @@ def _get_base_folder(self):
 
     def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
         service_content = self._session.vim.service_content
-        LOG.debug("Extending root virtual disk to %s", requested_size,
-                  instance=instance)
+        LOG.debug("Extending root virtual disk to %s", requested_size)
         vmdk_extend_task = self._session._call_method(
             self._session.vim,
             "ExtendVirtualDisk_Task",
@@ -193,14 +192,14 @@
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 LOG.error('Extending virtual disk failed with error: %s',
-                          e, instance=instance)
+                          e)
                 # Clean up files created during the extend operation
                 files = [name.replace(".vmdk", "-flat.vmdk"), name]
                 for file in files:
                     ds_path = ds_obj.DatastorePath.parse(file)
                     self._delete_datastore_file(ds_path, dc_ref)
 
-        LOG.debug("Extended root virtual disk", instance=instance)
+        LOG.debug("Extended root virtual disk")
 
     def _delete_datastore_file(self, datastore_path, dc_ref):
         try:
@@ -312,7 +311,7 @@ def _get_vm_config_spec(self, instance, image_info,
                                                 utils.is_neutron(),
                                                 image_info.vif_model,
                                                 network_info)
-        LOG.debug('Instance VIF info %s', vif_infos, instance=instance)
+        LOG.debug('Instance VIF info %s', vif_infos)
 
         if extra_specs.storage_policy:
             profile_spec = vm_util.get_storage_profile_spec(
@@ -441,8 +440,7 @@ def _fetch_vsphere_image(self, context, vi, image_ds_loc):
                   "%(datastore_name)s",
                   {'image_id': vi.ii.image_id,
                    'file_path': image_ds_loc,
-                   'datastore_name': vi.datastore.name},
-                  instance=vi.instance)
+                   'datastore_name': vi.datastore.name})
 
         location_url = ds_obj.DatastoreURL.urlparse(location)
         datacenter_path = location_url.datacenter_path
@@ -460,8 +458,7 @@
                   "%(datastore_name)s",
                   {'image_id': vi.ii.image_id,
                    'file_path': image_ds_loc,
-                   'datastore_name': vi.datastore.name},
-                  instance=vi.instance)
+                   'datastore_name': vi.datastore.name})
 
     def _fetch_image_as_file(self, context, vi, image_ds_loc):
         """Download image as an individual file to host via HTTP PUT."""
@@ -472,8 +469,7 @@
                   "%(datastore_name)s",
                   {'image_id': vi.ii.image_id,
                    'file_path': image_ds_loc,
-                   'datastore_name': vi.datastore.name},
-                  instance=vi.instance)
+                   'datastore_name': vi.datastore.name})
 
         # try to get esx cookie to upload
         try:
@@ -481,8 +477,7 @@
             host, cookies = self._get_esx_host_and_cookies(vi.datastore,
                 dc_path, image_ds_loc.rel_path)
         except Exception as e:
-            LOG.warning("Get esx cookies failed: %s", e,
-                        instance=vi.instance)
+            LOG.warning("Get esx cookies failed: %s", e)
             dc_path = vutil.get_inventory_path(session.vim, vi.dc_info.ref)
 
             host = self._session._host
@@ -513,8 +508,7 @@ def _fetch_image_as_vapp(self, context, vi, image_ds_loc):
                   "%(datastore_name)s as vApp",
                   {'image_id': vi.ii.image_id,
                    'vm_name': vm_name,
-                   'datastore_name': vi.datastore.name},
-                  instance=vi.instance)
+                   'datastore_name': vi.datastore.name})
 
         image_size, src_folder_ds_path = images.fetch_image_stream_optimized(
             context,
@@ -652,18 +646,17 @@
         """
         try:
             LOG.debug("Unregistering the template VM %s",
-                      templ_vm_ref.value,
-                      instance=instance)
+                      templ_vm_ref.value)
             self._session._call_method(self._session.vim,
                                        "UnregisterVM", templ_vm_ref)
"UnregisterVM", templ_vm_ref) - LOG.debug("Unregistered the template VM", instance=instance) + LOG.debug("Unregistered the template VM") except Exception as excep: LOG.warning("got this exception while un-registering a ", "template VM: %s", - excep, instance=instance) + excep) def _cache_vm_image_from_template(self, vi, templ_vm_ref): - LOG.debug("Caching VDMK from template VM", instance=vi.instance) + LOG.debug("Caching VDMK from template VM") vm_name = self._get_image_template_vm_name(vi.ii.image_id, vi.datastore.name) vmdk = vm_util.get_vmdk_info(self._session, templ_vm_ref, vm_name) @@ -675,11 +668,11 @@ def _cache_vm_image_from_template(self, vi, templ_vm_ref): self._cache_vm_image(vi, vmdk.path) except vexc.FileNotFoundException: LOG.warning("Could not find files for template VM %s", - templ_vm_ref.value, instance=vi.instance) + templ_vm_ref.value) self._unregister_template_vm(templ_vm_ref, vi.instance) return False - LOG.debug("Cached VDMK from template VM", instance=vi.instance) + LOG.debug("Cached VDMK from template VM") return True def _cache_stream_optimized_image(self, vi, tmp_image_ds_loc): @@ -789,7 +782,7 @@ def _fetch_image_from_other_datastores(self, vi): self._session._wait_for_task(templ_vm_clone_task) except vexc.FileNotFoundException: LOG.warning("Could not find files for template VM %s", - other_templ_vm_ref.value, instance=vi.instance) + other_templ_vm_ref.value) continue except vexc.VimFaultException as e: if 'VirtualHardwareVersionNotSupported' in e.fault_list: @@ -805,7 +798,7 @@ def _fetch_image_from_other_datastores(self, vi): def _fetch_image_if_missing(self, context, vi): image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi) - LOG.debug("Processing image %s", vi.ii.image_id, instance=vi.instance) + LOG.debug("Processing image %s", vi.ii.image_id) with lockutils.lock(str(vi.cache_image_path), lock_file_prefix='nova-vmware-fetch_image'): @@ -824,7 +817,7 @@ def _fetch_image_if_missing(self, context, vi): if not image_available: # no matter if image_as_template is set, we can use the # template_vm in any case - LOG.debug("Trying to find template VM", instance=vi.instance) + LOG.debug("Trying to find template VM") templ_vm_ref = self._find_image_template_vm(vi) image_available = (templ_vm_ref is not None) if image_available and not CONF.vmware.image_as_template: @@ -834,8 +827,7 @@ def _fetch_image_if_missing(self, context, vi): if (not image_available and CONF.vmware.fetch_image_from_other_datastores): # fetching from another DS is still faster - LOG.debug("Trying to find template VM on other DS", - instance=vi.instance) + LOG.debug("Trying to find template VM on other DS") templ_vm_ref = self._fetch_image_from_other_datastores(vi) image_available = (templ_vm_ref is not None) if image_available and not CONF.vmware.image_as_template: @@ -844,15 +836,13 @@ def _fetch_image_if_missing(self, context, vi): if not image_available: # we didn't find it anywhere. 
-                    LOG.debug("Preparing fetch location", instance=vi.instance)
+                    LOG.debug("Preparing fetch location")
                     tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
-                    LOG.debug("Fetch image to %s", tmp_image_ds_loc,
-                              instance=vi.instance)
+                    LOG.debug("Fetch image to %s", tmp_image_ds_loc)
                     image_fetch(context, vi, tmp_image_ds_loc)
-                    LOG.debug("Caching image", instance=vi.instance)
+                    LOG.debug("Caching image")
                     image_cache(vi, tmp_image_ds_loc)
-                    LOG.debug("Cleaning up location %s", str(tmp_dir_loc),
-                              instance=vi.instance)
+                    LOG.debug("Cleaning up location %s", str(tmp_dir_loc))
                     if tmp_dir_loc:
                         self._delete_datastore_file(str(tmp_dir_loc),
                                                     vi.dc_info.ref)
@@ -988,13 +978,13 @@
                     reraise=is_last_attempt):
                 LOG.error('Creating VM template for image failed'
                           ' with error: %s',
-                          create_templ_exc, instance=vi.instance)
+                          create_templ_exc)
                 try:
                     vm_util.destroy_vm(self._session, vi.instance)
                 except Exception as destroy_templ_exc:
                     LOG.error('Cleaning up VM template for'
                               ' image failed with error: %s',
-                              destroy_templ_exc, instance=vi.instance)
+                              destroy_templ_exc)
 
     def _build_template_vm_inventory_path(self, vi):
         vm_folder_name = self._session._call_method(vutil,
@@ -1248,7 +1238,7 @@ def spawn(self, context, instance, image_meta, injected_files,
             msg = "Block device information present: %s" % block_device_info
             # NOTE(mriedem): block_device_info can contain an auth_password
             # so we have to scrub the message before logging it.
-            LOG.debug(strutils.mask_password(msg), instance=instance)
+            LOG.debug(strutils.mask_password(msg))
 
         # Before attempting to attach any volume, make sure the
         # block_device_mapping (i.e. disk_bus) is valid
@@ -1297,8 +1287,7 @@ def disable_drs_if_needed(self, instance):
         if utils.is_big_vm(int(instance.memory_mb), instance.flavor) or \
                 utils.is_large_vm(int(instance.memory_mb), instance.flavor):
             behavior = constants.DRS_BEHAVIOR_PARTIALLY_AUTOMATED
-            LOG.debug("Adding DRS override '%s' for big VM.", behavior,
-                      instance=instance)
+            LOG.debug("Adding DRS override '%s' for big VM.", behavior)
             vm_ref = vm_util.get_vm_ref(self._session, instance)
             cluster_util.update_cluster_drs_vm_override(self._session,
                                                         self._cluster,
@@ -1357,7 +1346,7 @@ def _create_config_drive(self, context, instance, injected_files,
                       CONF.config_drive_format)
             raise exception.InstancePowerOnFailure(reason=reason)
 
-        LOG.info('Using config drive for instance', instance=instance)
+        LOG.info('Using config drive for instance')
         extra_md = {}
         if admin_password:
             extra_md['admin_pass'] = admin_password
@@ -1386,7 +1375,7 @@
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 LOG.error('Creating config drive failed with error: %s',
-                          e, instance=instance)
+                          e)
 
     def _attach_cdrom_to_vm(self, vm_ref, instance,
                             datastore, file_path):
@@ -1405,13 +1394,13 @@
             cdrom_attach_config_spec.deviceChange.append(controller_spec)
 
         LOG.debug("Reconfiguring VM instance to attach cdrom %s",
-                  file_path, instance=instance)
+                  file_path)
         vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
         LOG.debug("Reconfigured VM instance to attach cdrom %s",
-                  file_path, instance=instance)
+                  file_path)
 
     def _create_vm_snapshot(self, instance, vm_ref, image_id=None):
-        LOG.debug("Creating Snapshot of the VM instance", instance=instance)
+        LOG.debug("Creating Snapshot of the VM instance")
         snapshot_task = self._session._call_method(
             self._session.vim,
"CreateSnapshot_Task", vm_ref, @@ -1420,7 +1409,7 @@ def _create_vm_snapshot(self, instance, vm_ref, image_id=None): memory=False, quiesce=True) self._session._wait_for_task(snapshot_task) - LOG.debug("Created Snapshot of the VM instance", instance=instance) + LOG.debug("Created Snapshot of the VM instance") task_info = self._session._call_method(vutil, "get_object_property", snapshot_task, @@ -1430,13 +1419,13 @@ def _create_vm_snapshot(self, instance, vm_ref, image_id=None): @retry_if_task_in_progress def _delete_vm_snapshot(self, instance, vm_ref, snapshot): - LOG.debug("Deleting Snapshot of the VM instance", instance=instance) + LOG.debug("Deleting Snapshot of the VM instance") delete_snapshot_task = self._session._call_method( self._session.vim, "RemoveSnapshot_Task", snapshot, removeChildren=False, consolidate=True) self._session._wait_for_task(delete_snapshot_task) - LOG.debug("Deleted Snapshot of the VM instance", instance=instance) + LOG.debug("Deleted Snapshot of the VM instance") def _create_linked_clone_from_snapshot(self, instance, vm_ref, snapshot_ref, dc_info): @@ -1453,7 +1442,7 @@ def _create_linked_clone_from_snapshot(self, instance, vm_name = "%s_%s" % (constants.SNAPSHOT_VM_PREFIX, uuidutils.generate_uuid()) - LOG.debug("Creating linked-clone VM from snapshot", instance=instance) + LOG.debug("Creating linked-clone VM from snapshot") vm_clone_task = self._session._call_method( self._session.vim, "CloneVM_Task", @@ -1462,7 +1451,7 @@ def _create_linked_clone_from_snapshot(self, instance, name=vm_name, spec=clone_spec) self._session._wait_for_task(vm_clone_task) - LOG.info("Created linked-clone VM from snapshot", instance=instance) + LOG.info("Created linked-clone VM from snapshot") task_info = self._session._call_method(vutil, "get_object_property", vm_clone_task, @@ -1545,7 +1534,7 @@ def _create_vm_clone(self, instance, vm_ref, snapshot_ref, dc_info, template=True, config=config_spec) - LOG.debug("Cloning VM %s", vm_name, instance=instance) + LOG.debug("Cloning VM %s", vm_name) vm_clone_task = self._session._call_method( self._session.vim, "CloneVM_Task", @@ -1556,8 +1545,7 @@ def _create_vm_clone(self, instance, vm_ref, snapshot_ref, dc_info, name=vm_name, spec=clone_spec) self._session._wait_for_task(vm_clone_task) - LOG.info(_LI("Cloned VM %s"), vm_name, - instance=instance) + LOG.info("Cloned VM %s", vm_name) task_info = self._session._call_method(vutil, "get_object_property", vm_clone_task, @@ -1587,8 +1575,7 @@ def _get_vm_and_vmdk_attribs(): vmdk = vm_util.get_vmdk_info(self._session, vm_ref, instance.uuid) if not vmdk.path: - LOG.debug("No root disk defined. Unable to snapshot.", - instance=instance) + LOG.debug("No root disk defined. 
Unable to snapshot.") raise error_util.NoRootDiskDefined() lst_properties = ["datastore", "summary.config.guestId"] @@ -1672,16 +1659,16 @@ def reboot(self, instance, network_info, reboot_type="SOFT"): if (tools_status == "toolsOk" and tools_running_status == "guestToolsRunning" and reboot_type == "SOFT"): - LOG.debug("Rebooting guest OS of VM", instance=instance) + LOG.debug("Rebooting guest OS of VM") self._session._call_method(self._session.vim, "RebootGuest", vm_ref) - LOG.debug("Rebooted guest OS of VM", instance=instance) + LOG.debug("Rebooted guest OS of VM") else: - LOG.debug("Doing hard reboot of VM", instance=instance) + LOG.debug("Doing hard reboot of VM") reset_task = self._session._call_method(self._session.vim, "ResetVM_Task", vm_ref) self._session._wait_for_task(reset_task) - LOG.debug("Did hard reboot of VM", instance=instance) + LOG.debug("Did hard reboot of VM") def _destroy_instance(self, context, instance, destroy_disks=True): # Destroy a VM instance @@ -1707,14 +1694,14 @@ def _destroy_instance(self, context, instance, destroy_disks=True): # Un-register the VM try: - LOG.debug("Unregistering the VM", instance=instance) + LOG.debug("Unregistering the VM") self._session._call_method(self._session.vim, "UnregisterVM", vm_ref) - LOG.debug("Unregistered the VM", instance=instance) + LOG.debug("Unregistered the VM") except Exception as excep: LOG.warning("In vmwareapi:vmops:_destroy_instance, got " "this exception while un-registering the VM: %s", - excep, instance=instance) + excep) # Delete the folder holding the VM related content on # the datastore. @@ -1723,8 +1710,7 @@ def _destroy_instance(self, context, instance, destroy_disks=True): dir_ds_compliant_path = vm_ds_path.parent LOG.debug("Deleting contents of the VM from " "datastore %(datastore_name)s", - {'datastore_name': vm_ds_path.datastore}, - instance=instance) + {'datastore_name': vm_ds_path.datastore}) ds_ref_ret = props['datastore'] ds_ref = ds_ref_ret.ManagedObjectReference[0] dc_info = self.get_datacenter_ref_and_name(ds_ref) @@ -1733,18 +1719,16 @@ def _destroy_instance(self, context, instance, destroy_disks=True): dc_info.ref) LOG.debug("Deleted contents of the VM from " "datastore %(datastore_name)s", - {'datastore_name': vm_ds_path.datastore}, - instance=instance) + {'datastore_name': vm_ds_path.datastore}) except Exception: LOG.warning("In vmwareapi:vmops:_destroy_instance, " "exception while deleting the VM contents " "from the disk", - exc_info=True, instance=instance) + exc_info=True) except exception.InstanceNotFound: - LOG.warning('Instance does not exist on backend', - instance=instance) + LOG.warning('Instance does not exist on backend') except Exception: - LOG.exception(_('Destroy instance failed'), instance=instance) + LOG.exception(_('Destroy instance failed')) finally: vm_util.vm_ref_cache_delete(instance.uuid) @@ -1756,9 +1740,9 @@ def destroy(self, context, instance, destroy_disks=True): 2. Un-register. 3. Delete the contents of the folder holding the VM related data. """ - LOG.debug("Destroying instance", instance=instance) + LOG.debug("Destroying instance") self._destroy_instance(context, instance, destroy_disks=destroy_disks) - LOG.debug("Instance destroyed", instance=instance) + LOG.debug("Instance destroyed") def pause(self, instance): msg = _("pause not supported for vmwareapi") @@ -1777,18 +1761,18 @@ def suspend(self, instance): "runtime.powerState") # Only PoweredOn VMs can be suspended. 
if pwr_state == "poweredOn": - LOG.debug("Suspending the VM", instance=instance) + LOG.debug("Suspending the VM") suspend_task = self._session._call_method(self._session.vim, "SuspendVM_Task", vm_ref) self._session._wait_for_task(suspend_task) - LOG.debug("Suspended the VM", instance=instance) + LOG.debug("Suspended the VM") # Raise Exception if VM is poweredOff elif pwr_state == "poweredOff": reason = _("instance is powered off and cannot be suspended.") raise exception.InstanceSuspendFailure(reason=reason) else: LOG.debug("VM was already in suspended state. So returning " - "without doing anything", instance=instance) + "without doing anything") def resume(self, instance): """Resume the specified instance.""" @@ -1798,12 +1782,12 @@ def resume(self, instance): vm_ref, "runtime.powerState") if pwr_state.lower() == "suspended": - LOG.debug("Resuming the VM", instance=instance) + LOG.debug("Resuming the VM") suspend_task = self._session._call_method( self._session.vim, "PowerOnVM_Task", vm_ref) self._session._wait_for_task(suspend_task) - LOG.debug("Resumed the VM", instance=instance) + LOG.debug("Resumed the VM") else: reason = _("instance is not in a suspended state") raise exception.InstanceResumeFailure(reason=reason) @@ -1871,8 +1855,7 @@ def unrescue(self, instance, power_on=True): rescue_device = self._get_rescue_device(instance, vm_ref) except exception.NotFound: with excutils.save_and_reraise_exception(): - LOG.error('Unable to access the rescue disk', - instance=instance) + LOG.error('Unable to access the rescue disk') vm_util.power_off_instance(self._session, instance, vm_ref) self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device, destroy_disk=True) @@ -1906,15 +1889,13 @@ def _clean_shutdown(self, instance, timeout, retry_interval): :return: True if the instance was shutdown within time limit, False otherwise. 
""" - LOG.debug("Performing Soft shutdown on instance", - instance=instance) + LOG.debug("Performing Soft shutdown on instance") vm_ref = vm_util.get_vm_ref(self._session, instance) props = self._get_instance_props(vm_ref) if props.get("runtime.powerState") != "poweredOn": - LOG.debug("Instance not in poweredOn state.", - instance=instance) + LOG.debug("Instance not in poweredOn state.") return False if ((props.get("summary.guest.toolsStatus") == "toolsOk") and @@ -1922,7 +1903,7 @@ def _clean_shutdown(self, instance, timeout, retry_interval): "guestToolsRunning")): LOG.debug("Soft shutdown instance, timeout: %d", - timeout, instance=instance) + timeout) self._session._call_method(self._session.vim, "ShutdownGuest", vm_ref) @@ -1932,17 +1913,15 @@ def _clean_shutdown(self, instance, timeout, retry_interval): props = self._get_instance_props(vm_ref) if props.get("runtime.powerState") == "poweredOff": - LOG.info("Soft shutdown succeeded.", - instance=instance) + LOG.info("Soft shutdown succeeded.") return True time.sleep(wait_time) timeout -= retry_interval - LOG.warning("Timed out while waiting for soft shutdown.", - instance=instance) + LOG.warning("Timed out while waiting for soft shutdown.") else: - LOG.debug("VMware Tools not running", instance=instance) + LOG.debug("VMware Tools not running") return False @@ -1956,7 +1935,7 @@ def is_instance_in_resource_pool(self, instance): vutil.get_moref_value(self._root_resource_pool) except (exception.InstanceNotFound, vexc.ManagedObjectNotFoundException): - LOG.debug("Failed to find instance", instance=instance) + LOG.debug("Failed to find instance") return False def _get_instance_props(self, vm_ref): @@ -1994,8 +1973,7 @@ def _update_instance_progress(self, context, instance, step, total_steps): instance_uuid = instance.uuid LOG.debug("Updating instance '%(instance_uuid)s' progress to" " %(progress)d", - {'instance_uuid': instance_uuid, 'progress': progress}, - instance=instance) + {'instance_uuid': instance_uuid, 'progress': progress}) instance.progress = progress instance.save() @@ -2023,8 +2001,7 @@ def _resize_vm(self, context, instance, vm_ref, flavor, image_meta): if not old_needs_override and new_needs_override: # Make sure we don't automatically move around "big" VMs behavior = constants.DRS_BEHAVIOR_PARTIALLY_AUTOMATED - LOG.debug("Adding DRS override '%s' for big VM.", behavior, - instance=instance) + LOG.debug("Adding DRS override '%s' for big VM.", behavior) cluster_util.update_cluster_drs_vm_override(self._session, self._cluster, vm_ref, @@ -2033,16 +2010,14 @@ def _resize_vm(self, context, instance, vm_ref, flavor, image_meta): elif old_needs_override and not new_needs_override: # remove the old override, if we had one before. 
             # error out if it was already deleted another way
-            LOG.debug("Removing DRS override for former big VM.",
-                      instance=instance)
+            LOG.debug("Removing DRS override for former big VM.")
             try:
                 cluster_util.update_cluster_drs_vm_override(self._session,
                                                             self._cluster,
                                                             vm_ref,
                                                             operation='remove')
             except Exception:
-                LOG.exception('Could not remove DRS override.',
-                              instance=instance)
+                LOG.exception('Could not remove DRS override.')
 
         self._clean_up_after_special_spawning(context, flavor.memory_mb,
                                               flavor)
@@ -2086,7 +2061,7 @@ def _resize_create_ephemerals_and_swap(self, vm_ref, instance,
         vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                      uuid=instance.uuid)
         if not vmdk.device:
-            LOG.debug("No root disk attached!", instance=instance)
+            LOG.debug("No root disk attached!")
             return
         ds_ref = vmdk.device.backing.datastore
         datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
@@ -2203,16 +2178,13 @@ def finish_revert_migration(self, context, instance, network_info,
             adapter_type = vmdk.adapter_type
 
             self._detach_volumes(instance, block_device_info)
-            LOG.debug("Relocating VM for reverting migration",
-                      instance=instance)
+            LOG.debug("Relocating VM for reverting migration")
             try:
                 self._relocate_vm(vm_ref, context, instance, network_info)
-                LOG.debug("Relocated VM for reverting migration",
-                          instance=instance)
+                LOG.debug("Relocated VM for reverting migration")
             except Exception as e:
                 with excutils.save_and_reraise_exception():
-                    LOG.error("Relocating the VM failed: %s", e,
-                              instance=instance)
+                    LOG.error("Relocating the VM failed: %s", e)
             else:
                 self.update_cluster_placement(context, instance)
             finally:
@@ -2243,16 +2215,14 @@ def finish_migration(self, context, migration, instance, disk_info,
                 self._detach_volumes(instance, block_device_info)
                 reattach_volumes = True
             LOG.debug("Relocating VM for migration to %s",
-                      migration.dest_compute, instance=instance)
+                      migration.dest_compute)
             try:
                 self._relocate_vm(vm_ref, context, instance, network_info,
                                   image_meta)
-                LOG.debug("Relocated VM to %s", migration.dest_compute,
-                          instance=instance)
+                LOG.debug("Relocated VM to %s", migration.dest_compute)
             except Exception as e:
                 with excutils.save_and_reraise_exception():
-                    LOG.error("Relocating the VM failed with error: %s", e,
-                              instance=instance)
+                    LOG.error("Relocating the VM failed with error: %s", e)
 
             self._attach_volumes(instance, block_device_info, adapter_type)
 
@@ -2457,7 +2427,7 @@ def poll_rebooting_instances(self, timeout, instances):
                       "older than %(timeout)d seconds", instances_info)
 
         for instance in instances:
-            LOG.info("Automatically hard rebooting", instance=instance)
+            LOG.info("Automatically hard rebooting")
            self.compute_api.reboot(ctxt, instance, "HARD")
 
     def get_info(self, instance):
@@ -2586,11 +2556,9 @@ def _set_machine_id(self, client_factory, instance, network_info,
                                client_factory,
                                self._get_machine_id_str(network_info))
 
-        LOG.debug("Reconfiguring VM instance to set the machine id",
-                  instance=instance)
+        LOG.debug("Reconfiguring VM instance to set the machine id")
         vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
-        LOG.debug("Reconfigured VM instance to set the machine id",
-                  instance=instance)
+        LOG.debug("Reconfigured VM instance to set the machine id")
 
     @utils.synchronized('vmware.get_and_set_vnc_port')
     def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
@@ -2600,12 +2568,10 @@ def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
                                                             client_factory,
                                                             port)
         LOG.debug("Reconfiguring VM instance to enable vnc on "
-                  "port - %(port)s", {'port': port},
%(port)s", {'port': port}, - instance=instance) + "port - %(port)s", {'port': port}) vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec) LOG.debug("Reconfigured VM instance to enable vnc on " - "port - %(port)s", {'port': port}, - instance=instance) + "port - %(port)s", {'port': port}) def _get_ds_browser(self, ds_ref): ds_browser = self._datastore_browser_mapping.get(ds_ref.value) @@ -2865,21 +2831,20 @@ def attach_interface(self, context, instance, image_meta, vif): attach_config_spec = vm_util.get_network_attach_config_spec( client_factory, vif_info, port_index, extra_specs.vif_limits) - LOG.debug("Reconfiguring VM to attach interface", - instance=instance) + LOG.debug("Reconfiguring VM to attach interface") try: vm_util.reconfigure_vm(self._session, vm_ref, attach_config_spec) except Exception as e: LOG.error('Attaching network adapter failed. Exception: %s', - e, instance=instance) + e) raise exception.InterfaceAttachFailed( instance_uuid=instance.uuid) self._network_api.update_instance_vnic_index( context, instance, vif, port_index) - LOG.debug("Reconfigured VM to attach interface", instance=instance) + LOG.debug("Reconfigured VM to attach interface") def detach_interface(self, context, instance, vif): """Detach an interface from the instance.""" @@ -2910,17 +2875,16 @@ def detach_interface(self, context, instance, vif): client_factory = self._session.vim.client.factory detach_config_spec = vm_util.get_network_detach_config_spec( client_factory, device, port_index) - LOG.debug("Reconfiguring VM to detach interface", - instance=instance) + LOG.debug("Reconfiguring VM to detach interface") try: vm_util.reconfigure_vm(self._session, vm_ref, detach_config_spec) except Exception as e: LOG.error('Detaching network adapter failed. Exception: %s', - e, instance=instance) + e) raise exception.InterfaceDetachFailed( instance_uuid=instance.uuid) - LOG.debug("Reconfigured VM to detach interface", instance=instance) + LOG.debug("Reconfigured VM to detach interface") def _use_disk_image_as_full_clone(self, vm_ref, vi): """Uses cached image disk by copying it into the VM directory.""" @@ -2977,8 +2941,7 @@ def _use_disk_image_as_linked_clone(self, vm_ref, vi): if not self._sized_image_exists(sized_disk_ds_loc, vi.datastore.ref): - LOG.debug("Copying root disk of size %sGb", vi.root_gb, - instance=vi.instance) + LOG.debug("Copying root disk of size %sGb", vi.root_gb) try: vm_util.copy_virtual_disk( self._session, @@ -2986,15 +2949,13 @@ def _use_disk_image_as_linked_clone(self, vm_ref, vi): str(vi.cache_image_path), str(sized_disk_ds_loc)) except Exception as e: - LOG.warning("Root disk file creation failed - %s", - e, instance=vi.instance) + LOG.warning("Root disk file creation failed - %s", e) with excutils.save_and_reraise_exception(): LOG.error('Failed to copy cached image %(source)s to ' '%(dest)s for resize: %(error)s', {'source': vi.cache_image_path, 'dest': sized_disk_ds_loc, - 'error': e}, - instance=vi.instance) + 'error': e}) try: ds_util.file_delete(self._session, sized_disk_ds_loc, @@ -3105,8 +3066,7 @@ def get_vnc_console(self, instance): # NOTE: VM can move hosts in some situations. Debug for admins. 
LOG.debug("VM %(uuid)s is currently on host %(host_name)s", - {'uuid': instance.uuid, 'host_name': host_name}, - instance=instance) + {'uuid': instance.uuid, 'host_name': host_name}) return ctype.ConsoleVNC(**vnc_console) def get_mks_console(self, instance): diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py index 8cd476692bb..dceb8cf7ba7 100644 --- a/nova/virt/vmwareapi/volumeops.py +++ b/nova/virt/vmwareapi/volumeops.py @@ -67,7 +67,7 @@ def attach_disk_to_vm(self, vm_ref, instance, if volume_uuid and backing_uuid: LOG.debug("Adding volume details for %s to attach config spec.", - volume_uuid, instance=instance) + volume_uuid) self._add_volume_details_to_config_spec(vmdk_attach_config_spec, volume_uuid, backing_uuid) @@ -75,15 +75,13 @@ def attach_disk_to_vm(self, vm_ref, instance, "disk %(vmdk_path)s or device %(device_name)s with type " "%(disk_type)s", {'vm_ref': vm_ref.value, 'vmdk_path': vmdk_path, - 'device_name': device_name, 'disk_type': disk_type}, - instance=instance) + 'device_name': device_name, 'disk_type': disk_type}) vm_util.reconfigure_vm(self._session, vm_ref, vmdk_attach_config_spec) LOG.debug("Reconfigured VM instance %(vm_ref)s to attach " "disk %(vmdk_path)s or device %(device_name)s with type " "%(disk_type)s", {'vm_ref': vm_ref.value, 'vmdk_path': vmdk_path, - 'device_name': device_name, 'disk_type': disk_type}, - instance=instance) + 'device_name': device_name, 'disk_type': disk_type}) def _add_volume_details_to_config_spec(self, config_spec, volume_uuid, device_uuid): @@ -118,20 +116,18 @@ def detach_disk_from_vm(self, vm_ref, instance, device, if volume_uuid is not None: LOG.debug("Adding volume details for %s to detach config spec.", - volume_uuid, instance=instance) + volume_uuid) self._add_volume_details_to_config_spec(vmdk_detach_config_spec, volume_uuid, '') disk_key = device.key LOG.debug("Reconfiguring VM instance %(vm_ref)s to detach " "disk %(disk_key)s", - {'vm_ref': vm_ref.value, 'disk_key': disk_key}, - instance=instance) + {'vm_ref': vm_ref.value, 'disk_key': disk_key}) vm_util.reconfigure_vm(self._session, vm_ref, vmdk_detach_config_spec) LOG.debug("Reconfigured VM instance %(vm_ref)s to detach " "disk %(disk_key)s", - {'vm_ref': vm_ref.value, 'disk_key': disk_key}, - instance=instance) + {'vm_ref': vm_ref.value, 'disk_key': disk_key}) def _iscsi_get_target(self, data): """Return the iSCSI Target given a volume info.""" @@ -344,8 +340,7 @@ def _attach_volume_vmdk(self, connection_info, instance, adapter_type=None): """Attach vmdk volume storage to VM instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) - LOG.debug("_attach_volume_vmdk: %s", connection_info, - instance=instance) + LOG.debug("_attach_volume_vmdk: %s", connection_info) data = connection_info['data'] volume_ref = self._get_volume_ref(data['volume']) @@ -369,15 +364,14 @@ def _attach_volume_vmdk(self, connection_info, instance, volume_uuid=data['volume_id'], backing_uuid=vmdk.device.backing.uuid) - LOG.debug("Attached VMDK: %s", connection_info, instance=instance) + LOG.debug("Attached VMDK: %s", connection_info) def _attach_volume_iscsi(self, connection_info, instance, adapter_type=None): """Attach iscsi volume storage to VM instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) # Attach Volume to VM - LOG.debug("_attach_volume_iscsi: %s", connection_info, - instance=instance) + LOG.debug("_attach_volume_iscsi: %s", connection_info) data = connection_info['data'] @@ -395,13 +389,12 @@ def _attach_volume_iscsi(self, connection_info, 
         self.attach_disk_to_vm(vm_ref, instance,
                                adapter_type, 'rdmp',
                                device_name=device_name)
-        LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
+        LOG.debug("Attached ISCSI: %s", connection_info)
 
     def attach_volume(self, connection_info, instance, adapter_type=None):
         """Attach volume storage to VM instance."""
         driver_type = connection_info['driver_volume_type']
-        LOG.debug("Volume attach. Driver type: %s", driver_type,
-                  instance=instance)
+        LOG.debug("Volume attach. Driver type: %s", driver_type)
         if driver_type == constants.DISK_FORMAT_VMDK:
             self._attach_volume_vmdk(connection_info, instance, adapter_type)
         elif driver_type == constants.DISK_FORMAT_ISCSI:
@@ -531,8 +524,7 @@ def _detach_volume_vmdk(self, connection_info, instance):
         """Detach volume storage to VM instance."""
         vm_ref = vm_util.get_vm_ref(self._session, instance)
         # Detach Volume from VM
-        LOG.debug("_detach_volume_vmdk: %s", connection_info,
-                  instance=instance)
+        LOG.debug("_detach_volume_vmdk: %s", connection_info)
         data = connection_info['data']
         volume_ref = self._get_volume_ref(data['volume'])
 
@@ -562,14 +554,13 @@ def _detach_volume_vmdk(self, connection_info, instance):
         self.detach_disk_from_vm(vm_ref, instance, device,
                                  volume_uuid=data['volume_id'])
 
-        LOG.debug("Detached VMDK: %s", connection_info, instance=instance)
+        LOG.debug("Detached VMDK: %s", connection_info)
 
     def _detach_volume_iscsi(self, connection_info, instance):
         """Detach volume storage to VM instance."""
         vm_ref = vm_util.get_vm_ref(self._session, instance)
         # Detach Volume from VM
-        LOG.debug("_detach_volume_iscsi: %s", connection_info,
-                  instance=instance)
+        LOG.debug("_detach_volume_iscsi: %s", connection_info)
 
         data = connection_info['data']
 
         # Discover iSCSI Target
@@ -584,13 +575,12 @@ def _detach_volume_iscsi(self, connection_info, instance):
         if device is None:
             raise exception.DiskNotFound(message=_("Unable to find volume"))
         self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True)
-        LOG.debug("Detached ISCSI: %s", connection_info, instance=instance)
+        LOG.debug("Detached ISCSI: %s", connection_info)
 
     def detach_volume(self, connection_info, instance):
         """Detach volume storage to VM instance."""
         driver_type = connection_info['driver_volume_type']
-        LOG.debug("Volume detach. Driver type: %s", driver_type,
-                  instance=instance)
+        LOG.debug("Volume detach. Driver type: %s", driver_type)
         if driver_type == constants.DISK_FORMAT_VMDK:
             self._detach_volume_vmdk(connection_info, instance)
         elif driver_type == constants.DISK_FORMAT_ISCSI:
@@ -602,8 +592,7 @@ def attach_root_volume(self, connection_info, instance, datastore,
                            adapter_type=None):
         """Attach a root volume to the VM instance."""
         driver_type = connection_info['driver_volume_type']
-        LOG.debug("Root volume attach. Driver type: %s", driver_type,
-                  instance=instance)
+        LOG.debug("Root volume attach. Driver type: %s", driver_type)
         # NOTE(jkulik): Upstream moves the volume to the instance DS here. This
         # would violate the differentiation between ephemeral and volume DS, so
         # we don't do that. This comment should help us detect upstream changes
@@ -662,7 +651,6 @@
                 LOG.warning("Shadow-vm %s already has a disk"
                             " attached at %s replacing it with %s",
                             volume, original_device_path, current_device_path,
-                            instance=instance
                             )
                 self.detach_disk_from_vm(self, volume_ref, instance,
                                          original_device, destroy_disk=True)
@@ -676,8 +664,7 @@
                 )
             except Exception:
                 LOG.exception("Failed to attach volume {}. Device {}".format(
-                    data["volume_id"],
-                    device.key), instance=instance)
+                    data["volume_id"], device.key))
 
     def delete_shadow_vms(self, block_device_info, instance=None):
         # We need to delete the migrated shadow vms
@@ -702,12 +689,12 @@
                     deleted.append("{volume_id} ({volume})".format(**data))
                 except oslo_vmw_exceptions.ManagedObjectNotFoundException:
                     LOG.debug("Volume %s already deleted",
-                              data.get("volume_id"), instance=instance)
+                              data.get("volume_id"))
                 except Exception:
                     LOG.exception("Failed to delete volume %s",
-                                  data.get("volume_id"), instance=instance)
+                                  data.get("volume_id"))
 
-        LOG.info("Deleted %s", deleted, instance=instance)
+        LOG.info("Deleted %s", deleted)
 
     def map_volumes_to_devices(self, instance, disk_infos):
         """Maps a connection_info.data to a device of the instance by its key