22513201 nova resize revert will fail if there are volumes attached
author Sean Wilcox <sean.wilcox@oracle.com>
Tue, 09 Feb 2016 16:26:27 -0800
branch s11u3-sru
changeset 5432 e6a5784e62c3
parent 5431 a515b642a711
child 5433 48cd54da97b5
22513201 nova resize revert will fail if there are volumes attached
components/openstack/nova/files/solariszones/driver.py
--- a/components/openstack/nova/files/solariszones/driver.py	Tue Feb 09 16:26:27 2016 -0800
+++ b/components/openstack/nova/files/solariszones/driver.py	Tue Feb 09 16:26:27 2016 -0800
@@ -1727,10 +1727,7 @@
         """
         self.power_off(instance)
 
-        inst_type = flavor_obj.Flavor.get_by_id(
-            nova_context.get_admin_context(read_deleted='yes'),
-            instance['instance_type_id'])
-        extra_specs = inst_type['extra_specs'].copy()
+        extra_specs = self._get_extra_specs(instance)
         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
 
         name = instance['name']
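
The removed block above fetched the instance's flavor by id and copied its
extra_specs; the same four lines are removed again in two later hunks. The new
self._get_extra_specs(instance) call presumably consolidates exactly that
lookup into one helper. A minimal sketch of such a helper, assuming the same
flavor_obj and nova_context modules the removed code used:

    def _get_extra_specs(self, instance):
        """Retrieve the extra_specs of the instance's flavor."""
        inst_type = flavor_obj.Flavor.get_by_id(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        return inst_type['extra_specs'].copy()
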
@@ -1747,14 +1744,11 @@
             new_rvid = instance.system_metadata.get('new_instance_volid')
             newvname = instance['display_name'] + "-" + self._rootzpool_suffix
             mount_dev = instance['root_device_name']
-            del instance.system_metadata['new_instance_volid']
             del instance.system_metadata['old_instance_volid']
 
             self._resize_disk_migration(context, instance, new_rvid, old_rvid,
                                         rgb, mount_dev)
 
-            self._volume_api.delete(context, new_rvid)
-
     def destroy(self, context, instance, network_info, block_device_info=None,
                 destroy_disks=True, migrate_data=None):
         """Destroy the specified instance from the Hypervisor.
@@ -1810,7 +1804,7 @@
 
         # One last point of house keeping. If we are deleting the instance
         # during a resize operation we want to make sure the cinder volumes are
-        # property cleaned up. We need to do this here, because the periodic
+        # properly cleaned up. We need to do this here, because the periodic
         # task that comes along and cleans these things up isn't nice enough to
         # pass a context in so that we could simply do the work there.  But
         # because we have access to a context, we can handle the work here and
@@ -2206,10 +2200,8 @@
         LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
 
         samehost = (dest == self.get_host_ip_addr())
-        inst_type = flavor_obj.Flavor.get_by_id(
-            nova_context.get_admin_context(read_deleted='yes'),
-            instance['instance_type_id'])
-        extra_specs = inst_type['extra_specs'].copy()
+
+        extra_specs = self._get_extra_specs(instance)
         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
         if brand != ZONE_BRAND_SOLARIS_KZ and not samehost:
             msg = (_("'%s' branded zones do not currently support "
@@ -2445,10 +2437,7 @@
         if samehost:
             instance.system_metadata['old_vm_state'] = vm_states.RESIZED
 
-        inst_type = flavor_obj.Flavor.get_by_id(
-            nova_context.get_admin_context(read_deleted='yes'),
-            instance['instance_type_id'])
-        extra_specs = inst_type['extra_specs'].copy()
+        extra_specs = self._get_extra_specs(instance)
         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
         name = instance['name']
 
@@ -2654,14 +2643,11 @@
 
             del instance.system_metadata['new_instance_volid']
             del instance.system_metadata['old_instance_volid']
-
-            rootmp = instance.root_device_name
-            bmap = block_device_info.get('block_device_mapping')
-            for entry in bmap:
-                if entry['mount_device'] != rootmp:
-                    self.attach_volume(context,
-                                       entry['connection_info'], instance,
-                                       entry['mount_device'])
+        else:
+            new_rvid = instance.system_metadata['new_instance_volid']
+            if new_rvid:
+                del instance.system_metadata['new_instance_volid']
+                self._volume_api.delete(context, new_rvid)
 
         self._power_on(instance)
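
The removed loop re-attached every non-root volume from block_device_info
during the revert; with the volumes still attached, that re-attach presumably
failed, which matches the failure named in the bug synopsis. The replacement
else branch instead closes out the other side of the bookkeeping: when there is
no old root volume to swap back to, the volume created for the resize is
orphaned, so it is deleted here with the available request context rather than
left for the periodic cleanup task. A standalone sketch of the resulting
cleanup decision, with hypothetical stand-ins for the driver state (not the
actual driver code):

    def cleanup_after_revert(system_metadata, volume_api, context):
        old_rvid = system_metadata.get('old_instance_volid')
        if old_rvid:
            # A root-volume swap was undone: both bookkeeping keys are
            # consumed once the old volume is back in place.
            system_metadata.pop('new_instance_volid', None)
            system_metadata.pop('old_instance_volid', None)
        else:
            # No old volume to restore: the volume created for the resize
            # is orphaned, so delete it while a context is available.
            new_rvid = system_metadata.pop('new_instance_volid', None)
            if new_rvid:
                volume_api.delete(context, new_rvid)
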