18595100 nova resize does not work s11u3-sru
author Sean Wilcox <sean.wilcox@oracle.com>
Tue, 09 Feb 2016 16:26:27 -0800
branch s11u3-sru
changeset 5430 b6b088be89d5
parent 5429 1ae4cfbadda9
child 5431 a515b642a711
18595100 nova resize does not work
22675628 problem in SERVICE/NOVA
components/openstack/cinder/files/solaris/zfs.py
components/openstack/horizon/files/overrides.py
components/openstack/nova/files/solariszones/driver.py
components/openstack/nova/patches/08-confirm_migration_context.patch
components/openstack/nova/patches/09-CVE-2015-3280.patch
--- a/components/openstack/cinder/files/solaris/zfs.py	Tue Feb 09 16:26:27 2016 -0800
+++ b/components/openstack/cinder/files/solaris/zfs.py	Tue Feb 09 16:26:27 2016 -0800
@@ -551,6 +551,11 @@
 
     def create_export(self, context, volume):
         """Export the volume."""
+        # If the volume is already exported there is nothing to do, as we
+        # simply export volumes and they are universally available.
+        if self._get_luid(volume):
+            return
+
         zvol = self._get_zvol_path(volume)
 
         # Create a Logical Unit (LU)
--- a/components/openstack/horizon/files/overrides.py	Tue Feb 09 16:26:27 2016 -0800
+++ b/components/openstack/horizon/files/overrides.py	Tue Feb 09 16:26:27 2016 -0800
@@ -70,10 +70,12 @@
 # Remove 'SecurityGroupsTab' tab from Project/Compute/Access & Security
 AccessAndSecurityTabs.tabs = (KeypairsTab, FloatingIPsTab, APIAccessTab)
 
-# Remove 'ConfirmResize', 'RevertResize', 'TogglePause', 'ToggleSuspend',
-# 'MigrateInstance' actions from Admin/System/Instances/Actions
+# Remove 'TogglePause', 'ToggleSuspend', 'MigrateInstance' actions from
+# Admin/Instances/Actions
 admin_tables.AdminInstancesTable._meta.row_actions = (
     admin_tables.AdminEditInstance,
+    project_tables.ConfirmResize,
+    project_tables.RevertResize,
     project_tables.ConsoleLink,
     project_tables.LogLink,
     project_tables.CreateSnapshot,
@@ -83,11 +85,12 @@
     project_tables.TerminateInstance
 )
 
-# Remove 'ConfirmResize', 'RevertResize', 'DecryptInstancePassword',
-# 'EditInstanceSecurityGroups', 'TogglePause', 'ToggleSuspend', 'ResizeLink',
-# 'RebuildInstance' actions from Project/Compute/Instances/Actions
+# Remove 'EditInstanceSecurityGroups', 'TogglePause', 'RebuildInstance'
+# actions from Project/Instances/Actions
 project_tables.InstancesTable._meta.row_actions = (
     project_tables.StartInstance,
+    project_tables.ConfirmResize,
+    project_tables.RevertResize,
     project_tables.CreateSnapshot,
     project_tables.SimpleAssociateIP,
     project_tables.AssociateIP,
@@ -95,6 +98,7 @@
     project_tables.EditInstance,
     project_tables.ConsoleLink,
     project_tables.LogLink,
+    project_tables.ResizeLink,
     project_tables.SoftRebootInstance,
     project_tables.RebootInstance,
     project_tables.StopInstance,
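
The bulk of this changeset is the Solaris Zones virt driver below, which fills in the resize hooks that previously raised NotImplementedError. As orientation only, here is a simplified, stubbed sketch (not code from this changeset) of the order in which nova-compute drives those hooks during a resize; the signatures match the methods added to driver.py:

    # resize_flow_sketch.py -- illustrative stub only; the real implementations
    # are added to nova/files/solariszones/driver.py below.
    class StubSolarisZonesDriver(object):
        def migrate_disk_and_power_off(self, context, instance, dest, flavor,
                                       network_info, block_device_info=None,
                                       timeout=0, retry_interval=0):
            # Source side: power off the zone and clone/extend the root volume.
            print("migrate_disk_and_power_off")
            return {'id': 'hypothetical-new-volume-id'}  # returned as disk_info

        def finish_migration(self, context, migration, instance, disk_info,
                             network_info, image_meta, resize_instance,
                             block_device_info=None, power_on=True):
            # Destination side: update the zone configuration (or attach a new
            # zone), switch to the new root volume and boot.
            print("finish_migration")

        def confirm_migration(self, context, migration, instance, network_info):
            # "nova resize-confirm": drop the old root volume.
            print("confirm_migration")

        def finish_revert_migration(self, context, instance, network_info,
                                    block_device_info=None, power_on=True):
            # "nova resize-revert": restore the original root volume and boot.
            print("finish_revert_migration")

    if __name__ == '__main__':
        drv = StubSolarisZonesDriver()
        disk_info = drv.migrate_disk_and_power_off(None, {}, 'dest', {}, None)
        drv.finish_migration(None, {}, {}, disk_info, None, None, True)
        # The user then either confirms the resize...
        drv.confirm_migration(None, {}, {}, None)
        # ...or reverts it, in which case finish_revert_migration runs instead.
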
--- a/components/openstack/nova/files/solariszones/driver.py	Tue Feb 09 16:26:27 2016 -0800
+++ b/components/openstack/nova/files/solariszones/driver.py	Tue Feb 09 16:26:27 2016 -0800
@@ -36,6 +36,7 @@
 from solaris_install.engine import InstallEngine
 from solaris_install.target.size import Size
 
+from cinderclient import exceptions as cinder_exception
 from eventlet import greenthread
 from lxml import etree
 from oslo_config import cfg
@@ -65,6 +66,11 @@
 from nova.virt import images
 from nova.virt.solariszones import sysconfig
 from nova import volume
+from nova.volume.cinder import API
+from nova.volume.cinder import cinderclient
+from nova.volume.cinder import get_cinder_client_version
+from nova.volume.cinder import translate_volume_exception
+from nova.volume.cinder import _untranslate_volume_summary_view
 
 solariszones_opts = [
     cfg.StrOpt('glancecache_dirname',
@@ -140,6 +146,7 @@
 # incompatible change such as concerning kernel zone live migration.
 HYPERVISOR_VERSION = '5.11'
 
+ROOTZPOOL_RESOURCE = 'rootzpool'
 
 def lookup_resource_property(zone, resource, prop, filter=None):
     """Lookup specified property from specified Solaris Zone resource."""
@@ -194,6 +201,87 @@
     return result
 
 
+class SolarisVolumeAPI(API):
+    """ Extending the volume api to support additional cinder sub-commands
+    """
+    @translate_volume_exception
+    def create(self, context, size, name, description, snapshot=None,
+               image_id=None, volume_type=None, metadata=None,
+               availability_zone=None, source_volume=None):
+        """Clone the source volume by calling the cinderclient version of
+        create with a source_volid argument
+
+        :param context: the context for the clone
+        :param size: size of the new volume, must be the same as the source
+            volume
+        :param name: display_name of the new volume
+        :param description: display_description of the new volume
+        :param snapshot: Snapshot object
+        :param image_id: image_id to create the volume from
+        :param volume_type: type of volume
+        :param metadata: Additional metadata for the volume
+        :param availability_zone: zone:host where the volume is to be created
+        :param source_volume: Volume object
+
+        Returns a volume object
+        """
+        if snapshot is not None:
+            snapshot_id = snapshot['id']
+        else:
+            snapshot_id = None
+
+        if source_volume is not None:
+            source_volid = source_volume['id']
+        else:
+            source_volid = None
+
+        kwargs = dict(snapshot_id=snapshot_id,
+                      volume_type=volume_type,
+                      user_id=context.user_id,
+                      project_id=context.project_id,
+                      availability_zone=availability_zone,
+                      metadata=metadata,
+                      imageRef=image_id,
+                      source_volid=source_volid)
+
+        version = get_cinder_client_version(context)
+        if version == '1':
+            kwargs['display_name'] = name
+            kwargs['display_description'] = description
+        elif version == '2':
+            kwargs['name'] = name
+            kwargs['description'] = description
+
+        try:
+            item = cinderclient(context).volumes.create(size, **kwargs)
+            return _untranslate_volume_summary_view(context, item)
+        except cinder_exception.OverLimit:
+            raise exception.OverQuota(overs='volumes')
+        except cinder_exception.BadRequest as err:
+            raise exception.InvalidInput(reason=unicode(err))
+
+    @translate_volume_exception
+    def update(self, context, volume_id, fields):
+        """Update the fields of a volume for example used to rename a volume
+        via a call to cinderclient
+
+        :param context: the context for the update
+        :param volume_id: the id of the volume to update
+        :param fields: a dictionary of the name/value pairs to update
+        """
+        cinderclient(context).volumes.update(volume_id, **fields)
+
+    @translate_volume_exception
+    def extend(self, context, volume, newsize):
+        """Extend the size of a cinder volume by calling the cinderclient
+
+        :param context: the context for the extend
+        :param volume: the volume object to extend
+        :param newsize: the new size of the volume in GB
+        """
+        cinderclient(context).volumes.extend(volume, newsize)
+
+
 class ZoneConfig(object):
     """ZoneConfig - context manager for access zone configurations.
     Automatically opens the configuration for a zone and commits any changes
@@ -265,8 +353,13 @@
                       % (prop, resource, self.zone.name, reason))
             raise
 
-    def addresource(self, resource, props=None):
-        """creates a new resource with an optional property list."""
+    def addresource(self, resource, props=None, ignore_exists=False):
+        """creates a new resource with an optional property list, or set the
+        property if the resource exists and ignore_exists is true.
+
+        :param ignore_exists: If the resource exists, set the property for the
+            resource.
+        """
         if props is None:
             props = []
 
@@ -353,7 +446,8 @@
         self._rad_connection = None
         self._uname = os.uname()
         self._validated_archives = list()
-        self._volume_api = volume.API()
+        self._volume_api = SolarisVolumeAPI()
+        self._rootzpool_suffix = ROOTZPOOL_RESOURCE
 
     @property
     def rad_connection(self):
@@ -510,7 +604,12 @@
 
     def _get_max_mem(self, zone):
         """Return the maximum memory in KBytes allowed."""
-        max_mem = lookup_resource_property(zone, 'capped-memory', 'physical')
+        if zone.brand == ZONE_BRAND_SOLARIS:
+            mem_resource = 'swap'
+        else:
+            mem_resource = 'physical'
+
+        max_mem = lookup_resource_property(zone, 'capped-memory', mem_resource)
         if max_mem is not None:
             return strutils.string_to_bytes("%sB" % max_mem) / 1024
 
@@ -919,7 +1018,7 @@
             vol = self._volume_api.create(
                 context,
                 instance['root_gb'],
-                instance['display_name'] + "-rootzpool",
+                instance['display_name'] + "-" + self._rootzpool_suffix,
                 "Boot volume for instance '%s' (%s)"
                 % (instance['name'], instance['uuid']))
             # TODO(npower): Polling is what nova/compute/manager also does when
@@ -943,6 +1042,7 @@
         connector = self.get_volume_connector(instance)
         connection_info = self._volume_api.initialize_connection(
             context, volume_id, connector)
+        connection_info['serial'] = volume_id
 
         # Check connection_info to determine if the provided volume is
         # local to this compute node. If it is, then don't use it for
@@ -1009,8 +1109,9 @@
                     [zonemgr.Property("storage", suri)])
             else:
                 zc.addresource(
-                    "rootzpool",
-                    [zonemgr.Property("storage", listvalue=[suri])])
+                    ROOTZPOOL_RESOURCE,
+                    [zonemgr.Property("storage", listvalue=[suri])],
+                    ignore_exists=True)
 
     def _set_num_cpu(self, name, vcpus, brand):
         """Set number of VCPUs in a Solaris Zone configuration."""
@@ -1104,20 +1205,27 @@
                                                   filter)
                     linkname = 'net%s' % id
 
-            # create the required sysconfig file
-            subnet_uuid = port['fixed_ips'][0]['subnet_id']
-            subnet = network_plugin.show_subnet(subnet_uuid)['subnet']
-
-            if subnet['enable_dhcp']:
-                tree = sysconfig.create_ncp_defaultfixed('dhcp', linkname,
-                                                         netid, ip_version)
-            else:
-                tree = sysconfig.create_ncp_defaultfixed('static', linkname,
-                                                         netid, ip_version, ip,
-                                                         route, nameservers)
-
-            fp = os.path.join(sc_dir, 'evs-network-%d.xml' % netid)
-            sysconfig.create_sc_profile(fp, tree)
+            # create the required sysconfig file (or skip if this is part of a
+            # resize process)
+            tstate = instance['task_state']
+            if tstate not in [task_states.RESIZE_FINISH,
+                              task_states.RESIZE_REVERTING,
+                              task_states.RESIZE_MIGRATING]:
+                subnet_uuid = port['fixed_ips'][0]['subnet_id']
+                subnet = network_plugin.show_subnet(subnet_uuid)['subnet']
+
+                if subnet['enable_dhcp']:
+                    tree = sysconfig.create_ncp_defaultfixed('dhcp', linkname,
+                                                             netid, ip_version)
+                else:
+                    tree = sysconfig.create_ncp_defaultfixed('static',
+                                                             linkname, netid,
+                                                             ip_version, ip,
+                                                             route,
+                                                             nameservers)
+
+                fp = os.path.join(sc_dir, 'evs-network-%d.xml' % netid)
+                sysconfig.create_sc_profile(fp, tree)
 
         if tenant_id is not None:
             # set the tenant id
@@ -1200,14 +1308,19 @@
                    % (brand, name)))
             raise exception.NovaException(msg)
 
-        sc_profile = extra_specs.get('install:sc_profile')
-        if sc_profile is not None:
-            if os.path.isfile(sc_profile):
-                shutil.copy(sc_profile, sc_dir)
-            elif os.path.isdir(sc_profile):
-                shutil.copytree(sc_profile, os.path.join(sc_dir, 'sysconfig'))
-
-        self._verify_sysconfig(sc_dir, instance)
+        tstate = instance['task_state']
+        if tstate not in [task_states.RESIZE_FINISH,
+                           task_states.RESIZE_REVERTING,
+                           task_states.RESIZE_MIGRATING]:
+            sc_profile = extra_specs.get('install:sc_profile')
+            if sc_profile is not None:
+                if os.path.isfile(sc_profile):
+                    shutil.copy(sc_profile, sc_dir)
+                elif os.path.isdir(sc_profile):
+                    shutil.copytree(sc_profile, os.path.join(sc_dir,
+                                    'sysconfig'))
+
+            self._verify_sysconfig(sc_dir, instance)
 
         LOG.debug(_("Creating zone configuration for '%s' (%s)")
                   % (name, instance['display_name']))
@@ -1571,7 +1684,8 @@
 
         if connection_info is not None:
             bdm = objects.BlockDeviceMapping(
-                    source_type='volume', destination_type='volume',
+                    source_type='volume',
+                    destination_type='volume',
                     instance_uuid=instance.uuid,
                     volume_id=volume_id,
                     connection_info=jsonutils.dumps(connection_info),
@@ -1607,6 +1721,40 @@
                         "%s") % (name, reason))
             raise exception.InstancePowerOffFailure(reason=reason)
 
+    def _samehost_revert_resize(self, context, instance, network_info,
+                                block_device_info):
+        """Reverts the zones configuration to pre-resize config
+        """
+        self.power_off(instance)
+
+        inst_type = flavor_obj.Flavor.get_by_id(
+            nova_context.get_admin_context(read_deleted='yes'),
+            instance['instance_type_id'])
+        extra_specs = inst_type['extra_specs'].copy()
+        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+
+        name = instance['name']
+
+        cpu = int(instance.system_metadata['old_instance_type_vcpus'])
+        mem = int(instance.system_metadata['old_instance_type_memory_mb'])
+
+        self._set_num_cpu(name, cpu, brand)
+        self._set_memory_cap(name, mem, brand)
+
+        rgb = int(instance.system_metadata['new_instance_type_root_gb'])
+        old_rvid = instance.system_metadata.get('old_instance_volid')
+        if old_rvid:
+            new_rvid = instance.system_metadata.get('new_instance_volid')
+            newvname = instance['display_name'] + "-" + self._rootzpool_suffix
+            mount_dev = instance['root_device_name']
+            del instance.system_metadata['new_instance_volid']
+            del instance.system_metadata['old_instance_volid']
+
+            self._resize_disk_migration(context, instance, new_rvid, old_rvid,
+                                        rgb, mount_dev)
+
+            self._volume_api.delete(context, new_rvid)
+
     def destroy(self, context, instance, network_info, block_device_info=None,
                 destroy_disks=True, migrate_data=None):
         """Destroy the specified instance from the Hypervisor.
@@ -1624,6 +1772,12 @@
         :param destroy_disks: Indicates if disks should be destroyed
         :param migrate_data: implementation specific params
         """
+        if (instance['task_state'] == task_states.RESIZE_REVERTING and
+           instance.system_metadata['old_vm_state'] == vm_states.RESIZED):
+            self._samehost_revert_resize(context, instance, network_info,
+                                         block_device_info)
+            return
+
         try:
             # These methods log if problems occur so no need to double log
             # here. Just catch any stray exceptions and allow destroy to
@@ -1654,6 +1808,33 @@
             LOG.warning(_("Unable to destroy instance '%s' via zonemgr(3RAD): "
                           "%s") % (name, reason))
 
+        # One last piece of housekeeping: if we are deleting the instance
+        # during a resize operation, make sure the cinder volumes are properly
+        # cleaned up. We need to do this here because the periodic task that
+        # eventually cleans these things up is not passed a context, so it
+        # cannot do the volume cleanup itself. Since we do have access to a
+        # context, we handle the volume cleanup here and let the periodic task
+        # simply remove any leftover zone configuration that might remain.
+        # Note that such a leftover zone will only show up in zoneadm list,
+        # not in nova list.
+        #
+        # If the task state is RESIZE_REVERTING do not process these because
+        # the cinder volume cleanup is taken care of in
+        # finish_revert_migration.
+        if instance['task_state'] == task_states.RESIZE_REVERTING:
+            return
+
+        tags = ['old_instance_volid', 'new_instance_volid']
+        for tag in tags:
+            volid = instance.system_metadata.get(tag)
+            if volid:
+                try:
+                    LOG.debug(_("Deleting volume %s"), volid)
+                    self._volume_api.delete(context, volid)
+                    del instance.system_metadata[tag]
+                except Exception:
+                    pass
+
     def cleanup(self, context, instance, network_info, block_device_info=None,
                 destroy_disks=True, migrate_data=None, destroy_vifs=True):
         """Cleanup the instance resources .
@@ -1937,7 +2118,8 @@
             zc.addresource("device", [zonemgr.Property("storage", suri)])
 
         # apply the configuration to the running zone
-        zone.apply()
+        if zone.state == ZONE_STATE_RUNNING:
+            zone.apply()
 
     def detach_volume(self, connection_info, instance, mountpoint,
                       encryption=None):
@@ -1968,7 +2150,8 @@
             zc.removeresources("device", [zonemgr.Property("storage", suri)])
 
         # apply the configuration to the running zone
-        zone.apply()
+        if zone.state == ZONE_STATE_RUNNING:
+            zone.apply()
 
     def swap_volume(self, old_connection_info, new_connection_info,
                     instance, mountpoint, resize_to):
@@ -1995,6 +2178,19 @@
         """
         raise NotImplementedError()
 
+    def _cleanup_migrate_disk(self, context, instance, volume):
+        """Make a best effort at cleaning up the volume that was created to
+        hold the new root disk
+
+        :param context: the context for the migration/resize
+        :param instance: nova.objects.instance.Instance being migrated/resized
+        :param volume: new volume created by the call to cinder create
+        """
+        try:
+            self._volume_api.delete(context, volume['id'])
+        except Exception as err:
+            LOG.error(_("Unable to cleanup the resized volume: %s" % err))
+
     def migrate_disk_and_power_off(self, context, instance, dest,
                                    flavor, network_info,
                                    block_device_info=None,
@@ -2007,7 +2203,88 @@
         :param retry_interval: How often to signal guest while
                                waiting for it to shutdown
         """
-        raise NotImplementedError()
+        LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
+
+        samehost = (dest == self.get_host_ip_addr())
+        inst_type = flavor_obj.Flavor.get_by_id(
+            nova_context.get_admin_context(read_deleted='yes'),
+            instance['instance_type_id'])
+        extra_specs = inst_type['extra_specs'].copy()
+        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+        if brand != ZONE_BRAND_SOLARIS_KZ and not samehost:
+            msg = (_("'%s' branded zones do not currently support "
+                     "resize to a different host.") % brand)
+            raise exception.MigrationPreCheckError(reason=msg)
+
+        if brand != flavor['extra_specs'].get('zonecfg:brand'):
+            msg = (_("Unable to change brand of zone during resize."))
+            raise exception.MigrationPreCheckError(reason=msg)
+
+        orgb = instance['root_gb']
+        nrgb = int(instance.system_metadata['new_instance_type_root_gb'])
+        if orgb > nrgb:
+            msg = (_("Unable to resize to a smaller boot volume."))
+            raise exception.ResizeError(reason=msg)
+
+        self.power_off(instance, timeout, retry_interval)
+
+        disk_info = None
+        if nrgb > orgb or not samehost:
+            bmap = block_device_info.get('block_device_mapping')
+            rootmp = instance.root_device_name
+            for entry in bmap:
+                mountdev = entry['mount_device'].rpartition('/')[2]
+                if mountdev == rootmp:
+                    root_ci = entry['connection_info']
+                    break
+            else:
+                # If this is a non-global zone that is on the same host and is
+                # simply using a dataset, the disk size is purely an OpenStack
+                # quota.  We can continue without doing any disk work.
+                if samehost and brand == ZONE_BRAND_SOLARIS:
+                    return disk_info
+                else:
+                    msg = (_("Cannot find an attached root device."))
+                    raise exception.ResizeError(reason=msg)
+
+            if root_ci['driver_volume_type'] == 'iscsi':
+                volume_id = root_ci['data']['volume_id']
+            else:
+                volume_id = root_ci['serial']
+
+            if volume_id is None:
+                msg = (_("Cannot find an attached root device."))
+                raise exception.ResizeError(reason=msg)
+
+            vinfo = self._volume_api.get(context, volume_id)
+            newvolume = self._volume_api.create(context, orgb,
+                                                vinfo['display_name'] +
+                                                '-resized',
+                                                vinfo['display_description'],
+                                                source_volume=vinfo)
+
+            instance.system_metadata['old_instance_volid'] = volume_id
+            instance.system_metadata['new_instance_volid'] = newvolume['id']
+
+            # TODO(npower): Polling is what nova/compute/manager also does when
+            # creating a new volume, so we do likewise here.
+            while True:
+                volume = self._volume_api.get(context, newvolume['id'])
+                if volume['status'] != 'creating':
+                    break
+                greenthread.sleep(1)
+
+            if nrgb > orgb:
+                try:
+                    self._volume_api.extend(context, newvolume['id'], nrgb)
+                except Exception:
+                    LOG.error(_("Failed to extend the new volume"))
+                    self._cleanup_migrate_disk(context, instance, newvolume)
+                    raise
+
+            disk_info = newvolume
+
+        return disk_info
 
     def snapshot(self, context, instance, image_id, update_task_state):
         """Snapshots the specified instance.
@@ -2105,6 +2382,42 @@
         """
         pass
 
+    def _cleanup_finish_migration(self, context, instance, disk_info,
+                                  network_info, samehost):
+        """Best effort attempt at cleaning up any additional resources that are
+        not directly managed by Nova or Cinder so as not to leak these
+        resources.
+        """
+        if disk_info:
+            self._volume_api.detach(context, disk_info['id'])
+            self._volume_api.delete(context, disk_info['id'])
+
+            old_rvid = instance.system_metadata.get('old_instance_volid')
+            if old_rvid:
+                connector = self.get_volume_connector(instance)
+                connection_info = self._volume_api.initialize_connection(
+                                    context, old_rvid, connector)
+
+                new_rvid = instance.system_metadata['new_instance_volid']
+
+                rootmp = instance.root_device_name
+                self._volume_api.attach(context, old_rvid, instance['uuid'],
+                                        rootmp)
+
+                bdmobj = objects.BlockDeviceMapping()
+                bdm = bdmobj.get_by_volume_id(context, new_rvid)
+                bdm['connection_info'] = jsonutils.dumps(connection_info)
+                bdm['volume_id'] = old_rvid
+                bdm.save()
+
+                del instance.system_metadata['new_instance_volid']
+                del instance.system_metadata['old_instance_volid']
+
+        if not samehost:
+            self.destroy(context, instance, network_info)
+            instance['host'] = instance['launched_on']
+            instance['node'] = instance['launched_on']
+
     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance,
                          block_device_info=None, power_on=True):
@@ -2125,15 +2438,183 @@
         :param power_on: True if the instance should be powered on, False
                          otherwise
         """
-        raise NotImplementedError()
-
-    def confirm_migration(self, migration, instance, network_info):
+        if not resize_instance:
+            raise NotImplementedError()
+
+        samehost = (migration['dest_node'] == migration['source_node'])
+        if samehost:
+            instance.system_metadata['old_vm_state'] = vm_states.RESIZED
+
+        inst_type = flavor_obj.Flavor.get_by_id(
+            nova_context.get_admin_context(read_deleted='yes'),
+            instance['instance_type_id'])
+        extra_specs = inst_type['extra_specs'].copy()
+        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+        name = instance['name']
+
+        if disk_info:
+            bmap = block_device_info.get('block_device_mapping')
+            rootmp = instance['root_device_name']
+            for entry in bmap:
+                if entry['mount_device'] == rootmp:
+                    mount_dev = entry['mount_device']
+                    root_ci = entry['connection_info']
+                    break
+
+        try:
+            if samehost:
+                metadstr = 'new_instance_type_vcpus'
+                cpu = int(instance.system_metadata[metadstr])
+                metadstr = 'new_instance_type_memory_mb'
+                mem = int(instance.system_metadata[metadstr])
+                self._set_num_cpu(name, cpu, brand)
+                self._set_memory_cap(name, mem, brand)
+
+                # Add the new disk to the volume if the size of the disk
+                # changed
+                if disk_info:
+                    metadstr = 'new_instance_type_root_gb'
+                    rgb = int(instance.system_metadata[metadstr])
+                    self._resize_disk_migration(context, instance,
+                                                root_ci['serial'],
+                                                disk_info['id'],
+                                                rgb, mount_dev)
+
+            else:
+                # No need to check disk_info here, because when not on the
+                # same host a disk_info is always passed in.
+                mount_dev = 'c1d0'
+                root_serial = root_ci['serial']
+                connection_info = self._resize_disk_migration(context,
+                                                              instance,
+                                                              root_serial,
+                                                              disk_info['id'],
+                                                              0, mount_dev,
+                                                              samehost)
+
+                self._create_config(context, instance, network_info,
+                                    connection_info, None)
+
+                zone = self._get_zone_by_name(name)
+                if zone is None:
+                    raise exception.InstanceNotFound(instance_id=name)
+
+                zone.attach(['-x', 'initialize-hostdata'])
+
+                bmap = block_device_info.get('block_device_mapping')
+                for entry in bmap:
+                    if entry['mount_device'] != rootmp:
+                        self.attach_volume(context,
+                                           entry['connection_info'], instance,
+                                           entry['mount_device'])
+
+            if power_on:
+                self._power_on(instance)
+
+                if brand == ZONE_BRAND_SOLARIS:
+                    return
+
+                # Toggle the autoexpand to extend the size of the rpool.
+                # We need to sleep for a few seconds to make sure the zone
+                # is in a state to accept the toggle.  Once bugs are fixed
+                # around the autoexpand and the toggle is no longer needed
+                # or zone.boot() returns only after the zone is ready we
+                # can remove this hack.
+                greenthread.sleep(15)
+                out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
+                                         '/usr/sbin/zpool', 'set',
+                                         'autoexpand=off', 'rpool')
+                out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
+                                         '/usr/sbin/zpool', 'set',
+                                         'autoexpand=on', 'rpool')
+        except Exception:
+            # Attempt to cleanup the new zone and new volume to at least
+            # give the user a chance to recover without too many hoops
+            self._cleanup_finish_migration(context, instance, disk_info,
+                                           network_info, samehost)
+            raise
+
+    def confirm_migration(self, context, migration, instance, network_info):
         """Confirms a resize, destroying the source VM.
 
         :param instance: nova.objects.instance.Instance
         """
-        # TODO(Vek): Need to pass context in for access to auth_token
-        raise NotImplementedError()
+        samehost = (migration['dest_host'] == self.get_host_ip_addr())
+        old_rvid = instance.system_metadata.get('old_instance_volid')
+        new_rvid = instance.system_metadata.get('new_instance_volid')
+        if new_rvid and old_rvid:
+            new_vname = instance['display_name'] + "-" + self._rootzpool_suffix
+            del instance.system_metadata['old_instance_volid']
+            del instance.system_metadata['new_instance_volid']
+
+            self._volume_api.delete(context, old_rvid)
+            self._volume_api.update(context, new_rvid,
+                                    {'display_name': new_vname})
+
+        if not samehost:
+            self.destroy(context, instance, network_info)
+
+    def _resize_disk_migration(self, context, instance, configured,
+                               replacement, newvolumesz, mountdev,
+                               samehost=True):
+        """Handles the zone root volume switch-over or simply
+        initializing the connection for the new zone if not resizing to the
+        same host
+
+        :param context: the context for the _resize_disk_migration
+        :param instance: nova.objects.instance.Instance being resized
+        :param configured: id of the current configured volume
+        :param replacement: id of the new volume
+        :param newvolumesz: size of the new volume
+        :param mountdev: the mount point of the device
+        :param samehost: is the resize happening on the same host
+        """
+        connector = self.get_volume_connector(instance)
+        connection_info = self._volume_api.initialize_connection(context,
+                                                                 replacement,
+                                                                 connector)
+        connection_info['serial'] = replacement
+        rootmp = instance.root_device_name
+
+        if samehost:
+            name = instance['name']
+            zone = self._get_zone_by_name(name)
+            if zone is None:
+                raise exception.InstanceNotFound(instance_id=name)
+
+            # Need to detach the zone and re-attach the zone if this is a
+            # non-global zone so that the update of the rootzpool resource does
+            # not fail.
+            if zone.brand == ZONE_BRAND_SOLARIS:
+                zone.detach()
+
+            try:
+                self._set_boot_device(name, connection_info, zone.brand)
+            finally:
+                if zone.brand == ZONE_BRAND_SOLARIS:
+                    zone.attach()
+
+        try:
+            self._volume_api.detach(context, configured)
+        except Exception:
+            LOG.error(_("Failed to detach the volume"))
+            raise
+
+        try:
+            self._volume_api.attach(context, replacement, instance['uuid'],
+                                    rootmp)
+        except Exception:
+            LOG.error(_("Failed to attach the volume"))
+            raise
+
+        bdmobj = objects.BlockDeviceMapping()
+        bdm = bdmobj.get_by_volume_id(context, configured)
+        bdm['connection_info'] = jsonutils.dumps(connection_info)
+        bdm['volume_id'] = replacement
+        bdm.save()
+
+        if not samehost:
+            return connection_info
 
     def finish_revert_migration(self, context, instance, network_info,
                                 block_device_info=None, power_on=True):
@@ -2147,7 +2628,42 @@
         :param power_on: True if the instance should be powered on, False
                          otherwise
         """
-        raise NotImplementedError()
+        # If this is not a same-host migration then we need to re-attach the
+        # original volume to the instance. If this was already processed in
+        # the initial revert handling, this work has already been done.
+        old_rvid = instance.system_metadata.get('old_instance_volid')
+        if old_rvid:
+            connector = self.get_volume_connector(instance)
+            connection_info = self._volume_api.initialize_connection(context,
+                                                                     old_rvid,
+                                                                     connector)
+
+            new_rvid = instance.system_metadata['new_instance_volid']
+            self._volume_api.detach(context, new_rvid)
+            self._volume_api.delete(context, new_rvid)
+
+            rootmp = instance.root_device_name
+            self._volume_api.attach(context, old_rvid, instance['uuid'],
+                                    rootmp)
+
+            bdmobj = objects.BlockDeviceMapping()
+            bdm = bdmobj.get_by_volume_id(context, new_rvid)
+            bdm['connection_info'] = jsonutils.dumps(connection_info)
+            bdm['volume_id'] = old_rvid
+            bdm.save()
+
+            del instance.system_metadata['new_instance_volid']
+            del instance.system_metadata['old_instance_volid']
+
+            rootmp = instance.root_device_name
+            bmap = block_device_info.get('block_device_mapping')
+            for entry in bmap:
+                if entry['mount_device'] != rootmp:
+                    self.attach_volume(context,
+                                       entry['connection_info'], instance,
+                                       entry['mount_device'])
+
+        self._power_on(instance)
 
     def pause(self, instance):
         """Pause the specified instance.
@@ -3144,6 +3660,14 @@
         :param instance: nova.objects.instance.Instance
         :returns: True if the instance was deleted from disk, False otherwise.
         """
+        LOG.debug(_("Cleaning up for instance %s"), instance['name'])
+        # Delete the zone configuration for the instance using destroy, because
+        # it will simply take care of the work, and we don't need to duplicate
+        # the code here.
+        try:
+            self.destroy(None, instance, None)
+        except Exception:
+            return False
         return True
 
     @property
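
For reference while reading the driver changes above: the resize paths coordinate through a handful of instance.system_metadata keys, some written by nova-compute's resize code and some by the driver itself (in migrate_disk_and_power_off, finish_migration and the revert/cleanup paths). A small illustrative summary, not part of the driver:

    # resize_metadata_sketch.py -- illustrative summary only; the key names are
    # the ones used by the driver.py changes in this changeset.
    RESIZE_METADATA_KEYS = (
        'old_instance_type_vcpus',      # source flavor vcpus (revert path)
        'old_instance_type_memory_mb',  # source flavor memory (revert path)
        'new_instance_type_vcpus',      # target flavor vcpus (finish_migration)
        'new_instance_type_memory_mb',  # target flavor memory (finish_migration)
        'new_instance_type_root_gb',    # target root volume size in GB
        'old_instance_volid',           # cinder id of the original root volume
        'new_instance_volid',           # cinder id of the cloned, resized volume
        'old_vm_state',                 # vm_states.RESIZED for same-host resizes
    )

    def resize_state(system_metadata):
        """Return only the resize-related entries of system_metadata."""
        return {key: system_metadata[key] for key in RESIZE_METADATA_KEYS
                if key in system_metadata}
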
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/08-confirm_migration_context.patch	Tue Feb 09 16:26:27 2016 -0800
@@ -0,0 +1,115 @@
+In-house patch to fix a long-standing "TODO" by adding the context
+argument to the confirm_migration() function.
+
+--- ./nova/virt/driver.py.~1~	2015-07-06 14:54:33.047120275 -0700
++++ ./nova/virt/driver.py	2015-07-06 14:54:38.753366130 -0700
+@@ -532,7 +532,7 @@ class ComputeDriver(object):
+         """
+         raise NotImplementedError()
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         """Confirms a resize, destroying the source VM.
+ 
+         :param instance: nova.objects.instance.Instance
+--- ./nova/virt/hyperv/driver.py.~1~	2015-07-06 14:54:08.762060896 -0700
++++ ./nova/virt/hyperv/driver.py	2015-07-06 14:54:15.312335765 -0700
+@@ -205,7 +205,7 @@ class HyperVDriver(driver.ComputeDriver)
+                                                              timeout,
+                                                              retry_interval)
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         self._migrationops.confirm_migration(migration, instance, network_info)
+ 
+     def finish_revert_migration(self, context, instance, network_info,
+--- ./nova/virt/hyperv/migrationops.py.~1~	2015-07-06 14:54:01.054149365 -0700
++++ ./nova/virt/hyperv/migrationops.py	2015-07-06 14:54:07.641370995 -0700
+@@ -134,7 +134,7 @@ class MigrationOps(object):
+         # disk_info is not used
+         return ""
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         LOG.debug("confirm_migration called", instance=instance)
+ 
+         self._pathutils.get_instance_migr_revert_dir(instance['name'],
+--- ./nova/virt/vmwareapi/driver.py.~1~	2015-07-06 14:54:25.357745275 -0700
++++ ./nova/virt/vmwareapi/driver.py	2015-07-06 14:54:31.960294652 -0700
+@@ -240,7 +240,7 @@ class VMwareVCDriver(driver.ComputeDrive
+         return _vmops.migrate_disk_and_power_off(context, instance,
+                                                  dest, flavor)
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         """Confirms a resize, destroying the source VM."""
+         _vmops = self._get_vmops_for_compute_node(instance['node'])
+         _vmops.confirm_migration(migration, instance, network_info)
+--- ./nova/virt/vmwareapi/vmops.py.~1~	2015-07-06 14:54:16.425639034 -0700
++++ ./nova/virt/vmwareapi/vmops.py	2015-07-06 14:54:24.242155495 -0700
+@@ -1038,7 +1038,7 @@ class VMwareVMOps(object):
+                                        step=3,
+                                        total_steps=RESIZE_TOTAL_STEPS)
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         """Confirms a resize, destroying the source VM."""
+         # Destroy the original VM. The vm_ref needs to be searched using the
+         # instance.uuid + self._migrate_suffix as the identifier. We will
+--- ./nova/virt/xenapi/driver.py.~1~	2015-07-06 14:54:39.888243081 -0700
++++ ./nova/virt/xenapi/driver.py	2015-07-06 14:54:57.800183720 -0700
+@@ -203,9 +203,8 @@ class XenAPIDriver(driver.ComputeDriver)
+         self._vmops.spawn(context, instance, image_meta, injected_files,
+                           admin_password, network_info, block_device_info)
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         """Confirms a resize, destroying the source VM."""
+-        # TODO(Vek): Need to pass context in for access to auth_token
+         self._vmops.confirm_migration(migration, instance, network_info)
+ 
+     def finish_revert_migration(self, context, instance, network_info,
+--- ./nova/virt/xenapi/vmops.py.~1~	2015-07-06 14:54:59.066221974 -0700
++++ ./nova/virt/xenapi/vmops.py	2015-07-06 14:55:06.185514152 -0700
+@@ -203,7 +203,7 @@ class VMOps(object):
+                 nova_uuids.append(nova_uuid)
+         return nova_uuids
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         self._destroy_orig_vm(instance, network_info)
+ 
+     def _destroy_orig_vm(self, instance, network_info):
+--- ./nova/virt/fake.py.~1~	2015-07-06 14:53:48.635021003 -0700
++++ ./nova/virt/fake.py	2015-07-06 14:53:59.952265943 -0700
+@@ -434,7 +434,7 @@ class FakeDriver(driver.ComputeDriver):
+                          block_device_info=None, power_on=True):
+         return
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         return
+ 
+     def pre_live_migration(self, context, instance_ref, block_device_info,
+--- ./nova/virt/libvirt/driver.py.~1~	2015-07-06 14:55:07.301721887 -0700
++++ ./nova/virt/libvirt/driver.py	2015-07-06 14:55:13.513983532 -0700
+@@ -6095,7 +6095,7 @@ class LibvirtDriver(driver.ComputeDriver
+                                                     instance)
+             timer.start(interval=0.5).wait()
+ 
+-    def confirm_migration(self, migration, instance, network_info):
++    def confirm_migration(self, context, migration, instance, network_info):
+         """Confirms a resize, destroying the source VM."""
+         self._cleanup_resize(instance, network_info)
+ 
+--- ./nova/compute/manager.py.~1~	2015-07-06 14:56:34.328028956 -0700
++++ ./nova/compute/manager.py	2015-07-06 14:56:48.919044192 -0700
+@@ -3431,7 +3431,7 @@ class ComputeManager(manager.Manager):
+                                migration.source_compute, teardown=True)
+ 
+             network_info = self._get_instance_nw_info(context, instance)
+-            self.driver.confirm_migration(migration, instance,
++            self.driver.confirm_migration(context, migration, instance,
+                                           network_info)
+ 
+             migration.status = 'confirmed'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/09-CVE-2015-3280.patch	Tue Feb 09 16:26:27 2016 -0800
@@ -0,0 +1,200 @@
+Upstream patch to address CVE-2015-3280.  This fix will be included in
+the future 2014.2.3 (juno) release.
+
+From fa72fb8b51d59e04913c871539cee98a3da79058 Mon Sep 17 00:00:00 2001
+From: Rajesh Tailor <[email protected]>
+Date: Wed, 4 Mar 2015 05:05:19 -0800
+Subject: Delete orphaned instance files from compute nodes
+
+While resizing/revert-resizing instance, if instance gets deleted
+in between, then instance files remains either on the source or
+destination compute node.
+
+To address this issue, added a new periodic task
+'_cleanup_incomplete_migrations' which takes care of deleting
+instance files from source/destination compute nodes and then
+mark migration record as failed so that it doesn't appear again
+in the next periodic task run.
+
+Conflicts:
+	nova/compute/manager.py
+	nova/tests/unit/compute/test_compute_mgr.py
+
+SecurityImpact
+Closes-Bug: 1392527
+Change-Id: I9866d8e32e99b9f907921f4b226edf7b62bd83a7
+(cherry picked from commit 18d6b5cc79973fc553daf7a92f22cce4dc0ca013)
+
+--- nova-2014.2.2/nova/compute/manager.py.~1~   2015-09-02 14:46:43.532548379 -0700
++++ nova-2014.2.2/nova/compute/manager.py       2015-09-02 14:47:57.813280934 -0700
+@@ -257,12 +257,18 @@ def errors_out_migration(function):
+     def decorated_function(self, context, *args, **kwargs):
+         try:
+             return function(self, context, *args, **kwargs)
+-        except Exception:
++        except Exception as ex:
+             with excutils.save_and_reraise_exception():
+                 migration = kwargs['migration']
+-                status = migration.status
+-                if status not in ['migrating', 'post-migrating']:
+-                    return
++
++                # NOTE(rajesht): If InstanceNotFound error is thrown from
++                # decorated function, migration status should be set to
++                # 'error', without checking current migration status.
++                if not isinstance(ex, exception.InstanceNotFound):
++                    status = migration.status
++                    if status not in ['migrating', 'post-migrating']:
++                        return
++
+                 migration.status = 'error'
+                 try:
+                     migration.save(context.elevated())
+@@ -3469,6 +3475,7 @@ class ComputeManager(manager.Manager):
+     @wrap_exception()
+     @reverts_task_state
+     @wrap_instance_event
++    @errors_out_migration
+     @wrap_instance_fault
+     def revert_resize(self, context, instance, migration, reservations):
+         """Destroys the new instance on the destination machine.
+@@ -3523,6 +3530,7 @@ class ComputeManager(manager.Manager):
+     @wrap_exception()
+     @reverts_task_state
+     @wrap_instance_event
++    @errors_out_migration
+     @wrap_instance_fault
+     def finish_revert_resize(self, context, instance, reservations, migration):
+         """Finishes the second half of reverting a resize.
+@@ -6246,3 +6254,48 @@ class ComputeManager(manager.Manager):
+                     instance.cleaned = True
+                 with utils.temporary_mutation(context, read_deleted='yes'):
+                     instance.save(context)
++
++    @periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
++    def _cleanup_incomplete_migrations(self, context):
++        """Delete instance files on failed resize/revert-resize operation
++
++        During resize/revert-resize operation, if that instance gets deleted
++        in-between then instance files might remain either on source or
++        destination compute node because of race condition.
++        """
++        LOG.debug('Cleaning up deleted instances with incomplete migration ')
++        migration_filters = {'host':CONF.host,
++                             'status': 'error'}
++        migrations = objects.MigrationList.get_by_filters(context,
++                                                         migration_filters)
++
++        if not migrations:
++            return
++
++        inst_uuid_from_migrations = set([migration.instance_uuid for migration
++                                         in migrations])
++
++        inst_filters = {'deleted': True, 'soft_deleted': False,
++                        'uuid': inst_uuid_from_migrations}
++        attrs = ['info_cache', 'security_groups', 'system_metadata']
++        with utils.temporary_mutation(context, read_deleted='yes'):
++            instances = objects.InstanceList.get_by_filters(
++                context, inst_filters, expected_attrs=attrs, use_slave=True)
++
++        for instance in instances:
++            if instance.host != CONF.host:
++                for migration in migrations:
++                    if instance.uuid == migration.instance_uuid:
++                        # Delete instance files if not cleanup properly either
++                        # from the source or destination compute nodes when
++                        # the instance is deleted during resizing.
++                        self.driver.delete_instance_files(instance)
++                        try:
++                            migration.status = 'failed'
++                            migration.save(context.elevated())
++                        except exception.MigrationNotFound:
++                            LOG.warning(_LW("Migration %s is not found."),
++                                        migration.id, context=context,
++                                        instance=instance)
++                        break
++
+
+--- ./nova/tests/compute/test_compute_mgr.py.~1~	2015-09-29 09:45:07.760433246 -0700
++++ ./nova/tests/compute/test_compute_mgr.py		2015-09-29 09:48:00.008811912 -0700
+@@ -1047,6 +1047,79 @@ class ComputeManagerUnitTestCase(test.No
+         self.assertFalse(c.cleaned)
+         self.assertEqual('1', c.system_metadata['clean_attempts'])
+ 
++    @mock.patch.object(objects.Migration, 'save')
++    @mock.patch.object(objects.MigrationList, 'get_by_filters')
++    @mock.patch.object(objects.InstanceList, 'get_by_filters')
++    def _test_cleanup_incomplete_migrations(self, inst_host,
++                                            mock_inst_get_by_filters,
++                                            mock_migration_get_by_filters,
++                                            mock_save):
++        def fake_inst(context, uuid, host):
++            inst = objects.Instance(context)
++            inst.uuid = uuid
++            inst.host = host
++            return inst
++
++        def fake_migration(uuid, status, inst_uuid, src_host, dest_host):
++            migration = objects.Migration()
++            migration.uuid = uuid
++            migration.status = status
++            migration.instance_uuid = inst_uuid
++            migration.source_compute = src_host
++            migration.dest_compute = dest_host
++            return migration
++
++        fake_instances = [fake_inst(self.context, '111', inst_host),
++                          fake_inst(self.context, '222', inst_host)]
++
++        fake_migrations = [fake_migration('123', 'error', '111',
++                                          'fake-host', 'fake-mini'),
++                           fake_migration('456', 'error', '222',
++                                          'fake-host', 'fake-mini')]
++
++        mock_migration_get_by_filters.return_value = fake_migrations
++        mock_inst_get_by_filters.return_value = fake_instances
++
++        with mock.patch.object(self.compute.driver, 'delete_instance_files'):
++            self.compute._cleanup_incomplete_migrations(self.context)
++
++        # Ensure that migration status is set to 'failed' after instance
++        # files deletion for those instances whose instance.host is not
++        # same as compute host where periodic task is running.
++        for inst in fake_instances:
++            if inst.host != CONF.host:
++                for mig in fake_migrations:
++                    if inst.uuid == mig.instance_uuid:
++                        self.assertEqual('failed', mig.status)
++
++    def test_cleanup_incomplete_migrations_dest_node(self):
++        """Test to ensure instance files are deleted from destination node.
++
++        If an instance gets deleted during resizing/revert-resizing
++        operation, in that case instance files gets deleted from
++        instance.host (source host here), but there is possibility that
++        instance files could be present on destination node.
++
++        This test ensures that `_cleanup_incomplete_migration` periodic
++        task deletes orphaned instance files from destination compute node.
++        """
++        self.flags(host='fake-mini')
++        self._test_cleanup_incomplete_migrations('fake-host')
++
++    def test_cleanup_incomplete_migrations_source_node(self):
++        """Test to ensure instance files are deleted from source node.
++
++        If instance gets deleted during resizing/revert-resizing operation,
++        in that case instance files gets deleted from instance.host (dest
++        host here), but there is possibility that instance files could be
++        present on source node.
++
++        This test ensures that `_cleanup_incomplete_migration` periodic
++        task deletes orphaned instance files from source compute node.
++        """
++        self.flags(host='fake-host')
++        self._test_cleanup_incomplete_migrations('fake-mini')
++
+     def test_attach_interface_failure(self):
+         # Test that the fault methods are invoked when an attach fails
+         db_instance = fake_instance.fake_db_instance()