PSARC 2015/324 OpenStack Nova support for kernel zone live-migration s11u3-sru
author saurabh.vyas@oracle.com
Tue, 15 Dec 2015 00:08:40 -0800
branch s11u3-sru
changeset 5413 bca6b9853ab7
parent 5412 8566c7ab4a73
child 5414 1697d1e334a5
PSARC 2015/324 OpenStack Nova support for kernel zone live-migration
19438929 'nova live-migration' shouldn't leave instances in an error state
20739475 Nova driver should support live-migration
components/openstack/horizon/files/overrides.py
components/openstack/nova/files/nova.conf
components/openstack/nova/files/nova.prof_attr
components/openstack/nova/files/solariszones/driver.py
components/openstack/nova/nova.p5m
components/openstack/nova/patches/10-launchpad-1356552.patch
components/openstack/nova/patches/11-launchpad-1377644.patch
components/openstack/nova/patches/12-launchpad-1397153.patch
--- a/components/openstack/horizon/files/overrides.py	Tue Dec 15 00:08:40 2015 -0800
+++ b/components/openstack/horizon/files/overrides.py	Tue Dec 15 00:08:40 2015 -0800
@@ -1,4 +1,4 @@
-# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -16,6 +16,8 @@
 Solaris-specific customizations for Horizon
 """
 
+from openstack_dashboard.dashboards.admin.instances.forms import \
+    LiveMigrateForm
 from openstack_dashboard.dashboards.admin.instances import tables \
     as admin_tables
 from openstack_dashboard.dashboards.admin.networks.forms import CreateNetwork
@@ -29,17 +31,12 @@
     DeleteNetwork, NetworksTable
 from openstack_dashboard.dashboards.project.access_and_security.tabs import \
     AccessAndSecurityTabs, APIAccessTab, FloatingIPsTab, KeypairsTab
-from \
-    openstack_dashboard.dashboards.project.images.images.tables \
-    import EditImage, DeleteImage, ImagesTable, LaunchImage
+from openstack_dashboard.dashboards.project.images.images.tables import \
+    EditImage, DeleteImage, ImagesTable, LaunchImage
 from openstack_dashboard.dashboards.project.instances import tables \
     as project_tables
-from openstack_dashboard.dashboards.project.instances.tabs import \
-    InstanceDetailTabs, LogTab, OverviewTab
 from openstack_dashboard.dashboards.project.instances.workflows import \
-    create_instance
-from openstack_dashboard.dashboards.project.instances.workflows import \
-    update_instance
+    create_instance, update_instance
 from openstack_dashboard.dashboards.project.networks.ports.tables import \
     PortsTable as projectPortsTable
 from openstack_dashboard.dashboards.project.networks.subnets.tables import \
@@ -54,10 +51,9 @@
     NetworksTable as projectNetworksTable
 from openstack_dashboard.dashboards.project.networks.workflows import \
     CreateNetworkInfoAction, CreateSubnetDetailAction, CreateSubnetInfoAction
-from openstack_dashboard.dashboards.project.stacks.tabs import \
-    StackDetailTabs, StackEventsTab, StackOverviewTab, StackResourcesTab
 
-# Remove 'PostCreationStep' from Project/Instances/Launch Instance
+# Remove 'PostCreationStep', 'SetAdvanced' from
+# Project/Compute/Instances/Launch Instance
 create_instance.LaunchInstance.default_steps = (
     create_instance.SelectProjectUser,
     create_instance.SetInstanceDetails,
@@ -66,29 +62,30 @@
 )
 
 # Remove 'UpdateInstanceSecurityGroups' from
-# Project/Instances/Actions/Edit Instance
+# Project/Compute/Instances/Actions/Edit Instance
 update_instance.UpdateInstance.default_steps = (
     update_instance.UpdateInstanceInfo,
 )
 
-# Remove 'SecurityGroupsTab' tab from Project/Access & Security
+# Remove 'SecurityGroupsTab' tab from Project/Compute/Access & Security
 AccessAndSecurityTabs.tabs = (KeypairsTab, FloatingIPsTab, APIAccessTab)
 
-# Remove 'ConfirmResize', 'RevertResize', 'TogglePause',
-# 'ToggleSuspend', 'MigrateInstance' actions from Admin/Instances/Actions
+# Remove 'ConfirmResize', 'RevertResize', 'TogglePause', 'ToggleSuspend',
+# 'MigrateInstance' actions from Admin/System/Instances/Actions
 admin_tables.AdminInstancesTable._meta.row_actions = (
     admin_tables.AdminEditInstance,
     project_tables.ConsoleLink,
     project_tables.LogLink,
     project_tables.CreateSnapshot,
+    admin_tables.LiveMigrateInstance,
     project_tables.SoftRebootInstance,
     project_tables.RebootInstance,
     project_tables.TerminateInstance
 )
 
-# Remove 'ConfirmResize', 'RevertResize', 'EditInstanceSecurityGroups',
-# 'TogglePause', 'ToggleSuspend', 'ResizeLink', 'RebuildInstance' actions
-# from Project/Instances/Actions
+# Remove 'ConfirmResize', 'RevertResize', 'DecryptInstancePassword',
+# 'EditInstanceSecurityGroups', 'TogglePause', 'ToggleSuspend', 'ResizeLink',
+# 'RebuildInstance' actions from Project/Compute/Instances/Actions
 project_tables.InstancesTable._meta.row_actions = (
     project_tables.StartInstance,
     project_tables.CreateSnapshot,
@@ -104,41 +101,49 @@
     project_tables.TerminateInstance
 )
 
-# Disable 'admin_state' in Admin/Networks/Create Network
+# Disable 'disk_over_commit', 'block_migration' in
+# Admin/System/Instances/Actions/Live Migrate Instance. Note that this is
+# unchecked by default.
+LiveMigrateForm.base_fields['disk_over_commit'].widget.attrs['disabled'] = True
+LiveMigrateForm.base_fields['block_migration'].widget.attrs['disabled'] = True
+
+# Disable 'admin_state' in Admin/System/Networks/Create Network
 admin_state = CreateNetwork.base_fields['admin_state']
 admin_state.widget.attrs['disabled'] = True
 admin_state.widget.value_from_datadict = lambda *args: True
 
-# Disable 'shared' in Admin/Networks/Create Network. Note that this
-# is unchecked by default.
+# Disable 'shared' in Admin/System/Networks/Create Network. Note that this is
+# unchecked by default.
 CreateNetwork.base_fields['shared'].widget.attrs['disabled'] = True
 
-# Disable 'admin_state' in Admin/Networks/Network Name/Create Port
+# Disable 'admin_state' in Admin/System/Networks/Network Name/Create Port
 admin_state = CreatePort.base_fields['admin_state']
 admin_state.widget.attrs['disabled'] = True
 admin_state.widget.value_from_datadict = lambda *args: True
 
-# Remove 'UpdatePort' action from Admin/Networks/Network Name/Actions
+# Remove 'UpdatePort' action from Admin/System/Networks/Network Name/Actions
 PortsTable._meta.row_actions = (DeletePort,)
 
-# Remove 'UpdateSubnet' action from Admin/Networks/Network Name/Actions
+# Remove 'UpdateSubnet' action from
+# Admin/System/Networks/Network Name/Subnets/Actions
 SubnetsTable._meta.row_actions = (DeleteSubnet,)
 
-# Remove the 'EditNetwork' action from Admin/Networks/Actions
+# Remove the 'EditNetwork' action from Admin/System/Networks/Actions
 NetworksTable._meta.row_actions = (DeleteNetwork,)
 
-# Remove the 'UpdatePort' action from Project/Networks/Name/Ports/Actions
+# Remove the 'UpdatePort' action from
+# Project/Network/Networks/Name/Ports/Actions
 projectPortsTable._meta.row_actions = ()
 
 # Remove the 'UpdateSubnet' action from
-# Project/Networks/Name/Subnets/Actions
+# Project/Network/Networks/Name/Subnets/Actions
 projectSubnetsTable._meta.row_actions = (projectDeleteSubnet,)
 
-# Remove the 'EditNetwork' action from Project/Networks/Actions
+# Remove the 'EditNetwork' action from Project/Network/Networks/Actions
 projectNetworksTable._meta.row_actions = (projectCreateSubnet,
                                           projectDeleteNetwork)
 
-# Disable 'admin_state' in Project/Networks/Create Network/Network
+# Disable 'admin_state' in Project/Network/Networks/Create Network/Network
 admin_state = CreateNetworkInfoAction.base_fields['admin_state']
 admin_state.widget.attrs['disabled'] = True
 admin_state.widget.value_from_datadict = lambda *args: True
@@ -148,11 +153,10 @@
 CreateSubnetInfoAction.base_fields['no_gateway'].widget.attrs['disabled'] = \
     True
 
-# Remove 'CreateVolumeFromImage' checkbox from
-# Project/Images & Snapshots/Actions
+# Remove 'CreateVolumeFromImage' checkbox from Project/Compute/Images/Actions
 ImagesTable._meta.row_actions = (LaunchImage, EditImage, DeleteImage,)
 
 # Change 'host_routes' field to read-only in
-# Project/Networks/Create Network/Subnet Detail
+# Project/Network/Networks/Create Network/Subnet Detail
 base_fields = CreateSubnetDetailAction.base_fields
 base_fields['host_routes'].widget.attrs['readonly'] = 'readonly'
--- a/components/openstack/nova/files/nova.conf	Tue Dec 15 00:08:40 2015 -0800
+++ b/components/openstack/nova/files/nova.conf	Tue Dec 15 00:08:40 2015 -0800
@@ -1972,9 +1972,14 @@
 # value)
 #glancecache_dirname=$state_path/images
 
-# Location where solariszones driver will store snapshots
-# before uploading them to the Glance image service (string
-# value)
+# Cipher to use for encryption of memory traffic during live
+# migration. If not specified, a common encryption algorithm
+# will be negotiated. Options include: none or the name of a
+# supported OpenSSL cipher algorithm. (string value)
+#live_migration_cipher=<None>
+
+# Location to store snapshots before uploading them to the
+# Glance image service. (string value)
 #solariszones_snapshots_directory=$instances_path/snapshots
 
 
--- a/components/openstack/nova/files/nova.prof_attr	Tue Dec 15 00:08:40 2015 -0800
+++ b/components/openstack/nova/files/nova.prof_attr	Tue Dec 15 00:08:40 2015 -0800
@@ -15,4 +15,5 @@
 auths=solaris.smf.manage.nova,solaris.smf.modify,solaris.smf.value.nova;\
 profiles=Unified Archive Administration,\
 Zone Management,\
+Zone Migration,\
 Zone Security
--- a/components/openstack/nova/files/solariszones/driver.py	Tue Dec 15 00:08:40 2015 -0800
+++ b/components/openstack/nova/files/solariszones/driver.py	Tue Dec 15 00:08:40 2015 -0800
@@ -1,7 +1,7 @@
 # Copyright 2011 Justin Santa Barbara
 # All Rights Reserved.
 #
-# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -52,6 +52,7 @@
 from nova.network import neutronv2
 from nova import objects
 from nova.objects import flavor as flavor_obj
+from nova.openstack.common import excutils
 from nova.openstack.common import fileutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -69,10 +70,15 @@
     cfg.StrOpt('glancecache_dirname',
                default='$state_path/images',
                help='Default path to Glance cache for Solaris Zones.'),
+    cfg.StrOpt('live_migration_cipher',
+               help='Cipher to use for encryption of memory traffic during '
+                    'live migration. If not specified, a common encryption '
+                    'algorithm will be negotiated. Options include: none or '
+                    'the name of a supported OpenSSL cipher algorithm.'),
     cfg.StrOpt('solariszones_snapshots_directory',
                default='$instances_path/snapshots',
-               help='Location where solariszones driver will store snapshots '
-                    'before uploading them to the Glance image service'),
+               help='Location to store snapshots before uploading them to the '
+                    'Glance image service.'),
 ]
 
 CONF = cfg.CONF
@@ -121,11 +127,19 @@
 }
 
 MAX_CONSOLE_BYTES = 102400
+
 VNC_CONSOLE_BASE_FMRI = 'svc:/application/openstack/nova/zone-vnc-console'
 # Required in order to create a zone VNC console SMF service instance
 VNC_SERVER_PATH = '/usr/bin/vncserver'
 XTERM_PATH = '/usr/bin/xterm'
 
+# The underlying Solaris Zones framework does not expose a specific
+# version number, instead relying on feature tests to identify what is
+# and what is not supported. A HYPERVISOR_VERSION is defined here for
+# Nova's use but it generally should not be changed unless there is a
+# incompatible change such as concerning kernel zone live migration.
+HYPERVISOR_VERSION = '5.11'
+
 
 def lookup_resource_property(zone, resource, prop, filter=None):
     """Lookup specified property from specified Solaris Zone resource."""
@@ -2244,7 +2258,8 @@
         host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb
 
         host_stats['hypervisor_type'] = 'solariszones'
-        host_stats['hypervisor_version'] = int(self._uname[2].replace('.', ''))
+        host_stats['hypervisor_version'] = \
+            utils.convert_version_to_int(HYPERVISOR_VERSION)
         host_stats['hypervisor_hostname'] = self._uname[1]
 
         if self._uname[4] == 'i86pc':
@@ -2306,7 +2321,22 @@
         :param disk_info: instance disk information
         :param migrate_data: implementation specific data dict.
         """
-        raise NotImplementedError()
+        return {}
+
+    def _live_migration(self, name, dest, dry_run=False):
+        """Live migration of a Solaris kernel zone to another host."""
+        zone = self._get_zone_by_name(name)
+        if zone is None:
+            raise exception.InstanceNotFound(instance_id=name)
+
+        options = []
+        live_migration_cipher = CONF.live_migration_cipher
+        if live_migration_cipher is not None:
+            options.extend(['-c', live_migration_cipher])
+        if dry_run:
+            options.append('-nq')
+        options.append('ssh://nova@' + dest)
+        zone.migrate(options)
 
     def live_migration(self, context, instance, dest,
                        post_method, recover_method, block_migration=False,
@@ -2328,7 +2358,17 @@
         :param migrate_data: implementation specific params.
 
         """
-        raise NotImplementedError()
+        name = instance['name']
+        try:
+            self._live_migration(name, dest, dry_run=False)
+        except Exception as ex:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_("Unable to live migrate instance '%s' to host "
+                            "'%s' via zonemgr(3RAD): %s")
+                          % (name, dest, ex))
+                recover_method(context, instance, dest, block_migration)
+
+        post_method(context, instance, dest, block_migration, migrate_data)
 
     def rollback_live_migration_at_destination(self, context, instance,
                                                network_info,
@@ -2346,7 +2386,7 @@
         :param migrate_data: implementation specific params
 
         """
-        raise NotImplementedError()
+        pass
 
     def post_live_migration(self, context, instance, block_device_info,
                             migrate_data=None):
@@ -2357,7 +2397,30 @@
         :block_device_info: instance block device information
         :param migrate_data: if not None, it is a dict which has data
         """
-        pass
+        try:
+            # These methods log if problems occur so no need to double log
+            # here. Just catch any stray exceptions and allow destroy to
+            # proceed.
+            if self._has_vnc_console_service(instance):
+                self._disable_vnc_console_service(instance)
+                self._delete_vnc_console_service(instance)
+        except Exception:
+            pass
+
+        name = instance['name']
+        zone = self._get_zone_by_name(name)
+        # If instance cannot be found, just return.
+        if zone is None:
+            LOG.warning(_("Unable to find instance '%s' via zonemgr(3RAD)")
+                        % name)
+            return
+
+        try:
+            self._delete_config(instance)
+        except Exception as ex:
+            LOG.error(_("Unable to delete configuration for instance '%s' via "
+                        "zonemgr(3RAD): %s") % (name, ex))
+            raise
 
     def post_live_migration_at_source(self, context, instance, network_info):
         """Unplug VIFs from networks at source.
@@ -2380,7 +2443,7 @@
         :param network_info: instance network information
         :param block_migration: if true, post operation of block_migration.
         """
-        raise NotImplementedError()
+        pass
 
     def check_instance_shared_storage_local(self, context, instance):
         """Check if instance files located on shared storage.
@@ -2426,7 +2489,37 @@
         :param disk_over_commit: if true, allow disk over commit
         :returns: a dict containing migration info (hypervisor-dependent)
         """
-        raise NotImplementedError()
+        src_cpu_info = jsonutils.loads(src_compute_info['cpu_info'])
+        src_cpu_arch = src_cpu_info['arch']
+        dst_cpu_info = jsonutils.loads(dst_compute_info['cpu_info'])
+        dst_cpu_arch = dst_cpu_info['arch']
+        if src_cpu_arch != dst_cpu_arch:
+            reason = (_("CPU architectures between source host '%s' (%s) and "
+                        "destination host '%s' (%s) are incompatible.")
+                      % (src_compute_info['hypervisor_hostname'], src_cpu_arch,
+                         dst_compute_info['hypervisor_hostname'],
+                         dst_cpu_arch))
+            raise exception.MigrationPreCheckError(reason=reason)
+
+        extra_specs = self._get_extra_specs(instance)
+        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+        if brand != ZONE_BRAND_SOLARIS_KZ:
+            # Only Solaris kernel zones are currently supported.
+            reason = (_("'%s' branded zones do not currently support live "
+                        "migration.") % brand)
+            raise exception.MigrationPreCheckError(reason=reason)
+
+        if block_migration:
+            reason = (_('Block migration is not currently supported.'))
+            raise exception.MigrationPreCheckError(reason=reason)
+        if disk_over_commit:
+            reason = (_('Disk overcommit is not currently supported.'))
+            raise exception.MigrationPreCheckError(reason=reason)
+
+        dest_check_data = {
+            'hypervisor_hostname': dst_compute_info['hypervisor_hostname']
+        }
+        return dest_check_data
 
     def check_can_live_migrate_destination_cleanup(self, context,
                                                    dest_check_data):
@@ -2435,10 +2528,21 @@
         :param context: security context
         :param dest_check_data: result of check_can_live_migrate_destination
         """
-        raise NotImplementedError()
+        pass
+
+    def _check_local_volumes_present(self, block_device_info):
+        """Check if local volumes are attached to the instance."""
+        bmap = block_device_info.get('block_device_mapping')
+        for entry in bmap:
+            connection_info = entry['connection_info']
+            driver_type = connection_info['driver_volume_type']
+            if driver_type == 'local':
+                reason = (_("Instances with attached '%s' volumes are not "
+                            "currently supported.") % driver_type)
+                raise exception.MigrationPreCheckError(reason=reason)
 
     def check_can_live_migrate_source(self, context, instance,
-                                      dest_check_data):
+                                      dest_check_data, block_device_info):
         """Check if it is possible to execute live migration.
 
         This checks if the live migration can succeed, based on the
@@ -2447,9 +2551,17 @@
         :param context: security context
         :param instance: nova.db.sqlalchemy.models.Instance
         :param dest_check_data: result of check_can_live_migrate_destination
+        :param block_device_info: result of _get_instance_block_device_info
         :returns: a dict containing migration info (hypervisor-dependent)
         """
-        raise NotImplementedError()
+        self._check_local_volumes_present(block_device_info)
+        name = instance['name']
+        dest = dest_check_data['hypervisor_hostname']
+        try:
+            self._live_migration(name, dest, dry_run=True)
+        except Exception as ex:
+            raise exception.MigrationPreCheckError(reason=ex)
+        return dest_check_data
 
     def get_instance_disk_info(self, instance_name,
                                block_device_info=None):
@@ -2578,7 +2690,7 @@
 
         """
         # TODO(Vek): Need to pass context in for access to auth_token
-        raise NotImplementedError()
+        pass
 
     def filter_defer_apply_on(self):
         """Defer application of IPTables rules."""
@@ -2591,7 +2703,7 @@
     def unfilter_instance(self, instance, network_info):
         """Stop filtering instance."""
         # TODO(Vek): Need to pass context in for access to auth_token
-        raise NotImplementedError()
+        pass
 
     def set_admin_password(self, instance, new_pass):
         """Set the root password on the specified instance.
--- a/components/openstack/nova/nova.p5m	Tue Dec 15 00:08:40 2015 -0800
+++ b/components/openstack/nova/nova.p5m	Tue Dec 15 00:08:40 2015 -0800
@@ -854,7 +854,7 @@
 #
 group groupname=nova gid=85
 user username=nova ftpuser=false gcos-field="OpenStack Nova" group=nova \
-    home-dir=/var/lib/nova uid=85
+    home-dir=/var/lib/nova password=NP uid=85
 #
 license nova.license license="Apache v2.0"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/10-launchpad-1356552.patch	Tue Dec 15 00:08:40 2015 -0800
@@ -0,0 +1,208 @@
+commit 42cae28241cd0c213201d036bfbe13fb118e4bee
+Author: Cyril Roelandt <[email protected]>
+Date:   Mon Aug 18 17:45:35 2014 +0000
+
+    libvirt: Make sure volumes are well detected during block migration
+    
+    Current implementation of live migration in libvirt incorrectly includes
+    block devices on shared storage (e.g., NFS) when computing destination
+    storage requirements. Since these volumes are already on shared storage
+    they do not need to be migrated. As a result, migration fails if the
+    amount of free space on the shared drive is less than the size of the
+    volume to be migrated. The problem is addressed by adding a
+    block_device_info parameter to check_can_live_migrate_source() to allow
+    volumes to be filtered correctly when computing migration space
+    requirements.
+    
+    This only fixes the issue on libvirt: it is unclear whether other
+    implementations suffer from the same issue.
+    
+    Thanks to Florent Flament for spotting and fixing an issue while trying out
+    this patch.
+    
+    Co-Authored-By: Florent Flament <[email protected]>
+    Change-Id: Iac7d2cd2a70800fd89864463ca45c030c47411b0
+    Closes-Bug: #1356552
+    (cherry picked from commit 671aa9f8b7ca5274696f83bde0d4822ee431b837)
+
+--- nova-2014.2.2/nova/compute/manager.py.~3~	2015-12-01 05:07:52.781465660 -0800
++++ nova-2014.2.2/nova/compute/manager.py	2015-12-01 05:07:52.795381628 -0800
+@@ -4891,8 +4891,11 @@ class ComputeManager(manager.Manager):
+         is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
+                                                                       instance)
+         dest_check_data['is_volume_backed'] = is_volume_backed
++        block_device_info = self._get_instance_block_device_info(
++                            ctxt, instance, refresh_conn_info=True)
+         return self.driver.check_can_live_migrate_source(ctxt, instance,
+-                                                         dest_check_data)
++                                                         dest_check_data,
++                                                         block_device_info)
+ 
+     @object_compat
+     @wrap_exception()
+--- nova-2014.2.2/nova/tests/compute/test_compute_mgr.py.~2~	2015-12-01 05:07:52.782691092 -0800
++++ nova-2014.2.2/nova/tests/compute/test_compute_mgr.py	2015-12-01 05:07:52.796520248 -0800
+@@ -1302,13 +1302,19 @@ class ComputeManagerUnitTestCase(test.No
+ 
+         self.mox.StubOutWithMock(self.compute.compute_api,
+                                  'is_volume_backed_instance')
++        self.mox.StubOutWithMock(self.compute,
++                                 '_get_instance_block_device_info')
+         self.mox.StubOutWithMock(self.compute.driver,
+                                  'check_can_live_migrate_source')
+ 
+         self.compute.compute_api.is_volume_backed_instance(
+                 self.context, instance).AndReturn(is_volume_backed)
++        self.compute._get_instance_block_device_info(
++                self.context, instance, refresh_conn_info=True
++                ).AndReturn({'block_device_mapping': 'fake'})
+         self.compute.driver.check_can_live_migrate_source(
+-                self.context, instance, expected_dest_check_data)
++                self.context, instance, expected_dest_check_data,
++                {'block_device_mapping': 'fake'})
+ 
+         self.mox.ReplayAll()
+ 
+diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py
+index 613943b..c686b88 100644
+--- a/nova/tests/virt/libvirt/test_driver.py
++++ b/nova/tests/virt/libvirt/test_driver.py
+@@ -5323,7 +5323,7 @@ class LibvirtConnTestCase(test.TestCase):
+         self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
+         conn._assert_dest_node_has_enough_disk(
+             self.context, instance, dest_check_data['disk_available_mb'],
+-            False)
++            False, None)
+ 
+         self.mox.ReplayAll()
+         ret = conn.check_can_live_migrate_source(self.context, instance,
+@@ -5386,8 +5386,9 @@
+                 disk_available_mb=0)
+ 
+         self.mox.StubOutWithMock(conn, "get_instance_disk_info")
+-        conn.get_instance_disk_info(instance["name"]).AndReturn(
+-                                            '[{"virt_disk_size":2}]')
++        conn.get_instance_disk_info(instance["name"],
++                                    block_device_info=None).AndReturn(
++                                        '[{"virt_disk_size":2}]')
+ 
+         self.mox.ReplayAll()
+         self.assertRaises(exception.MigrationError,
+diff --git a/nova/virt/driver.py b/nova/virt/driver.py
+index fd483e5..20f4dd1 100644
+--- a/nova/virt/driver.py
++++ b/nova/virt/driver.py
+@@ -808,7 +808,7 @@ class ComputeDriver(object):
+         raise NotImplementedError()
+ 
+     def check_can_live_migrate_source(self, context, instance,
+-                                      dest_check_data):
++                                      dest_check_data, block_device_info=None):
+         """Check if it is possible to execute live migration.
+ 
+         This checks if the live migration can succeed, based on the
+@@ -817,6 +817,7 @@ class ComputeDriver(object):
+         :param context: security context
+         :param instance: nova.db.sqlalchemy.models.Instance
+         :param dest_check_data: result of check_can_live_migrate_destination
++        :param block_device_info: result of _get_instance_block_device_info
+         :returns: a dict containing migration info (hypervisor-dependent)
+         """
+         raise NotImplementedError()
+diff --git a/nova/virt/fake.py b/nova/virt/fake.py
+index 049c519..fe9ff1c 100644
+--- a/nova/virt/fake.py
++++ b/nova/virt/fake.py
+@@ -426,7 +426,7 @@ class FakeDriver(driver.ComputeDriver):
+         return {}
+ 
+     def check_can_live_migrate_source(self, ctxt, instance_ref,
+-                                      dest_check_data):
++                                      dest_check_data, block_device_info=None):
+         return
+ 
+     def finish_migration(self, context, migration, instance, disk_info,
+diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
+index 485aa23..fa42130 100644
+--- a/nova/virt/hyperv/driver.py
++++ b/nova/virt/hyperv/driver.py
+@@ -169,7 +169,7 @@ class HyperVDriver(driver.ComputeDriver):
+             context, dest_check_data)
+ 
+     def check_can_live_migrate_source(self, context, instance,
+-                                      dest_check_data):
++                                      dest_check_data, block_device_info=None):
+         return self._livemigrationops.check_can_live_migrate_source(
+             context, instance, dest_check_data)
+ 
+diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
+index cec2013..d420f15 100644
+--- a/nova/virt/libvirt/driver.py
++++ b/nova/virt/libvirt/driver.py
+@@ -5028,7 +5028,8 @@ class LibvirtDriver(driver.ComputeDriver):
+         self._cleanup_shared_storage_test_file(filename)
+ 
+     def check_can_live_migrate_source(self, context, instance,
+-                                      dest_check_data):
++                                      dest_check_data,
++                                      block_device_info=None):
+         """Check if it is possible to execute live migration.
+ 
+         This checks if the live migration can succeed, based on the
+@@ -5037,6 +5038,7 @@ class LibvirtDriver(driver.ComputeDriver):
+         :param context: security context
+         :param instance: nova.db.sqlalchemy.models.Instance
+         :param dest_check_data: result of check_can_live_migrate_destination
++        :param block_device_info: result of _get_instance_block_device_info
+         :returns: a dict containing migration info
+         """
+         # Checking shared storage connectivity
+@@ -5058,7 +5060,8 @@ class LibvirtDriver(driver.ComputeDriver):
+                 raise exception.InvalidLocalStorage(reason=reason, path=source)
+             self._assert_dest_node_has_enough_disk(context, instance,
+                                     dest_check_data['disk_available_mb'],
+-                                    dest_check_data['disk_over_commit'])
++                                    dest_check_data['disk_over_commit'],
++                                    block_device_info)
+ 
+         elif not (dest_check_data['is_shared_block_storage'] or
+                   dest_check_data['is_shared_instance_path']):
+@@ -5106,7 +5109,8 @@ class LibvirtDriver(driver.ComputeDriver):
+         return False
+ 
+     def _assert_dest_node_has_enough_disk(self, context, instance,
+-                                             available_mb, disk_over_commit):
++                                             available_mb, disk_over_commit,
++                                             block_device_info=None):
+         """Checks if destination has enough disk for block migration."""
+         # Libvirt supports qcow2 disk format,which is usually compressed
+         # on compute nodes.
+@@ -5122,7 +5126,8 @@ class LibvirtDriver(driver.ComputeDriver):
+         if available_mb:
+             available = available_mb * units.Mi
+ 
+-        ret = self.get_instance_disk_info(instance['name'])
++        ret = self.get_instance_disk_info(instance['name'],
++                                          block_device_info=block_device_info)
+         disk_infos = jsonutils.loads(ret)
+ 
+         necessary = 0
+--- nova-2014.2.2/nova/virt/xenapi/driver.py.~2~	2015-12-01 05:16:19.562306358 -0800
++++ nova-2014.2.2/nova/virt/xenapi/driver.py	2015-12-01 05:16:19.614403555 -0800
+@@ -501,7 +501,7 @@ class XenAPIDriver(driver.ComputeDriver)
+         pass
+ 
+     def check_can_live_migrate_source(self, context, instance,
+-                                      dest_check_data):
++                                      dest_check_data, block_device_info=None):
+         """Check if it is possible to execute live migration.
+ 
+         This checks if the live migration can succeed, based on the
+@@ -511,6 +511,7 @@ class XenAPIDriver(driver.ComputeDriver)
+         :param instance: nova.db.sqlalchemy.models.Instance
+         :param dest_check_data: result of check_can_live_migrate_destination
+                                 includes the block_migration flag
++        :param block_device_info: result of _get_instance_block_device_info
+         """
+         return self._vmops.check_can_live_migrate_source(context, instance,
+                                                          dest_check_data)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/11-launchpad-1377644.patch	Tue Dec 15 00:08:40 2015 -0800
@@ -0,0 +1,62 @@
+This external patch was a follow-up fix to Launchpad bug 1377644.
+While the original fix was backported to Juno 2014.2.1, the subsequent
+one was not.
+
+commit 64882d39d9fea9d6001ccc61973624949825c52f
+Author: He Jie Xu <[email protected]>
+Date:   Fri Nov 7 23:24:12 2014 +0800
+
+    Fix circular reference error when live migration failed
+    
+    When unexpected exception raised in live migration, the MigrationError
+    carry another original exception in it. But when oslo.message
+    serialize the exception, the circular reference error happended. This
+    patch just pass the exception as unicode string into MigrationError.
+    
+    Change-Id: I4e449baae74bd9a15490ae7accbd2103bae90d57
+    Related-Bug: #1377644
+
+--- nova-2014.2.2/nova/conductor/manager.py.~1~	2015-12-01 04:52:25.839270759 -0800
++++ nova-2014.2.2/nova/conductor/manager.py	2015-12-01 04:54:08.341268026 -0800
+@@ -589,7 +589,7 @@ class ComputeTaskManager(base.Base):
+                        ' %(dest)s unexpectedly failed.'),
+                        {'instance_id': instance['uuid'], 'dest': destination},
+                        exc_info=True)
+-            raise exception.MigrationError(reason=ex)
++            raise exception.MigrationError(reason=six.text_type(ex))
+ 
+     def build_instances(self, context, instances, image, filter_properties,
+             admin_password, injected_files, requested_networks,
+--- nova-2014.2.2/nova/tests/conductor/test_conductor.py.~1~	2015-02-05 06:26:50.000000000 -0800
++++ nova-2014.2.2/nova/tests/conductor/test_conductor.py	2015-12-01 04:55:27.135695264 -0800
+@@ -20,6 +20,7 @@ import contextlib
+ import mock
+ import mox
+ from oslo import messaging
++import six
+ 
+ from nova.api.ec2 import ec2utils
+ from nova.compute import arch
+@@ -1711,18 +1712,19 @@ class ConductorTaskTestCase(_BaseTaskTes
+         self.mox.StubOutWithMock(scheduler_utils,
+                 'set_vm_state_and_notify')
+ 
+-        ex = IOError()
++        expected_ex = IOError('fake error')
+         live_migrate.execute(self.context, mox.IsA(objects.Instance),
+                              'destination', 'block_migration',
+-                             'disk_over_commit').AndRaise(ex)
++                             'disk_over_commit').AndRaise(expected_ex)
+         self.mox.ReplayAll()
+ 
+         self.conductor = utils.ExceptionHelper(self.conductor)
+ 
+-        self.assertRaises(exc.MigrationError,
++        ex = self.assertRaises(exc.MigrationError,
+             self.conductor.migrate_server, self.context, inst_obj,
+             {'host': 'destination'}, True, False, None, 'block_migration',
+             'disk_over_commit')
++        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
+ 
+     def test_set_vm_state_and_notify(self):
+         self.mox.StubOutWithMock(scheduler_utils,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/12-launchpad-1397153.patch	Tue Dec 15 00:08:40 2015 -0800
@@ -0,0 +1,103 @@
+commit 40a37269c52977a718e91012cfd4580b2e31ec65
+Author: He Jie Xu <[email protected]>
+Date:   Fri Nov 7 23:29:18 2014 +0800
+
+    Set vm state error when raising unexpected exception in live migrate
+    
+    The instance stuck at migrating task_state when unexpected exception.
+    This is confuse for user, this patch set the vm_state to error.
+    
+    Change-Id: Ib1b97452bc5e777c66c4d368f71156dbe1e116b7
+    Partial-Bug: 1397153
+
+--- nova-2014.2.2/nova/conductor/manager.py.~2~	2015-12-01 04:57:58.583807338 -0800
++++ nova-2014.2.2/nova/conductor/manager.py	2015-12-01 05:00:43.319845439 -0800
+@@ -558,6 +558,19 @@ class ComputeTaskManager(base.Base):
+     def _live_migrate(self, context, instance, scheduler_hint,
+                       block_migration, disk_over_commit):
+         destination = scheduler_hint.get("host")
++
++        def _set_vm_state(context, instance, ex, vm_state=None,
++                          task_state=None):
++            request_spec = {'instance_properties': {
++                'uuid': instance['uuid'], },
++            }
++            scheduler_utils.set_vm_state_and_notify(context,
++                'compute_task', 'migrate_server',
++                dict(vm_state=vm_state,
++                     task_state=task_state,
++                     expected_task_state=task_states.MIGRATING,),
++                ex, request_spec, self.db)
++
+         try:
+             live_migrate.execute(context, instance, destination,
+                              block_migration, disk_over_commit)
+@@ -575,20 +588,14 @@ class ComputeTaskManager(base.Base):
+                 exception.LiveMigrationWithOldNovaNotSafe) as ex:
+             with excutils.save_and_reraise_exception():
+                 # TODO(johngarbutt) - eventually need instance actions here
+-                request_spec = {'instance_properties': {
+-                    'uuid': instance['uuid'], },
+-                }
+-                scheduler_utils.set_vm_state_and_notify(context,
+-                        'compute_task', 'migrate_server',
+-                        dict(vm_state=instance['vm_state'],
+-                             task_state=None,
+-                             expected_task_state=task_states.MIGRATING,),
+-                        ex, request_spec, self.db)
++                _set_vm_state(context, instance, ex, instance['vm_state'])
+         except Exception as ex:
+             LOG.error(_('Migration of instance %(instance_id)s to host'
+                        ' %(dest)s unexpectedly failed.'),
+                        {'instance_id': instance['uuid'], 'dest': destination},
+                        exc_info=True)
++            _set_vm_state(context, instance, ex, vm_states.ERROR,
++                          instance['task_state'])
+             raise exception.MigrationError(reason=six.text_type(ex))
+ 
+     def build_instances(self, context, instances, image, filter_properties,
+--- nova-2014.2.2/nova/tests/conductor/test_conductor.py.~2~	2015-12-01 04:57:58.599204982 -0800
++++ nova-2014.2.2/nova/tests/conductor/test_conductor.py	2015-12-01 05:04:39.416251458 -0800
+@@ -1704,27 +1704,28 @@ class ConductorTaskTestCase(_BaseTaskTes
+         ex = exc.LiveMigrationWithOldNovaNotSafe(server='dummy')
+         self._test_migrate_server_deals_with_expected_exceptions(ex)
+ 
+-    def test_migrate_server_deals_with_unexpected_exceptions(self):
++    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
++    @mock.patch.object(live_migrate, 'execute')
++    def test_migrate_server_deals_with_unexpected_exceptions(self,
++            mock_live_migrate, mock_set_state):
++        expected_ex = IOError('fake error')
++        mock_live_migrate.side_effect = expected_ex
+         instance = fake_instance.fake_db_instance()
+         inst_obj = objects.Instance._from_db_object(
+             self.context, objects.Instance(), instance, [])
+-        self.mox.StubOutWithMock(live_migrate, 'execute')
+-        self.mox.StubOutWithMock(scheduler_utils,
+-                'set_vm_state_and_notify')
+-
+-        expected_ex = IOError('fake error')
+-        live_migrate.execute(self.context, mox.IsA(objects.Instance),
+-                             'destination', 'block_migration',
+-                             'disk_over_commit').AndRaise(expected_ex)
+-        self.mox.ReplayAll()
+-
+-        self.conductor = utils.ExceptionHelper(self.conductor)
+-
+         ex = self.assertRaises(exc.MigrationError,
+             self.conductor.migrate_server, self.context, inst_obj,
+             {'host': 'destination'}, True, False, None, 'block_migration',
+             'disk_over_commit')
+-        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
++        request_spec = {'instance_properties': {
++                'uuid': instance['uuid'], },
++        }
++        mock_set_state.assert_called_once_with(self.context,
++                        'compute_task', 'migrate_server',
++                        dict(vm_state=vm_states.ERROR,
++                             task_state=inst_obj.task_state,
++                             expected_task_state=task_states.MIGRATING,),
++                        expected_ex, request_spec, self.conductor.db)
+ 
+     def test_set_vm_state_and_notify(self):
+         self.mox.StubOutWithMock(scheduler_utils,