PSARC/2014/207 OpenStack Glance Update to Havana
PSARC/2014/208 OpenStack Cinder Update to Havana
PSARC/2014/209 OpenStack Keystone Update to Havana
PSARC/2014/210 OpenStack Nova Update to Havana
18416146 Neutron agents (L3 and DHCP) should cleanup resources when they are disabled
18562372 Failed to create a new project under Horizon
18645763 ZFSSA Cinder Driver support
18686327 evs agent silently ignores user-specified pool allocation ranges
18702697 fibre channel volumes should be supported in the cinder volume driver
18734289 nova won't terminate failed kz deployments
18738371 cinder-volume:setup should account for commented-out zfs_volume_base
18738374 cinder-volume:setup should check for existence of configuration file
18826190 nova-compute fails due to nova.utils.to_bytes
18855698 Update OpenStack to Havana 2013.2.3
18855710 Update python-cinderclient to 1.0.9
18855743 Update python-keystoneclient to 0.8.0
18855754 Update python-neutronclient to 2.3.4
18855764 Update python-novaclient to 2.17.0
18855793 Update python-swiftclient to 2.1.0
18856992 External networks can be deleted even when floating IP addresses are in use
18857784 bake in some more openstack configuration
18884923 Incorrect locale facets in python modules for openstack
18913890 the error in _get_view_and_lun may cause the failure of deleting volumes
18943044 Disable 'Security Groups' tab in Horizon dashboard
This upstream patch addresses CVE-2014-2573 and is tracked under
Launchpad bug 1269418. It is addressed in Icehouse 2014.1 and Havana
2013.2.4. It has been modified to apply cleanly to our current Havana
implementation.
This particular hypervisor driver is not currently shipped with
Solaris.
commit b3cc3f62a60662e5bb82136c0cfa464592a6afe9
Author: Gary Kotton <[email protected]>
Date: Thu Mar 13 06:53:58 2014 -0700
VMware: ensure rescue instance is deleted when instance is deleted
If the user creates a rescue instance and then proceeded to delete
the original instance then the rescue instance would still be up
and running on the backend.
This patch ensures that the rescue instance is cleaned up if
necessary.
The vmops unrescue method has a new parameter indicating if
the original VM should be powered on.
Closes-bug: 1269418
(cherry picked from commit efb66531bc37ee416778a70d46c657608ca767af)
Conflicts:
nova/tests/virt/vmwareapi/test_vmwareapi.py
nova/virt/vmwareapi/vmops.py
Change-Id: I3c1d0b1d003392b306094b80ea1ac99377441fbf
--- nova-2013.2.3/nova/tests/virt/vmwareapi/test_vmwareapi.py.~1~ 2014-04-03 11:49:46.000000000 -0700
+++ nova-2013.2.3/nova/tests/virt/vmwareapi/test_vmwareapi.py 2014-06-09 23:03:38.008877252 -0700
@@ -34,6 +34,7 @@
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
@@ -793,6 +794,31 @@
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
+ def destroy_rescued(self, fake_method):
+ self._rescue()
+ with (
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ fake_method)
+ ):
+ self.instance['vm_state'] = vm_states.RESCUED
+ self.conn.destroy(self.instance, self.network_info)
+ inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(inst_path))
+ rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
+ self.uuid,
+ self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(rescue_file_path))
+
+ def test_destroy_rescued(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ pass
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy_rescued_with_exception(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ raise exception.NovaException('Here is my fake exception')
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
--- nova-2013.2.3/nova/virt/vmwareapi/vmops.py.~1~ 2014-04-03 11:49:46.000000000 -0700
+++ nova-2013.2.3/nova/virt/vmwareapi/vmops.py 2014-06-09 23:09:13.557941347 -0700
@@ -35,6 +35,7 @@
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
@@ -904,13 +905,9 @@
except Exception as exc:
LOG.exception(exc, instance=instance)
- def destroy(self, instance, network_info, destroy_disks=True):
- """
- Destroy a VM instance. Steps followed are:
- 1. Power off the VM, if it is in poweredOn state.
- 2. Un-register a VM.
- 3. Delete the contents of the folder holding the VM related data.
- """
+ def _destroy_instance(self, instance, network_info, destroy_disks=True,
+ instance_name=None):
+ # Destroy a VM instance
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["config.files.vmPathName", "runtime.powerState",
@@ -943,8 +940,9 @@
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
- " while un-registering the VM: %s") % str(excep))
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
+ "exception while un-registering the VM: %s"),
+ excep)
if network_info:
self.unplug_vifs(instance, network_info)
@@ -976,13 +974,37 @@
{'datastore_name': datastore_name},
instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, "
- "got this exception while deleting"
- " the VM contents from the disk: %s")
- % str(excep))
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
+ "got this exception while deleting "
+ "the VM contents from the disk: %s"),
+ excep)
except Exception as exc:
LOG.exception(exc, instance=instance)
+ def destroy(self, instance, network_info, destroy_disks=True):
+ """Destroy a VM instance.
+
+ Steps followed for each VM are:
+ 1. Power off, if it is in poweredOn state.
+ 2. Un-register.
+ 3. Delete the contents of the folder holding the VM related data.
+ """
+ # If there is a rescue VM then we need to destroy that one too.
+ LOG.debug(_("Destroying instance"), instance=instance)
+ if instance['vm_state'] == vm_states.RESCUED:
+ LOG.debug(_("Rescue VM configured"), instance=instance)
+ try:
+ self.unrescue(instance, power_on=False)
+ LOG.debug(_("Rescue VM destroyed"), instance=instance)
+ except Exception:
+ rescue_name = instance['uuid'] + self._rescue_suffix
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks,
+ instance_name=rescue_name)
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks)
+ LOG.debug(_("Instance destroyed"), instance=instance)
+
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
@@ -1066,7 +1088,7 @@
controller_key=controller_key,
unit_number=unit_number)
- def unrescue(self, instance):
+ def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
@@ -1079,8 +1101,9 @@
r_instance = copy.deepcopy(instance)
r_instance['name'] = r_instance['name'] + self._rescue_suffix
r_instance['uuid'] = r_instance['uuid'] + self._rescue_suffix
- self.destroy(r_instance, None)
- self._power_on(instance)
+ self._destroy_instance(r_instance, None, instance_name=instance_name)
+ if power_on:
+ self._power_on(instance)
def power_off(self, instance):
"""Power off the specified instance."""