--- a/components/openstack/nova/files/solariszones/driver.py Mon May 16 14:46:20 2016 +0200
+++ b/components/openstack/nova/files/solariszones/driver.py Fri May 20 17:42:29 2016 -0400
@@ -19,6 +19,7 @@
Driver for Solaris Zones (nee Containers):
"""
+import base64
import glob
import os
import platform
@@ -37,36 +38,40 @@
from solaris_install.target.size import Size
from cinderclient import exceptions as cinder_exception
+from cinderclient.v1 import client as v1_client
from eventlet import greenthread
+from keystoneclient import exceptions as keystone_exception
from lxml import etree
+from oslo_concurrency import processutils
from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+from oslo_utils import excutils
+from oslo_utils import strutils
from passlib.hash import sha256_crypt
+from nova.api.metadata import password
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.console import type as ctype
from nova import conductor
from nova import context as nova_context
+from nova import crypto
from nova import exception
-from nova.i18n import _
+from nova.i18n import _, _LE, _LI
+from nova.image import API as glance_api
from nova.image import glance
-from nova.network import neutronv2
+from nova.network.neutronv2 import api as neutronv2_api
from nova import objects
from nova.objects import flavor as flavor_obj
-from nova.openstack.common import excutils
from nova.openstack.common import fileutils
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova.openstack.common import loopingcall
-from nova.openstack.common import processutils
-from nova.openstack.common import strutils
from nova import utils
from nova.virt import driver
from nova.virt import event as virtevent
+from nova.virt import hardware
from nova.virt import images
from nova.virt.solariszones import sysconfig
-from nova import volume
from nova.volume.cinder import API
from nova.volume.cinder import cinderclient
from nova.volume.cinder import get_cinder_client_version
@@ -75,7 +80,7 @@
solariszones_opts = [
cfg.StrOpt('glancecache_dirname',
- default='$state_path/images',
+ default='/var/share/nova/images',
help='Default path to Glance cache for Solaris Zones.'),
cfg.StrOpt('live_migration_cipher',
help='Cipher to use for encryption of memory traffic during '
@@ -86,6 +91,9 @@
default='$instances_path/snapshots',
help='Location to store snapshots before uploading them to the '
'Glance image service.'),
+ cfg.StrOpt('zones_suspend_path',
+ default='/var/share/zones/SYSsuspend',
+ help='Default path for suspend images for Solaris Zones.'),
]
CONF = cfg.CONF
@@ -140,6 +148,8 @@
VNC_SERVER_PATH = '/usr/bin/vncserver'
XTERM_PATH = '/usr/bin/xterm'
+ROOTZPOOL_RESOURCE = 'rootzpool'
+
# The underlying Solaris Zones framework does not expose a specific
# version number, instead relying on feature tests to identify what is
# and what is not supported. A HYPERVISOR_VERSION is defined here for
@@ -147,10 +157,20 @@
# incompatible change such as concerning kernel zone live migration.
HYPERVISOR_VERSION = '5.11'
-ROOTZPOOL_RESOURCE = 'rootzpool'
-
shared_storage = ['iscsi', 'fibre_channel']
+
+def lookup_resource(zone, resource):
+ """Lookup specified resource from specified Solaris Zone."""
+ try:
+ val = zone.getResources(zonemgr.Resource(resource))
+ except rad.client.ObjectError:
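+        # No such resource is configured for this zone.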
+ return None
+ return val[0] if val else None
+
+
def lookup_resource_property(zone, resource, prop, filter=None):
"""Lookup specified property from specified Solaris Zone resource."""
try:
@@ -228,6 +248,8 @@
Returns a volume object
"""
+ client = cinderclient(context)
+
if snapshot is not None:
snapshot_id = snapshot['id']
else:
@@ -247,11 +269,10 @@
imageRef=image_id,
source_volid=source_volid)
- version = get_cinder_client_version(context)
- if version == '1':
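+        # The v1 Cinder client expects display_name/display_description;
+        # v2 and later use name/description.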
+ if isinstance(client, v1_client.Client):
kwargs['display_name'] = name
kwargs['display_description'] = description
- elif version == '2':
+ else:
kwargs['name'] = name
kwargs['description'] = description
@@ -260,8 +281,9 @@
return _untranslate_volume_summary_view(context, item)
except cinder_exception.OverLimit:
raise exception.OverQuota(overs='volumes')
- except cinder_exception.BadRequest as err:
- raise exception.InvalidInput(reason=unicode(err))
+ except (cinder_exception.BadRequest,
+ keystone_exception.BadRequest) as reason:
+ raise exception.InvalidInput(reason=reason)
@translate_volume_exception
def update(self, context, volume_id, fields):
@@ -445,12 +467,14 @@
self._host_stats = {}
self._initiator = None
self._install_engine = None
+ self._kstat_control = None
self._pagesize = os.sysconf('SC_PAGESIZE')
self._rad_connection = None
+ self._rootzpool_suffix = ROOTZPOOL_RESOURCE
self._uname = os.uname()
self._validated_archives = list()
self._volume_api = SolarisVolumeAPI()
- self._rootzpool_suffix = ROOTZPOOL_RESOURCE
+ self._zone_manager = None
@property
def rad_connection(self):
@@ -465,28 +489,38 @@
return self._rad_connection
- def _init_rad(self):
- """Connect to RAD providers for kernel statistics and Solaris
- Zones. By connecting to the local rad(1M) service through a
- UNIX domain socket, kernel statistics can be read via
- kstat(3RAD) and Solaris Zones can be configured and controlled
- via zonemgr(3RAD).
- """
-
+ @property
+ def zone_manager(self):
try:
- self._kstat_control = self.rad_connection.get_object(
- kstat.Control())
- except Exception as reason:
- msg = (_('Unable to connect to svc:/system/rad:local: %s')
- % reason)
- raise exception.NovaException(msg)
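+            # Obtain a fresh ZoneManager if none is cached or the cached
+            # object's underlying RAD connection has been closed.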
+ if (self._zone_manager is None or
+ self._zone_manager._conn._closed is not None):
+ self._zone_manager = self.rad_connection.get_object(
+ zonemgr.ZoneManager())
+ except Exception as ex:
+ reason = _("Unable to obtain RAD object: %s") % ex
+ raise exception.NovaException(reason)
+
+ return self._zone_manager
+
+ @property
+ def kstat_control(self):
+ try:
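+            # Likewise, refresh the cached kstat Control object if its RAD
+            # connection has been closed.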
+ if (self._kstat_control is None or
+ self._kstat_control._conn._closed is not None):
+ self._kstat_control = self.rad_connection.get_object(
+ kstat.Control())
+ except Exception as ex:
+ reason = _("Unable to obtain RAD object: %s") % ex
+ raise exception.NovaException(reason)
+
+ return self._kstat_control
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host.
"""
# TODO(Vek): Need to pass context in for access to auth_token
- self._init_rad()
+ pass
def cleanup_host(self, host):
"""Clean up anything that is necessary for the driver gracefully stop,
@@ -573,19 +607,6 @@
initiator_iqn = initiator_name_line.rsplit(' ', 1)[1]
return initiator_iqn
- def _get_zone_auto_install_state(self, zone_name):
- """Returns the SMF state of the auto-installer service,
- or None if auto-installer service is non-existent
- """
- try:
- out, err = utils.execute('/usr/sbin/zlogin', '-S', zone_name,
- '/usr/bin/svcs', '-H', '-o', 'state',
- 'auto-installer:default')
- return out.strip()
- except processutils.ProcessExecutionError as err:
- # No auto-installer instance most likely.
- return None
-
def _get_zone_by_name(self, name):
"""Return a Solaris Zones object via RAD by name."""
try:
@@ -672,7 +693,7 @@
'name': name
}
try:
- self._kstat_control.update()
+ self.kstat_control.update()
kstat_object = self.rad_connection.get_object(
kstat.Kstat(), rad.client.ADRGlobPattern(pattern))
except Exception as reason:
@@ -704,26 +725,18 @@
:param instance: nova.objects.instance.Instance object
- Returns a dict containing:
-
- :state: the running state, one of the power_state codes
- :max_mem: (int) the maximum memory in KBytes allowed
- :mem: (int) the memory in KBytes used by the domain
- :num_cpu: (int) the number of virtual CPUs for the domain
- :cpu_time: (int) the CPU time used in nanoseconds
+      Returns an InstanceInfo object
"""
# TODO(Vek): Need to pass context in for access to auth_token
name = instance['name']
zone = self._get_zone_by_name(name)
if zone is None:
raise exception.InstanceNotFound(instance_id=name)
- return {
- 'state': self._get_state(zone),
- 'max_mem': self._get_max_mem(zone),
- 'mem': self._get_mem(zone),
- 'num_cpu': self._get_num_cpu(zone),
- 'cpu_time': self._get_cpu_time(zone)
- }
+ return hardware.InstanceInfo(state=self._get_state(zone),
+ max_mem_kb=self._get_max_mem(zone),
+ mem_kb=self._get_mem(zone),
+ num_cpu=self._get_num_cpu(zone),
+ cpu_time_ns=self._get_cpu_time(zone))
def get_num_instances(self):
"""Return the total number of virtual machines.
@@ -857,10 +870,7 @@
if recreate:
instance.system_metadata['evac_from'] = instance['launched_on']
instance.save()
- inst_type = flavor_obj.Flavor.get_by_id(
- nova_context.get_admin_context(read_deleted='yes'),
- instance['instance_type_id'])
- extra_specs = inst_type['extra_specs'].copy()
+ extra_specs = self._get_extra_specs(instance)
brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
if brand == ZONE_BRAND_SOLARIS:
msg = (_("'%s' branded zones do not currently support "
@@ -894,6 +904,7 @@
if driver_type not in shared_storage:
msg = (_("Root device is not on shared storage for instance "
"'%s'.") % instance['name'])
raise exception.NovaException(msg)
if not recreate:
@@ -902,20 +913,12 @@
self._volume_api.detach(context, root_ci['serial'])
self._volume_api.delete(context, root_ci['serial'])
- # We need to clear the block device mapping for the root device
- bdmobj = objects.BlockDeviceMapping()
- bdm = bdmobj.get_by_volume_id(context, root_ci['serial'])
- bdm.destroy(context)
-
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
if recreate:
- inst_type = flavor_obj.Flavor.get_by_id(
- nova_context.get_admin_context(read_deleted='yes'),
- instance['instance_type_id'])
- extra_specs = inst_type['extra_specs'].copy()
+ extra_specs = self._get_extra_specs(instance)
instance.system_metadata['rebuilding'] = False
self._create_config(context, instance, network_info,
@@ -926,7 +929,6 @@
instance.system_metadata['rebuilding'] = True
self.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
- greenthread.sleep(15)
self.power_off(instance)
del instance.system_metadata['rebuilding']
@@ -938,7 +940,6 @@
if recreate:
zone.attach(['-x', 'initialize-hostdata'])
- instance_uuid = instance.uuid
rootmp = instance['root_device_name']
for entry in bdms:
if (entry['connection_info'] is None or
@@ -986,9 +987,9 @@
raise
return image
- def _validate_image(self, image, instance):
- """Validate a glance image for compatibility with the instance"""
- # Skip if the image was already checked and confirmed as valid
+ def _validate_image(self, context, image, instance):
+ """Validate a glance image for compatibility with the instance."""
+ # Skip if the image was already checked and confirmed as valid.
if instance['image_ref'] in self._validated_archives:
return
@@ -1025,6 +1026,18 @@
reason = (_('Image architecture "%s" is incompatible with this'
'compute host architecture: "%s"')
% (deployable_arch, compute_arch))
+
+            # For some reason we have gotten an image of the wrong
+            # architecture, which the scheduler should have filtered out.
+            # One reason this can happen is that the image's 'architecture'
+            # property is set incorrectly. Check for this and report a
+            # better reason.
+ glanceapi = glance_api()
+ image_meta = glanceapi.get(context, instance['image_ref'])
+            image_properties = image_meta.get('properties', {})
+ if image_properties.get('architecture') is None:
+ reason = reason + (_(" The 'architecture' property is not "
+ "set on the Glance image."))
+
raise exception.ImageUnacceptable(
image_id=instance['image_ref'],
reason=reason)
@@ -1046,11 +1059,12 @@
class_type=UnifiedArchive)
def _suri_from_volume_info(self, connection_info):
- """Returns a suri(5) formatted string based on connection_info
- Currently supports local ZFS volume and iSCSI driver types.
+ """Returns a suri(5) formatted string based on connection_info.
+ Currently supports local ZFS volume, NFS, Fibre Channel and iSCSI
+ driver types.
"""
driver_type = connection_info['driver_volume_type']
- if driver_type not in ['iscsi', 'fibre_channel', 'local']:
+ if driver_type not in ['iscsi', 'fibre_channel', 'local', 'nfs']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
if driver_type == 'local':
suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path']
@@ -1066,6 +1080,13 @@
data['target_iqn'],
data['target_lun'])
# TODO(npower): need to handle CHAP authentication also
+ elif driver_type == 'nfs':
+ data = connection_info['data']
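+            # Convert the NFS export (host:/path) into a suri(5) of the
+            # form nfs://cinder:cinder@host/path/<volume name>.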
+ suri = (
+ 'nfs://cinder:cinder@%s/%s' %
+ (data['export'].replace(':', ''), data['name'])
+ )
+
elif driver_type == 'fibre_channel':
data = connection_info['data']
target_wwn = data['target_wwn']
@@ -1145,7 +1166,7 @@
vol = self._volume_api.create(
context,
instance['root_gb'],
- instance['display_name'] + "-" + self._rootzpool_suffix,
+ instance['hostname'] + "-" + self._rootzpool_suffix,
"Boot volume for instance '%s' (%s)"
% (instance['name'], instance['uuid']))
# TODO(npower): Polling is what nova/compute/manager also does when
@@ -1291,7 +1312,7 @@
return
tenant_id = None
- network_plugin = neutronv2.get_client(context)
+ network_plugin = neutronv2_api.get_client(context)
for netid, network in enumerate(network_info):
if tenant_id is None:
tenant_id = network['network']['meta']['tenant_id']
@@ -1333,7 +1354,7 @@
linkname = 'net%s' % id
# create the required sysconfig file (or skip if this is part of a
- # resize process)
+ # resize or evacuate process)
tstate = instance['task_state']
if tstate not in [task_states.RESIZE_FINISH,
task_states.RESIZE_REVERTING,
@@ -1362,7 +1383,19 @@
with ZoneConfig(zone) as zc:
zc.setprop('global', 'tenant', tenant_id)
- def _verify_sysconfig(self, sc_dir, instance):
+ def _set_suspend(self, instance):
+ """Use the instance name to specify the pathname for the suspend image.
+ """
+ name = instance['name']
+ zone = self._get_zone_by_name(name)
+ if zone is None:
+ raise exception.InstanceNotFound(instance_id=name)
+
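+        # %{zonename} is a token that the zones framework expands to the
+        # zone's name, giving each zone its own suspend image path.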
+ path = os.path.join(CONF.zones_suspend_path, '%{zonename}')
+ with ZoneConfig(zone) as zc:
+ zc.addresource('suspend', [zonemgr.Property('path', path)])
+
+ def _verify_sysconfig(self, sc_dir, instance, admin_password=None):
"""verify the SC profile(s) passed in contain an entry for
system/config-user to configure the root account. If an SSH key is
specified, configure root's profile to use it.
@@ -1373,7 +1406,12 @@
root_account_needed = True
hostname_needed = True
sshkey = instance.get('key_data')
- name = instance.get('display_name')
+ name = instance.get('hostname')
+ encrypted_password = None
+
+ # encrypt admin password, using SHA-256 as default
+ if admin_password is not None:
+ encrypted_password = sha256_crypt.encrypt(admin_password)
# find all XML files in sc_dir
for root, dirs, files in os.walk(sc_dir):
@@ -1401,12 +1439,22 @@
if root_account_needed:
fp = os.path.join(sc_dir, 'config-root.xml')
- if sshkey is not None:
- # set up the root account as 'normal' with no expiration and
- # an ssh key
- tree = sysconfig.create_default_root_account(sshkey=sshkey)
+ if admin_password is not None and sshkey is not None:
+                # store the admin password, encrypted with the instance's
+                # SSH public key, for later retrieval (e.g. via Horizon or
+                # 'nova get-password')
+ ctxt = nova_context.get_admin_context()
+ enc = crypto.ssh_encrypt_text(sshkey, admin_password)
+ instance.system_metadata.update(
+ password.convert_password(ctxt, base64.b64encode(enc)))
+ instance.save()
+
+ if encrypted_password is not None or sshkey is not None:
+ # set up the root account as 'normal' with no expiration,
+ # an ssh key, and a root password
+ tree = sysconfig.create_default_root_account(
+ sshkey=sshkey, password=encrypted_password)
else:
- # set up the root account as 'normal' but to expire immediately
+            # set up the root account to expire immediately when neither
+            # an ssh key nor a password is provided
tree = sysconfig.create_default_root_account(expire='0')
sysconfig.create_sc_profile(fp, tree)
@@ -1420,8 +1468,8 @@
fp = os.path.join(sc_dir, 'hostname.xml')
sysconfig.create_sc_profile(fp, sysconfig.create_hostname(name))
- def _create_config(self, context, instance, network_info,
- connection_info, sc_dir):
+ def _create_config(self, context, instance, network_info, connection_info,
+ sc_dir, admin_password=None):
"""Create a new Solaris Zone configuration."""
name = instance['name']
if self._get_zone_by_name(name) is not None:
@@ -1440,9 +1488,9 @@
tstate = instance['task_state']
if tstate not in [task_states.RESIZE_FINISH,
- task_states.RESIZE_REVERTING,
- task_states.RESIZE_MIGRATING,
- task_states.REBUILD_SPAWNING] or \
+ task_states.RESIZE_REVERTING,
+ task_states.RESIZE_MIGRATING,
+ task_states.REBUILD_SPAWNING] or \
(tstate == task_states.REBUILD_SPAWNING and
instance.system_metadata['rebuilding']):
sc_profile = extra_specs.get('install:sc_profile')
@@ -1453,13 +1501,12 @@
shutil.copytree(sc_profile, os.path.join(sc_dir,
'sysconfig'))
- self._verify_sysconfig(sc_dir, instance)
+ self._verify_sysconfig(sc_dir, instance, admin_password)
LOG.debug(_("Creating zone configuration for '%s' (%s)")
% (name, instance['display_name']))
- zonemanager = self.rad_connection.get_object(zonemgr.ZoneManager())
try:
- zonemanager.create(name, None, template)
+ self.zone_manager.create(name, None, template)
self._set_global_properties(name, extra_specs, brand)
if connection_info is not None:
self._set_boot_device(name, connection_info, brand)
@@ -1511,8 +1558,7 @@
# TODO(npower): investigate using RAD instead of CLI invocation
try:
out, err = utils.execute('/usr/sbin/svccfg', '-s',
- VNC_CONSOLE_BASE_FMRI, 'delete', '-f',
- name)
+ VNC_CONSOLE_BASE_FMRI, 'delete', name)
except processutils.ProcessExecutionError as ex:
if not self._has_vnc_console_service(instance):
LOG.debug(_("Ignoring attempt to delete a non-existent zone "
@@ -1600,7 +1646,7 @@
console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
# TODO(npower): investigate using RAD instead of CLI invocation
try:
- out, err = utils.execute('/usr/sbin/svcadm', 'disable',
+ out, err = utils.execute('/usr/sbin/svcadm', 'disable', '-s',
console_fmri)
except processutils.ProcessExecutionError as ex:
reason = ex.stderr
@@ -1644,8 +1690,7 @@
console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
# TODO(npower): investigate using RAD instead of CLI invocation
try:
- utils.execute('/usr/bin/svcs', '-H', '-o', 'state',
- console_fmri)
+ utils.execute('/usr/bin/svcs', '-H', '-o', 'state', console_fmri)
return True
except Exception:
return False
@@ -1663,11 +1708,11 @@
LOG.debug(zc.zone.exportConfig(True))
LOG.debug("-" * 80)
- options = ['-a ', image]
+ options = ['-a', image]
if os.listdir(sc_dir):
# the directory isn't empty so pass it along to install
- options.extend(['-c ', sc_dir])
+ options.extend(['-c', sc_dir])
try:
LOG.debug(_("Installing instance '%s' (%s)") %
@@ -1722,9 +1767,8 @@
if self._get_zone_by_name(name) is None:
raise exception.InstanceNotFound(instance_id=name)
- zonemanager = self.rad_connection.get_object(zonemgr.ZoneManager())
try:
- zonemanager.delete(name)
+ self.zone_manager.delete(name)
except Exception as ex:
reason = zonemgr_strerror(ex)
LOG.error(_("Unable to delete configuration for instance '%s' via "
@@ -1756,7 +1800,7 @@
attached to the instance.
"""
image = self._fetch_image(context, instance)
- self._validate_image(image, instance)
+ self._validate_image(context, image, instance)
# create a new directory for SC profiles
sc_dir = tempfile.mkdtemp(prefix="nova-sysconfig-",
@@ -1794,7 +1838,7 @@
installed = False
try:
self._create_config(context, instance, network_info,
- connection_info, sc_dir)
+ connection_info, sc_dir, admin_password)
configured = True
self._install(instance, image, sc_dir)
installed = True
@@ -1816,16 +1860,19 @@
shutil.rmtree(sc_dir)
if connection_info is not None:
- bdm = objects.BlockDeviceMapping(
- source_type='volume',
- destination_type='volume',
- instance_uuid=instance.uuid,
- volume_id=volume_id,
- connection_info=jsonutils.dumps(connection_info),
- device_name=mountpoint,
- delete_on_termination=True,
- volume_size=instance['root_gb'])
- bdm.create(context)
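+            # Update the existing root volume BDM in place rather than
+            # creating a duplicate entry for the instance.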
+ bdm_obj = objects.BlockDeviceMappingList()
+ # there's only one bdm for this instance at this point
+ bdm = bdm_obj.get_by_instance_uuid(context,
+ instance.uuid).objects[0]
+
+ # update the required attributes
+ bdm['connection_info'] = jsonutils.dumps(connection_info)
+ bdm['source_type'] = 'volume'
+ bdm['destination_type'] = 'volume'
+ bdm['device_name'] = mountpoint
+ bdm['delete_on_termination'] = True
+ bdm['volume_id'] = volume_id
+ bdm['volume_size'] = instance['root_gb']
bdm.save()
def _power_off(self, instance, halt_type):
@@ -1865,17 +1912,13 @@
name = instance['name']
- cpu = int(instance.system_metadata['old_instance_type_vcpus'])
- mem = int(instance.system_metadata['old_instance_type_memory_mb'])
-
- self._set_num_cpu(name, cpu, brand)
- self._set_memory_cap(name, mem, brand)
-
- rgb = int(instance.system_metadata['new_instance_type_root_gb'])
+ self._set_num_cpu(name, instance.vcpus, brand)
+ self._set_memory_cap(name, instance.memory_mb, brand)
+
+ rgb = instance.root_gb
old_rvid = instance.system_metadata.get('old_instance_volid')
if old_rvid:
new_rvid = instance.system_metadata.get('new_instance_volid')
- newvname = instance['display_name'] + "-" + self._rootzpool_suffix
mount_dev = instance['root_device_name']
del instance.system_metadata['old_instance_volid']
@@ -1900,9 +1943,7 @@
:param migrate_data: implementation specific params
"""
if (instance['task_state'] == task_states.RESIZE_REVERTING and
- instance.system_metadata['old_vm_state'] == vm_states.RESIZED):
- self._samehost_revert_resize(context, instance, network_info,
- block_device_info)
+ instance.system_metadata['old_vm_state'] == vm_states.RESIZED):
return
# A destroy is issued for the original zone for an evac case. If
@@ -2258,7 +2299,16 @@
# apply the configuration to the running zone
if zone.state == ZONE_STATE_RUNNING:
- zone.apply()
+ try:
+ zone.apply()
+ except Exception as ex:
+ reason = zonemgr_strerror(ex)
+ LOG.error(_("Unable to attach '%s' to instance '%s' via "
+ "zonemgr(3RAD): %s") % (suri, name, reason))
+ with ZoneConfig(zone) as zc:
+ zc.removeresources("device", [zonemgr.Property("storage",
+ suri)])
+ raise
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
@@ -2345,20 +2395,22 @@
LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
samehost = (dest == self.get_host_ip_addr())
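+        # Record a same-host resize in system_metadata so that later resize
+        # steps (confirm/revert) can detect it.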
+ if samehost:
+ instance.system_metadata['resize_samehost'] = samehost
extra_specs = self._get_extra_specs(instance)
brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
if brand != ZONE_BRAND_SOLARIS_KZ and not samehost:
- msg = (_("'%s' branded zones do not currently support "
- "resize to a different host.") % brand)
- raise exception.MigrationPreCheckError(reason=msg)
+ reason = (_("'%s' branded zones do not currently support resize "
+ "to a different host.") % brand)
+ raise exception.MigrationPreCheckError(reason=reason)
if brand != flavor['extra_specs'].get('zonecfg:brand'):
- msg = (_("Unable to change brand of zone during resize."))
- raise exception.MigrationPreCheckError(reason=msg)
+ reason = (_("Unable to change brand of zone during resize."))
+ raise exception.MigrationPreCheckError(reason=reason)
orgb = instance['root_gb']
- nrgb = int(instance.system_metadata['new_instance_type_root_gb'])
+ nrgb = flavor.root_gb
if orgb > nrgb:
msg = (_("Unable to resize to a smaller boot volume."))
raise exception.ResizeError(reason=msg)
@@ -2453,6 +2505,7 @@
'image_state': 'available',
'owner_id': instance['project_id'],
'instance_uuid': instance['uuid'],
+ 'image_type': snapshot['properties']['image_type'],
}
}
# Match architecture, hypervisor_type and vm_mode properties to base
@@ -2463,7 +2516,7 @@
metadata['properties'][prop] = base_prop
# Set generic container and disk formats initially in case the glance
- # service rejects unified archives (uar) and zfs in metadata
+ # service rejects Unified Archives (uar) and ZFS in metadata.
metadata['container_format'] = 'ovf'
metadata['disk_format'] = 'raw'
@@ -2597,18 +2650,15 @@
try:
if samehost:
- metadstr = 'new_instance_type_vcpus'
- cpu = int(instance.system_metadata[metadstr])
- metadstr = 'new_instance_type_memory_mb'
- mem = int(instance.system_metadata[metadstr])
+ cpu = instance.vcpus
+ mem = instance.memory_mb
self._set_num_cpu(name, cpu, brand)
self._set_memory_cap(name, mem, brand)
# Add the new disk to the volume if the size of the disk
# changed
if disk_info:
- metadstr = 'new_instance_type_root_gb'
- rgb = int(instance.system_metadata[metadstr])
+ rgb = instance.root_gb
self._resize_disk_migration(context, instance,
root_ci['serial'],
disk_info['id'],
@@ -2687,6 +2737,8 @@
if not samehost:
self.destroy(context, instance, network_info)
+ else:
+ del instance.system_metadata['resize_samehost']
def _resize_disk_migration(self, context, instance, configured,
replacement, newvolumesz, mountdev,
@@ -2763,8 +2815,14 @@
otherwise
"""
# If this is not a samehost migration then we need to re-attach the
- # original volume to the instance. If this was processed in the
- # initial revert handling this work has already been done.
+ # original volume to the instance. Otherwise we need to update the
+ # original zone configuration.
+ samehost = instance.system_metadata.get('resize_samehost')
+ if samehost:
+ self._samehost_revert_resize(context, instance, network_info,
+ block_device_info)
+ del instance.system_metadata['resize_samehost']
+
old_rvid = instance.system_metadata.get('old_instance_volid')
if old_rvid:
connector = self.get_volume_connector(instance)
@@ -2789,7 +2847,7 @@
del instance.system_metadata['new_instance_volid']
del instance.system_metadata['old_instance_volid']
else:
- new_rvid = instance.system_metadata['new_instance_volid']
+ new_rvid = instance.system_metadata.get('new_instance_volid')
if new_rvid:
del instance.system_metadata['new_instance_volid']
self._volume_api.delete(context, new_rvid)
@@ -2812,13 +2870,46 @@
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def suspend(self, instance):
+ def suspend(self, context, instance):
"""suspend the specified instance.
+ :param context: the context for the suspend
:param instance: nova.objects.instance.Instance
"""
- # TODO(Vek): Need to pass context in for access to auth_token
- raise NotImplementedError()
+ name = instance['name']
+ zone = self._get_zone_by_name(name)
+ if zone is None:
+ raise exception.InstanceNotFound(instance_id=name)
+
+ if zone.brand != ZONE_BRAND_SOLARIS_KZ:
+ # Only Solaris kernel zones are currently supported.
+ reason = (_("'%s' branded zones do not currently support "
+ "suspend. Use 'nova reset-state --active %s' "
+ "to reset instance state back to 'active'.")
+ % (zone.brand, instance['display_name']))
+ raise exception.InstanceSuspendFailure(reason=reason)
+
+ if self._get_state(zone) != power_state.RUNNING:
+ reason = (_("Instance '%s' is not running.") % name)
+ raise exception.InstanceSuspendFailure(reason=reason)
+
+ try:
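+            # Ensure the zone's suspend resource exists and points at the
+            # configured suspend path before suspending.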
+ new_path = os.path.join(CONF.zones_suspend_path, '%{zonename}')
+ if not lookup_resource(zone, 'suspend'):
+ # add suspend if not configured
+ self._set_suspend(instance)
+ elif lookup_resource_property(zone, 'suspend', 'path') != new_path:
+ # replace the old suspend resource with the new one
+ with ZoneConfig(zone) as zc:
+ zc.removeresources('suspend')
+ self._set_suspend(instance)
+
+ zone.suspend()
+ except Exception as ex:
+ reason = zonemgr_strerror(ex)
+ LOG.error(_("Unable to suspend instance '%s' via "
+ "zonemgr(3RAD): %s") % (name, reason))
+ raise exception.InstanceSuspendFailure(reason=reason)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance.
@@ -2829,7 +2920,29 @@
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
"""
- raise NotImplementedError()
+ name = instance['name']
+ zone = self._get_zone_by_name(name)
+ if zone is None:
+ raise exception.InstanceNotFound(instance_id=name)
+
+ if zone.brand != ZONE_BRAND_SOLARIS_KZ:
+ # Only Solaris kernel zones are currently supported.
+ reason = (_("'%s' branded zones do not currently support "
+ "resume.") % zone.brand)
+ raise exception.InstanceResumeFailure(reason=reason)
+
+ # check that the instance is suspended
+ if self._get_state(zone) != power_state.SHUTDOWN:
+ reason = (_("Instance '%s' is not suspended.") % name)
+ raise exception.InstanceResumeFailure(reason=reason)
+
+ try:
+ zone.boot()
+ except Exception as ex:
+ reason = zonemgr_strerror(ex)
+ LOG.error(_("Unable to resume instance '%s' via zonemgr(3RAD): %s")
+ % (name, reason))
+ raise exception.InstanceResumeFailure(reason=reason)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
@@ -3160,7 +3273,7 @@
back to the source host to check the results.
:param context: security context
- :param instance: nova.db.sqlalchemy.models.Instance
+ :param instance: nova.objects.instance.Instance object
"""
raise NotImplementedError()
@@ -3250,7 +3363,7 @@
raise exception.MigrationPreCheckError(reason=reason)
def check_can_live_migrate_source(self, context, instance,
- dest_check_data, block_device_info):
+ dest_check_data, block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
@@ -3272,12 +3385,11 @@
raise exception.MigrationPreCheckError(reason=reason)
return dest_check_data
- def get_instance_disk_info(self, instance_name,
+ def get_instance_disk_info(self, instance,
block_device_info=None):
"""Retrieve information about actual disk sizes of an instance.
- :param instance_name:
- name of a nova instance as returned by list_instances()
+ :param instance: nova.objects.Instance
:param block_device_info:
Optional; Can be used to filter out devices which are
actually volumes.
@@ -3418,7 +3530,7 @@
"""Set the root password on the specified instance.
:param instance: nova.objects.instance.Instance
- :param new_password: the new password
+ :param new_pass: the new password
"""
name = instance['name']
zone = self._get_zone_by_name(name)
@@ -3427,8 +3539,8 @@
if zone.state == ZONE_STATE_RUNNING:
out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
- '/usr/bin/passwd', '-p', "'%s'" %
- sha256_crypt.encrypt(new_pass))
+ '/usr/bin/passwd', '-p',
+ "'%s'" % sha256_crypt.encrypt(new_pass))
else:
raise exception.InstanceNotRunning(instance_id=name)
@@ -3476,7 +3588,7 @@
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def host_power_action(self, host, action):
+ def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
@@ -3486,12 +3598,12 @@
"""
raise NotImplementedError()
- def set_host_enabled(self, host, enabled):
+ def set_host_enabled(self, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def get_host_uptime(self, host):
+ def get_host_uptime(self):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
return utils.execute('/usr/bin/uptime')[0]
@@ -3511,23 +3623,6 @@
"""
raise NotImplementedError()
- def get_host_stats(self, refresh=False):
- """Return currently known host stats.
-
- If the hypervisor supports pci passthrough, the returned
- dictionary includes a key-value pair for it.
- The key of pci passthrough device is "pci_passthrough_devices"
- and the value is a json string for the list of assignable
- pci devices. Each device is a dictionary, with mandatory
- keys of 'address', 'vendor_id', 'product_id', 'dev_type',
- 'dev_id', 'label' and other optional device specific information.
-
- Refer to the objects/pci_device.py for more idea of these keys.
- """
- if refresh or not self._host_stats:
- self._update_host_stats()
- return self._host_stats
-
def get_host_cpu_stats(self):
"""Get the currently known host CPU stats.
@@ -3550,9 +3645,9 @@
"""
raise NotImplementedError()
- def block_stats(self, instance_name, disk_id):
+ def block_stats(self, instance, disk_id):
"""Return performance counters associated with the given disk_id on the
- given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
+ given instance. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
@@ -3569,25 +3664,6 @@
"""
raise NotImplementedError()
- def interface_stats(self, instance_name, iface_id):
- """Return performance counters associated with the given iface_id
- on the given instance_id. These are returned as [rx_bytes, rx_packets,
- rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
- indicates receive, tx indicates transmit, bytes and packets indicate
- the total number of bytes or packets transferred, and errs and dropped
- is the total number of packets failed / dropped.
-
- All counters are long integers.
-
- This method is optional. On some platforms (e.g. XenAPI) performance
- statistics can be retrieved directly in aggregate form, without Nova
- having to do the aggregation. On those platforms, this method is
- unused.
-
- Note that this function takes an instance ID.
- """
- raise NotImplementedError()
-
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?"""
return False
@@ -3645,7 +3721,7 @@
| ]
"""
- pass
+ return None
def manage_image_cache(self, context, all_instances):
"""Manage the driver's local image cache.
@@ -3655,7 +3731,7 @@
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
- :param instances: nova.objects.instance.InstanceList
+ :param all_instances: nova.objects.instance.InstanceList
"""
pass
@@ -3728,7 +3804,9 @@
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
- stats = self.get_host_stats(refresh=refresh)
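+        # get_host_stats() has been removed from the driver API, so refresh
+        # the cached host stats inline here.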
+ if refresh or not self._host_stats:
+ self._update_host_stats()
+ stats = self._host_stats
if not isinstance(stats, list):
stats = [stats]
return [s['hypervisor_hostname'] for s in stats]
@@ -3813,7 +3891,7 @@
LOG.debug("Emitting event %s", str(event))
self._compute_event_callback(event)
except Exception as ex:
- LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),
+ LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"),
{'event': event, 'ex': ex})
def delete_instance_files(self, instance):
@@ -3822,10 +3900,10 @@
:param instance: nova.objects.instance.Instance
:returns: True if the instance was deleted from disk, False otherwise.
"""
- LOG.debug(_("Cleaning up for instance %s"), instance['name'])
# Delete the zone configuration for the instance using destroy, because
# it will simply take care of the work, and we don't need to duplicate
# the code here.
+ LOG.debug(_("Cleaning up for instance %s"), instance['name'])
try:
self.destroy(None, instance, None)
except Exception:
@@ -3895,3 +3973,33 @@
# type and implement this function at their
# virt layer.
return False
+
+ def quiesce(self, context, instance, image_meta):
+ """Quiesce the specified instance to prepare for snapshots.
+
+ If the specified instance doesn't support quiescing,
+        InstanceQuiesceNotSupported is raised. If quiescing fails for any
+        other reason (e.g. agent timeout), NovaException is raised.
+
+ :param context: request context
+ :param instance: nova.objects.instance.Instance to be quiesced
+ :param image_meta: image object returned by nova.image.glance that
+ defines the image from which this instance
+ was created
+ """
+ raise NotImplementedError()
+
+ def unquiesce(self, context, instance, image_meta):
+ """Unquiesce the specified instance after snapshots.
+
+ If the specified instance doesn't support quiescing,
+        InstanceQuiesceNotSupported is raised. If unquiescing fails for any
+        other reason (e.g. agent timeout), NovaException is raised.
+
+ :param context: request context
+ :param instance: nova.objects.instance.Instance to be unquiesced
+ :param image_meta: image object returned by nova.image.glance that
+ defines the image from which this instance
+ was created
+ """
+ raise NotImplementedError()