components/openstack/nova/files/solariszones/driver.py
branch s11u3-sru
changeset 5430 b6b088be89d5
parent 5429 1ae4cfbadda9
child 5432 e6a5784e62c3
    34 from solaris_install.archive import LOGFILE as ARCHIVE_LOGFILE
    35 from solaris_install.archive import UnifiedArchive
    36 from solaris_install.engine import InstallEngine
    37 from solaris_install.target.size import Size
    38 
+   39 from cinderclient import exceptions as cinder_exception
    40 from eventlet import greenthread
    41 from lxml import etree
    42 from oslo_config import cfg
    43 
    44 from nova.compute import power_state
    64 from nova.virt import driver
    65 from nova.virt import event as virtevent
    66 from nova.virt import images
    67 from nova.virt.solariszones import sysconfig
    68 from nova import volume
+   69 from nova.volume.cinder import API
+   70 from nova.volume.cinder import cinderclient
+   71 from nova.volume.cinder import get_cinder_client_version
+   72 from nova.volume.cinder import translate_volume_exception
+   73 from nova.volume.cinder import _untranslate_volume_summary_view
    74 
    75 solariszones_opts = [
    76     cfg.StrOpt('glancecache_dirname',
    77                default='$state_path/images',
    78                help='Default path to Glance cache for Solaris Zones.'),
   144 # and what is not supported. A HYPERVISOR_VERSION is defined here for
   145 # Nova's use but it generally should not be changed unless there is an
   146 # incompatible change such as one concerning kernel zone live migration.
   147 HYPERVISOR_VERSION = '5.11'
   148 
+  149 ROOTZPOOL_RESOURCE = 'rootzpool'
   150 
   151 def lookup_resource_property(zone, resource, prop, filter=None):
   152     """Lookup specified property from specified Solaris Zone resource."""
   153     try:
   154         val = zone.getResourceProperties(zonemgr.Resource(resource, filter),
   199         error.append(stderr.replace('\n', ': '))
   200     result = ': '.join(error)
   201     return result
   202 
   203 
+  204 class SolarisVolumeAPI(API):
+  205     """Extend the volume API to support additional cinder sub-commands.
+  206     """
+  207     @translate_volume_exception
+  208     def create(self, context, size, name, description, snapshot=None,
+  209                image_id=None, volume_type=None, metadata=None,
+  210                availability_zone=None, source_volume=None):
+  211         """Clone the source volume by calling the cinderclient version of
+  212         create with a source_volid argument.
+  213 
+  214         :param context: the context for the clone
+  215         :param size: size of the new volume, must be the same as the source
+  216             volume
+  217         :param name: display_name of the new volume
+  218         :param description: display_description of the new volume
+  219         :param snapshot: Snapshot object
+  220         :param image_id: image_id to create the volume from
+  221         :param volume_type: type of volume
+  222         :param metadata: Additional metadata for the volume
+  223         :param availability_zone: zone:host where the volume is to be created
+  224         :param source_volume: Volume object
+  225 
+  226         Returns a volume object
+  227         """
+  228         if snapshot is not None:
+  229             snapshot_id = snapshot['id']
+  230         else:
+  231             snapshot_id = None
+  232 
+  233         if source_volume is not None:
+  234             source_volid = source_volume['id']
+  235         else:
+  236             source_volid = None
+  237 
+  238         kwargs = dict(snapshot_id=snapshot_id,
+  239                       volume_type=volume_type,
+  240                       user_id=context.user_id,
+  241                       project_id=context.project_id,
+  242                       availability_zone=availability_zone,
+  243                       metadata=metadata,
+  244                       imageRef=image_id,
+  245                       source_volid=source_volid)
+  246 
+  247         version = get_cinder_client_version(context)
+  248         if version == '1':
+  249             kwargs['display_name'] = name
+  250             kwargs['display_description'] = description
+  251         elif version == '2':
+  252             kwargs['name'] = name
+  253             kwargs['description'] = description
+  254 
+  255         try:
+  256             item = cinderclient(context).volumes.create(size, **kwargs)
+  257             return _untranslate_volume_summary_view(context, item)
+  258         except cinder_exception.OverLimit:
+  259             raise exception.OverQuota(overs='volumes')
+  260         except cinder_exception.BadRequest as err:
+  261             raise exception.InvalidInput(reason=unicode(err))
+  262 
+  263     @translate_volume_exception
+  264     def update(self, context, volume_id, fields):
+  265         """Update the fields of a volume, for example to rename it, via
+  266         a call to cinderclient
+  267 
+  268         :param context: the context for the update
+  269         :param volume_id: the id of the volume to update
+  270         :param fields: a dictionary of the name/value pairs to update
+  271         """
+  272         cinderclient(context).volumes.update(volume_id, **fields)
+  273 
+  274     @translate_volume_exception
+  275     def extend(self, context, volume, newsize):
+  276         """Extend the size of a cinder volume by calling the cinderclient
+  277 
+  278         :param context: the context for the extend
+  279         :param volume: the volume object to extend
+  280         :param newsize: the new size of the volume in GB
+  281         """
+  282         cinderclient(context).volumes.extend(volume, newsize)
+  283 
+  284 
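Taken together, the three sub-commands give the driver clone (create with source_volume), rename (update), and grow (extend) operations on Cinder volumes, and the resize code further down in this changeset relies on exactly this trio. A minimal usage sketch, assuming a Nova request context and hypothetical names and sizes (get() is inherited from nova.volume.cinder.API):

    vol_api = SolarisVolumeAPI()
    # Create a 10 GB volume; create() fills in the v1 or v2 cinderclient
    # keyword names for name/description automatically.
    vol = vol_api.create(context, 10, "myinstance-rootzpool",
                         "Boot volume for instance 'myinstance'")
    # Poll until the volume leaves 'creating', as _create_boot_volume and
    # migrate_disk_and_power_off do below.
    while vol_api.get(context, vol['id'])['status'] == 'creating':
        greenthread.sleep(1)
    # Rename, then grow the volume to 20 GB.
    vol_api.update(context, vol['id'],
                   {'display_name': "myinstance-rootzpool-old"})
    vol_api.extend(context, vol['id'], 20)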
   285 class ZoneConfig(object):
   286     """ZoneConfig - context manager for accessing zone configurations.
   287     Automatically opens the configuration for a zone and commits any changes
   288     before exiting
   289     """
   351             LOG.error(_("Unable to set '%s' property on '%s' resource for "
   352                         "instance '%s' via zonemgr(3RAD): %s")
   353                       % (prop, resource, self.zone.name, reason))
   354             raise
   355 
-  268     def addresource(self, resource, props=None):
-  269         """creates a new resource with an optional property list."""
+  356     def addresource(self, resource, props=None, ignore_exists=False):
+  357         """creates a new resource with an optional property list, or sets the
+  358         property if the resource exists and ignore_exists is true.
+  359 
+  360         :param ignore_exists: If the resource exists, set the property for the
+  361             resource.
+  362         """
   363         if props is None:
   364             props = []
   365 
   366         try:
   367             self.zone.addResource(zonemgr.Resource(resource, props))
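The hunk ends before the new ignore_exists handling becomes visible, so only the docstring documents it. A plausible sketch of the fallback, assuming the generic exception handling used elsewhere in this driver and name/value attributes on zonemgr.Property (both assumptions, not code from this changeset):

    try:
        self.zone.addResource(zonemgr.Resource(resource, props))
    except Exception:
        if not ignore_exists:
            raise
        # The resource already exists: apply each property individually
        # through the existing setprop() helper on this context manager.
        for prop in props:
            self.setprop(resource, prop.name, prop.value)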
   444         self._install_engine = None
   445         self._pagesize = os.sysconf('SC_PAGESIZE')
   446         self._rad_connection = None
   447         self._uname = os.uname()
   448         self._validated_archives = list()
-  356         self._volume_api = volume.API()
+  449         self._volume_api = SolarisVolumeAPI()
+  450         self._rootzpool_suffix = ROOTZPOOL_RESOURCE
   451 
   452     @property
   453     def rad_connection(self):
   454         if self._rad_connection is None:
   455             self._rad_connection = rad.connect.connect_unix()
   602         """Convert a number of pages of memory into a total size in KBytes."""
   603         return (pages * self._pagesize) / 1024
   604 
   605     def _get_max_mem(self, zone):
   606         """Return the maximum memory in KBytes allowed."""
-  513         max_mem = lookup_resource_property(zone, 'capped-memory', 'physical')
+  607         if zone.brand == ZONE_BRAND_SOLARIS:
+  608             mem_resource = 'swap'
+  609         else:
+  610             mem_resource = 'physical'
+  611 
+  612         max_mem = lookup_resource_property(zone, 'capped-memory', mem_resource)
   613         if max_mem is not None:
   614             return strutils.string_to_bytes("%sB" % max_mem) / 1024
   615 
   616         # If physical property in capped-memory doesn't exist, this may
   617         # represent a non-global zone so just return the system's total
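A worked example of the unit handling, with hypothetical values: for a solaris-branded zone capped with swap=2048M, the lookup now reads the swap property of the capped-memory resource, and the existing conversion turns it into KBytes:

    max_mem = lookup_resource_property(zone, 'capped-memory', 'swap')  # '2048M'
    kbytes = strutils.string_to_bytes("%sB" % max_mem) / 1024  # 2097152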
  1016         """Create a (Cinder) volume service backed boot volume"""
  1017         try:
  1018             vol = self._volume_api.create(
  1019                 context,
  1020                 instance['root_gb'],
-  922                 instance['display_name'] + "-rootzpool",
+ 1021                 instance['display_name'] + "-" + self._rootzpool_suffix,
  1022                 "Boot volume for instance '%s' (%s)"
  1023                 % (instance['name'], instance['uuid']))
  1024             # TODO(npower): Polling is what nova/compute/manager also does when
  1025             # creating a new volume, so we do likewise here.
  1026             while True:
  1040         volume_id = volume['id']
  1041 
  1042         connector = self.get_volume_connector(instance)
  1043         connection_info = self._volume_api.initialize_connection(
  1044             context, volume_id, connector)
+ 1045         connection_info['serial'] = volume_id
  1046 
  1047         # Check connection_info to determine if the provided volume is
  1048         # local to this compute node. If it is, then don't use it for
  1049         # Solaris branded zones in order to avoid a known ZFS deadlock issue
  1050         # when using a zpool within another zpool on the same system.
  1107                         "device",
  1108                         [zonemgr.Property("bootpri", "0")]),
  1109                     [zonemgr.Property("storage", suri)])
  1110             else:
  1111                 zc.addresource(
- 1012                     "rootzpool",
- 1013                     [zonemgr.Property("storage", listvalue=[suri])])
+ 1112                     ROOTZPOOL_RESOURCE,
+ 1113                     [zonemgr.Property("storage", listvalue=[suri])],
+ 1114                     ignore_exists=True)
  1115 
  1116     def _set_num_cpu(self, name, vcpus, brand):
  1117         """Set number of VCPUs in a Solaris Zone configuration."""
  1118         zone = self._get_zone_by_name(name)
  1119         if zone is None:
  1203                 else:
  1204                     id = lookup_resource_property(zc.zone, 'anet', 'id',
  1205                                                   filter)
  1206                     linkname = 'net%s' % id
  1207 
- 1107             # create the required sysconfig file
- 1108             subnet_uuid = port['fixed_ips'][0]['subnet_id']
- 1109             subnet = network_plugin.show_subnet(subnet_uuid)['subnet']
- 1110 
- 1111             if subnet['enable_dhcp']:
- 1112                 tree = sysconfig.create_ncp_defaultfixed('dhcp', linkname,
- 1113                                                          netid, ip_version)
- 1114             else:
- 1115                 tree = sysconfig.create_ncp_defaultfixed('static', linkname,
- 1116                                                          netid, ip_version, ip,
- 1117                                                          route, nameservers)
- 1118 
- 1119             fp = os.path.join(sc_dir, 'evs-network-%d.xml' % netid)
- 1120             sysconfig.create_sc_profile(fp, tree)
+ 1208             # create the required sysconfig file (or skip if this is part of a
+ 1209             # resize process)
+ 1210             tstate = instance['task_state']
+ 1211             if tstate not in [task_states.RESIZE_FINISH,
+ 1212                               task_states.RESIZE_REVERTING,
+ 1213                               task_states.RESIZE_MIGRATING]:
+ 1214                 subnet_uuid = port['fixed_ips'][0]['subnet_id']
+ 1215                 subnet = network_plugin.show_subnet(subnet_uuid)['subnet']
+ 1216 
+ 1217                 if subnet['enable_dhcp']:
+ 1218                     tree = sysconfig.create_ncp_defaultfixed('dhcp', linkname,
+ 1219                                                              netid, ip_version)
+ 1220                 else:
+ 1221                     tree = sysconfig.create_ncp_defaultfixed('static',
+ 1222                                                              linkname, netid,
+ 1223                                                              ip_version, ip,
+ 1224                                                              route,
+ 1225                                                              nameservers)
+ 1226 
+ 1227                 fp = os.path.join(sc_dir, 'evs-network-%d.xml' % netid)
+ 1228                 sysconfig.create_sc_profile(fp, tree)
  1229 
  1230         if tenant_id is not None:
  1231             # set the tenant id
  1232             with ZoneConfig(zone) as zc:
  1233                 zc.setprop('global', 'tenant', tenant_id)
  1306         if template is None:
  1307             msg = (_("Invalid brand '%s' specified for instance '%s'"
  1308                    % (brand, name)))
  1309             raise exception.NovaException(msg)
  1310 
- 1203         sc_profile = extra_specs.get('install:sc_profile')
- 1204         if sc_profile is not None:
- 1205             if os.path.isfile(sc_profile):
- 1206                 shutil.copy(sc_profile, sc_dir)
- 1207             elif os.path.isdir(sc_profile):
- 1208                 shutil.copytree(sc_profile, os.path.join(sc_dir, 'sysconfig'))
- 1209 
- 1210         self._verify_sysconfig(sc_dir, instance)
+ 1311         tstate = instance['task_state']
+ 1312         if tstate not in [task_states.RESIZE_FINISH,
+ 1313                            task_states.RESIZE_REVERTING,
+ 1314                            task_states.RESIZE_MIGRATING]:
+ 1315             sc_profile = extra_specs.get('install:sc_profile')
+ 1316             if sc_profile is not None:
+ 1317                 if os.path.isfile(sc_profile):
+ 1318                     shutil.copy(sc_profile, sc_dir)
+ 1319                 elif os.path.isdir(sc_profile):
+ 1320                     shutil.copytree(sc_profile, os.path.join(sc_dir,
+ 1321                                     'sysconfig'))
+ 1322 
+ 1323             self._verify_sysconfig(sc_dir, instance)
  1324 
  1325         LOG.debug(_("Creating zone configuration for '%s' (%s)")
  1326                   % (name, instance['display_name']))
  1327         zonemanager = self.rad_connection.get_object(zonemgr.ZoneManager())
  1328         try:
  1682             # remove the sc_profile temp directory
  1683             shutil.rmtree(sc_dir)
  1684 
  1685         if connection_info is not None:
  1686             bdm = objects.BlockDeviceMapping(
- 1574                     source_type='volume', destination_type='volume',
+ 1687                     source_type='volume',
+ 1688                     destination_type='volume',
  1689                     instance_uuid=instance.uuid,
  1690                     volume_id=volume_id,
  1691                     connection_info=jsonutils.dumps(connection_info),
  1692                     device_name=mountpoint,
  1693                     delete_on_termination=True,
  1719                     return
  1720             LOG.error(_("Unable to power off instance '%s' via zonemgr(3RAD): "
  1721                         "%s") % (name, reason))
  1722             raise exception.InstancePowerOffFailure(reason=reason)
  1723 
+ 1724     def _samehost_revert_resize(self, context, instance, network_info,
+ 1725                                 block_device_info):
+ 1726         """Revert the zone's configuration to the pre-resize config
+ 1727         """
+ 1728         self.power_off(instance)
+ 1729 
+ 1730         inst_type = flavor_obj.Flavor.get_by_id(
+ 1731             nova_context.get_admin_context(read_deleted='yes'),
+ 1732             instance['instance_type_id'])
+ 1733         extra_specs = inst_type['extra_specs'].copy()
+ 1734         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+ 1735 
+ 1736         name = instance['name']
+ 1737 
+ 1738         cpu = int(instance.system_metadata['old_instance_type_vcpus'])
+ 1739         mem = int(instance.system_metadata['old_instance_type_memory_mb'])
+ 1740 
+ 1741         self._set_num_cpu(name, cpu, brand)
+ 1742         self._set_memory_cap(name, mem, brand)
+ 1743 
+ 1744         rgb = int(instance.system_metadata['new_instance_type_root_gb'])
+ 1745         old_rvid = instance.system_metadata.get('old_instance_volid')
+ 1746         if old_rvid:
+ 1747             new_rvid = instance.system_metadata.get('new_instance_volid')
+ 1748             newvname = instance['display_name'] + "-" + self._rootzpool_suffix
+ 1749             mount_dev = instance['root_device_name']
+ 1750             del instance.system_metadata['new_instance_volid']
+ 1751             del instance.system_metadata['old_instance_volid']
+ 1752 
+ 1753             self._resize_disk_migration(context, instance, new_rvid, old_rvid,
+ 1754                                         rgb, mount_dev)
+ 1755 
+ 1756             self._volume_api.delete(context, new_rvid)
+ 1757 
  1758     def destroy(self, context, instance, network_info, block_device_info=None,
  1759                 destroy_disks=True, migrate_data=None):
  1760         """Destroy the specified instance from the Hypervisor.
  1761 
  1762         If the instance is not found (for example if networking failed), this
  1770         :param block_device_info: Information about block devices that should
  1771                                   be detached from the instance.
  1772         :param destroy_disks: Indicates if disks should be destroyed
  1773         :param migrate_data: implementation specific params
  1774         """
+ 1775         if (instance['task_state'] == task_states.RESIZE_REVERTING and
+ 1776            instance.system_metadata['old_vm_state'] == vm_states.RESIZED):
+ 1777             self._samehost_revert_resize(context, instance, network_info,
+ 1778                                          block_device_info)
+ 1779             return
+ 1780 
  1781         try:
  1782             # These methods log if problems occur so no need to double log
  1783             # here. Just catch any stray exceptions and allow destroy to
  1784             # proceed.
  1785             if self._has_vnc_console_service(instance):
  1805                 self._delete_config(instance)
  1806         except Exception as ex:
  1807             reason = zonemgr_strerror(ex)
  1808             LOG.warning(_("Unable to destroy instance '%s' via zonemgr(3RAD): "
  1809                           "%s") % (name, reason))
+ 1810 
+ 1811         # One last point of house keeping. If we are deleting the instance
+ 1812         # during a resize operation we want to make sure the cinder volumes are
+ 1813         # properly cleaned up. We need to do this here, because the periodic
+ 1814         # task that comes along and cleans these things up isn't nice enough to
+ 1815         # pass a context in so that we could simply do the work there.  But
+ 1816         # because we have access to a context, we can handle the work here and
+ 1817         # let the periodic task simply clean up the leftover zone
+ 1818         # configuration that might be left around.  Note that the leftover
+ 1819         # zone will only show up in zoneadm list, not nova list.
+ 1820         #
+ 1821         # If the task state is RESIZE_REVERTING do not process these because
+ 1822         # the cinder volume cleanup is taken care of in
+ 1823         # finish_revert_migration.
+ 1824         if instance['task_state'] == task_states.RESIZE_REVERTING:
+ 1825             return
+ 1826 
+ 1827         tags = ['old_instance_volid', 'new_instance_volid']
+ 1828         for tag in tags:
+ 1829             volid = instance.system_metadata.get(tag)
+ 1830             if volid:
+ 1831                 try:
+ 1832                     LOG.debug(_("Deleting volume %s"), volid)
+ 1833                     self._volume_api.delete(context, volid)
+ 1834                     del instance.system_metadata[tag]
+ 1835                 except Exception:
+ 1836                     pass
  1837 
  1838     def cleanup(self, context, instance, network_info, block_device_info=None,
  1839                 destroy_disks=True, migrate_data=None, destroy_vifs=True):
  1840         """Cleanup the instance resources.
  1841 
  2116 
  2117         with ZoneConfig(zone) as zc:
  2118             zc.addresource("device", [zonemgr.Property("storage", suri)])
  2119 
  2120         # apply the configuration to the running zone
- 1940         zone.apply()
+ 2121         if zone.state == ZONE_STATE_RUNNING:
+ 2122             zone.apply()
  2123 
  2124     def detach_volume(self, connection_info, instance, mountpoint,
  2125                       encryption=None):
  2126         """Detach the disk attached to the instance."""
  2127         name = instance['name']
  2148 
  2149         with ZoneConfig(zone) as zc:
  2150             zc.removeresources("device", [zonemgr.Property("storage", suri)])
  2151 
  2152         # apply the configuration to the running zone
- 1971         zone.apply()
+ 2153         if zone.state == ZONE_STATE_RUNNING:
+ 2154             zone.apply()
  2155 
  2156     def swap_volume(self, old_connection_info, new_connection_info,
  2157                     instance, mountpoint, resize_to):
  2158         """Replace the disk attached to the instance.
  2159 
  2175         """Detach an interface from the instance.
  2176 
  2177         :param instance: nova.objects.instance.Instance
  2178         """
  2179         raise NotImplementedError()
+ 2180 
+ 2181     def _cleanup_migrate_disk(self, context, instance, volume):
+ 2182         """Make a best effort at cleaning up the volume that was created to
+ 2183         hold the new root disk
+ 2184 
+ 2185         :param context: the context for the migration/resize
+ 2186         :param instance: nova.objects.instance.Instance being migrated/resized
+ 2187         :param volume: new volume created by the call to cinder create
+ 2188         """
+ 2189         try:
+ 2190             self._volume_api.delete(context, volume['id'])
+ 2191         except Exception as err:
+ 2192             LOG.error(_("Unable to cleanup the resized volume: %s" % err))
  2193 
  2194     def migrate_disk_and_power_off(self, context, instance, dest,
  2195                                    flavor, network_info,
  2196                                    block_device_info=None,
  2197                                    timeout=0, retry_interval=0):
  2201         :param instance: nova.objects.instance.Instance
  2202         :param timeout: time to wait for GuestOS to shutdown
  2203         :param retry_interval: How often to signal guest while
  2204                                waiting for it to shutdown
  2205         """
- 2010         raise NotImplementedError()
+ 2206         LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
+ 2207 
+ 2208         samehost = (dest == self.get_host_ip_addr())
+ 2209         inst_type = flavor_obj.Flavor.get_by_id(
+ 2210             nova_context.get_admin_context(read_deleted='yes'),
+ 2211             instance['instance_type_id'])
+ 2212         extra_specs = inst_type['extra_specs'].copy()
+ 2213         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+ 2214         if brand != ZONE_BRAND_SOLARIS_KZ and not samehost:
+ 2215             msg = (_("'%s' branded zones do not currently support "
+ 2216                      "resize to a different host.") % brand)
+ 2217             raise exception.MigrationPreCheckError(reason=msg)
+ 2218 
+ 2219         if brand != flavor['extra_specs'].get('zonecfg:brand'):
+ 2220             msg = (_("Unable to change brand of zone during resize."))
+ 2221             raise exception.MigrationPreCheckError(reason=msg)
+ 2222 
+ 2223         orgb = instance['root_gb']
+ 2224         nrgb = int(instance.system_metadata['new_instance_type_root_gb'])
+ 2225         if orgb > nrgb:
+ 2226             msg = (_("Unable to resize to a smaller boot volume."))
+ 2227             raise exception.ResizeError(reason=msg)
+ 2228 
+ 2229         self.power_off(instance, timeout, retry_interval)
+ 2230 
+ 2231         disk_info = None
+ 2232         if nrgb > orgb or not samehost:
+ 2233             bmap = block_device_info.get('block_device_mapping')
+ 2234             rootmp = instance.root_device_name
+ 2235             for entry in bmap:
+ 2236                 mountdev = entry['mount_device'].rpartition('/')[2]
+ 2237                 if mountdev == rootmp:
+ 2238                     root_ci = entry['connection_info']
+ 2239                     break
+ 2240             else:
+ 2241                 # If this is a non-global zone that is on the same host and is
+ 2242                 # simply using a dataset, the disk size is purely an OpenStack
+ 2243                 # quota.  We can continue without doing any disk work.
+ 2244                 if samehost and brand == ZONE_BRAND_SOLARIS:
+ 2245                     return disk_info
+ 2246                 else:
+ 2247                     msg = (_("Cannot find an attached root device."))
+ 2248                     raise exception.ResizeError(reason=msg)
+ 2249 
+ 2250             if root_ci['driver_volume_type'] == 'iscsi':
+ 2251                 volume_id = root_ci['data']['volume_id']
+ 2252             else:
+ 2253                 volume_id = root_ci['serial']
+ 2254 
+ 2255             if volume_id is None:
+ 2256                 msg = (_("Cannot find an attached root device."))
+ 2257                 raise exception.ResizeError(reason=msg)
+ 2258 
+ 2259             vinfo = self._volume_api.get(context, volume_id)
+ 2260             newvolume = self._volume_api.create(context, orgb,
+ 2261                                                 vinfo['display_name'] +
+ 2262                                                 '-resized',
+ 2263                                                 vinfo['display_description'],
+ 2264                                                 source_volume=vinfo)
+ 2265 
+ 2266             instance.system_metadata['old_instance_volid'] = volume_id
+ 2267             instance.system_metadata['new_instance_volid'] = newvolume['id']
+ 2268 
+ 2269             # TODO(npower): Polling is what nova/compute/manager also does when
+ 2270             # creating a new volume, so we do likewise here.
+ 2271             while True:
+ 2272                 volume = self._volume_api.get(context, newvolume['id'])
+ 2273                 if volume['status'] != 'creating':
+ 2274                     break
+ 2275                 greenthread.sleep(1)
+ 2276 
+ 2277             if nrgb > orgb:
+ 2278                 try:
+ 2279                     self._volume_api.extend(context, newvolume['id'], nrgb)
+ 2280                 except Exception:
+ 2281                     LOG.error(_("Failed to extend the new volume"))
+ 2282                     self._cleanup_migrate_disk(context, instance, newvolume)
+ 2283                     raise
+ 2284 
+ 2285             disk_info = newvolume
+ 2286 
+ 2287         return disk_info
  2288 
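For orientation, the disk_info returned above is the new volume dict; Nova's compute manager hands it back to the driver as the disk_info argument of finish_migration() below, while a revert flows through finish_revert_migration() or, for a same-host revert, through the check added to destroy(). A rough sketch of the happy path, with hypothetical drv, ctxt, bdi, and migration objects:

    disk_info = drv.migrate_disk_and_power_off(ctxt, instance, dest, flavor,
                                               network_info,
                                               block_device_info=bdi)
    drv.finish_migration(ctxt, migration, instance, disk_info, network_info,
                         image_meta, resize_instance=True,
                         block_device_info=bdi, power_on=True)
    # Later, once the user confirms the resize:
    drv.confirm_migration(ctxt, migration, instance, network_info)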
  2289     def snapshot(self, context, instance, image_id, update_task_state):
  2290         """Snapshots the specified instance.
  2291 
  2292         :param context: security context
  2380         :param context: security context
  2381         :param instance: nova.objects.instance.Instance
  2382         """
  2383         pass
  2384 
+ 2385     def _cleanup_finish_migration(self, context, instance, disk_info,
+ 2386                                   network_info, samehost):
+ 2387         """Best effort attempt at cleaning up any additional resources that are
+ 2388         not directly managed by Nova or Cinder so as not to leak these
+ 2389         resources.
+ 2390         """
+ 2391         if disk_info:
+ 2392             self._volume_api.detach(context, disk_info['id'])
+ 2393             self._volume_api.delete(context, disk_info['id'])
+ 2394 
+ 2395             old_rvid = instance.system_metadata.get('old_instance_volid')
+ 2396             if old_rvid:
+ 2397                 connector = self.get_volume_connector(instance)
+ 2398                 connection_info = self._volume_api.initialize_connection(
+ 2399                                     context, old_rvid, connector)
+ 2400 
+ 2401                 new_rvid = instance.system_metadata['new_instance_volid']
+ 2402 
+ 2403                 rootmp = instance.root_device_name
+ 2404                 self._volume_api.attach(context, old_rvid, instance['uuid'],
+ 2405                                         rootmp)
+ 2406 
+ 2407                 bdmobj = objects.BlockDeviceMapping()
+ 2408                 bdm = bdmobj.get_by_volume_id(context, new_rvid)
+ 2409                 bdm['connection_info'] = jsonutils.dumps(connection_info)
+ 2410                 bdm['volume_id'] = old_rvid
+ 2411                 bdm.save()
+ 2412 
+ 2413                 del instance.system_metadata['new_instance_volid']
+ 2414                 del instance.system_metadata['old_instance_volid']
+ 2415 
+ 2416         if not samehost:
+ 2417             self.destroy(context, instance, network_info)
+ 2418             instance['host'] = instance['launched_on']
+ 2419             instance['node'] = instance['launched_on']
+ 2420 
  2421     def finish_migration(self, context, migration, instance, disk_info,
  2422                          network_info, image_meta, resize_instance,
  2423                          block_device_info=None, power_on=True):
  2424         """Completes a resize.
  2425 
  2436                                 False otherwise
  2437         :param block_device_info: instance volume block device info
  2438         :param power_on: True if the instance should be powered on, False
  2439                          otherwise
  2440         """
- 2128         raise NotImplementedError()
- 2129 
- 2130     def confirm_migration(self, migration, instance, network_info):
+ 2441         if not resize_instance:
+ 2442             raise NotImplementedError()
+ 2443 
+ 2444         samehost = (migration['dest_node'] == migration['source_node'])
+ 2445         if samehost:
+ 2446             instance.system_metadata['old_vm_state'] = vm_states.RESIZED
+ 2447 
+ 2448         inst_type = flavor_obj.Flavor.get_by_id(
+ 2449             nova_context.get_admin_context(read_deleted='yes'),
+ 2450             instance['instance_type_id'])
+ 2451         extra_specs = inst_type['extra_specs'].copy()
+ 2452         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
+ 2453         name = instance['name']
+ 2454 
+ 2455         if disk_info:
+ 2456             bmap = block_device_info.get('block_device_mapping')
+ 2457             rootmp = instance['root_device_name']
+ 2458             for entry in bmap:
+ 2459                 if entry['mount_device'] == rootmp:
+ 2460                     mount_dev = entry['mount_device']
+ 2461                     root_ci = entry['connection_info']
+ 2462                     break
+ 2463 
+ 2464         try:
+ 2465             if samehost:
+ 2466                 metadstr = 'new_instance_type_vcpus'
+ 2467                 cpu = int(instance.system_metadata[metadstr])
+ 2468                 metadstr = 'new_instance_type_memory_mb'
+ 2469                 mem = int(instance.system_metadata[metadstr])
+ 2470                 self._set_num_cpu(name, cpu, brand)
+ 2471                 self._set_memory_cap(name, mem, brand)
+ 2472 
+ 2473                 # Add the new disk to the volume if the size of the disk
+ 2474                 # changed
+ 2475                 if disk_info:
+ 2476                     metadstr = 'new_instance_type_root_gb'
+ 2477                     rgb = int(instance.system_metadata[metadstr])
+ 2478                     self._resize_disk_migration(context, instance,
+ 2479                                                 root_ci['serial'],
+ 2480                                                 disk_info['id'],
+ 2481                                                 rgb, mount_dev)
+ 2482 
+ 2483             else:
+ 2484                 # No need to check disk_info here, because when not on the
+ 2485                 # same host a disk_info is always passed in.
+ 2486                 mount_dev = 'c1d0'
+ 2487                 root_serial = root_ci['serial']
+ 2488                 connection_info = self._resize_disk_migration(context,
+ 2489                                                               instance,
+ 2490                                                               root_serial,
+ 2491                                                               disk_info['id'],
+ 2492                                                               0, mount_dev,
+ 2493                                                               samehost)
+ 2494 
+ 2495                 self._create_config(context, instance, network_info,
+ 2496                                     connection_info, None)
+ 2497 
+ 2498                 zone = self._get_zone_by_name(name)
+ 2499                 if zone is None:
+ 2500                     raise exception.InstanceNotFound(instance_id=name)
+ 2501 
+ 2502                 zone.attach(['-x', 'initialize-hostdata'])
+ 2503 
+ 2504                 bmap = block_device_info.get('block_device_mapping')
+ 2505                 for entry in bmap:
+ 2506                     if entry['mount_device'] != rootmp:
+ 2507                         self.attach_volume(context,
+ 2508                                            entry['connection_info'], instance,
+ 2509                                            entry['mount_device'])
+ 2510 
+ 2511             if power_on:
+ 2512                 self._power_on(instance)
+ 2513 
+ 2514                 if brand == ZONE_BRAND_SOLARIS:
+ 2515                     return
+ 2516 
+ 2517                 # Toggle the autoexpand to extend the size of the rpool.
+ 2518                 # We need to sleep for a few seconds to make sure the zone
+ 2519                 # is in a state to accept the toggle.  Once bugs are fixed
+ 2520                 # around the autoexpand and the toggle is no longer needed,
+ 2521                 # or zone.boot() returns only after the zone is ready, we
+ 2522                 # can remove this hack.
+ 2523                 greenthread.sleep(15)
+ 2524                 out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
+ 2525                                          '/usr/sbin/zpool', 'set',
+ 2526                                          'autoexpand=off', 'rpool')
+ 2527                 out, err = utils.execute('/usr/sbin/zlogin', '-S', name,
+ 2528                                          '/usr/sbin/zpool', 'set',
+ 2529                                          'autoexpand=on', 'rpool')
+ 2530         except Exception:
+ 2531             # Attempt to cleanup the new zone and new volume to at least
+ 2532             # give the user a chance to recover without too many hoops
+ 2533             self._cleanup_finish_migration(context, instance, disk_info,
+ 2534                                            network_info, samehost)
+ 2535             raise
+ 2536 
+ 2537     def confirm_migration(self, context, migration, instance, network_info):
  2538         """Confirms a resize, destroying the source VM.
  2539 
  2540         :param instance: nova.objects.instance.Instance
  2541         """
- 2135         # TODO(Vek): Need to pass context in for access to auth_token
- 2136         raise NotImplementedError()
+ 2542         samehost = (migration['dest_host'] == self.get_host_ip_addr())
+ 2543         old_rvid = instance.system_metadata.get('old_instance_volid')
+ 2544         new_rvid = instance.system_metadata.get('new_instance_volid')
+ 2545         if new_rvid and old_rvid:
+ 2546             new_vname = instance['display_name'] + "-" + self._rootzpool_suffix
+ 2547             del instance.system_metadata['old_instance_volid']
+ 2548             del instance.system_metadata['new_instance_volid']
+ 2549 
+ 2550             self._volume_api.delete(context, old_rvid)
+ 2551             self._volume_api.update(context, new_rvid,
+ 2552                                     {'display_name': new_vname})
+ 2553 
+ 2554         if not samehost:
+ 2555             self.destroy(context, instance, network_info)
+ 2556 
+ 2557     def _resize_disk_migration(self, context, instance, configured,
+ 2558                                replacement, newvolumesz, mountdev,
+ 2559                                samehost=True):
+ 2560         """Handles the zone root volume switch-over or simply
+ 2561         initializes the connection for the new zone if not resizing to the
+ 2562         same host
+ 2563 
+ 2564         :param context: the context for the _resize_disk_migration
+ 2565         :param instance: nova.objects.instance.Instance being resized
+ 2566         :param configured: id of the current configured volume
+ 2567         :param replacement: id of the new volume
+ 2568         :param newvolumesz: size of the new volume
+ 2569         :param mountdev: the mount point of the device
+ 2570         :param samehost: is the resize happening on the same host
+ 2571         """
+ 2572         connector = self.get_volume_connector(instance)
+ 2573         connection_info = self._volume_api.initialize_connection(context,
+ 2574                                                                  replacement,
+ 2575                                                                  connector)
+ 2576         connection_info['serial'] = replacement
+ 2577         rootmp = instance.root_device_name
+ 2578 
+ 2579         if samehost:
+ 2580             name = instance['name']
+ 2581             zone = self._get_zone_by_name(name)
+ 2582             if zone is None:
+ 2583                 raise exception.InstanceNotFound(instance_id=name)
+ 2584 
+ 2585             # Need to detach the zone and re-attach the zone if this is a
+ 2586             # non-global zone so that the update of the rootzpool resource does
+ 2587             # not fail.
+ 2588             if zone.brand == ZONE_BRAND_SOLARIS:
+ 2589                 zone.detach()
+ 2590 
+ 2591             try:
+ 2592                 self._set_boot_device(name, connection_info, zone.brand)
+ 2593             finally:
+ 2594                 if zone.brand == ZONE_BRAND_SOLARIS:
+ 2595                     zone.attach()
+ 2596 
+ 2597         try:
+ 2598             self._volume_api.detach(context, configured)
+ 2599         except Exception:
+ 2600             LOG.error(_("Failed to detach the volume"))
+ 2601             raise
+ 2602 
+ 2603         try:
+ 2604             self._volume_api.attach(context, replacement, instance['uuid'],
+ 2605                                     rootmp)
+ 2606         except Exception:
+ 2607             LOG.error(_("Failed to attach the volume"))
+ 2608             raise
+ 2609 
+ 2610         bdmobj = objects.BlockDeviceMapping()
+ 2611         bdm = bdmobj.get_by_volume_id(context, configured)
+ 2612         bdm['connection_info'] = jsonutils.dumps(connection_info)
+ 2613         bdm['volume_id'] = replacement
+ 2614         bdm.save()
+ 2615 
+ 2616         if not samehost:
+ 2617             return connection_info
  2618 
  2619     def finish_revert_migration(self, context, instance, network_info,
  2620                                 block_device_info=None, power_on=True):
  2621         """Finish reverting a resize.
  2622 
  2626            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  2627         :param block_device_info: instance volume block device info
  2628         :param power_on: True if the instance should be powered on, False
  2629                          otherwise
  2630         """
- 2150         raise NotImplementedError()
+ 2631         # If this is not a samehost migration then we need to re-attach the
+ 2632         # original volume to the instance.  If this was processed in the
+ 2633         # initial revert handling, this work has already been done.
+ 2634         old_rvid = instance.system_metadata.get('old_instance_volid')
+ 2635         if old_rvid:
+ 2636             connector = self.get_volume_connector(instance)
+ 2637             connection_info = self._volume_api.initialize_connection(context,
+ 2638                                                                      old_rvid,
+ 2639                                                                      connector)
+ 2640 
+ 2641             new_rvid = instance.system_metadata['new_instance_volid']
+ 2642             self._volume_api.detach(context, new_rvid)
+ 2643             self._volume_api.delete(context, new_rvid)
+ 2644 
+ 2645             rootmp = instance.root_device_name
+ 2646             self._volume_api.attach(context, old_rvid, instance['uuid'],
+ 2647                                     rootmp)
+ 2648 
+ 2649             bdmobj = objects.BlockDeviceMapping()
+ 2650             bdm = bdmobj.get_by_volume_id(context, new_rvid)
+ 2651             bdm['connection_info'] = jsonutils.dumps(connection_info)
+ 2652             bdm['volume_id'] = old_rvid
+ 2653             bdm.save()
+ 2654 
+ 2655             del instance.system_metadata['new_instance_volid']
+ 2656             del instance.system_metadata['old_instance_volid']
+ 2657 
+ 2658             rootmp = instance.root_device_name
+ 2659             bmap = block_device_info.get('block_device_mapping')
+ 2660             for entry in bmap:
+ 2661                 if entry['mount_device'] != rootmp:
+ 2662                     self.attach_volume(context,
+ 2663                                        entry['connection_info'], instance,
+ 2664                                        entry['mount_device'])
+ 2665 
+ 2666         self._power_on(instance)
  2667 
  2668     def pause(self, instance):
  2669         """Pause the specified instance.
  2670 
  2671         :param instance: nova.objects.instance.Instance
  3658         """Delete any lingering instance files for an instance.
  3659 
  3660         :param instance: nova.objects.instance.Instance
  3661         :returns: True if the instance was deleted from disk, False otherwise.
  3662         """
+ 3663         LOG.debug(_("Cleaning up for instance %s"), instance['name'])
+ 3664         # Delete the zone configuration for the instance using destroy, because
+ 3665         # it will simply take care of the work, and we don't need to duplicate
+ 3666         # the code here.
+ 3667         try:
+ 3668             self.destroy(None, instance, None)
+ 3669         except Exception:
+ 3670             return False
  3671         return True
  3672 
  3673     @property
  3674     def need_legacy_block_device_info(self):
  3675         """Tell the caller if the driver requires legacy block device info.