components/openstack/nova/files/solariszones/driver.py
changeset 6854 52081f923019
parent 6458 d1b4766148c2
child 6900 7563855115a6
--- a/components/openstack/nova/files/solariszones/driver.py	(6853:cf1567491b1b)
+++ b/components/openstack/nova/files/solariszones/driver.py	(6854:52081f923019)
@@ -43,29 +43,35 @@
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 from oslo_utils import excutils
+from oslo_utils import fileutils
 from oslo_utils import strutils
+from oslo_utils import versionutils
 from passlib.hash import sha256_crypt
 
 from nova.api.metadata import password
+from nova.compute import arch
+from nova.compute import hv_type
 from nova.compute import power_state
 from nova.compute import task_states
+from nova.compute import vm_mode
 from nova.compute import vm_states
+from nova import conductor
+import nova.conf
 from nova.console import type as ctype
-from nova import conductor
 from nova import context as nova_context
 from nova import crypto
 from nova import exception
 from nova.i18n import _, _LE, _LI
 from nova.image import API as glance_api
 from nova.image import glance
 from nova.network.neutronv2 import api as neutronv2_api
 from nova import objects
 from nova.objects import flavor as flavor_obj
-from nova.openstack.common import fileutils
+from nova.objects import migrate_data as migrate_data_obj
 from nova import utils
 from nova.virt import driver
 from nova.virt import event as virtevent
 from nova.virt import hardware
 from nova.virt import images
@@ -101,13 +107,12 @@
                 default=True,
                 help='Allow kernel boot options to be set in instance '
                      'metadata.'),
 ]
 
-CONF = cfg.CONF
-CONF.register_opts(solariszones_opts)
-CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
+CONF = nova.conf.CONF
+CONF.register_opts(solariszones_opts, 'solariszones')
 LOG = logging.getLogger(__name__)
 
 # These should match the strings returned by the zone_state_str()
 # function in the (private) libzonecfg library. These values are in turn
 # returned in the 'state' string of the Solaris Zones' RAD interface by
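The driver's options now live in a [solariszones] group, so lookups elsewhere
in the file change from CONF.<option> to CONF.solariszones.<option> (visible
in the hunks below). A minimal standalone sketch of this oslo.config pattern;
the default value here is illustrative, not the driver's:

    from oslo_config import cfg

    CONF = cfg.CONF
    opts = [cfg.StrOpt('glancecache_dirname', default='/var/tmp/glancecache')]
    # Registering under a group name moves reads to the group attribute.
    CONF.register_opts(opts, 'solariszones')
    CONF([])  # parse an empty argv so reads work in a standalone script
    print(CONF.solariszones.glancecache_dirname)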
@@ -400,12 +405,12 @@
         except Exception as ex:
             if isinstance(ex, rad.client.ObjectError):
                 code = ex.get_payload().code
                 if (ignore_exists and
                         code == zonemgr.ErrorCode.RESOURCE_ALREADY_EXISTS):
-                    self.zone.setResourceProperties(zonemgr.Resource(
-                        resource, None), props)
+                    self.zone.setResourceProperties(
+                        zonemgr.Resource(resource, None), props)
                     return
             reason = zonemgr_strerror(ex)
             LOG.error(_("Unable to create new resource '%s' for instance '%s'"
                         "via zonemgr(3RAD): %s")
                       % (resource, self.zone.name, reason))
@@ -428,11 +433,10 @@
             raise
 
     def clear_resource_props(self, resource, props):
         """Clear property values of a given resource
         """
-
         try:
             self.zone.clearResourceProperties(zonemgr.Resource(resource, None),
                                               props)
         except rad.client.ObjectError as ex:
             reason = zonemgr_strerror(ex)
@@ -474,11 +478,12 @@
     """
 
     capabilities = {
         "has_imagecache": False,
         "supports_recreate": True,
-        }
+        "supports_migrate_to_same_host": False
+    }
 
     def __init__(self, virtapi):
         self.virtapi = virtapi
         self._archive_manager = None
         self._compute_event_callback = None
@@ -693,11 +698,10 @@
         or 'dedicated-cpu' resources in the configuration or whether
         there was an assigned pool in the configuration. This algorithm
         attempts to emulate what the virtual platform code does to
         determine a number of virtual CPUs to use.
         """
-
         # If a 'virtual-cpu' resource exists, use the minimum number of
         # CPUs defined there.
         ncpus = lookup_resource_property(zone, 'virtual-cpu', 'ncpus')
         if ncpus is not None:
             min = ncpus.split('-', 1)[0]
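The 'ncpus' property can hold a range such as '4-8'; the code above keeps only
the lower bound. A one-line illustration:

    ncpus = '4-8'            # sample range value for the 'ncpus' property
    assert ncpus.split('-', 1)[0] == '4'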
@@ -719,43 +723,73 @@
         # of online CPUs.
         return os.sysconf('SC_NPROCESSORS_ONLN')
 
     def _get_kstat_by_name(self, kstat_class, module, instance, name):
         """Return Kstat snapshot data via RAD as a dictionary."""
-        pattern = {
-            'class':    kstat_class,
-            'module':   module,
-            'instance': instance,
-            'name':     name
-        }
+        pattern = {}
+        if kstat_class is not None:
+            pattern.update({'class': kstat_class})
+        if module is not None:
+            pattern.update({'module': module})
+        if instance is not None:
+            pattern.update({'instance': instance})
+        if name is not None:
+            pattern.update({'name': name})
+
         try:
             self.kstat_control.update()
-            kstat_object = self.rad_connection.get_object(
+            kstat_objects = self.rad_connection.list_objects(
                 kstat.Kstat(), rad.client.ADRGlobPattern(pattern))
         except Exception as reason:
             LOG.info(_("Unable to retrieve kstat object '%s:%s:%s' of class "
                        "'%s' via kstat(3RAD): %s")
                      % (module, instance, name, kstat_class, reason))
             return None
 
-        kstat_data = {}
-        for named in kstat_object.fresh_snapshot().data.NAMED:
-            kstat_data[named.name] = getattr(named.value,
-                                             str(named.value.discriminant))
+        kstat_data = []
+        for kstat_object in kstat_objects:
+            object = self.rad_connection.get_object(kstat_object)
+            for named in object.fresh_snapshot().data.NAMED:
+                kstat_data.append(
+                    {named.name:
+                     getattr(named.value, str(named.value.discriminant))})
         return kstat_data
+
+    def _aggregate_kstat_statistic(self, kstat_data, statistic):
+        aggregate = 0
+        for ks in kstat_data:
+            value = ks.get(statistic)
+            if value is not None:
+                aggregate += value
+
+        return aggregate
+
+    def _get_kstat_statistic(self, kstat_data, statistic):
+        value = None
+        for ks in kstat_data:
+            value = ks.get(statistic)
+            if value is not None:
+                break
+
+        return value
 
     def _get_cpu_time(self, zone):
         """Return the CPU time used in nanoseconds."""
         if zone.id == -1:
             return 0
 
-        kstat_data = self._get_kstat_by_name('zones', 'cpu', str(zone.id),
-                                             'sys_zone_aggr')
+        kstat_data = self._get_kstat_by_name(
+            'zones', 'cpu', None, ''.join(('sys_zone_', str(zone.id))))
         if kstat_data is None:
             return 0
 
-        return kstat_data['cpu_nsec_kernel'] + kstat_data['cpu_nsec_user']
+        cpu_nsec_kernel = self._aggregate_kstat_statistic(kstat_data,
+                                                          'cpu_nsec_kernel')
+        cpu_nsec_user = self._aggregate_kstat_statistic(kstat_data,
+                                                        'cpu_nsec_user')
+
+        return cpu_nsec_kernel + cpu_nsec_user
 
     def get_info(self, instance):
         """Get the current status of an instance, by name (not ID!)
 
         :param instance: nova.objects.instance.Instance object
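With this change _get_kstat_by_name returns a list of single-entry
dictionaries (one per matching kstat, despite the unchanged docstring), and
_aggregate_kstat_statistic sums one named statistic across that list. A
self-contained sketch with invented sample values:

    def aggregate(kstat_data, statistic):
        # Mirrors _aggregate_kstat_statistic over the new list-of-dicts shape.
        total = 0
        for ks in kstat_data:
            value = ks.get(statistic)
            if value is not None:
                total += value
        return total

    kstat_data = [{'cpu_nsec_kernel': 120}, {'cpu_nsec_user': 80},
                  {'cpu_nsec_kernel': 30}]
    assert aggregate(kstat_data, 'cpu_nsec_kernel') == 150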
@@ -916,12 +950,12 @@
 
         :param context: security context
         :param instance: nova.objects.instance.Instance
                          This function should use the data there to guide
                          the creation of the new instance.
-        :param image_meta: image object returned by nova.image.glance that
-                           defines the image from which to boot this instance
+        :param nova.objects.ImageMeta image_meta:
+            The metadata of the image of the instance.
         :param injected_files: User files to inject into instance.
         :param admin_password: Administrator password to set in instance.
         :param bdms: block-device-mappings to use for rebuild
         :param detach_block_devices: function to detach block devices. See
             nova.compute.manager.ComputeManager:_rebuild_default_impl for
@@ -953,32 +987,18 @@
         instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
         instance.save(expected_task_state=[task_states.REBUILDING])
         root_ci = self._rebuild_block_devices(context, instance, bdms,
                                               recreate)
 
-        if root_ci is not None:
-            driver_type = root_ci['driver_volume_type']
-        else:
-            driver_type = 'local'
-
-        # If image_meta is provided then the --on-shared-storage option
-        # was not used.
-        if image_meta:
-            # If not then raise an exception.  But if this is a rebuild then
-            # the local storage is ok.
-            if driver_type in shared_storage and recreate:
-                msg = (_("Root device is on shared storage for instance '%s'.")
-                       % instance['name'])
-                raise exception.NovaException(msg)
-
-        else:
-            # So the root device is not expected to be local so we can move
-            # forward with building the zone.
+        if recreate:
+            if root_ci is not None:
+                driver_type = root_ci['driver_volume_type']
+            else:
+                driver_type = 'local'
             if driver_type not in shared_storage:
                 msg = (_("Root device is not on shared storage for instance "
                          "'%s'.") % instance['name'])
-
                 raise exception.NovaException(msg)
 
         if not recreate:
             self.destroy(context, instance, network_info, block_device_info)
             if root_ci is not None:
@@ -987,20 +1007,21 @@
 
         instance.task_state = task_states.REBUILD_SPAWNING
         instance.save(
             expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
 
+        # Instead of using a boolean for 'rebuilding' scratch data, use a
+        # string because the object will translate it to a string anyways.
         if recreate:
             extra_specs = self._get_extra_specs(instance)
 
-            instance.system_metadata['rebuilding'] = False
-            self._create_config(context, instance, network_info,
-                                root_ci, None)
+            instance.system_metadata['rebuilding'] = 'false'
+            self._create_config(context, instance, network_info, root_ci, None)
             del instance.system_metadata['evac_from']
             instance.save()
         else:
-            instance.system_metadata['rebuilding'] = True
+            instance.system_metadata['rebuilding'] = 'true'
             self.spawn(context, instance, image_meta, injected_files,
                        admin_password, network_info, block_device_info)
             self.power_off(instance)
 
         del instance.system_metadata['rebuilding']
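The new comment captures a real pitfall: system_metadata values are coerced to
strings when the object is saved, and the string 'False' is truthy, so a
stored boolean no longer tests correctly after a round trip. A minimal
illustration of the failure mode and the string-based fix:

    # What a boolean becomes after the save/load cycle:
    stored = str(False)
    assert stored == 'False' and bool(stored) is True   # truthiness is lost

    # Comparing explicitly against 'true'/'false' strings is unambiguous:
    rebuilding = 'false'
    assert (rebuilding == 'true') is False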
@@ -1038,11 +1059,11 @@
             instance['instance_type_id'])
         return flavor['extra_specs'].copy()
 
     def _fetch_image(self, context, instance):
         """Fetch an image using Glance given the instance's image_ref."""
-        glancecache_dirname = CONF.glancecache_dirname
+        glancecache_dirname = CONF.solariszones.glancecache_dirname
         fileutils.ensure_tree(glancecache_dirname)
         image = ''.join([glancecache_dirname, '/', instance['image_ref']])
         if os.path.exists(image):
             LOG.debug(_("Using existing, cached Glance image: id %s")
                       % instance['image_ref'])
@@ -1070,13 +1091,12 @@
         except Exception as ex:
             if isinstance(ex, rad.client.ObjectError):
                 reason = ex.get_payload().info
             else:
                 reason = str(ex)
-            raise exception.ImageUnacceptable(
-                image_id=instance['image_ref'],
-                reason=reason)
+            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
+                                              reason=reason)
 
         # Validate the image at this point to ensure:
         # - contains one deployable system
         deployables = ua.getArchivedSystems()
         if len(deployables) != 1:
@@ -1124,17 +1144,42 @@
             suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path']
         elif driver_type == 'iscsi':
             data = connection_info['data']
             # suri(5) format:
             #       iscsi://<host>[:<port>]/target.<IQN>,lun.<LUN>
+            # luname-only URI format for the multipathing:
+            #       iscsi://<host>[:<port>]/luname.naa.<ID>
             # Sample iSCSI connection data values:
             # target_portal: 192.168.1.244:3260
             # target_iqn: iqn.2010-10.org.openstack:volume-a89c.....
             # target_lun: 1
-            suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'],
-                                                    data['target_iqn'],
-                                                    data['target_lun'])
+            suri = None
+            if 'target_iqns' in data:
+                target = data['target_iqns'][0]
+                target_lun = data['target_luns'][0]
+                try:
+                    utils.execute('/usr/sbin/iscsiadm', 'list', 'target',
+                                  '-vS', target)
+                    out, err = utils.execute('/usr/sbin/suriadm', 'lookup-uri',
+                                             '-t', 'iscsi',
+                                             '-p', 'target=%s' % target,
+                                             '-p', 'lun=%s' % target_lun)
+                    for line in [l.strip() for l in out.splitlines()]:
+                        if "luname.naa." in line:
+                            LOG.debug(_("The found luname-only URI for the "
+                                      "LUN '%s' is '%s'.") %
+                                      (target_lun, line))
+                            suri = line
+                except processutils.ProcessExecutionError as ex:
+                    reason = ex.stderr
+                    LOG.debug(_("Failed to lookup-uri for volume '%s', lun "
+                              "'%s': '%s'.") % (target, target_lun, reason))
+
+            if suri is None:
+                suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'],
+                                                        data['target_iqn'],
+                                                        data['target_lun'])
             # TODO(npower): need to handle CHAP authentication also
         elif driver_type == 'nfs':
             data = connection_info['data']
             suri = (
                 'nfs://cinder:cinder@%s/%s' %
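A worked example of the two suri(5) forms the iSCSI branch can produce, using
the sample connection values from the comments above (the NAA identifier is
invented for illustration):

    data = {'target_portal': '192.168.1.244:3260',
            'target_iqn': 'iqn.2010-10.org.openstack:volume-a89c',
            'target_lun': 1}

    # Fallback target/lun form, built when no luname-only URI is found:
    suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'],
                                            data['target_iqn'],
                                            data['target_lun'])
    assert suri == ('iscsi://192.168.1.244:3260/'
                    'target.iqn.2010-10.org.openstack:volume-a89c,lun.1')

    # Preferred luname-only form (supports multipathing), as reported by
    # 'suriadm lookup-uri', e.g.:
    #   iscsi://192.168.1.244:3260/luname.naa.600144f0dbf8af19000056e2d4a30001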
@@ -1156,12 +1201,11 @@
             # If the volume was exported just a few seconds previously then
             # it will probably not be visible to the local adapter yet.
             # Invoke 'fcinfo remote-port' on all local HBA ports to trigger
             # a refresh.
             for wwpn in self._get_fc_wwpns():
-                utils.execute('/usr/sbin/fcinfo', 'remote-port',
-                              '-p', wwpn)
+                utils.execute('/usr/sbin/fcinfo', 'remote-port', '-p', wwpn)
 
             suri = self._lookup_fc_volume_suri(target_wwn, target_lun)
         return suri
 
     def _lookup_fc_volume_suri(self, target_wwn, target_lun):
@@ -1226,21 +1270,19 @@
                     continue
                 zc.setprop('global', prop, value)
 
     def _create_boot_volume(self, context, instance):
        """Create a (Cinder) volume service backed boot volume"""
-        boot_vol_az = CONF.boot_volume_az
-        boot_vol_type = CONF.boot_volume_type
+        boot_vol_az = CONF.solariszones.boot_volume_az
+        boot_vol_type = CONF.solariszones.boot_volume_type
         try:
             vol = self._volume_api.create(
-                context,
-                instance['root_gb'],
+                context, instance['root_gb'],
                 instance['hostname'] + "-" + self._rootzpool_suffix,
                 "Boot volume for instance '%s' (%s)"
                 % (instance['name'], instance['uuid']),
-                volume_type=boot_vol_type,
-                availability_zone=boot_vol_az)
+                volume_type=boot_vol_type, availability_zone=boot_vol_az)
             # TODO(npower): Polling is what nova/compute/manager also does when
             # creating a new volume, so we do likewise here.
             while True:
                 volume = self._volume_api.get(context, vol['id'])
                 if volume['status'] != 'creating':
@@ -1256,12 +1298,13 @@
         """Connect a (Cinder) volume service backed boot volume"""
         instance_uuid = instance['uuid']
         volume_id = volume['id']
 
         connector = self.get_volume_connector(instance)
-        connection_info = self._volume_api.initialize_connection(
-            context, volume_id, connector)
+        connection_info = self._volume_api.initialize_connection(context,
+                                                                 volume_id,
+                                                                 connector)
         connection_info['serial'] = volume_id
 
         # Check connection_info to determine if the provided volume is
         # local to this compute node. If it is, then don't use it for
         # Solaris branded zones in order to avoid a known ZFS deadlock issue
@@ -1303,12 +1346,11 @@
                 msg = _("Unsupported volume driver type '%s' can not be used "
                         "as a boot device for zones." % driver_type)
                 raise exception.InvalidVolume(reason=msg)
 
         # Volume looks OK to use. Notify Cinder of the attachment.
-        self._volume_api.attach(context, volume_id, instance_uuid,
-                                mountpoint)
+        self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)
         return connection_info
 
     def _set_boot_device(self, name, connection_info, brand):
         """Set the boot device specified by connection_info"""
         zone = self._get_zone_by_name(name)
@@ -1319,19 +1361,17 @@
 
         with ZoneConfig(zone) as zc:
             # ZOSS device configuration is different for the solaris-kz brand
             if brand == ZONE_BRAND_SOLARIS_KZ:
                 zc.zone.setResourceProperties(
-                    zonemgr.Resource(
-                        "device",
-                        [zonemgr.Property("bootpri", "0")]),
+                    zonemgr.Resource("device",
+                                     [zonemgr.Property("bootpri", "0")]),
                     [zonemgr.Property("storage", suri)])
             else:
-                zc.addresource(
-                    ROOTZPOOL_RESOURCE,
-                    [zonemgr.Property("storage", listvalue=[suri])],
-                    ignore_exists=True)
+                zc.addresource(ROOTZPOOL_RESOURCE,
+                               [zonemgr.Property("storage", listvalue=[suri])],
+                               ignore_exists=True)
 
     def _set_num_cpu(self, name, vcpus, brand):
         """Set number of VCPUs in a Solaris Zone configuration."""
         zone = self._get_zone_by_name(name)
         if zone is None:
@@ -1363,137 +1403,111 @@
             mem_resource = 'physical'
 
         with ZoneConfig(zone) as zc:
             zc.setprop('capped-memory', mem_resource, '%dM' % memory_mb)
 
+    def _ovs_add_port(self, instance, vif, port):
+        if vif['type'] == 'binding_failed':
+            LOG.error(_('Port binding has failed for VIF %s. Ensure that '
+                        'OVS agent is running and/or bridge_mappings are '
+                        'correctly configured. VM will not have network '
+                        'connectivity') % vif)
+
+        ovs_bridge = CONF.neutron.ovs_bridge
+        cmd = ['/usr/sbin/ovs-vsctl',
+               '--timeout=%s' % CONF.ovs_vsctl_timeout,
+               '--', '--if-exists', 'del-port', ovs_bridge, port,
+               '--', 'add-port', ovs_bridge, port,
+               '--', 'set', 'Interface', port,
+               'external-ids:iface-id=%s' % vif['id'],
+               'external-ids:iface-status=active',
+               'external-ids:attached-mac=%s' % vif['address'],
+               'external-ids:vm-uuid=%s' % instance['uuid']
+               ]
+        try:
+            out, err = utils.execute(*cmd)
+        except Exception as reason:
+            msg = (_("Failed to add port '%s' with MAC address '%s' to "
+                     "OVS Bridge '%s': %s")
+                   % (port, vif['address'], ovs_bridge, reason))
+            raise exception.NovaException(msg)
+        LOG.debug(_('Successfully added port %s with MAC adddress %s') %
+                  (port, vif['address']))
+
+    def _ovs_delete_port(self, port, log_warnings=False):
+        ovs_bridge = CONF.neutron.ovs_bridge
+        cmd = ['/usr/sbin/ovs-vsctl',
+               '--timeout=%s' % CONF.ovs_vsctl_timeout,
+               '--', '--if-exists', 'del-port', ovs_bridge, port]
+        try:
+            out, err = utils.execute(*cmd)
+            LOG.debug(_('Removed port %s from the OVS bridge %s') %
+                      (port, ovs_bridge))
+        except Exception as reason:
+            msg = (_("Unable to remove port '%s' from the OVS "
+                     "bridge '%s': %s") % (port, ovs_bridge, reason))
+            if log_warnings:
+                LOG.warning(msg)
+            else:
+                raise nova.exception.NovaException(msg)
+
     def _plug_vifs(self, instance, network_info):
-        # if the VIF is of EVS type (i.e., vif['type'] is ''),
-        # then nothing to do
-        if not network_info or not network_info[0]['type']:
-            LOG.debug(_("VIF is an EVS type. Nothing to plug."))
+        if not network_info:
+            LOG.debug(_("Instance has no VIF. Nothing to plug."))
             return
 
         # first find out all the anets for a given instance
         try:
             out, err = utils.execute('/usr/sbin/dladm', 'show-vnic',
                                      '-z', instance['name'],
                                      '-po', 'link,macaddress')
         except Exception as reason:
-            msg = (_("Unable to get ANETs for instance '%s': %s")
+            msg = (_("Unable to get interfaces for instance '%s': %s")
                    % (instance['name'], reason))
             raise exception.NovaException(msg)
 
         anetdict = {}
         for anet_maddr in out.strip().splitlines():
             anet, maddr = anet_maddr.strip().split(':', 1)
             maddr = maddr.replace('\\', '')
             maddr = ''.join(['%02x' % int(b, 16) for b in maddr.split(':')])
             anetdict[maddr] = anet
 
-        LOG.debug(_("List of instance %s's anets: %s") % (instance['name'],
-                                                          anetdict))
+        LOG.debug(_("List of instance %s's anets: %s")
                  % (instance['name'], anetdict))
         # we now have a list of VNICs that belong to the VM
         # we need to map the VNIC to the bridge
-        bridge = CONF.neutron.ovs_bridge
         for vif in network_info:
-            if vif['type'] == 'binding_failed':
-                LOG.error(_('Port binding has failed for VIF %s. Ensure that '
-                            'OVS agent is running and/or bridge_mappings are '
-                            'correctly configured. VM will not have network '
-                            'connectivity') % vif)
             vif_maddr = ''.join(['%02x' % int(b, 16) for b in
                                  vif['address'].split(':')])
             anet = anetdict.get(vif_maddr)
             if anet is None:
                 LOG.error(_('Failed to add port %s connected to network %s '
-                            'to instance %s') % (vif['ovs_interfaceid'],
-                                                 vif['network']['id'],
-                                                 instance['name']))
+                            'to instance %s')
+                          % (vif['ovs_interfaceid'], vif['network']['id'],
+                             instance['name']))
                 continue
-            cmd = ['/usr/sbin/ovs-vsctl',
-                   '--timeout=%s' % CONF.ovs_vsctl_timeout,
-                   '--', '--if-exists', 'del-port', bridge, anet,
-                   '--', 'add-port', bridge, anet,
-                   '--', 'set', 'Interface', anet,
-                   'external-ids:iface-id=%s' % vif['id'],
-                   'external-ids:iface-status=active',
-                   'external-ids:attached-mac=%s' % vif['address'],
-                   'external-ids:vm-uuid=%s' % instance['uuid']
-                   ]
-            try:
-                out, err = utils.execute(*cmd)
-            except Exception as reason:
-                msg = (_("Failed to add VNIC '%s' with MAC address %s to "
-                         "OVS Bridge '%s': %s") % (anet, vif['address'],
-                                                   bridge, reason))
-                raise exception.NovaException(msg)
-            LOG.debug(_('Successfully added anet %s with MAC adddress %s') %
-                      (anet, vif['address']))
+            self._ovs_add_port(instance, vif, anet)
 
     def _unplug_vifs(self, instance):
-        # Since we don't have VIF info here, we need to find if the anets
-        # were EVS based or OVS based by looking at the CONF setting. In
-        # EVS based cloud neutron.ovs_bridge setting will be set to the
-        # default value of 'br-int'.
         ovs_bridge = CONF.neutron.ovs_bridge
-        if ovs_bridge == 'br-int':
-            LOG.debug(_("Instance %s doesn't have any OVS based anets") %
-                      instance['name'])
-            return
         # remove the anets from the OVS bridge
         cmd = ['/usr/sbin/ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout,
                'list-ports', ovs_bridge]
         try:
             out, err = utils.execute(*cmd)
         except Exception as reason:
-            msg = (_("Unable to get ANETs for instance '%s': %s")
+            msg = (_("Unable to get interfaces for instance '%s': %s")
                    % (instance['name'], reason))
             raise exception.NovaException(msg)
 
         for port in out.strip().splitlines():
             if port.split('/')[0] != instance['name']:
                 continue
-            cmd = ['/usr/sbin/ovs-vsctl',
-                   '--timeout=%s' % CONF.ovs_vsctl_timeout,
-                   '--', '--if-exists', 'del-port', ovs_bridge, port]
-            try:
-                out, err = utils.execute(*cmd)
-                LOG.debug(_('Removed port %s from the OVS bridge %s') %
-                          (port, ovs_bridge))
-            except Exception as reason:
-                LOG.warning(_("Unable to remove port %s from the OVS "
-                              "bridge %s: %s") % (port, ovs_bridge, reason))
-
-    def _set_evs_info(self, zone, brand, vifid, vif):
-        vport_uuid = vif['id']
-        evs_uuid = vif['network']['id']
-        with ZoneConfig(zone) as zc:
-            if vifid == 0:
-                tenant_id = vif['network']['meta']['tenant_id']
-                zc.setprop('global', 'tenant', tenant_id)
-                zc.setprop('anet', 'configure-allowed-address', 'false')
-                zc.setprop('anet', 'evs', evs_uuid)
-                zc.setprop('anet', 'vport', vport_uuid)
-            else:
-                zc.addresource(
-                    'anet',
-                    [zonemgr.Property('configure-allowed-address',
-                                      'false'),
-                     zonemgr.Property('evs', evs_uuid),
-                     zonemgr.Property('vport', vport_uuid)])
-
-            prop_filter = [zonemgr.Property('vport', vport_uuid)]
-            if brand == ZONE_BRAND_SOLARIS:
-                anetname = lookup_resource_property(zc.zone, 'anet',
-                                                    'linkname', prop_filter)
-            else:
-                anetid = lookup_resource_property(zc.zone, 'anet', 'id',
-                                                  prop_filter)
-                anetname = 'net%s' % anetid
-        return anetname
-
-    def _set_ovs_info(self, context, zone, brand, vifid, vif):
+            self._ovs_delete_port(port, log_warnings=True)
+
+    def _set_ovs_info(self, context, zone, brand, first_anet, vif):
         # Need to be admin to retrieve provider:network_type attribute
         network_plugin = neutronv2_api.get_client(context, admin=True)
         network = network_plugin.show_network(
             vif['network']['id'])['network']
         network_type = network['provider:network_type']
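Both new helpers drive ovs-vsctl through a single invocation in which '--'
separates sub-commands, so the delete-if-exists, add-port, and set-Interface
steps run as one OVSDB transaction. A sketch of the equivalent command for a
hypothetical port (every identifier below is a placeholder):

    port = 'instance-00000001/net0'
    cmd = ['/usr/sbin/ovs-vsctl', '--timeout=120',
           '--', '--if-exists', 'del-port', 'br-int', port,
           '--', 'add-port', 'br-int', port,
           '--', 'set', 'Interface', port,
           'external-ids:iface-id=6b9bc0b9-0000-0000-0000-000000000001',
           'external-ids:iface-status=active',
           'external-ids:attached-mac=fa:16:3e:00:00:01',
           'external-ids:vm-uuid=8c5a0000-0000-0000-0000-000000000002']
    print(' '.join(cmd))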
@@ -1510,29 +1524,29 @@
                 raise
 
             other_config = results[0]['other_config']
             if not other_config:
                 msg = (_("'other_config' column in 'Open_vSwitch' OVSDB table "
-                         "is not configured. Please configure it so that "
+                         "is not configured. Please configure it so that the "
                          "lower-link can be determined for the instance's "
-                         "anet"))
+                         "interface."))
                 LOG.error(msg)
                 raise exception.NovaException(msg)
             bridge_mappings = other_config.get('bridge_mappings')
             if not bridge_mappings:
-                msg = (_("'bridge_mappings' info is not set in 'other_config' "
-                         "column of 'Open_vSwitch' OVSDB table. Please "
-                         "configure it so that lower-link can be determined "
-                         "for the instance's anet"))
+                msg = (_("'bridge_mappings' info is not set in the "
+                         "'other_config' column of 'Open_vSwitch' OVSDB "
+                         "table. Please configure it so that the lower-link "
+                         "can be determined for the instance's interface."))
                 LOG.error(msg)
                 raise exception.NovaException(msg)
             for bridge_mapping in bridge_mappings.split(','):
                 if physical_network in bridge_mapping:
                     lower_link = bridge_mapping.split(':')[1]
                     break
             if not lower_link:
-                msg = (_("Failed to determine the lower_link for vif '%s'") %
+                msg = (_("Failed to determine the lower_link for vif '%s'.") %
                        (vif))
                 LOG.error(msg)
                 raise exception.NovaException(msg)
         else:
             # TYPE_GRE and TYPE_LOCAL
@@ -1540,11 +1554,11 @@
             LOG.error(msg)
             raise exception.NovaException(msg)
 
         mtu = network['mtu']
         with ZoneConfig(zone) as zc:
-            if vifid == 0:
+            if first_anet:
                 zc.setprop('anet', 'lower-link', lower_link)
                 zc.setprop('anet', 'configure-allowed-address', 'false')
                 zc.setprop('anet', 'mac-address', vif['address'])
                 if mtu > 0:
                     zc.setprop('anet', 'mtu', str(mtu))
@@ -1584,11 +1598,10 @@
                 return
 
         for vifid, vif in enumerate(network_info):
             LOG.debug("%s", jsonutils.dumps(vif, indent=5))
 
-            # get all the info common to both EVS or OVS based VIF
             ip = vif['network']['subnets'][0]['ips'][0]['address']
             cidr = vif['network']['subnets'][0]['cidr']
             ip_cidr = "%s/%s" % (ip, cidr.split('/')[1])
             ip_version = vif['network']['subnets'][0]['version']
             dhcp_server = \
  1599             nameservers = []
  1612             nameservers = []
  1600             for dns in dns_list:
  1613             for dns in dns_list:
  1601                 if dns['type'] == 'dns':
  1614                 if dns['type'] == 'dns':
  1602                     nameservers.append(dns['address'])
  1615                     nameservers.append(dns['address'])
  1603 
  1616 
  1604             # for EVS based VIFs the type is empty since EVS plugin
  1617             anetname = self._set_ovs_info(context, zone, brand, vifid == 0,
  1605             # doesn't support portbinding extension
  1618                                           vif)
  1606             if not vif['type']:
       
  1607                 anetname = self._set_evs_info(zone, brand, vifid, vif)
       
  1608             else:
       
  1609                 anetname = self._set_ovs_info(context, zone, brand, vifid, vif)
       
  1610 
  1619 
  1611             # create the required sysconfig file (or skip if this is part of a
  1620             # create the required sysconfig file (or skip if this is part of a
  1612             # resize or evacuate process)
  1621             # resize or evacuate process)
  1613             tstate = instance['task_state']
  1622             tstate = instance['task_state']
  1614             if tstate not in [task_states.RESIZE_FINISH,
  1623             if tstate not in [task_states.RESIZE_FINISH,
  1615                               task_states.RESIZE_REVERTING,
  1624                               task_states.RESIZE_REVERTING,
  1616                               task_states.RESIZE_MIGRATING,
  1625                               task_states.RESIZE_MIGRATING,
  1617                               task_states.REBUILD_SPAWNING] or \
  1626                               task_states.REBUILD_SPAWNING] or \
  1618                 (tstate == task_states.REBUILD_SPAWNING and
  1627                 (tstate == task_states.REBUILD_SPAWNING and
  1619                  instance.system_metadata['rebuilding']):
  1628                  instance.system_metadata['rebuilding'] == 'true'):
  1620                 if enable_dhcp:
  1629                 if enable_dhcp:
  1621                     tree = sysconfig.create_ncp_defaultfixed(
  1630                     tree = sysconfig.create_ncp_defaultfixed('dhcp',
  1622                         'dhcp', anetname, vifid, ip_version)
  1631                                                              anetname, vifid,
       
  1632                                                              ip_version)
  1623                 else:
  1633                 else:
  1624                     tree = sysconfig.create_ncp_defaultfixed(
  1634                     host_routes = vif['network']['subnets'][0]['routes']
  1625                         'static', anetname, vifid, ip_version, ip_cidr, route,
  1635                     tree = sysconfig.create_ncp_defaultfixed('static',
  1626                         nameservers)
  1636                                                              anetname, vifid,
       
  1637                                                              ip_version,
       
  1638                                                              ip_cidr, route,
       
  1639                                                              nameservers,
       
  1640                                                              host_routes)
  1627 
  1641 
  1628                 fp = os.path.join(sc_dir, 'zone-network-%d.xml' % vifid)
  1642                 fp = os.path.join(sc_dir, 'zone-network-%d.xml' % vifid)
  1629                 sysconfig.create_sc_profile(fp, tree)
  1643                 sysconfig.create_sc_profile(fp, tree)
  1630 
  1644 
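        # Illustrative result of the per-VIF configuration above (interface
        # names and addresses hypothetical): one SMF system-configuration
        # profile is written into sc_dir per VIF, e.g.
        #
        #     <sc_dir>/zone-network-0.xml   # net0, static 192.168.0.10/24
        #     <sc_dir>/zone-network-1.xml   # net1, DHCP
        #
        # and each profile configures the matching anet inside the zone on
        # first boot.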
  1631     def _set_suspend(self, instance):
  1645     def _set_suspend(self, instance):
  1634         name = instance['name']
  1648         name = instance['name']
  1635         zone = self._get_zone_by_name(name)
  1649         zone = self._get_zone_by_name(name)
  1636         if zone is None:
  1650         if zone is None:
  1637             raise exception.InstanceNotFound(instance_id=name)
  1651             raise exception.InstanceNotFound(instance_id=name)
  1638 
  1652 
  1639         path = os.path.join(CONF.zones_suspend_path, '%{zonename}')
  1653         path = os.path.join(CONF.solariszones.zones_suspend_path,
       
  1654                             '%{zonename}')
  1640         with ZoneConfig(zone) as zc:
  1655         with ZoneConfig(zone) as zc:
  1641             zc.addresource('suspend', [zonemgr.Property('path', path)])
  1656             zc.addresource('suspend', [zonemgr.Property('path', path)])
  1642 
  1657 
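    # For reference, the suspend resource added by _set_suspend() above
    # corresponds roughly to these zonecfg(1M) commands (illustrative):
    #
    #     add suspend
    #     set path=<zones_suspend_path>/%{zonename}
    #     end
    #
    # The %{zonename} token is expanded per zone, so each zone suspends to
    # its own image under CONF.solariszones.zones_suspend_path.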
  1643     def _verify_sysconfig(self, sc_dir, instance, admin_password=None):
  1658     def _verify_sysconfig(self, sc_dir, instance, admin_password=None):
  1644         """verify the SC profile(s) passed in contain an entry for
  1659         """verify the SC profile(s) passed in contain an entry for
  1735         if tstate not in [task_states.RESIZE_FINISH,
  1750         if tstate not in [task_states.RESIZE_FINISH,
  1736                           task_states.RESIZE_REVERTING,
  1751                           task_states.RESIZE_REVERTING,
  1737                           task_states.RESIZE_MIGRATING,
  1752                           task_states.RESIZE_MIGRATING,
  1738                           task_states.REBUILD_SPAWNING] or \
  1753                           task_states.REBUILD_SPAWNING] or \
  1739             (tstate == task_states.REBUILD_SPAWNING and
  1754             (tstate == task_states.REBUILD_SPAWNING and
  1740              instance.system_metadata['rebuilding']):
  1755              instance.system_metadata['rebuilding'] == 'true'):
  1741             sc_profile = extra_specs.get('install:sc_profile')
  1756             sc_profile = extra_specs.get('install:sc_profile')
  1742             if sc_profile is not None:
  1757             if sc_profile is not None:
  1743                 if os.path.isfile(sc_profile):
  1758                 if os.path.isfile(sc_profile):
  1744                     shutil.copy(sc_profile, sc_dir)
  1759                     shutil.copy(sc_profile, sc_dir)
  1745                 elif os.path.isdir(sc_profile):
  1760                 elif os.path.isdir(sc_profile):
  1746                     shutil.copytree(sc_profile, os.path.join(sc_dir,
  1761                     shutil.copytree(sc_profile,
  1747                                     'sysconfig'))
  1762                                     os.path.join(sc_dir, 'sysconfig'))
  1748 
  1763 
  1749             self._verify_sysconfig(sc_dir, instance, admin_password)
  1764             self._verify_sysconfig(sc_dir, instance, admin_password)
  1750 
  1765 
  1751         LOG.debug(_("Creating zone configuration for '%s' (%s)")
  1766         LOG.debug(_("Creating zone configuration for '%s' (%s)")
  1752                   % (name, instance['display_name']))
  1767                   % (name, instance['display_name']))
  1787             raise exception.ConsoleTypeUnavailable(console_type='vnc')
  1802             raise exception.ConsoleTypeUnavailable(console_type='vnc')
  1788 
  1803 
  1789         name = instance['name']
  1804         name = instance['name']
  1790         # TODO(npower): investigate using RAD instead of CLI invocation
  1805         # TODO(npower): investigate using RAD instead of CLI invocation
  1791         try:
  1806         try:
  1792             out, err = utils.execute('/usr/sbin/svccfg', '-s',
  1807             out, err = utils.execute('/usr/sbin/svccfg',
  1793                                      VNC_CONSOLE_BASE_FMRI, 'add', name)
  1808                                      '-s', VNC_CONSOLE_BASE_FMRI, 'add', name)
  1794         except processutils.ProcessExecutionError as ex:
  1809         except processutils.ProcessExecutionError as ex:
  1795             if self._has_vnc_console_service(instance):
  1810             if self._has_vnc_console_service(instance):
  1796                 LOG.debug(_("Ignoring attempt to create existing zone VNC "
  1811                 LOG.debug(_("Ignoring attempt to create existing zone VNC "
  1797                             "console SMF service for instance '%s'") % name)
  1812                             "console SMF service for instance '%s'") % name)
  1798                 return
  1813                 return
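        # Note: 'svccfg -s <base-fmri> add <name>' creates a per-zone SMF
        # instance of the console service. Its FMRI is later composed as
        #
        #     console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
        #
        # (see _delete/_disable_vnc_console_service below), which is what
        # lets each zone's console service be enabled, disabled and deleted
        # independently.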
  1806         """Delete a VNC console SMF service for a Solaris Zone"""
  1821         """Delete a VNC console SMF service for a Solaris Zone"""
  1807         name = instance['name']
  1822         name = instance['name']
  1808         self._disable_vnc_console_service(instance)
  1823         self._disable_vnc_console_service(instance)
  1809         # TODO(npower): investigate using RAD instead of CLI invocation
  1824         # TODO(npower): investigate using RAD instead of CLI invocation
  1810         try:
  1825         try:
  1811             out, err = utils.execute('/usr/sbin/svccfg', '-s',
  1826             out, err = utils.execute('/usr/sbin/svccfg',
  1812                                      VNC_CONSOLE_BASE_FMRI, 'delete', name)
  1827                                      '-s', VNC_CONSOLE_BASE_FMRI, 'delete',
       
  1828                                      name)
  1813         except processutils.ProcessExecutionError as ex:
  1829         except processutils.ProcessExecutionError as ex:
  1814             if not self._has_vnc_console_service(instance):
  1830             if not self._has_vnc_console_service(instance):
  1815                 LOG.debug(_("Ignoring attempt to delete a non-existent zone "
  1831                 LOG.debug(_("Ignoring attempt to delete a non-existent zone "
  1816                             "VNC console SMF service for instance '%s'")
  1832                             "VNC console SMF service for instance '%s'")
  1817                           % name)
  1833                           % name)
  1895                         "console SMF service for instance '%s'") % name)
  1911                         "console SMF service for instance '%s'") % name)
  1896             return
  1912             return
  1897         console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
  1913         console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name
  1898         # TODO(npower): investigate using RAD instead of CLI invocation
  1914         # TODO(npower): investigate using RAD instead of CLI invocation
  1899         try:
  1915         try:
  1900             out, err = utils.execute('/usr/sbin/svcadm', 'disable', '-s',
  1916             out, err = utils.execute('/usr/sbin/svcadm', 'disable',
  1901                                      console_fmri)
  1917                                      '-s', console_fmri)
  1902         except processutils.ProcessExecutionError as ex:
  1918         except processutils.ProcessExecutionError as ex:
  1903             reason = ex.stderr
  1919             reason = ex.stderr
  1904             LOG.error(_("Unable to disable zone VNC console SMF service "
  1920             LOG.error(_("Unable to disable zone VNC console SMF service "
  1905                         "'%s': %s") % (console_fmri, reason))
  1921                         "'%s': %s") % (console_fmri, reason))
  1906         # The console service sets a SMF instance property for the port
  1922         # The console service sets a SMF instance property for the port
  1990         # Attempt to update the zones hostid in the instance data, to catch
  2006         # Attempt to update the zones hostid in the instance data, to catch
  1991         # those instances that might have been created without a hostid stored.
  2007         # those instances that might have been created without a hostid stored.
  1992         self._set_instance_metahostid(instance)
  2008         self._set_instance_metahostid(instance)
  1993 
  2009 
  1994         bootargs = []
  2010         bootargs = []
  1995         if CONF.solariszones_boot_options:
  2011         if CONF.solariszones.solariszones_boot_options:
  1996             reset_bootargs = False
  2012             reset_bootargs = False
  1997             persistent = 'False'
  2013             persistent = 'False'
  1998 
  2014 
  1999             # Get any bootargs already set in the zone
  2015             # Get any bootargs already set in the zone
  2000             cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
  2016             cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
  2002             # Get any bootargs set in the instance metadata by the user
  2018             # Get any bootargs set in the instance metadata by the user
  2003             meta_bootargs = instance.metadata.get('bootargs')
  2019             meta_bootargs = instance.metadata.get('bootargs')
  2004 
  2020 
  2005             if meta_bootargs:
  2021             if meta_bootargs:
  2006                 bootargs = ['--', str(meta_bootargs)]
  2022                 bootargs = ['--', str(meta_bootargs)]
  2007                 persistent = str(instance.metadata.get('bootargs_persist',
  2023                 persistent = str(
  2008                                                        'False'))
  2024                     instance.metadata.get('bootargs_persist', 'False'))
  2009                 if cur_bootargs is not None and meta_bootargs != cur_bootargs:
  2025                 if cur_bootargs is not None and meta_bootargs != cur_bootargs:
  2010                     with ZoneConfig(zone) as zc:
  2026                     with ZoneConfig(zone) as zc:
  2011                         reset_bootargs = True
  2027                         reset_bootargs = True
  2012                         # Temporarily clear bootargs in zone config
  2028                         # Temporarily clear bootargs in zone config
  2013                         zc.clear_resource_props('global', ['bootargs'])
  2029                         zc.clear_resource_props('global', ['bootargs'])
  2019             reason = zonemgr_strerror(ex)
  2035             reason = zonemgr_strerror(ex)
  2020             LOG.error(_("Unable to power on instance '%s' via zonemgr(3RAD): "
  2036             LOG.error(_("Unable to power on instance '%s' via zonemgr(3RAD): "
  2021                         "%s") % (name, reason))
  2037                         "%s") % (name, reason))
  2022             raise exception.InstancePowerOnFailure(reason=reason)
  2038             raise exception.InstancePowerOnFailure(reason=reason)
  2023         finally:
  2039         finally:
  2024             if CONF.solariszones_boot_options:
  2040             if CONF.solariszones.solariszones_boot_options:
  2025                 if meta_bootargs and persistent.lower() == 'false':
  2041                 if meta_bootargs and persistent.lower() == 'false':
  2026                     # We have consumed the metadata bootargs and
  2042                     # We have consumed the metadata bootargs and
  2027                     # the user asked for them not to be persistent so
  2043                     # the user asked for them not to be persistent so
  2028                     # clear them out now.
  2044                     # clear them out now.
  2029                     instance.metadata.pop('bootargs', None)
  2045                     instance.metadata.pop('bootargs', None)
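            # Sketch of the bootargs lifecycle handled above, with
            # hypothetical metadata values:
            #
            #     instance.metadata = {'bootargs': '-m verbose',
            #                          'bootargs_persist': 'False'}
            #
            # Any conflicting bootargs in the zone config are cleared first
            # (reset_bootargs notes that they must be put back), the zone is
            # booted with ['--', '-m verbose'], and because persistence was
            # not requested the 'bootargs' key is popped afterwards.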
  2080 
  2096 
  2081         :param context: security context
  2097         :param context: security context
  2082         :param instance: nova.objects.instance.Instance
  2098         :param instance: nova.objects.instance.Instance
  2083                          This function should use the data there to guide
  2099                          This function should use the data there to guide
  2084                          the creation of the new instance.
  2100                          the creation of the new instance.
  2085         :param image_meta: image object returned by nova.image.glance that
  2101         :param nova.objects.ImageMeta image_meta:
  2086                            defines the image from which to boot this instance
  2102             The metadata of the image of the instance.
  2087         :param injected_files: User files to inject into instance.
  2103         :param injected_files: User files to inject into instance.
  2088         :param admin_password: Administrator password to set in instance.
  2104         :param admin_password: Administrator password to set in instance.
  2089         :param network_info:
  2105         :param network_info:
  2090            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  2106            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  2091         :param block_device_info: Information about block devices to be
  2107         :param block_device_info: Information about block devices to be
  2160             shutil.rmtree(sc_dir)
  2176             shutil.rmtree(sc_dir)
  2161 
  2177 
  2162         if connection_info is not None:
  2178         if connection_info is not None:
  2163             bdm_obj = objects.BlockDeviceMappingList()
  2179             bdm_obj = objects.BlockDeviceMappingList()
  2164             # there's only one bdm for this instance at this point
  2180             # there's only one bdm for this instance at this point
  2165             bdm = bdm_obj.get_by_instance_uuid(context,
  2181             bdm = bdm_obj.get_by_instance_uuid(
  2166                                                instance.uuid).objects[0]
  2182                 context, instance.uuid).objects[0]
  2167 
  2183 
  2168             # update the required attributes
  2184             # update the required attributes
  2169             bdm['connection_info'] = jsonutils.dumps(connection_info)
  2185             bdm['connection_info'] = jsonutils.dumps(connection_info)
  2170             bdm['source_type'] = 'volume'
  2186             bdm['source_type'] = 'volume'
  2171             bdm['destination_type'] = 'volume'
  2187             bdm['destination_type'] = 'volume'
  2364         if self._get_state(zone) == power_state.SHUTDOWN:
  2380         if self._get_state(zone) == power_state.SHUTDOWN:
  2365             self._power_on(instance, network_info)
  2381             self._power_on(instance, network_info)
  2366             return
  2382             return
  2367 
  2383 
  2368         bootargs = []
  2384         bootargs = []
  2369         if CONF.solariszones_boot_options:
  2385         if CONF.solariszones.solariszones_boot_options:
  2370             reset_bootargs = False
  2386             reset_bootargs = False
  2371             persistent = 'False'
  2387             persistent = 'False'
  2372 
  2388 
  2373             # Get any bootargs already set in the zone
  2389             # Get any bootargs already set in the zone
  2374             cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
  2390             cur_bootargs = lookup_resource_property(zone, 'global', 'bootargs')
  2376             # Get any bootargs set in the instance metadata by the user
  2392             # Get any bootargs set in the instance metadata by the user
  2377             meta_bootargs = instance.metadata.get('bootargs')
  2393             meta_bootargs = instance.metadata.get('bootargs')
  2378 
  2394 
  2379             if meta_bootargs:
  2395             if meta_bootargs:
  2380                 bootargs = ['--', str(meta_bootargs)]
  2396                 bootargs = ['--', str(meta_bootargs)]
  2381                 persistent = str(instance.metadata.get('bootargs_persist',
  2397                 persistent = str(
  2382                                                        'False'))
  2398                     instance.metadata.get('bootargs_persist', 'False'))
  2383                 if cur_bootargs is not None and meta_bootargs != cur_bootargs:
  2399                 if cur_bootargs is not None and meta_bootargs != cur_bootargs:
  2384                     with ZoneConfig(zone) as zc:
  2400                     with ZoneConfig(zone) as zc:
  2385                         reset_bootargs = True
  2401                         reset_bootargs = True
  2386                         # Temporarily clear bootargs in zone config
  2402                         # Temporarily clear bootargs in zone config
  2387                         zc.clear_resource_props('global', ['bootargs'])
  2403                         zc.clear_resource_props('global', ['bootargs'])
  2398             reason = zonemgr_strerror(ex)
  2414             reason = zonemgr_strerror(ex)
  2399             LOG.error(_("Unable to reboot instance '%s' via zonemgr(3RAD): %s")
  2415             LOG.error(_("Unable to reboot instance '%s' via zonemgr(3RAD): %s")
  2400                       % (name, reason))
  2416                       % (name, reason))
  2401             raise exception.InstanceRebootFailure(reason=reason)
  2417             raise exception.InstanceRebootFailure(reason=reason)
  2402         finally:
  2418         finally:
  2403             if CONF.solariszones_boot_options:
  2419             if CONF.solariszones.solariszones_boot_options:
  2404                 if meta_bootargs and persistent.lower() == 'false':
  2420                 if meta_bootargs and persistent.lower() == 'false':
  2405                     # We have consumed the metadata bootargs and
  2421                     # We have consumed the metadata bootargs and
  2406                     # the user asked for them not to be persistent so
  2422                     # the user asked for them not to be persistent so
  2407                     # clear them out now.
  2423                     # clear them out now.
  2408                     instance.metadata.pop('bootargs', None)
  2424                     instance.metadata.pop('bootargs', None)
  2500             reason = ex.stderr
  2516             reason = ex.stderr
  2501             LOG.error(_("Unable to refresh zone VNC console SMF service "
  2517             LOG.error(_("Unable to refresh zone VNC console SMF service "
  2502                         "'%s': %s" % (console_fmri, reason)))
  2518                         "'%s': %s" % (console_fmri, reason)))
  2503             raise
  2519             raise
  2504 
  2520 
  2505         host = CONF.vncserver_proxyclient_address
  2521         host = CONF.vnc.vncserver_proxyclient_address
  2506         try:
  2522         try:
  2507             out, err = utils.execute('/usr/bin/svcprop', '-p', 'vnc/port',
  2523             out, err = utils.execute('/usr/bin/svcprop', '-p', 'vnc/port',
  2508                                      console_fmri)
  2524                                      console_fmri)
  2509             port = int(out.strip())
  2525             port = int(out.strip())
  2510             return ctype.ConsoleVNC(host=host,
  2526             return ctype.ConsoleVNC(host=host, port=port,
  2511                                     port=port,
       
  2512                                     internal_access_path=None)
  2527                                     internal_access_path=None)
  2513         except processutils.ProcessExecutionError as ex:
  2528         except processutils.ProcessExecutionError as ex:
  2514             reason = ex.stderr
  2529             reason = ex.stderr
  2515             LOG.error(_("Unable to read VNC console port from zone VNC "
  2530             LOG.error(_("Unable to read VNC console port from zone VNC "
  2516                         "console SMF service '%s': %s"
  2531                         "console SMF service '%s': %s"
  2544 
  2559 
  2545         :returns an instance of console.type.ConsoleSerial
  2560         :returns an instance of console.type.ConsoleSerial
  2546         """
  2561         """
  2547         raise NotImplementedError()
  2562         raise NotImplementedError()
  2548 
  2563 
       
  2564     def get_mks_console(self, context, instance):
       
  2565         """Get connection info for a MKS console.
       
  2566 
       
  2567         :param context: security context
       
  2568         :param instance: nova.objects.instance.Instance
       
  2569 
       
  2570         :returns an instance of console.type.ConsoleMKS
       
  2571         """
       
  2572         raise NotImplementedError()
       
  2573 
  2549     def _get_zone_diagnostics(self, zone):
  2574     def _get_zone_diagnostics(self, zone):
  2550         """Return data about Solaris Zone diagnostics."""
  2575         """Return data about Solaris Zone diagnostics."""
  2551         if zone.id == -1:
  2576         if zone.id == -1:
  2552             return None
  2577             return None
  2553 
  2578 
  2554         diagnostics = {}
  2579         diagnostics = {}
  2555         id = str(zone.id)
  2580         zone_id = str(zone.id)
  2556 
  2581 
  2557         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', id,
  2582         kstat_data = self._get_kstat_by_name(
  2558                                              ''.join(('lockedmem_zone_', id)))
  2583             'zone_caps', 'caps', zone_id,
       
  2584             ''.join(('lockedmem_zone_', zone_id)))
  2559         if kstat_data is not None:
  2585         if kstat_data is not None:
  2560             diagnostics['lockedmem'] = kstat_data['usage']
  2586             diagnostics['lockedmem'] = self._get_kstat_statistic(kstat_data,
  2561 
  2587                                                                  'usage')
  2562         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', id,
  2588 
  2563                                              ''.join(('nprocs_zone_', id)))
  2589         kstat_data = self._get_kstat_by_name(
       
  2590             'zone_caps', 'caps', zone_id, ''.join(('nprocs_zone_', zone_id)))
  2564         if kstat_data is not None:
  2591         if kstat_data is not None:
  2565             diagnostics['nprocs'] = kstat_data['usage']
  2592             diagnostics['nprocs'] = self._get_kstat_statistic(kstat_data,
  2566 
  2593                                                               'usage')
  2567         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', id,
  2594 
  2568                                              ''.join(('swapresv_zone_', id)))
  2595         kstat_data = self._get_kstat_by_name(
       
  2596             'zone_caps', 'caps', zone_id, ''.join(('swapresv_zone_', zone_id)))
  2569         if kstat_data is not None:
  2597         if kstat_data is not None:
  2570             diagnostics['swapresv'] = kstat_data['usage']
  2598             diagnostics['swapresv'] = self._get_kstat_statistic(kstat_data,
  2571 
  2599                                                                 'usage')
  2572         kstat_data = self._get_kstat_by_name('zones', 'cpu', id,
  2600 
  2573                                              'sys_zone_aggr')
  2601         kstat_data = self._get_kstat_by_name('zones', 'cpu', None,
       
  2602                                              ''.join(('sys_zone_', zone_id)))
  2574         if kstat_data is not None:
  2603         if kstat_data is not None:
  2575             for key in kstat_data.keys():
  2604             for ks in kstat_data:
  2576                 if key not in ('class', 'crtime', 'snaptime'):
  2605                 key = ks.keys()[0]
  2577                     diagnostics[key] = kstat_data[key]
  2606                 if key in ('class', 'crtime', 'snaptime', 'zonename'):
       
  2607                     continue
       
  2608                 if key.endswith('_cur'):
       
  2609                     continue
       
  2610                 if diagnostics.get(key) is None:
       
  2611                     diagnostics[key] = ks[key]
       
  2612                 else:
       
  2613                     diagnostics[key] += ks[key]
  2578         return diagnostics
  2614         return diagnostics
  2579 
  2615 
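    # A minimal sketch of the CPU-kstat aggregation performed in
    # _get_zone_diagnostics() above, assuming _get_kstat_by_name() returns a
    # list of single-key dicts (statistic names and values hypothetical):
    #
    #     kstat_data = [{'usage': 10}, {'usage': 32}, {'crtime': 123}]
    #     totals = {}
    #     for ks in kstat_data:
    #         key = ks.keys()[0]        # Python 2: keys() is indexable
    #         if key in ('class', 'crtime', 'snaptime', 'zonename'):
    #             continue              # skip kstat metadata fields
    #         if key.endswith('_cur'):
    #             continue              # skip instantaneous '_cur' gauges
    #         totals[key] = totals.get(key, 0) + ks[key]
    #     # totals == {'usage': 42}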
  2580     def get_diagnostics(self, instance):
  2616     def get_diagnostics(self, instance):
  2581         """Return data about VM diagnostics.
  2617         """Return diagnostics data about the given instance.
  2582 
  2618 
  2583         :param instance: nova.objects.instance.Instance
  2619         :param nova.objects.instance.Instance instance:
       
  2620             The instance to which the diagnostic data should be returned.
       
  2621 
       
  2622         :return: Largely overlaps with the return value of the newer

  2623             interface :func:`get_instance_diagnostics`
       
  2624         :rtype: dict
  2584         """
  2625         """
  2585         # TODO(Vek): Need to pass context in for access to auth_token
  2626         # TODO(Vek): Need to pass context in for access to auth_token
  2586         name = instance['name']
  2627         name = instance['name']
  2587         zone = self._get_zone_by_name(name)
  2628         zone = self._get_zone_by_name(name)
  2588         if zone is None:
  2629         if zone is None:
  2589             raise exception.InstanceNotFound(instance_id=name)
  2630             raise exception.InstanceNotFound(instance_id=name)
  2590         return self._get_zone_diagnostics(zone)
  2631         return self._get_zone_diagnostics(zone)
  2591 
  2632 
  2592     def get_instance_diagnostics(self, instance):
  2633     def get_instance_diagnostics(self, instance):
  2593         """Return data about VM diagnostics.
  2634         """Return diagnostics data about the given instance.
  2594 
  2635 
  2595         :param instance: nova.objects.instance.Instance
  2636         :param nova.objects.instance.Instance instance:
       
  2637             The instance to which the diagnostic data should be returned.
       
  2638 
       
  2639         :return: Largely overlaps with the return value of the older

  2640             interface :func:`get_diagnostics`
       
  2641         :rtype: nova.virt.diagnostics.Diagnostics
  2596         """
  2642         """
  2597         raise NotImplementedError()
  2643         raise NotImplementedError()
  2598 
  2644 
  2599     def get_all_bw_counters(self, instances):
  2645     def get_all_bw_counters(self, instances):
  2600         """Return bandwidth usage counters for each interface on each
  2646         """Return bandwidth usage counters for each interface on each
  2707 
  2753 
  2708                 raise
  2754                 raise
  2709 
  2755 
  2710     def swap_volume(self, old_connection_info, new_connection_info,
  2756     def swap_volume(self, old_connection_info, new_connection_info,
  2711                     instance, mountpoint, resize_to):
  2757                     instance, mountpoint, resize_to):
  2712         """Replace the disk attached to the instance.
  2758         """Replace the volume attached to the given `instance`.
  2713 
  2759 
  2714         :param instance: nova.objects.instance.Instance
  2760         :param dict old_connection_info:
  2715         :param resize_to: This parameter is used to indicate the new volume
  2761             The volume for this connection gets detached from the given
  2716                           size when the new volume lager than old volume.
  2762             `instance`.
  2717                           And the units is Gigabyte.
  2763         :param dict new_connection_info:
       
  2764             The volume for this connection gets attached to the given
       
  2765             'instance'.
       
  2766         :param nova.objects.instance.Instance instance:
       
  2767             The instance whose volume gets replaced by another one.
       
  2768         :param str mountpoint:
       
  2769             The mountpoint in the instance where the volume for
       
  2770             `old_connection_info` is attached to.
       
  2771         :param int resize_to:
       
  2772             If the new volume is larger than the old volume, it gets resized
       
  2773             to the given size (in Gigabyte) of `resize_to`.
       
  2774 
       
  2775         :return: None
  2718         """
  2776         """
  2719         raise NotImplementedError()
  2777         raise NotImplementedError()
  2720 
  2778 
  2721     def attach_interface(self, instance, image_meta, vif):
  2779     def attach_interface(self, instance, image_meta, vif):
  2722         """Attach an interface to the instance.
  2780         """Use hotplug to add a network interface to a running instance.
  2723 
  2781 
  2724         :param instance: nova.objects.instance.Instance
  2782         The counter action to this is :func:`detach_interface`.
  2725         """
  2783 
  2726         raise NotImplementedError()
  2784         :param nova.objects.instance.Instance instance:
       
  2785             The instance which will get an additional network interface.
       
  2786         :param nova.objects.ImageMeta image_meta:
       
  2787             The metadata of the image of the instance.
       
  2788         :param nova.network.model.NetworkInfo vif:
       
  2789             The object which has the information about the interface to attach.
       
  2790 
       
  2791         :raise nova.exception.NovaException: If the attach fails.
       
  2792 
       
  2793         :return: None
       
  2794         """
       
  2795         name = instance['name']
       
  2796         zone = self._get_zone_by_name(name)
       
  2797         if zone is None:
       
  2798             raise exception.InstanceNotFound(instance_id=name)
       
  2799 
       
  2800         ctxt = nova_context.get_admin_context()
       
  2801         extra_specs = self._get_extra_specs(instance)
       
  2802         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
       
  2803         anetname = self._set_ovs_info(ctxt, zone, brand, False, vif)
       
  2804 
       
  2805         # apply the configuration if the vm is ACTIVE
       
  2806         if instance['vm_state'] == vm_states.ACTIVE:
       
  2807             try:
       
  2808                 zone.apply()
       
  2809             except Exception as ex:
       
  2810                 reason = zonemgr_strerror(ex)
       
  2811                 msg = (_("Unable to attach interface to instance '%s' via "
       
  2812                          "zonemgr(3RAD): %s") % (name, reason))
       
  2813                 with ZoneConfig(zone) as zc:
       
  2814                     prop_filter = [zonemgr.Property('mac-address',
       
  2815                                                     vif['address'])]
       
  2816                     zc.removeresources('anet', prop_filter)
       
  2817                 raise nova.exception.NovaException(msg)
       
  2818 
       
  2819             # add port to ovs bridge
       
  2820             anet = ''.join([name, '/', anetname])
       
  2821             self._ovs_add_port(instance, vif, anet)
  2727 
  2822 
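    # Design note on attach_interface() above: the anet resource is committed
    # to the zone configuration first and only applied to the running zone
    # afterwards; if the live apply fails, the resource is rolled back by
    # matching on its 'mac-address' property, e.g.
    #
    #     zc.removeresources('anet', [zonemgr.Property('mac-address',
    #                                                  vif['address'])])
    #
    # so a failed hot-add leaves the persistent configuration unchanged. The
    # OVS port is only created once the apply has succeeded.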
  2728     def detach_interface(self, instance, vif):
  2823     def detach_interface(self, instance, vif):
  2729         """Detach an interface from the instance.
  2824         """Use hotunplug to remove a network interface from a running instance.
  2730 
  2825 
  2731         :param instance: nova.objects.instance.Instance
  2826         The counter action to this is :func:`attach_interface`.
  2732         """
  2827 
  2733         raise NotImplementedError()
  2828         :param nova.objects.instance.Instance instance:
       
  2829             The instance which gets a network interface removed.
       
  2830         :param nova.network.model.NetworkInfo vif:
       
  2831             The object which has the information about the interface to detach.
       
  2832 
       
  2833         :raise nova.exception.NovaException: If the detach fails.
       
  2834 
       
  2835         :return: None
       
  2836         """
       
  2837         name = instance['name']
       
  2838         zone = self._get_zone_by_name(name)
       
  2839         if zone is None:
       
  2840             raise exception.InstanceNotFound(instance_id=name)
       
  2841 
       
  2842         # Check if the specific property value exists before attempting removal
       
  2843         resource = lookup_resource_property_value(zone, 'anet',
       
  2844                                                   'mac-address',
       
  2845                                                   vif['address'])
       
  2846         if not resource:
       
  2847             msg = (_("Interface with MAC address '%s' is not attached to "
       
  2848                      "instance '%s'.") % (vif['address'], name))
       
  2849             raise nova.exception.NovaException(msg)
       
  2850 
       
  2851         extra_specs = self._get_extra_specs(instance)
       
  2852         brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
       
  2853         for prop in resource.properties:
       
  2854             if brand == ZONE_BRAND_SOLARIS and prop.name == 'linkname':
       
  2855                 anetname = prop.value
       
  2856                 break
       
  2857             elif brand != ZONE_BRAND_SOLARIS and prop.name == 'id':
       
  2858                 anetname = 'net%s' % prop.value
       
  2859                 break
       
  2860 
       
  2861         with ZoneConfig(zone) as zc:
       
  2862             zc.removeresources('anet', [zonemgr.Property('mac-address',
       
  2863                                                          vif['address'])])
       
  2864 
       
  2865         # apply the configuration if the vm is ACTIVE
       
  2866         if instance['vm_state'] == vm_states.ACTIVE:
       
  2867             try:
       
  2868                 zone.apply()
       
  2869             except Exception:
       
  2870                 msg = (_("Unable to detach interface '%s' from running "
       
  2871                          "instance '%s' because the resource is most likely "
       
  2872                          "in use.") % (anetname, name))
       
  2873                 needed_props = ["lower-link", "configure-allowed-address",
       
  2874                                 "mac-address", "mtu"]
       
  2875                 if brand == ZONE_BRAND_SOLARIS:
       
  2876                     needed_props.append("linkname")
       
  2877                 else:
       
  2878                     needed_props.append("id")
       
  2879 
       
  2880                 props = filter(lambda prop: prop.name in needed_props,
       
  2881                                resource.properties)
       
  2882                 with ZoneConfig(zone) as zc:
       
  2883                     zc.addresource('anet', props)
       
  2884                 raise nova.exception.NovaException(msg)
       
  2885 
       
  2886             # remove anet from OVS bridge
       
  2887             port = ''.join([name, '/', anetname])
       
  2888             self._ovs_delete_port(port)
  2734 
  2889 
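    # Design note on detach_interface() above: the mirror image of
    # attach_interface(). The anet resource is removed from the configuration
    # first; if the subsequent live apply fails (the interface is most likely
    # busy), the saved subset of properties (lower-link, mac-address, mtu and
    # linkname or id) is re-added so the zone configuration is restored
    # before the exception is raised.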
  2735     def _cleanup_migrate_disk(self, context, instance, volume):
  2890     def _cleanup_migrate_disk(self, context, instance, volume):
  2736         """Make a best effort at cleaning up the volume that was created to
  2891         """Make a best effort at cleaning up the volume that was created to
  2737         hold the new root disk
  2892         hold the new root disk
  2738 
  2893 
  2750                                    block_device_info=None,
  2905                                    block_device_info=None,
  2751                                    timeout=0, retry_interval=0):
  2906                                    timeout=0, retry_interval=0):
  2752         """Transfers the disk of a running instance in multiple phases, turning
  2907         """Transfers the disk of a running instance in multiple phases, turning
  2753         off the instance before the end.
  2908         off the instance before the end.
  2754 
  2909 
  2755         :param instance: nova.objects.instance.Instance
  2910         :param nova.objects.instance.Instance instance:
  2756         :param timeout: time to wait for GuestOS to shutdown
  2911             The instance whose disk should be migrated.
  2757         :param retry_interval: How often to signal guest while
  2912         :param str dest:
  2758                                waiting for it to shutdown
  2913             The IP address of the destination host.
       
  2914         :param nova.objects.flavor.Flavor flavor:
       
  2915             The flavor of the instance whose disk get migrated.
       
  2916         :param nova.network.model.NetworkInfo network_info:
       
  2917             The network information of the given `instance`.
       
  2918         :param dict block_device_info:
       
  2919             Information about the block devices.
       
  2920         :param int timeout:
       
  2921             The time in seconds to wait for the guest OS to shutdown.
       
  2922         :param int retry_interval:
       
  2923             How often to signal guest while waiting for it to shutdown.
       
  2924 
       
  2925         :return: A JSON-serialized list of disk information dicts.
       
  2926         :rtype: str
  2759         """
  2927         """
  2760         LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
  2928         LOG.debug("Starting migrate_disk_and_power_off", instance=instance)
  2761 
  2929 
  2762         samehost = (dest == self.get_host_ip_addr())
  2930         samehost = (dest == self.get_host_ip_addr())
  2763         if samehost:
  2931         if samehost:
  2809             if volume_id is None:
  2977             if volume_id is None:
  2810                 msg = (_("Cannot find an attached root device."))
  2978                 msg = (_("Cannot find an attached root device."))
  2811                 raise exception.ResizeError(reason=msg)
  2979                 raise exception.ResizeError(reason=msg)
  2812 
  2980 
  2813             vinfo = self._volume_api.get(context, volume_id)
  2981             vinfo = self._volume_api.get(context, volume_id)
  2814             newvolume = self._volume_api.create(context, orgb,
  2982             newvolume = self._volume_api.create(
  2815                                                 vinfo['display_name'] +
  2983                 context, orgb, vinfo['display_name'] + '-resized',
  2816                                                 '-resized',
  2984                 vinfo['display_description'], source_volume=vinfo)
  2817                                                 vinfo['display_description'],
       
  2818                                                 source_volume=vinfo)
       
  2819 
  2985 
  2820             instance.system_metadata['old_instance_volid'] = volume_id
  2986             instance.system_metadata['old_instance_volid'] = volume_id
  2821             instance.system_metadata['new_instance_volid'] = newvolume['id']
  2987             instance.system_metadata['new_instance_volid'] = newvolume['id']
  2822 
  2988 
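            # Sketch of the bookkeeping above (volume IDs hypothetical):
            # after cloning the root volume to the new size, both IDs are
            # parked in system_metadata,
            #
            #     system_metadata['old_instance_volid'] = 'vol-aaaa'
            #     system_metadata['new_instance_volid'] = 'vol-bbbb'
            #
            # so that confirm_migration() can later delete the old volume or
            # a revert can delete the new one.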
  2823             # TODO(npower): Polling is what nova/compute/manager also does when
  2989             # TODO(npower): Polling is what nova/compute/manager also does when
  2884         # service rejects Unified Archives (uar) and ZFS in metadata.
  3050         # service rejects Unified Archives (uar) and ZFS in metadata.
  2885         metadata['container_format'] = 'ovf'
  3051         metadata['container_format'] = 'ovf'
  2886         metadata['disk_format'] = 'raw'
  3052         metadata['disk_format'] = 'raw'
  2887 
  3053 
  2888         update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
  3054         update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
  2889         snapshot_directory = CONF.solariszones_snapshots_directory
  3055         snapshot_directory = CONF.solariszones.solariszones_snapshots_directory
  2890         fileutils.ensure_tree(snapshot_directory)
  3056         fileutils.ensure_tree(snapshot_directory)
  2891         snapshot_name = uuid.uuid4().hex
  3057         snapshot_name = uuid.uuid4().hex
  2892 
  3058 
  2893         with utils.tempdir(dir=snapshot_directory) as tmpdir:
  3059         with utils.tempdir(dir=snapshot_directory) as tmpdir:
  2894             out_path = os.path.join(tmpdir, snapshot_name)
  3060             out_path = os.path.join(tmpdir, snapshot_name)
  2902                 # Upload the archive image to the image service
  3068                 # Upload the archive image to the image service
  2903                 update_task_state(
  3069                 update_task_state(
  2904                     task_state=task_states.IMAGE_UPLOADING,
  3070                     task_state=task_states.IMAGE_UPLOADING,
  2905                     expected_state=task_states.IMAGE_PENDING_UPLOAD)
  3071                     expected_state=task_states.IMAGE_PENDING_UPLOAD)
  2906                 with open(out_path, 'r') as image_file:
  3072                 with open(out_path, 'r') as image_file:
  2907                     snapshot_service.update(context,
  3073                     snapshot_service.update(context, image_id, metadata,
  2908                                             image_id,
       
  2909                                             metadata,
       
  2910                                             image_file)
  3074                                             image_file)
  2911                     LOG.info(_("Snapshot image upload complete"),
  3075                     LOG.info(_("Snapshot image upload complete"),
  2912                              instance=instance)
  3076                              instance=instance)
  2913                 try:
  3077                 try:
  2914                     # Try to update the image metadata container and disk
  3078                     # Try to update the image metadata container and disk
  2915                     # formats more suitably for a unified archive if the
  3079                     # formats more suitably for a unified archive if the
  2916                     # glance server recognises them.
  3080                     # glance server recognises them.
  2917                     metadata['container_format'] = 'uar'
  3081                     metadata['container_format'] = 'uar'
  2918                     metadata['disk_format'] = 'zfs'
  3082                     metadata['disk_format'] = 'zfs'
  2919                     snapshot_service.update(context,
  3083                     snapshot_service.update(context, image_id, metadata, None)
  2920                                             image_id,
       
  2921                                             metadata,
       
  2922                                             None)
       
  2923                 except exception.Invalid:
  3084                 except exception.Invalid:
  2924                     LOG.warning(_("Image service rejected image metadata "
  3085                     LOG.warning(_("Image service rejected image metadata "
  2925                                   "container and disk formats 'uar' and "
  3086                                   "container and disk formats 'uar' and "
  2926                                   "'zfs'. Using generic values 'ovf' and "
  3087                                   "'zfs'. Using generic values 'ovf' and "
  2927                                   "'raw' as fallbacks."))
  3088                                   "'raw' as fallbacks."))
  2974             instance['node'] = instance['launched_on']
  3135             instance['node'] = instance['launched_on']
  2975 
  3136 
  2976     def finish_migration(self, context, migration, instance, disk_info,
  3137     def finish_migration(self, context, migration, instance, disk_info,
  2977                          network_info, image_meta, resize_instance,
  3138                          network_info, image_meta, resize_instance,
  2978                          block_device_info=None, power_on=True):
  3139                          block_device_info=None, power_on=True):
  2979         """Completes a resize.
  3140         """Completes a resize/migration.
  2980 
  3141 
  2981         :param context: the context for the migration/resize
  3142         :param context: the context for the migration/resize
  2982         :param migration: the migrate/resize information
  3143         :param migration: the migrate/resize information
  2983         :param instance: nova.objects.instance.Instance being migrated/resized
  3144         :param instance: nova.objects.instance.Instance being migrated/resized
  2984         :param disk_info: the newly transferred disk information
  3145         :param disk_info: the newly transferred disk information
  2985         :param network_info:
  3146         :param network_info:
  2986            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  3147            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  2987         :param image_meta: image object returned by nova.image.glance that
  3148         :param nova.objects.ImageMeta image_meta:
  2988                            defines the image from which this instance
  3149             The metadata of the image of the instance.
  2989                            was created
       
  2990         :param resize_instance: True if the instance is being resized,
  3150         :param resize_instance: True if the instance is being resized,
  2991                                 False otherwise
  3151                                 False otherwise
  2992         :param block_device_info: instance volume block device info
  3152         :param block_device_info: instance volume block device info
  2993         :param power_on: True if the instance should be powered on, False
  3153         :param power_on: True if the instance should be powered on, False
  2994                          otherwise
  3154                          otherwise
  3021                 # changed
  3181                 # changed
  3022                 if disk_info:
  3182                 if disk_info:
  3023                     rgb = instance.root_gb
  3183                     rgb = instance.root_gb
  3024                     self._resize_disk_migration(context, instance,
  3184                     self._resize_disk_migration(context, instance,
  3025                                                 root_ci['serial'],
  3185                                                 root_ci['serial'],
  3026                                                 disk_info['id'],
  3186                                                 disk_info['id'], rgb,
  3027                                                 rgb, mount_dev)
  3187                                                 mount_dev)
  3028 
  3188 
  3029             else:
  3189             else:
  3030                 # No need to check disk_info here, because when not on the
  3190                 # No need to check disk_info here, because when not on the
  3031                 # same host a disk_info is always passed in.
  3191                 # same host a disk_info is always passed in.
  3032                 mount_dev = 'c1d0'
  3192                 mount_dev = 'c1d0'
  3048                 zone.attach(['-x', 'initialize-hostdata'])
  3208                 zone.attach(['-x', 'initialize-hostdata'])
  3049 
  3209 
  3050                 bmap = block_device_info.get('block_device_mapping')
  3210                 bmap = block_device_info.get('block_device_mapping')
  3051                 for entry in bmap:
  3211                 for entry in bmap:
  3052                     if entry['mount_device'] != rootmp:
  3212                     if entry['mount_device'] != rootmp:
  3053                         self.attach_volume(context,
  3213                         self.attach_volume(context, entry['connection_info'],
  3054                                            entry['connection_info'], instance,
  3214                                            instance, entry['mount_device'])
  3055                                            entry['mount_device'])
       
  3056 
  3215 
  3057             if power_on:
  3216             if power_on:
  3058                 self._power_on(instance, network_info)
  3217                 self._power_on(instance, network_info)
  3059 
  3218 
  3060                 if brand == ZONE_BRAND_SOLARIS:
  3219                 if brand == ZONE_BRAND_SOLARIS:
  3079             self._cleanup_finish_migration(context, instance, disk_info,
  3238             self._cleanup_finish_migration(context, instance, disk_info,
  3080                                            network_info, samehost)
  3239                                            network_info, samehost)
  3081             raise
  3240             raise
  3082 
  3241 
  3083     def confirm_migration(self, context, migration, instance, network_info):
  3242     def confirm_migration(self, context, migration, instance, network_info):
  3084         """Confirms a resize, destroying the source VM.
  3243         """Confirms a resize/migration, destroying the source VM.
  3085 
  3244 
  3086         :param instance: nova.objects.instance.Instance
  3245         :param instance: nova.objects.instance.Instance
  3087         """
  3246         """
  3088         samehost = (migration['dest_host'] == self.get_host_ip_addr())
  3247         samehost = (migration['dest_host'] == self.get_host_ip_addr())
  3089         old_rvid = instance.system_metadata.get('old_instance_volid')
  3248         old_rvid = instance.system_metadata.get('old_instance_volid')
  3164         if not samehost:
  3323         if not samehost:
  3165             return connection_info
  3324             return connection_info
  3166 
  3325 
  3167     def finish_revert_migration(self, context, instance, network_info,
  3326     def finish_revert_migration(self, context, instance, network_info,
  3168                                 block_device_info=None, power_on=True):
  3327                                 block_device_info=None, power_on=True):
  3169         """Finish reverting a resize.
  3328         """Finish reverting a resize/migration.
  3170 
  3329 
  3171         :param context: the context for the finish_revert_migration
  3330         :param context: the context for the finish_revert_migration
  3172         :param instance: nova.objects.instance.Instance being migrated/resized
  3331         :param instance: nova.objects.instance.Instance being migrated/resized
  3173         :param network_info:
  3332         :param network_info:
  3174            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  3333            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  3215                 self._volume_api.delete(context, new_rvid)
  3374                 self._volume_api.delete(context, new_rvid)
  3216 
  3375 
  3217         self._power_on(instance, network_info)
  3376         self._power_on(instance, network_info)
  3218 
  3377 
  3219     def pause(self, instance):
  3378     def pause(self, instance):
  3220         """Pause the specified instance.
  3379         """Pause the given instance.
  3221 
  3380 
  3222         :param instance: nova.objects.instance.Instance
  3381         A paused instance doesn't use CPU cycles of the host anymore. The
       
  3382         state of the VM could be stored in the memory or storage space of the
       
  3383         host, depending on the underlying hypervisor technology.
       
  3384         A "stronger" version of `pause` is :func:`suspend`.
       
  3385         The counter action for `pause` is :func:`unpause`.
       
  3386 
       
  3387         :param nova.objects.instance.Instance instance:
       
  3388             The instance which should be paused.
       
  3389 
       
  3390         :return: None
  3223         """
  3391         """
  3224         # TODO(Vek): Need to pass context in for access to auth_token
  3392         # TODO(Vek): Need to pass context in for access to auth_token
  3225         raise NotImplementedError()
  3393         raise NotImplementedError()
  3226 
  3394 
  3227     def unpause(self, instance):
  3395     def unpause(self, instance):
  3228         """Unpause paused VM instance.
  3396         """Unpause the given paused instance.
  3229 
  3397 
  3230         :param instance: nova.objects.instance.Instance
  3398         The paused instance gets unpaused and will use CPU cycles of the
       
  3399         host again. The counter action for 'unpause' is :func:`pause`.
       
  3400         Depending on the underlying hypervisor technology, the guest has the
       
  3401         same state as before the 'pause'.
       
  3402 
       
  3403         :param nova.objects.instance.Instance instance:
       
  3404             The instance which should be unpaused.
       
  3405 
       
  3406         :return: None
  3231         """
  3407         """
  3232         # TODO(Vek): Need to pass context in for access to auth_token
  3408         # TODO(Vek): Need to pass context in for access to auth_token
  3233         raise NotImplementedError()
  3409         raise NotImplementedError()
  3234 
  3410 
  3235     def suspend(self, context, instance):
  3411     def suspend(self, context, instance):
  3236         """suspend the specified instance.
  3412         """Suspend the specified instance.
  3237 
  3413 
  3238         :param context: the context for the suspend
  3414         A suspended instance doesn't use CPU cycles or memory of the host
  3239         :param instance: nova.objects.instance.Instance
  3415         anymore. The state of the instance could be persisted on the host
       
  3416         and so may allocate storage space on it. A "softer" way of `suspend`
       
  3417         is :func:`pause`. The counter action for `suspend` is :func:`resume`.
       
  3418 
       
  3419         :param nova.context.RequestContext context:
       
  3420             The context for the suspend.
       
  3421         :param nova.objects.instance.Instance instance:
       
  3422             The instance to suspend.
       
  3423 
       
  3424         :return: None
  3240         """
  3425         """
  3241         name = instance['name']
  3426         name = instance['name']
  3242         zone = self._get_zone_by_name(name)
  3427         zone = self._get_zone_by_name(name)
  3243         if zone is None:
  3428         if zone is None:
  3244             raise exception.InstanceNotFound(instance_id=name)
  3429             raise exception.InstanceNotFound(instance_id=name)
  3254         if self._get_state(zone) != power_state.RUNNING:
  3439         if self._get_state(zone) != power_state.RUNNING:
  3255             reason = (_("Instance '%s' is not running.") % name)
  3440             reason = (_("Instance '%s' is not running.") % name)
  3256             raise exception.InstanceSuspendFailure(reason=reason)
  3441             raise exception.InstanceSuspendFailure(reason=reason)
  3257 
  3442 
  3258         try:
  3443         try:
  3259             new_path = os.path.join(CONF.zones_suspend_path, '%{zonename}')
  3444             new_path = os.path.join(CONF.solariszones.zones_suspend_path,
       
  3445                                     '%{zonename}')
  3260             if not lookup_resource(zone, 'suspend'):
  3446             if not lookup_resource(zone, 'suspend'):
  3261                 # add suspend if not configured
  3447                 # add suspend if not configured
  3262                 self._set_suspend(instance)
  3448                 self._set_suspend(instance)
  3263             elif lookup_resource_property(zone, 'suspend', 'path') != new_path:
  3449             elif lookup_resource_property(zone, 'suspend', 'path') != new_path:
  3264                 # replace the old suspend resource with the new one
  3450                 # replace the old suspend resource with the new one
  3273             LOG.error(_("Unable to suspend instance '%s' via "
  3459             LOG.error(_("Unable to suspend instance '%s' via "
  3274                         "zonemgr(3RAD): %s") % (name, reason))
  3460                         "zonemgr(3RAD): %s") % (name, reason))
  3275             raise exception.InstanceSuspendFailure(reason=reason)
  3461             raise exception.InstanceSuspendFailure(reason=reason)
  3276 
  3462 
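    # A minimal sketch, not part of this changeset: with a hypothetical
    # CONF.solariszones.zones_suspend_path of '/var/share/suspend', the
    # path placed on the zone's suspend resource above is
    #
    #   >>> os.path.join('/var/share/suspend', '%{zonename}')
    #   '/var/share/suspend/%{zonename}'
    #
    # The '%{zonename}' token is deliberately left literal; the zones
    # framework expands it per zone, so each instance suspends to its own
    # image file.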
  3277     def resume(self, context, instance, network_info, block_device_info=None):
  3463     def resume(self, context, instance, network_info, block_device_info=None):
  3278         """resume the specified instance.
  3464         """Resume the specified suspended instance.
  3279 
  3465 
  3280         :param context: the context for the resume
  3466         The suspended instance gets resumed and will use CPU cycles and memory
  3281         :param instance: nova.objects.instance.Instance being resumed
  3467         of the host again. The counter action for 'resume' is :func:`suspend`.
  3282         :param network_info:
  3468         Depending on the underlying hypervisor technology, the guest has the
  3283            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
  3469         same state as before the 'suspend'.
  3284         :param block_device_info: instance volume block device info
  3470 
       
  3471         :param nova.context.RequestContext context:
       
  3472             The context for the resume.
       
  3473         :param nova.objects.instance.Instance instance:
       
  3474             The suspended instance to resume.
       
  3475         :param nova.network.model.NetworkInfo network_info:
       
  3476             Necessary network information for the resume.
       
  3477         :param dict block_device_info:
       
  3478             Instance volume block device info.
       
  3479 
       
  3480         :return: None
  3285         """
  3481         """
  3286         name = instance['name']
  3482         name = instance['name']
  3287         zone = self._get_zone_by_name(name)
  3483         zone = self._get_zone_by_name(name)
  3288         if zone is None:
  3484         if zone is None:
  3289             raise exception.InstanceNotFound(instance_id=name)
  3485             raise exception.InstanceNotFound(instance_id=name)
  3329 
  3525 
  3330     def rescue(self, context, instance, network_info, image_meta,
  3526     def rescue(self, context, instance, network_info, image_meta,
  3331                rescue_password):
  3527                rescue_password):
  3332         """Rescue the specified instance.
  3528         """Rescue the specified instance.
  3333 
  3529 
  3334         :param instance: nova.objects.instance.Instance
  3530         :param nova.context.RequestContext context:
       
  3531             The context for the rescue.
       
  3532         :param nova.objects.instance.Instance instance:
       
  3533             The instance being rescued.
       
  3534         :param nova.network.model.NetworkInfo network_info:
       
  3535             Necessary network information for the rescue.
       
  3536         :param nova.objects.ImageMeta image_meta:
       
  3537             The metadata of the image of the instance.
       
  3538         :param rescue_password: new root password to set for rescue.
  3335         """
  3539         """
  3336         raise NotImplementedError()
  3540         raise NotImplementedError()
  3337 
  3541 
  3338     def set_bootable(self, instance, is_bootable):
  3542     def set_bootable(self, instance, is_bootable):
  3339         """Set the ability to power on/off an instance.
  3543         """Set the ability to power on/off an instance.
  3366 
  3570 
  3367         :param instance: nova.objects.instance.Instance
  3571         :param instance: nova.objects.instance.Instance
  3368         """
  3572         """
  3369         self._power_on(instance, network_info)
  3573         self._power_on(instance, network_info)
  3370 
  3574 
       
  3575     def trigger_crash_dump(self, instance):
       
  3576         """Trigger crash dump mechanism on the given instance.
       
  3577 
       
  3578         Stalled instances can be triggered to dump their crash data. How the

  3579         guest OS reacts in detail depends on its configuration.
       
  3580 
       
  3581         :param nova.objects.instance.Instance instance:
       
  3582             The instance where the crash dump should be triggered.
       
  3583 
       
  3584         :return: None
       
  3585         """
       
  3586         raise NotImplementedError()
       
  3587 
  3371     def soft_delete(self, instance):
  3588     def soft_delete(self, instance):
  3372         """Soft delete the specified instance.
  3589         """Soft delete the specified instance.
  3373 
  3590 
  3374         :param instance: nova.objects.instance.Instance
  3591         A soft-deleted instance doesn't allocate any resources anymore, but is
       
  3592         still available as a database entry. The counter action :func:`restore`
       
  3593         uses the database entry to create a new instance from it.
       
  3594 
       
  3595         :param nova.objects.instance.Instance instance:
       
  3596             The instance to soft-delete.
       
  3597 
       
  3598         :return: None
  3375         """
  3599         """
  3376         raise NotImplementedError()
  3600         raise NotImplementedError()
  3377 
  3601 
  3378     def restore(self, instance):
  3602     def restore(self, instance):
  3379         """Restore the specified instance.
  3603         """Restore the specified soft-deleted instance.
  3380 
  3604 
  3381         :param instance: nova.objects.instance.Instance
  3605         The restored instance will be automatically booted. The counter action
       
  3606         for `restore` is :func:`soft_delete`.
       
  3607 
       
  3608         :param nova.objects.instance.Instance instance:
       
  3609             The soft-deleted instance which should be restored from the
       
  3610             soft-deleted data.
       
  3611 
       
  3612         :return: None
  3382         """
  3613         """
  3383         raise NotImplementedError()
  3614         raise NotImplementedError()
  3384 
  3615 
  3385     def _get_zpool_property(self, prop, zpool):
  3616     def _get_zpool_property(self, prop, zpool):
  3386         """Get the value of a property from the zpool."""
  3617         """Get the value of a property from the zpool."""
  3399         return value
  3630         return value
  3400 
  3631 
  3401     def _update_host_stats(self):
  3632     def _update_host_stats(self):
  3402         """Update currently known host stats."""
  3633         """Update currently known host stats."""
  3403         host_stats = {}
  3634         host_stats = {}
       
  3635 
  3404         host_stats['vcpus'] = os.sysconf('SC_NPROCESSORS_ONLN')
  3636         host_stats['vcpus'] = os.sysconf('SC_NPROCESSORS_ONLN')
       
  3637 
  3405         pages = os.sysconf('SC_PHYS_PAGES')
  3638         pages = os.sysconf('SC_PHYS_PAGES')
  3406         host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024
  3639         host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024
  3407 
  3640 
  3408         out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/')
  3641         out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/')
  3409         root_zpool = out.split('/')[0]
  3642         root_zpool = out.split('/')[0]
  3411         if size is not None:
  3644         if size is not None:
  3412             host_stats['local_gb'] = Size(size).get(Size.gb_units)
  3645             host_stats['local_gb'] = Size(size).get(Size.gb_units)
  3413         else:
  3646         else:
  3414             host_stats['local_gb'] = 0
  3647             host_stats['local_gb'] = 0
  3415 
  3648 
  3416         # Account for any existing processor sets by looking at the the
  3649         # Account for any existing processor sets by looking at the number
  3417         # number of CPUs not assigned to any processor sets.
  3650         # of CPUs not assigned to any processor sets.
  3418         kstat_data = self._get_kstat_by_name('misc', 'unix', '0', 'pset')
  3651         kstat_data = self._get_kstat_by_name('misc', 'unix', '0', 'pset')
  3419         if kstat_data is not None:
  3652         if kstat_data is not None:
  3420             host_stats['vcpus_used'] = \
  3653             unassigned = self._get_kstat_statistic(kstat_data, 'ncpus')
  3421                 host_stats['vcpus'] - kstat_data['ncpus']
  3654             host_stats['vcpus_used'] = host_stats['vcpus'] - unassigned
  3422         else:
  3655         else:
  3423             host_stats['vcpus_used'] = 0
  3656             host_stats['vcpus_used'] = 0
  3424 
  3657 
  3425         # Subtract the number of free pages from the total to get the
  3658         # Subtract the number of free pages from the total to get the used.
  3426         # used.
       
  3427         kstat_data = self._get_kstat_by_name('pages', 'unix', '0',
  3659         kstat_data = self._get_kstat_by_name('pages', 'unix', '0',
  3428                                              'system_pages')
  3660                                              'system_pages')
  3429         if kstat_data is not None:
  3661         if kstat_data is not None:
  3430             free_ram_mb = self._pages_to_kb(kstat_data['freemem']) / 1024
  3662             free_ram_mb = self._get_kstat_statistic(kstat_data, 'freemem')
       
  3663             free_ram_mb = self._pages_to_kb(free_ram_mb) / 1024
  3431             host_stats['memory_mb_used'] = \
  3664             host_stats['memory_mb_used'] = \
  3432                 host_stats['memory_mb'] - free_ram_mb
  3665                 host_stats['memory_mb'] - free_ram_mb
  3433         else:
  3666         else:
  3434             host_stats['memory_mb_used'] = 0
  3667             host_stats['memory_mb_used'] = 0
  3435 
  3668 
  3440             free_disk_gb = 0
  3673             free_disk_gb = 0
  3441         host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb
  3674         host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb
  3442 
  3675 
  3443         host_stats['hypervisor_type'] = 'solariszones'
  3676         host_stats['hypervisor_type'] = 'solariszones'
  3444         host_stats['hypervisor_version'] = \
  3677         host_stats['hypervisor_version'] = \
  3445             utils.convert_version_to_int(HYPERVISOR_VERSION)
  3678             versionutils.convert_version_to_int(HYPERVISOR_VERSION)
  3446         host_stats['hypervisor_hostname'] = self._uname[1]
  3679         host_stats['hypervisor_hostname'] = self._uname[1]
  3447 
  3680 
  3448         if self._uname[4] == 'i86pc':
  3681         if self._uname[4] == 'i86pc':
  3449             architecture = 'x86_64'
  3682             architecture = arch.X86_64
  3450         else:
  3683         else:
  3451             architecture = 'sparc64'
  3684             architecture = arch.SPARC64
  3452         cpu_info = {
  3685         cpu_info = {
  3453             'arch': architecture
  3686             'arch': architecture
  3454         }
  3687         }
  3455         host_stats['cpu_info'] = jsonutils.dumps(cpu_info)
  3688         host_stats['cpu_info'] = jsonutils.dumps(cpu_info)
  3456 
  3689 
  3457         host_stats['disk_available_least'] = 0
  3690         host_stats['disk_available_least'] = free_disk_gb
  3458 
  3691         host_stats['supported_instances'] = [
  3459         supported_instances = [
  3692             (architecture, hv_type.SOLARISZONES, vm_mode.SOL)
  3460             (architecture, 'solariszones', 'solariszones')
       
  3461         ]
  3693         ]
  3462         host_stats['supported_instances'] = \
  3694         host_stats['numa_topology'] = None
  3463             jsonutils.dumps(supported_instances)
       
  3464 
  3695 
  3465         self._host_stats = host_stats
  3696         self._host_stats = host_stats
  3466 
  3697 
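    # A rough worked example of the arithmetic above (values invented, and
    # _pages_to_kb is assumed to multiply by the page size and divide by
    # 1024): on a host with 8 GiB of RAM and 4 KiB pages,
    #
    #   >>> pages = 2097152                  # os.sysconf('SC_PHYS_PAGES')
    #   >>> pages * 4096 / 1024              # _pages_to_kb(pages)
    #   8388608
    #   >>> 8388608 / 1024                   # host_stats['memory_mb']
    #   8192
    #
    # The root zpool lookup just splits the boot dataset name:
    #
    #   >>> 'rpool/ROOT/solaris\n'.split('/')[0]
    #   'rpool'
    #
    # and versionutils.convert_version_to_int() packs a dotted version
    # string into one comparable integer, e.g. '5.11' -> 5011.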
  3467     def get_available_resource(self, nodename):
  3698     def get_available_resource(self, nodename):
  3468         """Retrieve resource information.
  3699         """Retrieve resource information.
  3489         resources['hypervisor_version'] = host_stats['hypervisor_version']
  3720         resources['hypervisor_version'] = host_stats['hypervisor_version']
  3490         resources['hypervisor_hostname'] = host_stats['hypervisor_hostname']
  3721         resources['hypervisor_hostname'] = host_stats['hypervisor_hostname']
  3491         resources['cpu_info'] = host_stats['cpu_info']
  3722         resources['cpu_info'] = host_stats['cpu_info']
  3492         resources['disk_available_least'] = host_stats['disk_available_least']
  3723         resources['disk_available_least'] = host_stats['disk_available_least']
  3493         resources['supported_instances'] = host_stats['supported_instances']
  3724         resources['supported_instances'] = host_stats['supported_instances']
       
  3725         resources['numa_topology'] = host_stats['numa_topology']
  3494         return resources
  3726         return resources
  3495 
  3727 
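    # Illustrative shape of the return value (values invented; the elided
    # lines above copy the remaining host_stats keys the same way):
    #
    #   {'vcpus': 32, 'vcpus_used': 4,
    #    'memory_mb': 65536, 'memory_mb_used': 8192,
    #    'local_gb': 460, 'local_gb_used': 12,
    #    'hypervisor_type': 'solariszones',
    #    'hypervisor_version': 5011000,
    #    'hypervisor_hostname': 'compute-node-1',
    #    'cpu_info': '{"arch": "x86_64"}',
    #    'disk_available_least': 448,
    #    'supported_instances': [(arch.X86_64, hv_type.SOLARISZONES,
    #                             vm_mode.SOL)],
    #    'numa_topology': None}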
  3496     def pre_live_migration(self, context, instance, block_device_info,
  3728     def pre_live_migration(self, context, instance, block_device_info,
  3497                            network_info, disk_info, migrate_data=None):
  3729                            network_info, disk_info, migrate_data=None):
  3498         """Prepare an instance for live migration
  3730         """Prepare an instance for live migration
  3500         :param context: security context
  3732         :param context: security context
  3501         :param instance: nova.objects.instance.Instance object
  3733         :param instance: nova.objects.instance.Instance object
  3502         :param block_device_info: instance block device information
  3734         :param block_device_info: instance block device information
  3503         :param network_info: instance network information
  3735         :param network_info: instance network information
  3504         :param disk_info: instance disk information
  3736         :param disk_info: instance disk information
  3505         :param migrate_data: implementation specific data dict.
  3737         :param migrate_data: a LiveMigrateData object
  3506         """
  3738         """
  3507         return {}
  3739         return migrate_data
  3508 
  3740 
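    # A small sketch (names illustrative) of the contract above: the
    # driver no longer returns a fresh dict but hands the caller's
    # LiveMigrateData object back unchanged, e.g.
    #
    #   >>> md = objects.SolarisZonesLiveMigrateData()
    #   >>> drv.pre_live_migration(ctxt, inst, bdi, nw_info, None, md) is md
    #   True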
  3509     def _live_migration(self, name, dest, dry_run=False):
  3741     def _live_migration(self, name, dest, dry_run=False):
  3510         """Live migration of a Solaris kernel zone to another host."""
  3742         """Live migration of a Solaris kernel zone to another host."""
  3511         zone = self._get_zone_by_name(name)
  3743         zone = self._get_zone_by_name(name)
  3512         if zone is None:
  3744         if zone is None:
  3513             raise exception.InstanceNotFound(instance_id=name)
  3745             raise exception.InstanceNotFound(instance_id=name)
  3514 
  3746 
  3515         options = []
  3747         options = []
  3516         live_migration_cipher = CONF.live_migration_cipher
  3748         live_migration_cipher = CONF.solariszones.live_migration_cipher
  3517         if live_migration_cipher is not None:
  3749         if live_migration_cipher is not None:
  3518             options.extend(['-c', live_migration_cipher])
  3750             options.extend(['-c', live_migration_cipher])
  3519         if dry_run:
  3751         if dry_run:
  3520             options.append('-nq')
  3752             options.append('-nq')
  3521         options.append('ssh://nova@' + dest)
  3753         options.append('ssh://nova@' + dest)
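    # For illustration only: a dry-run check against host 'dest-host' with
    # live_migration_cipher hypothetically set to 'aes-128-ccm' assembles
    #
    #   ['-c', 'aes-128-ccm', '-nq', 'ssh://nova@dest-host']
    #
    # which the elided lines below presumably hand to the zone's migrate
    # operation over zonemgr(3RAD).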
  3536             expected nova.compute.manager._post_live_migration.
  3768             expected nova.compute.manager._post_live_migration.
  3537         :param recover_method:
  3769         :param recover_method:
  3538             recovery method when any exception occurs.
  3770             recovery method when any exception occurs.
  3539             expected nova.compute.manager._rollback_live_migration.
  3771             expected nova.compute.manager._rollback_live_migration.
  3540         :param block_migration: if true, migrate VM disk.
  3772         :param block_migration: if true, migrate VM disk.
  3541         :param migrate_data: implementation specific params.
  3773         :param migrate_data: a LiveMigrateData object
  3542 
  3774 
  3543         """
  3775         """
  3544         name = instance['name']
  3776         name = instance['name']
  3545         try:
  3777         try:
  3546             self._live_migration(name, dest, dry_run=False)
  3778             self._live_migration(name, dest, dry_run=False)
  3552                           % (name, dest, reason))
  3784                           % (name, dest, reason))
  3553                 recover_method(context, instance, dest, block_migration)
  3785                 recover_method(context, instance, dest, block_migration)
  3554 
  3786 
  3555         post_method(context, instance, dest, block_migration, migrate_data)
  3787         post_method(context, instance, dest, block_migration, migrate_data)
  3556 
  3788 
       
  3789     def live_migration_force_complete(self, instance):
       
  3790         """Force live migration to complete
       
  3791 
       
  3792         :param instance: Instance being live migrated
       
  3793 
       
  3794         """
       
  3795         raise NotImplementedError()
       
  3796 
       
  3797     def live_migration_abort(self, instance):
       
  3798         """Abort an in-progress live migration.
       
  3799 
       
  3800         :param instance: instance that is live migrating
       
  3801 
       
  3802         """
       
  3803         raise NotImplementedError()
       
  3804 
  3557     def rollback_live_migration_at_destination(self, context, instance,
  3805     def rollback_live_migration_at_destination(self, context, instance,
  3558                                                network_info,
  3806                                                network_info,
  3559                                                block_device_info,
  3807                                                block_device_info,
  3560                                                destroy_disks=True,
  3808                                                destroy_disks=True,
  3561                                                migrate_data=None):
  3809                                                migrate_data=None):
  3565         :param instance: instance object that was being migrated
  3813         :param instance: instance object that was being migrated
  3566         :param network_info: instance network information
  3814         :param network_info: instance network information
  3567         :param block_device_info: instance block device information
  3815         :param block_device_info: instance block device information
  3568         :param destroy_disks:
  3816         :param destroy_disks:
  3569             if true, destroy disks at destination during cleanup
  3817             if true, destroy disks at destination during cleanup
  3570         :param migrate_data: implementation specific params
  3818         :param migrate_data: a LiveMigrateData object
  3571 
  3819 
  3572         """
  3820         """
  3573         pass
  3821         pass
  3574 
  3822 
  3575     def post_live_migration(self, context, instance, block_device_info,
  3823     def post_live_migration(self, context, instance, block_device_info,
  3577         """Post operation of live migration at source host.
  3825         """Post operation of live migration at source host.
  3578 
  3826 
  3579         :param context: security context
  3827         :param context: security context
  3580         :param instance: instance object that was migrated
  3828         :param instance: instance object that was migrated
  3581         :param block_device_info: instance block device information
  3829         :param block_device_info: instance block device information
  3582         :param migrate_data: if not None, it is a dict which has data
  3830         :param migrate_data: a LiveMigrateData object
  3583         """
  3831         """
  3584         try:
  3832         try:
  3585             # These methods log if problems occur so no need to double log
  3833             # These methods log if problems occur so no need to double log
  3586             # here. Just catch any stray exceptions and allow destroy to
  3834             # here. Just catch any stray exceptions and allow destroy to
  3587             # proceed.
  3835             # proceed.
  3669         :param instance: nova.db.sqlalchemy.models.Instance
  3917         :param instance: nova.db.sqlalchemy.models.Instance
  3670         :param src_compute_info: Info about the sending machine
  3918         :param src_compute_info: Info about the sending machine
  3671         :param dst_compute_info: Info about the receiving machine
  3919         :param dst_compute_info: Info about the receiving machine
  3672         :param block_migration: if true, prepare for block migration
  3920         :param block_migration: if true, prepare for block migration
  3673         :param disk_over_commit: if true, allow disk over commit
  3921         :param disk_over_commit: if true, allow disk over commit
  3674         :returns: a dict containing migration info (hypervisor-dependent)
  3922         :returns: a LiveMigrateData object (hypervisor-dependent)
  3675         """
  3923         """
  3676         src_cpu_info = jsonutils.loads(src_compute_info['cpu_info'])
  3924         src_cpu_info = jsonutils.loads(src_compute_info['cpu_info'])
  3677         src_cpu_arch = src_cpu_info['arch']
  3925         src_cpu_arch = src_cpu_info['arch']
  3678         dst_cpu_info = jsonutils.loads(dst_compute_info['cpu_info'])
  3926         dst_cpu_info = jsonutils.loads(dst_compute_info['cpu_info'])
  3679         dst_cpu_arch = dst_cpu_info['arch']
  3927         dst_cpu_arch = dst_cpu_info['arch']
  3698             raise exception.MigrationPreCheckError(reason=reason)
  3946             raise exception.MigrationPreCheckError(reason=reason)
  3699         if disk_over_commit:
  3947         if disk_over_commit:
  3700             reason = (_('Disk overcommit is not currently supported.'))
  3948             reason = (_('Disk overcommit is not currently supported.'))
  3701             raise exception.MigrationPreCheckError(reason=reason)
  3949             raise exception.MigrationPreCheckError(reason=reason)
  3702 
  3950 
  3703         dest_check_data = {
  3951         dest_check_data = objects.SolarisZonesLiveMigrateData()
  3704             'hypervisor_hostname': dst_compute_info['hypervisor_hostname']
  3952         dest_check_data.hypervisor_hostname = \
  3705         }
  3953             dst_compute_info['hypervisor_hostname']
  3706         return dest_check_data
  3954         return dest_check_data
  3707 
  3955 
  3708     def check_can_live_migrate_destination_cleanup(self, context,
  3956     def check_can_live_migrate_destination_cleanup(self, context,
  3709                                                    dest_check_data):
  3957                                                    dest_check_data):
  3710         """Do required cleanup on dest host after check_can_live_migrate calls
  3958         """Do required cleanup on dest host after check_can_live_migrate calls
  3734 
  3982 
  3735         :param context: security context
  3983         :param context: security context
  3736         :param instance: nova.db.sqlalchemy.models.Instance
  3984         :param instance: nova.db.sqlalchemy.models.Instance
  3737         :param dest_check_data: result of check_can_live_migrate_destination
  3985         :param dest_check_data: result of check_can_live_migrate_destination
  3738         :param block_device_info: result of _get_instance_block_device_info
  3986         :param block_device_info: result of _get_instance_block_device_info
  3739         :returns: a dict containing migration info (hypervisor-dependent)
  3987         :returns: a LiveMigrateData object
  3740         """
  3988         """
       
  3989         if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
       
  3990             obj = objects.SolarisZonesLiveMigrateData()
       
  3991             obj.from_legacy_dict(dest_check_data)
       
  3992             dest_check_data = obj
       
  3993 
  3741         self._check_local_volumes_present(block_device_info)
  3994         self._check_local_volumes_present(block_device_info)
  3742         name = instance['name']
  3995         name = instance['name']
  3743         dest = dest_check_data['hypervisor_hostname']
  3996         dest = dest_check_data.hypervisor_hostname
  3744         try:
  3997         try:
  3745             self._live_migration(name, dest, dry_run=True)
  3998             self._live_migration(name, dest, dry_run=True)
  3746         except Exception as ex:
  3999         except Exception as ex:
  3747             reason = zonemgr_strerror(ex)
  4000             reason = zonemgr_strerror(ex)
  3748             raise exception.MigrationPreCheckError(reason=reason)
  4001             raise exception.MigrationPreCheckError(reason=reason)
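    # Illustrative only: the isinstance() guard above keeps older callers
    # that still send a plain dict working, e.g.
    #
    #   >>> legacy = {'hypervisor_hostname': 'dest-host'}
    #   >>> obj = objects.SolarisZonesLiveMigrateData()
    #   >>> obj.from_legacy_dict(legacy)
    #   >>> obj.hypervisor_hostname
    #   'dest-host'
    #
    # (from_legacy_dict() is assumed to copy the hypervisor_hostname key
    # onto the object; the attribute access further up relies on that.)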
  3780 
  4033 
  3781         """
  4034         """
  3782         # TODO(Vek): Need to pass context in for access to auth_token
  4035         # TODO(Vek): Need to pass context in for access to auth_token
  3783         raise NotImplementedError()
  4036         raise NotImplementedError()
  3784 
  4037 
  3785     def refresh_security_group_members(self, security_group_id):
       
  3786         """This method is called when a security group is added to an instance.
       
  3787 
       
  3788         This message is sent to the virtualization drivers on hosts that are
       
  3789         running an instance that belongs to a security group that has a rule
       
  3790         that references the security group identified by `security_group_id`.
       
  3791         It is the responsibility of this method to make sure any rules
       
  3792         that authorize traffic flow with members of the security group are
       
  3793         updated and any new members can communicate, and any removed members
       
  3794         cannot.
       
  3795 
       
  3796         Scenario:
       
  3797             * we are running on host 'H0' and we have an instance 'i-0'.
       
  3798             * instance 'i-0' is a member of security group 'speaks-b'
       
  3799             * group 'speaks-b' has an ingress rule that authorizes group 'b'
       
  3800             * another host 'H1' runs an instance 'i-1'
       
  3801             * instance 'i-1' is a member of security group 'b'
       
  3802 
       
  3803             When 'i-1' launches or terminates we will receive the message
       
  3804             to update members of group 'b', at which time we will make
       
  3805             any changes needed to the rules for instance 'i-0' to allow
       
  3806             or deny traffic coming from 'i-1', depending on if it is being
       
  3807             added or removed from the group.
       
  3808 
       
  3809         In this scenario, 'i-1' could just as easily have been running on our
       
  3810         host 'H0' and this method would still have been called.  The point was
       
  3811         that this method isn't called on the host where instances of that
       
  3812         group are running (as is the case with
       
  3813         :py:meth:`refresh_security_group_rules`) but is called where references
       
  3814         are made to authorizing those instances.
       
  3815 
       
  3816         An error should be raised if the operation cannot complete.
       
  3817 
       
  3818         """
       
  3819         # TODO(Vek): Need to pass context in for access to auth_token
       
  3820         raise NotImplementedError()
       
  3821 
       
  3822     def refresh_provider_fw_rules(self):
       
  3823         """This triggers a firewall update based on database changes.
       
  3824 
       
  3825         When this is called, rules have either been added or removed from the
       
  3826         datastore.  You can retrieve rules with
       
  3827         :py:meth:`nova.db.provider_fw_rule_get_all`.
       
  3828 
       
  3829         Provider rules take precedence over security group rules.  If an IP
       
  3830         would be allowed by a security group ingress rule, but blocked by
       
  3831         a provider rule, then packets from the IP are dropped.  This includes
       
  3832         intra-project traffic in the case of the allow_project_net_traffic
       
  3833         flag for the libvirt-derived classes.
       
  3834 
       
  3835         """
       
  3836         # TODO(Vek): Need to pass context in for access to auth_token
       
  3837         raise NotImplementedError()
       
  3838 
       
  3839     def refresh_instance_security_rules(self, instance):
  4038     def refresh_instance_security_rules(self, instance):
  3840         """Refresh security group rules
  4039         """Refresh security group rules
  3841 
  4040 
  3842         Gets called when an instance gets added to or removed from
  4041         Gets called when an instance gets added to or removed from
  3843         the security group the instance is a member of or if the
  4042         the security group the instance is a member of or if the
  3939         """Inject network info for the specified instance."""
  4138         """Inject network info for the specified instance."""
  3940         # TODO(Vek): Need to pass context in for access to auth_token
  4139         # TODO(Vek): Need to pass context in for access to auth_token
  3941         pass
  4140         pass
  3942 
  4141 
  3943     def poll_rebooting_instances(self, timeout, instances):
  4142     def poll_rebooting_instances(self, timeout, instances):
  3944         """Poll for rebooting instances
  4143         """Perform a reboot on all given 'instances'.
  3945 
  4144 
  3946         :param timeout: the currently configured timeout for considering
  4145         Reboots the given `instances` which have been in the rebooting state
  3947                         rebooting instances to be stuck
  4146         longer than `timeout` seconds.
  3948         :param instances: instances that have been in rebooting state
  4147 
  3949                           longer than the configured timeout
  4148         :param int timeout:
       
  4149             The timeout (in seconds) for considering rebooting instances
       
  4150             to be stuck.
       
  4151         :param list instances:
       
  4152             A list of nova.objects.instance.Instance objects that have been
       
  4153             in rebooting state longer than the configured timeout.
       
  4154 
       
  4155         :return: None
  3950         """
  4156         """
  3951         # TODO(Vek): Need to pass context in for access to auth_token
  4157         # TODO(Vek): Need to pass context in for access to auth_token
  3952         raise NotImplementedError()
  4158         raise NotImplementedError()
  3953 
  4159 
  3954     def host_power_action(self, action):
  4160     def host_power_action(self, action):
  3955         """Reboots, shuts down or powers up the host."""
  4161         """Reboots, shuts down or powers up the host.
       
  4162 
       
  4163         :param str action:
       
  4164             The action the host should perform. The valid actions are:
       
  4165             "startup", "shutdown" and "reboot".
       
  4166 
       
  4167         :return: The result of the power action
       
  4168         :rtype: str
       
  4169         """
       
  4170 
  3956         raise NotImplementedError()
  4171         raise NotImplementedError()
  3957 
  4172 
  3958     def host_maintenance_mode(self, host, mode):
  4173     def host_maintenance_mode(self, host, mode):
  3959         """Start/Stop host maintenance window. On start, it triggers
  4174         """Start/Stop host maintenance window.
  3960         guest VMs evacuation.
  4175 
  3961         """
  4176         On start, it triggers the migration of all instances to other hosts.
       
  4177         Consider using it in combination with :func:`set_host_enabled`.
       
  4178 
       
  4179         :param str host:
       
  4180             The name of the host whose maintenance mode should be changed.
       
  4181         :param bool mode:
       
  4182             If `True`, go into maintenance mode. If `False`, leave the
       
  4183             maintenance mode.
       
  4184 
       
  4185         :return: "on_maintenance" if switched to maintenance mode or
       
  4186                  "off_maintenance" if maintenance mode was left.
       
  4187         :rtype: str
       
  4188         """
       
  4189 
  3962         raise NotImplementedError()
  4190         raise NotImplementedError()
  3963 
  4191 
  3964     def set_host_enabled(self, enabled):
  4192     def set_host_enabled(self, enabled):
  3965         """Sets the specified host's ability to accept new instances."""
  4193         """Sets the ability of this host to accept new instances.
       
  4194 
       
  4195         :param bool enabled:
       
  4196             If this is `True`, the host will accept new instances. If it is
       
  4197             `False`, the host won't accept new instances.
       
  4198 
       
  4199         :return: If the host can accept further instances, return "enabled",
       
  4200                  if further instances shouldn't be scheduled to this host,
       
  4201                  return "disabled".
       
  4202         :rtype: str
       
  4203         """
  3966         # TODO(Vek): Need to pass context in for access to auth_token
  4204         # TODO(Vek): Need to pass context in for access to auth_token
  3967         raise NotImplementedError()
  4205         raise NotImplementedError()
  3968 
  4206 
  3969     def get_host_uptime(self):
  4207     def get_host_uptime(self):
  3970         """Returns the result of calling "uptime" on the target host."""
  4208         """Returns the result of running the `uptime` command on this
       
  4209         host.
       
  4210 
       
  4211         :return: Text that contains the uptime of this host since the
       
  4212                  last boot.
       
  4213         :rtype: str
       
  4214         """
  3971         # TODO(Vek): Need to pass context in for access to auth_token
  4215         # TODO(Vek): Need to pass context in for access to auth_token
  3972         return utils.execute('/usr/bin/uptime')[0]
  4216         return utils.execute('/usr/bin/uptime')[0]
  3973 
  4217 
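    # For reference: utils.execute() returns a (stdout, stderr) tuple, so
    # the [0] above picks off just the stdout text, e.g. (output invented):
    #
    #   >>> utils.execute('/usr/bin/uptime')[0]
    #   ' 10:15am  up 12 day(s), 3:02, 2 users, load average: 0.31, ...'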
  3974     def plug_vifs(self, instance, network_info):
  4218     def plug_vifs(self, instance, network_info):
  3975         """Plug VIFs into networks.
  4219         """Plug virtual interfaces (VIFs) into the given `instance` at
  3976 
  4220         instance boot time.
  3977         :param instance: nova.objects.instance.Instance
  4221 
       
  4222         The counter action is :func:`unplug_vifs`.
       
  4223 
       
  4224         :param nova.objects.instance.Instance instance:
       
  4225             The instance which gets VIFs plugged.
       
  4226         :param nova.network.model.NetworkInfo network_info:
       
  4227             The object which contains information about the VIFs to plug.
       
  4228 
       
  4229         :return: None
  3978         """
  4230         """
  3979         # TODO(Vek): Need to pass context in for access to auth_token
  4231         # TODO(Vek): Need to pass context in for access to auth_token
  3980         pass
  4232         pass
  3981 
  4233 
  3982     def unplug_vifs(self, instance, network_info):
  4234     def unplug_vifs(self, instance, network_info):
  3983         """Unplug VIFs from networks.
  4235         # NOTE(markus_z): 2015-08-18
  3984 
  4236         # The compute manager doesn't use this interface, which seems odd
  3985         :param instance: nova.objects.instance.Instance
  4237         # since the manager should be the controlling thing here.
       
  4238         """Unplug virtual interfaces (VIFs) from networks.
       
  4239 
       
  4240         The counter action is :func:`plug_vifs`.
       
  4241 
       
  4242         :param nova.objects.instance.Instance instance:
       
  4243             The instance which gets VIFs unplugged.
       
  4244         :param nova.network.model.NetworkInfo network_info:
       
  4245             The object which contains information about the VIFs to unplug.
       
  4246 
       
  4247         :return: None
  3986         """
  4248         """
  3987         raise NotImplementedError()
  4249         raise NotImplementedError()
  3988 
  4250 
  3989     def get_host_cpu_stats(self):
  4251     def get_host_cpu_stats(self):
  3990         """Get the currently known host CPU stats.
  4252         """Get the currently known host CPU stats.
  4068         the appropriate options out to the DHCP service. Most hypervisors can
  4330         the appropriate options out to the DHCP service. Most hypervisors can
  4069         use the default implementation which returns None.
  4331         use the default implementation which returns None.
  4070 
  4332 
  4071         This is called during spawn_instance by the compute manager.
  4333         This is called during spawn_instance by the compute manager.
  4072 
  4334 
  4073         Note that the format of the return value is specific to Quantum
  4335         Note that the format of the return value is specific to the Neutron
  4074         client API.
  4336         client API.
  4075 
  4337 
  4076         :return: None, or a set of DHCP options, eg:
  4338         :return: None, or a set of DHCP options, eg:
  4077 
  4339 
  4078              |    [{'opt_name': 'bootfile-name',
  4340              |    [{'opt_name': 'bootfile-name',
  4097         :param all_instances: nova.objects.instance.InstanceList
  4359         :param all_instances: nova.objects.instance.InstanceList
  4098         """
  4360         """
  4099         pass
  4361         pass
  4100 
  4362 
  4101     def add_to_aggregate(self, context, aggregate, host, **kwargs):
  4363     def add_to_aggregate(self, context, aggregate, host, **kwargs):
  4102         """Add a compute host to an aggregate."""
  4364         """Add a compute host to an aggregate.
       
  4365 
       
  4366         The counter action to this is :func:`remove_from_aggregate`
       
  4367 
       
  4368         :param nova.context.RequestContext context:
       
  4369             The security context.
       
  4370         :param nova.objects.aggregate.Aggregate aggregate:
       
  4371             The aggregate which should add the given `host`
       
  4372         :param str host:
       
  4373             The name of the host to add to the given `aggregate`.
       
  4374         :param dict kwargs:
       
  4375             A free-form dict of additional arguments.
       
  4376 
       
  4377         :return: None
       
  4378         """
  4103         # NOTE(jogo) Currently only used for XenAPI-Pool
  4379         # NOTE(jogo) Currently only used for XenAPI-Pool
  4104         raise NotImplementedError()
  4380         raise NotImplementedError()
  4105 
  4381 
  4106     def remove_from_aggregate(self, context, aggregate, host, **kwargs):
  4382     def remove_from_aggregate(self, context, aggregate, host, **kwargs):
  4107         """Remove a compute host from an aggregate."""
  4383         """Remove a compute host from an aggregate.
       
  4384 
       
  4385         The counter action to this is :func:`add_to_aggregate`
       
  4386 
       
  4387         :param nova.context.RequestContext context:
       
  4388             The security context.
       
  4389         :param nova.objects.aggregate.Aggregate aggregate:
       
  4390             The aggregate which should remove the given `host`
       
  4391         :param str host:
       
  4392             The name of the host to remove from the given `aggregate`.
       
  4393         :param dict kwargs:
       
  4394             A free-form dict of additional arguments.
       
  4395 
       
  4396         :return: None
       
  4397         """
  4108         raise NotImplementedError()
  4398         raise NotImplementedError()
  4109 
  4399 
  4110     def undo_aggregate_operation(self, context, op, aggregate,
  4400     def undo_aggregate_operation(self, context, op, aggregate,
  4111                                  host, set_error=True):
  4401                                  host, set_error=True):
  4112         """Undo for Resource Pools."""
  4402         """Undo for Resource Pools."""
  4127                 'wwpns': wwpns,
  4417                 'wwpns': wwpns,
  4128                 'host': hostname
  4418                 'host': hostname
  4129             }
  4419             }
  4130 
  4420 
  4131         """
  4421         """
  4132         connector = {'ip': self.get_host_ip_addr(),
  4422         connector = {
  4133                      'host': CONF.host}
  4423             'ip': self.get_host_ip_addr(),
       
  4424             'host': CONF.host
       
  4425         }
  4134         if not self._initiator:
  4426         if not self._initiator:
  4135             self._initiator = self._get_iscsi_initiator()
  4427             self._initiator = self._get_iscsi_initiator()
  4136 
  4428 
  4137         if self._initiator:
  4429         if self._initiator:
  4138             connector['initiator'] = self._initiator
  4430             connector['initiator'] = self._initiator
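    # Illustrative result (values invented): on an iSCSI-capable host the
    # connector assembled above ends up as
    #
    #   {'ip': '192.168.1.10',
    #    'host': 'compute-node-1',
    #    'initiator': 'iqn.1986-03.com.sun:01:deadbeef'}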
  4199         .. note::
  4491         .. note::
  4200             Used in rebuild for HA implementation and required for validation
  4492             Used in rebuild for HA implementation and required for validation
  4201             of access to instance shared disk files
  4493             of access to instance shared disk files
  4202         """
  4494         """
  4203         bdmobj = objects.BlockDeviceMappingList
  4495         bdmobj = objects.BlockDeviceMappingList
  4204         bdms = bdmobj.get_by_instance_uuid(
  4496         bdms = bdmobj.get_by_instance_uuid(nova_context.get_admin_context(),
  4205             nova_context.get_admin_context(),
  4497                                            instance['uuid'])
  4206             instance['uuid'])
       
  4207 
  4498 
  4208         root_ci = None
  4499         root_ci = None
  4209         rootmp = instance['root_device_name']
  4500         rootmp = instance['root_device_name']
  4210         for entry in bdms:
  4501         for entry in bdms:
  4211             if entry['connection_info'] is None:
  4502             if entry['connection_info'] is None:
  4284 
  4575 
  4285     def volume_snapshot_create(self, context, instance, volume_id,
  4576     def volume_snapshot_create(self, context, instance, volume_id,
  4286                                create_info):
  4577                                create_info):
  4287         """Snapshots volumes attached to a specified instance.
  4578         """Snapshots volumes attached to a specified instance.
  4288 
  4579 
  4289         :param context: request context
  4580         The counter action to this is :func:`volume_snapshot_delete`
  4290         :param instance: nova.objects.instance.Instance that has the volume
  4581 
  4291                attached
  4582         :param nova.context.RequestContext context:
  4292         :param volume_id: Volume to be snapshotted
  4583             The security context.
       
  4584         :param nova.objects.instance.Instance  instance:
       
  4585             The instance that has the volume attached
       
  4586         :param uuid volume_id:
       
  4587             Volume to be snapshotted
  4293         :param create_info: The data needed for nova to be able to attach
  4588         :param create_info: The data needed for nova to be able to attach
  4294                to the volume.  This is the same data format returned by
  4589                to the volume.  This is the same data format returned by
  4295                Cinder's initialize_connection() API call.  In the case of
  4590                Cinder's initialize_connection() API call.  In the case of
  4296                doing a snapshot, it is the image file Cinder expects to be
  4591                doing a snapshot, it is the image file Cinder expects to be
  4297                used as the active disk after the snapshot operation has
  4592                used as the active disk after the snapshot operation has
  4300         """
  4595         """
  4301         raise NotImplementedError()
  4596         raise NotImplementedError()
  4302 
  4597 
  4303     def volume_snapshot_delete(self, context, instance, volume_id,
  4598     def volume_snapshot_delete(self, context, instance, volume_id,
  4304                                snapshot_id, delete_info):
  4599                                snapshot_id, delete_info):
  4305         """Snapshots volumes attached to a specified instance.
  4600         """Deletes a snapshot of a volume attached to a specified instance.
  4306 
  4601 
  4307         :param context: request context
  4602         The counter action to this is :func:`volume_snapshot_create`
  4308         :param instance: nova.objects.instance.Instance that has the volume
  4603 
  4309                attached
  4604         :param nova.context.RequestContext context:
  4310         :param volume_id: Attached volume associated with the snapshot
  4605             The security context.
  4311         :param snapshot_id: The snapshot to delete.
  4606         :param nova.objects.instance.Instance instance:
  4312         :param delete_info: Volume backend technology specific data needed to
  4607             The instance that has the volume attached.
  4313                be able to complete the snapshot.  For example, in the case of
  4608         :param uuid volume_id:
  4314                qcow2 backed snapshots, this would include the file being
  4609             Attached volume associated with the snapshot
  4315                merged, and the file being merged into (if appropriate).
  4610         :param uuid snapshot_id:
       
  4611             The snapshot to delete.
       
  4612         :param dict delete_info:
       
  4613             Volume backend technology specific data needed to be able to
       
  4614             complete the snapshot.  For example, in the case of qcow2 backed
       
  4615             snapshots, this would include the file being merged, and the file
       
  4616             being merged into (if appropriate).
       
  4617 
       
  4618         :return: None
  4316         """
  4619         """
  4317         raise NotImplementedError()
  4620         raise NotImplementedError()
  4318 
  4621 
  4319     def default_root_device_name(self, instance, image_meta, root_bdm):
  4622     def default_root_device_name(self, instance, image_meta, root_bdm):
  4320         """Provide a default root device name for the driver."""
  4623         """Provide a default root device name for the driver.
       
  4624 
       
  4625         :param nova.objects.instance.Instance instance:
       
  4626             The instance to get the root device for.
       
  4627         :param nova.objects.ImageMeta image_meta:
       
  4628             The metadata of the image of the instance.
       
  4629         :param nova.objects.BlockDeviceMapping root_bdm:
       
  4630             The description of the root device.
       
  4631         """
  4321         raise NotImplementedError()
  4632         raise NotImplementedError()
  4322 
  4633 
  4323     def default_device_names_for_instance(self, instance, root_device_name,
  4634     def default_device_names_for_instance(self, instance, root_device_name,
  4324                                           *block_device_lists):
  4635                                           *block_device_lists):
  4325         """Default the missing device names in the block device mapping."""
  4636         """Default the missing device names in the block device mapping."""
       
  4637         raise NotImplementedError()
       
  4638 
       
  4639     def get_device_name_for_instance(self, instance,
       
  4640                                      bdms, block_device_obj):
       
  4641         """Get the next device name based on the block device mapping.
       
  4642 
       
  4643         :param instance: nova.objects.instance.Instance that volume is
       
  4644                          requesting a device name
       
  4645         :param bdms: a nova.objects.BlockDeviceMappingList for the instance
       
  4646         :param block_device_obj: A nova.objects.BlockDeviceMapping instance
       
  4647                                  with all info about the requested block
       
  4648                                  device. device_name does not need to be set,
       
  4649                                  and should be decided by the driver
       
  4650                                  implementation if not set.
       
  4651 
       
  4652         :returns: The chosen device name.
       
  4653         """
  4326         raise NotImplementedError()
  4654         raise NotImplementedError()
  4327 
  4655 
  4328     def is_supported_fs_format(self, fs_type):
  4656     def is_supported_fs_format(self, fs_type):
  4329         """Check whether the file format is supported by this driver
  4657         """Check whether the file format is supported by this driver
  4330 
  4658 
  4344         InstanceQuiesceNotSupported is raised. When it fails to quiesce
  4672         InstanceQuiesceNotSupported is raised. When it fails to quiesce
  4345         due to other errors (e.g. agent timeout), NovaException is raised.
  4673         due to other errors (e.g. agent timeout), NovaException is raised.
  4346 
  4674 
  4347         :param context:  request context
  4675         :param context:  request context
  4348         :param instance: nova.objects.instance.Instance to be quiesced
  4676         :param instance: nova.objects.instance.Instance to be quiesced
  4349         :param image_meta: image object returned by nova.image.glance that
  4677         :param nova.objects.ImageMeta image_meta:
  4350                            defines the image from which this instance
  4678             The metadata of the image of the instance.
  4351                            was created
       
  4352         """
  4679         """
  4353         raise NotImplementedError()
  4680         raise NotImplementedError()
  4354 
  4681 
  4355     def unquiesce(self, context, instance, image_meta):
  4682     def unquiesce(self, context, instance, image_meta):
  4356         """Unquiesce the specified instance after snapshots.
  4683         """Unquiesce the specified instance after snapshots.
  4359         InstanceQuiesceNotSupported is raised. When it fails to unquiesce
  4686         InstanceQuiesceNotSupported is raised. When it fails to unquiesce
  4360         due to other errors (e.g. agent timeout), NovaException is raised.
  4687         due to other errors (e.g. agent timeout), NovaException is raised.
  4361 
  4688 
  4362         :param context:  request context
  4689         :param context:  request context
  4363         :param instance: nova.objects.instance.Instance to be unquiesced
  4690         :param instance: nova.objects.instance.Instance to be unquiesced
  4364         :param image_meta: image object returned by nova.image.glance that
  4691         :param nova.objects.ImageMeta image_meta:
  4365                            defines the image from which this instance
  4692             The metadata of the image of the instance.
  4366                            was created
       
  4367         """
  4693         """
  4368         raise NotImplementedError()
  4694         raise NotImplementedError()
       
  4695 
       
  4696     def network_binding_host_id(self, context, instance):
       
  4697         """Get host ID to associate with network ports.
       
  4698 
       
  4699         :param context:  request context
       
  4700         :param instance: nova.objects.instance.Instance that the network
       
  4701                          ports will be associated with
       
  4702         :returns: a string representing the host ID
       
  4703         """
       
  4704         return instance.get('host')