components/openstack/nova/files/solariszones/driver.py
changeset 4585 86590f6eefcb
parent 4553 13705ca3643b
child 4669 342ab6111bb5
--- a/components/openstack/nova/files/solariszones/driver.py   (4584:a5e61533b5ac)
+++ b/components/openstack/nova/files/solariszones/driver.py   (4585:86590f6eefcb)
@@ -48,10 +48,11 @@
 from nova import context as nova_context
 from nova import exception
 from nova.i18n import _
 from nova.image import glance
 from nova.network import neutronv2
+from nova import objects
 from nova.objects import flavor as flavor_obj
 from nova.openstack.common import fileutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
 from nova.openstack.common import loopingcall
@@ -841,14 +842,12 @@
                                   "set on flavor for instance '%s'")
                                 % (prop, name))
                     continue
                 zc.setprop('global', prop, value)
 
-    def _connect_boot_volume(self, context, instance, extra_specs):
-        """Provision a (Cinder) volume service backed boot volume"""
-        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
-        connection_info = None
+    def _create_boot_volume(self, context, instance):
+        """Create a (Cinder) volume service backed boot volume"""
         try:
             vol = self._volume_api.create(
                 context,
                 instance['root_gb'],
                 instance['display_name'] + "-rootzpool",
@@ -857,100 +856,72 @@
             # TODO(npower): Polling is what nova/compute/manager also does when
             # creating a new volume, so we do likewise here.
             while True:
                 volume = self._volume_api.get(context, vol['id'])
                 if volume['status'] != 'creating':
-                    break
+                    return volume
                 greenthread.sleep(1)
 
         except Exception as reason:
             LOG.error(_("Unable to create root zpool volume for instance '%s'"
                         ": %s") % (instance['name'], reason))
             raise
 
+    def _connect_boot_volume(self, volume, mountpoint, context, instance,
+                             extra_specs):
+        """Connect a (Cinder) volume service backed boot volume"""
+        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
         instance_uuid = instance['uuid']
         volume_id = volume['id']
-        # TODO(npower): Adequate for default boot device. We currently
-        # ignore this value, but cinder gets stroppy about this if we set it to
-        # None
-        mountpoint = "c1d0"
 
-        try:
-            connector = self.get_volume_connector(instance)
-            connection_info = self._volume_api.initialize_connection(
-                context, volume_id, connector)
-            # Check connection_info to determine if the provided volume is
-            # local to this compute node. If it is, then don't use it for
-            # Solaris branded zones in order to avoid a know ZFS deadlock issue
-            # when using a zpool within another zpool on the same system.
-            delete_boot_volume = False
-            if brand == ZONE_BRAND_SOLARIS:
-                driver_type = connection_info['driver_volume_type']
-                if driver_type == 'local':
-                    LOG.warning(_("Detected 'local' zvol driver volume type "
-                                  "from volume service, which should not be "
-                                  "used as a boot device for 'solaris' "
-                                  "branded zones."))
-                    delete_boot_volume = True
-                elif driver_type == 'iscsi':
-                    # Check for a potential loopback iSCSI situation
-                    data = connection_info['data']
-                    target_portal = data['target_portal']
-                    # Strip off the port number (eg. 127.0.0.1:3260)
-                    host = target_portal.rsplit(':', 1)
-                    # Strip any enclosing '[' and ']' brackets for
-                    # IPV6 addresses.
-                    target_host = host[0].strip('[]')
-
-                    # Check if target_host is an IP or hostname matching the
-                    # connector host or IP, which would mean the provisioned
-                    # iSCSI LUN is on the same host as the instance.
-                    if target_host in [connector['ip'], connector['host']]:
-                        LOG.warning(_("iSCSI connection info from volume "
-                                      "service indicates that the target is a "
-                                      "local volume, which should not be used "
-                                      "as a boot device for 'solaris' branded "
-                                      "zones."))
-                        delete_boot_volume = True
-                # Assuming that fibre_channel is non-local
-                elif driver_type != 'fibre_channel':
-                    # Some other connection type that we don't understand
-                    # Let zone use some local fallback instead.
-                    LOG.warning(_("Unsupported volume driver type '%s' "
-                                  "can not be used as a boot device for "
-                                  "'solaris' branded zones."))
-                    delete_boot_volume = True
-
-            if delete_boot_volume:
-                LOG.warning(_("Volume '%s' is being discarded") % volume['id'])
-                self._volume_api.delete(context, volume_id)
-                return None
-
-            # Notify Cinder DB of the volume attachment.
-            self._volume_api.attach(context, volume_id, instance_uuid,
-                                    mountpoint)
-            values = {
-                'instance_uuid': instance['uuid'],
-                'connection_info': jsonutils.dumps(connection_info),
-                # TODO(npower): device_name also ignored currently, but Cinder
-                # breaks without it. Figure out a sane mapping scheme.
-                'device_name': mountpoint,
-                'delete_on_termination': True,
-                'virtual_name': None,
-                'snapshot_id': None,
-                'volume_id': volume_id,
-                'volume_size': instance['root_gb'],
-                'no_device': None}
-            self._conductor_api.block_device_mapping_update_or_create(context,
-                                                                      values)
-
-        except Exception as reason:
-            LOG.error(_("Unable to attach root zpool volume '%s' to instance "
-                        "%s: %s") % (volume['id'], instance['name'], reason))
-            self._volume_api.detach(context, volume_id)
-            self._volume_api.delete(context, volume_id)
-            raise
+        connector = self.get_volume_connector(instance)
+        connection_info = self._volume_api.initialize_connection(
+            context, volume_id, connector)
+
+        # Check connection_info to determine if the provided volume is
+        # local to this compute node. If it is, then don't use it for
+        # Solaris branded zones in order to avoid a known ZFS deadlock issue
+        # when using a zpool within another zpool on the same system.
+        if brand == ZONE_BRAND_SOLARIS:
+            driver_type = connection_info['driver_volume_type']
+            if driver_type == 'local':
+                msg = _("Detected 'local' zvol driver volume type "
+                        "from volume service, which should not be "
+                        "used as a boot device for 'solaris' "
+                        "branded zones.")
+                raise exception.InvalidVolume(reason=msg)
+            elif driver_type == 'iscsi':
+                # Check for a potential loopback iSCSI situation
+                data = connection_info['data']
+                target_portal = data['target_portal']
+                # Strip off the port number (eg. 127.0.0.1:3260)
+                host = target_portal.rsplit(':', 1)
+                # Strip any enclosing '[' and ']' brackets for
+                # IPV6 addresses.
+                target_host = host[0].strip('[]')
+
+                # Check if target_host is an IP or hostname matching the
+                # connector host or IP, which would mean the provisioned
+                # iSCSI LUN is on the same host as the instance.
+                if target_host in [connector['ip'], connector['host']]:
+                    msg = _("iSCSI connection info from volume "
+                            "service indicates that the target is a "
+                            "local volume, which should not be used "
+                            "as a boot device for 'solaris' branded "
+                            "zones.")
+                    raise exception.InvalidVolume(reason=msg)
+            # Assuming that fibre_channel is non-local
+            elif driver_type != 'fibre_channel':
+                # Some other connection type that we don't understand
+                # Let zone use some local fallback instead.
+                msg = _("Unsupported volume driver type '%s' can not be used "
+                        "as a boot device for zones." % driver_type)
+                raise exception.InvalidVolume(reason=msg)
+
+        # Volume looks OK to use. Notify Cinder of the attachment.
+        self._volume_api.attach(context, volume_id, instance_uuid,
+                                mountpoint)
         return connection_info
 
     def _set_boot_device(self, name, connection_info, brand):
         """Set the boot device specified by connection_info"""
         zone = self._get_zone_by_name(name)
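
The change above splits the old all-in-one _connect_boot_volume() in two: _create_boot_volume() asks Cinder for a root-zpool volume and polls until it leaves the 'creating' state, while the new _connect_boot_volume() rejects a volume that turns out to be local to the compute node by raising exception.InvalidVolume, leaving the cleanup decision to the caller where the old code deleted the volume itself and returned None. The locality test is the subtle part, so here is a condensed, standalone sketch of it together with the polling loop; the function names, the plain-dict arguments and time.sleep() (the driver itself uses greenthread.sleep()) are illustrative, not part of the driver.

import time


def wait_for_volume(volume_api, context, volume_id, interval=1):
    # Poll Cinder until the new volume leaves the 'creating' state,
    # mirroring the loop in _create_boot_volume() above.
    while True:
        volume = volume_api.get(context, volume_id)
        if volume['status'] != 'creating':
            return volume
        time.sleep(interval)


def is_local_boot_volume(connection_info, connector):
    # connection_info: result of volume_api.initialize_connection()
    # connector:       result of the driver's get_volume_connector()
    driver_type = connection_info['driver_volume_type']
    if driver_type == 'local':
        # A 'local' zvol is by definition on this compute node.
        return True
    if driver_type == 'iscsi':
        # target_portal looks like '127.0.0.1:3260' or '[::1]:3260';
        # drop the port, then any IPv6 brackets, before comparing it
        # against the connector's own address and hostname.
        target_portal = connection_info['data']['target_portal']
        target_host = target_portal.rsplit(':', 1)[0].strip('[]')
        return target_host in (connector['ip'], connector['host'])
    # fibre_channel is assumed non-local; the real method also raises
    # InvalidVolume for any other, unrecognized driver type.
    return False


# Example: a loopback portal resolves to the connector's own address.
print(is_local_boot_volume(
    {'driver_volume_type': 'iscsi',
     'data': {'target_portal': '127.0.0.1:3260'}},
    {'ip': '127.0.0.1', 'host': 'compute-1'}))    # True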
@@ -1395,10 +1366,14 @@
         name = instance['name']
         zone = self._get_zone_by_name(name)
         if zone is None:
             raise exception.InstanceNotFound(instance_id=name)
 
+        if zone.state == ZONE_STATE_CONFIGURED:
+            LOG.debug(_("Uninstall not required for zone '%s' in state '%s'")
+                      % (name, zone.state))
+            return
         try:
             zone.uninstall(['-F'])
         except Exception as reason:
             LOG.error(_("Unable to uninstall root file system for instance "
                         "'%s' via zonemgr(3RAD): %s") % (name, reason))
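
The new early return makes _uninstall() a no-op for a zone that never got past configuration, which matters because the spawn() error path in the next hunk calls it unconditionally. A one-function sketch of the same guard; the string 'configured' is only an assumed value for the driver's ZONE_STATE_CONFIGURED constant.

ZONE_STATE_CONFIGURED = 'configured'    # assumed value of the driver constant


def needs_uninstall(zone):
    # A zone still in the 'configured' state was never installed, so
    # there is no root file system for an uninstall to tear down.
    return zone.state != ZONE_STATE_CONFIGURED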
@@ -1454,12 +1429,34 @@
         sc_dir = tempfile.mkdtemp(prefix="nova-sysconfig-",
                                   dir=CONF.state_path)
         os.chmod(sc_dir, 0755)
 
         # Attempt to provision a (Cinder) volume service backed boot volume
-        connection_info = self._connect_boot_volume(context, instance,
-                                                    extra_specs)
+        volume = self._create_boot_volume(context, instance)
+        volume_id = volume['id']
+        # c1d0 is the standard dev for for default boot device.
+        # Irrelevant value for ZFS, but Cinder gets stroppy without it.
+        mountpoint = "c1d0"
+        try:
+            connection_info = self._connect_boot_volume(volume, mountpoint,
+                                                        context, instance,
+                                                        extra_specs)
+        except exception.InvalidVolume as badvol:
+            # This Cinder volume is not usable for ZOSS so discard it.
+            # zonecfg will apply default zonepath dataset configuration
+            # instead. Carry on
+            LOG.warning(_("Volume '%s' is being discarded: %s")
+                        % (volume_id, badvol))
+            self._volume_api.delete(context, volume_id)
+            connection_info = None
+        except Exception as reason:
+            # Something really bad happened. Don't pass Go.
+            LOG.error(_("Unable to attach root zpool volume '%s' to instance "
+                        "%s: %s") % (volume['id'], instance['name'], reason))
+            self._volume_api.delete(context, volume_id)
+            raise
+
         name = instance['name']
 
         LOG.debug(_("creating zone configuration for '%s' (%s)") %
                   (name, instance['display_name']))
         self._create_config(context, instance, network_info,
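
Taken together, the reworked spawn() sequence is: create the Cinder volume, try to connect it, and on InvalidVolume (the "volume is local" case rejected above) discard it and fall back to the default zonepath dataset, while any other failure deletes the volume and aborts the spawn. A schematic sketch of that flow as a free-standing function; "driver" is assumed to expose the same _create_boot_volume, _connect_boot_volume and _volume_api attributes used in this change, and the function name is illustrative.

from nova import exception


def setup_root_zpool(driver, context, instance, extra_specs, log):
    # Sketch of spawn()'s boot-volume handling, not the driver's own method.
    volume = driver._create_boot_volume(context, instance)
    volume_id = volume['id']
    mountpoint = "c1d0"    # placeholder device name; Cinder requires one
    try:
        connection_info = driver._connect_boot_volume(
            volume, mountpoint, context, instance, extra_specs)
    except exception.InvalidVolume as badvol:
        # Unusable for ZOSS (e.g. local to this node): drop the volume and
        # let zonecfg fall back to the default zonepath dataset.
        log.warning("Volume '%s' is being discarded: %s" % (volume_id, badvol))
        driver._volume_api.delete(context, volume_id)
        connection_info = None
    except Exception:
        # Anything else is fatal: clean up the volume and re-raise.
        driver._volume_api.delete(context, volume_id)
        raise
    return connection_info, volume_id, mountpoint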
@@ -1469,12 +1466,27 @@
             self._power_on(instance)
         except Exception as reason:
             LOG.error(_("Unable to spawn instance '%s' via zonemgr(3RAD): %s")
                       % (name, reason))
             self._uninstall(instance)
+            if connection_info:
+                self._volume_api.detach(context, volume_id)
+                self._volume_api.delete(context, volume_id)
             self._delete_config(instance)
             raise
 
+        if connection_info:
+            bdm = objects.BlockDeviceMapping(
+                    source_type='volume', destination_type='volume',
+                    instance_uuid=instance.uuid,
+                    volume_id=volume_id,
+                    connection_info=jsonutils.dumps(connection_info),
+                    device_name=mountpoint,
+                    delete_on_termination=True,
+                    volume_size=instance['root_gb'])
+            bdm.create(context)
+            bdm.save()
+
     def _power_off(self, instance, halt_type):
         """Power off a Solaris Zone."""
         name = instance['name']
         zone = self._get_zone_by_name(name)
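
The last hunk is what the new "from nova import objects" import is for: instead of pushing a plain values dict through self._conductor_api.block_device_mapping_update_or_create(), the attachment is now recorded as a versioned BlockDeviceMapping object, and the old dict's virtual_name, snapshot_id and no_device fields drop out along the way. A minimal sketch of that record, reusing the same fields shown above; the wrapper function itself is illustrative.

from nova import objects
from nova.openstack.common import jsonutils


def record_root_volume_bdm(context, instance, volume_id, connection_info,
                           mountpoint):
    # Persist the root-zpool attachment as a versioned BDM object,
    # mirroring the block at the end of spawn() above.
    bdm = objects.BlockDeviceMapping(
        source_type='volume', destination_type='volume',
        instance_uuid=instance.uuid,
        volume_id=volume_id,
        connection_info=jsonutils.dumps(connection_info),
        device_name=mountpoint,
        delete_on_termination=True,
        volume_size=instance['root_gb'])
    bdm.create(context)
    bdm.save()
    return bdm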