components/openstack/nova/files/solariszones/driver.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Driver for Solaris Zones (nee Containers):
"""

import glob
import os
import platform
import shutil
import tempfile
import uuid

import rad.bindings.com.oracle.solaris.rad.kstat as kstat
import rad.bindings.com.oracle.solaris.rad.zonesbridge as zonesbridge
import rad.bindings.com.oracle.solaris.rad.zonemgr as zonemgr
import rad.client
import rad.connect
from solaris_install.archive.checkpoints import InstantiateUnifiedArchive
from solaris_install.archive import LOGFILE as ARCHIVE_LOGFILE
from solaris_install.archive import UnifiedArchive
from solaris_install.engine import InstallEngine

from eventlet import greenthread
from lxml import etree
from oslo.config import cfg

from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import conductor
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.network import quantumv2
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import images
from nova.virt.solariszones import sysconfig
from nova import volume

solariszones_opts = [
    cfg.StrOpt('glancecache_dirname',
               default='$state_path/images',
               help='Default path to Glance cache for Solaris Zones.'),
    cfg.StrOpt('solariszones_snapshots_directory',
               default='$instances_path/snapshots',
               help='Location where the solariszones driver will store '
                    'snapshots before uploading them to the Glance image '
                    'service.'),
]
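# Both options are registered in the default option group; they can be
# overridden in nova.conf, e.g. (sketch, showing the defaults):
#
#     [DEFAULT]
#     glancecache_dirname = $state_path/images
#     solariszones_snapshots_directory = $instances_path/snapshots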

CONF = cfg.CONF
CONF.register_opts(solariszones_opts)
LOG = logging.getLogger(__name__)

# These should match the strings returned by the zone_state_str()
# function in the (private) libzonecfg library. These values are in turn
# returned in the 'state' string of the Solaris Zones' RAD interface by
# the zonemgr(3RAD) provider.
ZONE_STATE_CONFIGURED = 'configured'
ZONE_STATE_INCOMPLETE = 'incomplete'
ZONE_STATE_UNAVAILABLE = 'unavailable'
ZONE_STATE_INSTALLED = 'installed'
ZONE_STATE_READY = 'ready'
ZONE_STATE_RUNNING = 'running'
ZONE_STATE_SHUTTING_DOWN = 'shutting_down'
ZONE_STATE_DOWN = 'down'
ZONE_STATE_MOUNTED = 'mounted'

# Mapping between zone state and Nova power_state.
SOLARISZONES_POWER_STATE = {
    ZONE_STATE_CONFIGURED:      power_state.NOSTATE,
    ZONE_STATE_INCOMPLETE:      power_state.BUILDING,
    ZONE_STATE_UNAVAILABLE:     power_state.NOSTATE,
    ZONE_STATE_INSTALLED:       power_state.SHUTDOWN,
    ZONE_STATE_READY:           power_state.RUNNING,
    ZONE_STATE_RUNNING:         power_state.RUNNING,
    ZONE_STATE_SHUTTING_DOWN:   power_state.RUNNING,
    ZONE_STATE_DOWN:            power_state.RUNNING,
    ZONE_STATE_MOUNTED:         power_state.NOSTATE
}

# Solaris Zones brands as defined in brands(5).
ZONE_BRAND_LABELED = 'labeled'
ZONE_BRAND_SOLARIS = 'solaris'
ZONE_BRAND_SOLARIS_KZ = 'solaris-kz'
ZONE_BRAND_SOLARIS10 = 'solaris10'

# Mapping between supported zone brands and the name of the corresponding
# brand template.
ZONE_BRAND_TEMPLATE = {
    ZONE_BRAND_SOLARIS:         'SYSdefault',
    ZONE_BRAND_SOLARIS_KZ:      'SYSsolaris-kz',
}
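# Only brands present in ZONE_BRAND_TEMPLATE can be provisioned;
# _create_config() below raises NovaException for any other requested brand
# (e.g. 'labeled' or 'solaris10').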

MAX_CONSOLE_BYTES = 102400


def lookup_resource_property(zone, resource, prop, filter=None):
    """Lookup specified property from specified Solaris Zone resource."""
    try:
        val = zone.getResourceProperties(zonemgr.Resource(resource, filter),
                                         [prop])
    except rad.client.ObjectError:
        return None
    except Exception:
        raise
    return val[0].value if val else None


def lookup_resource_property_value(zone, resource, prop, value):
    """Lookup specified property with value from specified Solaris Zone
       resource. Returns property if matching value is found, else None
    """
    try:
        resources = zone.getResources(zonemgr.Resource(resource))
        for resource in resources:
            for propertee in resource.properties:
                if propertee.name == prop and propertee.value == value:
                    return propertee
        return None
    except rad.client.ObjectError:
        return None
    except Exception:
        raise


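# Usage sketch for the ZoneConfig context manager defined below: changes made
# inside the 'with' block are committed on a clean exit and cancelled if the
# body raises, e.g.:
#
#     with ZoneConfig(zone) as zc:
#         zc.setprop('capped-memory', 'physical', '2048M')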
class ZoneConfig(object):
    """ZoneConfig - context manager for accessing zone configurations.
    Automatically opens the configuration for a zone and commits any changes
    before exiting.
    """
    def __init__(self, zone):
        """ zone is a zonemgr object representing either a kernel zone or
        non-global zone.
        """
        self.zone = zone
        self.editing = False

    def __enter__(self):
        """ enables the editing of the zone.
        """
        try:
            self.zone.editConfig()
            self.editing = True
            return self
        except rad.client.ObjectError as err:
            LOG.error(_("Unable to initialize editing of instance '%s' via "
                        "zonemgr(3RAD): %s") % (self.zone.name, err))
            raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        """ looks for any kind of exception before exiting.  If one is found,
        cancel any configuration changes and reraise the exception.  If not,
        commit the new configuration.
        """
        if exc_type is not None and self.editing:
            # We received some kind of exception.  Cancel the config and raise.
            self.zone.cancelConfig()
            raise
        else:
            # commit the config
            try:
                self.zone.commitConfig()
            except rad.client.ObjectError as err:
                LOG.error(_("Unable to commit the new configuration for "
                            "instance '%s' via zonemgr(3RAD): %s")
                          % (self.zone.name, err))
                raise

    def setprop(self, resource, prop, value):
        """ sets a property for an existing resource OR creates a new resource
        with the given property(s).
        """
        current = lookup_resource_property(self.zone, resource, prop)
        if current is not None and current == value:
            # the value is already set
            return

        try:
            if current is None:
                self.zone.addResource(zonemgr.Resource(
                    resource, [zonemgr.Property(prop, value)]))
            else:
                self.zone.setResourceProperties(
                    zonemgr.Resource(resource),
                    [zonemgr.Property(prop, value)])
        except rad.client.ObjectError as err:
            LOG.error(_("Unable to set '%s' property on '%s' resource for "
                        "instance '%s' via zonemgr(3RAD): %s")
                      % (prop, resource, self.zone.name, err))
            raise

    def addresource(self, resource, props=None):
        """ creates a new resource with an optional property list.
        """
        if props is None:
            props = []

        try:
            self.zone.addResource(zonemgr.Resource(resource, props))
        except rad.client.ObjectError as err:
            LOG.error(_("Unable to create new resource '%s' for instance "
                        "'%s' via zonemgr(3RAD): %s")
                      % (resource, self.zone.name, err))
            raise

    def removeresources(self, resource, props=None):
        """ removes resources whose properties include the optional property
            list specified in props.
        """
        if props is None:
            props = []

        try:
            self.zone.removeResources(zonemgr.Resource(resource, props))
        except rad.client.ObjectError as err:
            LOG.error(_("Unable to remove resource '%s' for instance '%s' via "
                        "zonemgr(3RAD): %s") % (resource, self.zone.name, err))
            raise


class SolarisZonesDriver(driver.ComputeDriver):
    """Solaris Zones Driver using the zonemgr(3RAD) and kstat(3RAD) providers.

    The interface to this class talks in terms of 'instances' (Amazon EC2 and
    internal Nova terminology), by which we mean 'running virtual machine'
    (XenAPI terminology) or domain (Xen or libvirt terminology).

    An instance has an ID, which is the identifier chosen by Nova to represent
    the instance further up the stack.  This is unfortunately also called a
    'name' elsewhere.  As far as this layer is concerned, 'instance ID' and
    'instance name' are synonyms.

    Note that the instance ID or name is not human-readable or
    customer-controlled -- it's an internal ID chosen by Nova.  At the
    nova.virt layer, instances do not have human-readable names at all -- such
    things are only known higher up the stack.

    Most virtualization platforms will also have their own identity schemes,
    to uniquely identify a VM or domain.  These IDs must stay internal to the
    platform-specific layer, and never escape the connection interface.  The
    platform-specific layer is responsible for keeping track of which instance
    ID maps to which platform-specific ID, and vice versa.

    Some methods here take an instance of nova.compute.service.Instance.  This
    is the data structure used by nova.compute to store details regarding an
    instance, and pass them into this layer.  This layer is responsible for
    translating that generic data structure into terms that are specific to the
    virtualization platform.

    """

    capabilities = {
        "has_imagecache": False,
        "supports_recreate": False,
    }

    def __init__(self, virtapi):
        self.virtapi = virtapi
        self._compute_event_callback = None
        self._conductor_api = conductor.API()
        self._host_stats = {}
        self._initiator = None
        self._install_engine = None
        self._pagesize = os.sysconf('SC_PAGESIZE')
        self._uname = os.uname()
        self._validated_archives = list()
        self._volume_api = volume.API()

    def _init_rad(self):
        """Connect to RAD providers for kernel statistics and Solaris
        Zones. By connecting to the local rad(1M) service through a
        UNIX domain socket, kernel statistics can be read via
        kstat(3RAD) and Solaris Zones can be configured and controlled
        via zonemgr(3RAD).
        """

        # TODO(dcomay): Arrange to call this in the event of losing the
        # connection to RAD.
        try:
            self._rad_instance = rad.connect.connect_unix()
            self._kstat_control = self._rad_instance.get_object(
                kstat.Control())
        except Exception as reason:
            msg = (_('Unable to connect to svc:/system/rad:local: %s')
                   % reason)
            raise exception.NovaException(msg)

    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function,
        including catching up with currently running VMs on the given host."""
        # TODO(Vek): Need to pass context in for access to auth_token

        self._init_rad()

    def _get_iscsi_initiator(self):
        """ Return the iSCSI initiator node name IQN for this host """
        out, err = utils.execute('/usr/sbin/iscsiadm', 'list',
                                 'initiator-node')
        # Sample first line of command output:
        # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217
        initiator_name_line = out.splitlines()[0]
        initiator_iqn = initiator_name_line.rsplit(' ', 1)[1]
        return initiator_iqn

    def _get_zone_by_name(self, name):
        """Return a Solaris Zones object via RAD by name."""
        try:
            zone = self._rad_instance.get_object(
                zonemgr.Zone(), rad.client.ADRGlobPattern({'name': name}))
        except rad.client.NotFoundError:
            return None
        except Exception:
            raise

        return zone

    def _get_state(self, zone):
        """Return the running state, one of the power_state codes."""
        return SOLARISZONES_POWER_STATE[zone.state]

    def _pages_to_kb(self, pages):
        """Convert a number of pages of memory into a total size in KBytes."""
        return (pages * self._pagesize) / 1024

    def _get_max_mem(self, zone):
        """Return the maximum memory in KBytes allowed."""
        max_mem = lookup_resource_property(zone, 'capped-memory', 'physical')
        if max_mem is not None:
            return utils.to_bytes(max_mem) / 1024

        # If physical property in capped-memory doesn't exist, this may
        # represent a non-global zone so just return the system's total
        # memory.
        return self._pages_to_kb(os.sysconf('SC_PHYS_PAGES'))

    def _get_mem(self, zone):
        """Return the memory in KBytes used by the domain."""

        # There isn't any way of determining this from the hypervisor
        # perspective in Solaris, so just return the _get_max_mem() value
        # for now.
        return self._get_max_mem(zone)

    def _get_num_cpu(self, zone):
        """Return the number of virtual CPUs for the domain.

        In the case of kernel zones, the number of virtual CPUs a zone
        ends up with depends on whether or not there were 'virtual-cpu'
        or 'dedicated-cpu' resources in the configuration or whether
        there was an assigned pool in the configuration. This algorithm
        attempts to emulate what the virtual platform code does to
        determine a number of virtual CPUs to use.
        """

        # If a 'virtual-cpu' resource exists, use the minimum number of
        # CPUs defined there.
        ncpus = lookup_resource_property(zone, 'virtual-cpu', 'ncpus')
        if ncpus is not None:
            min = ncpus.split('-', 1)[0]
            if min.isdigit():
                return int(min)

        # Otherwise if a 'dedicated-cpu' resource exists, use the maximum
        # number of CPUs defined there.
        ncpus = lookup_resource_property(zone, 'dedicated-cpu', 'ncpus')
        if ncpus is not None:
            max = ncpus.split('-', 1)[-1]
            if max.isdigit():
                return int(max)

        # Finally if neither resource exists but the zone was assigned a
        # pool in the configuration, the number of CPUs would be the size
        # of the processor set. Currently there's no way of easily
        # determining this so use the system's notion of the total number
        # of online CPUs.
        return os.sysconf('SC_NPROCESSORS_ONLN')

    def _get_kstat_by_name(self, kstat_class, module, instance, name):
        """Return Kstat snapshot data via RAD as a dictionary."""
        pattern = {
            'class':    kstat_class,
            'module':   module,
            'instance': instance,
            'name':     name
        }
        try:
            self._kstat_control.update()
            kstat_object = self._rad_instance.get_object(
                kstat.Kstat(), rad.client.ADRGlobPattern(pattern))
        except Exception as reason:
            LOG.warning(_("Unable to retrieve kstat object '%s:%s:%s' of "
                          "class '%s' via kstat(3RAD): %s")
                        % (module, instance, name, kstat_class, reason))
            return None

        kstat_data = {}
        for named in kstat_object.fresh_snapshot().data.NAMED:
            kstat_data[named.name] = getattr(named.value,
                                             str(named.value.discriminant))

        return kstat_data
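
    # Example (sketch): _get_kstat_by_name('zones', 'cpu', '1',
    # 'sys_zone_aggr') returns the named kstat values as a dict, e.g. the
    # 'cpu_nsec_kernel' and 'cpu_nsec_user' counters consumed by
    # _get_cpu_time() below.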

    def _get_cpu_time(self, zone):
        """Return the CPU time used in nanoseconds."""
        if zone.id == -1:
            return 0

        kstat_data = self._get_kstat_by_name('zones', 'cpu', str(zone.id),
                                             'sys_zone_aggr')
        if kstat_data is None:
            return 0

        return kstat_data['cpu_nsec_kernel'] + kstat_data['cpu_nsec_user']

    def get_info(self, instance):
        """Get the current status of an instance, by name (not ID!)

        Returns a dict containing:

        :state:           the running state, one of the power_state codes
        :max_mem:         (int) the maximum memory in KBytes allowed
        :mem:             (int) the memory in KBytes used by the domain
        :num_cpu:         (int) the number of virtual CPUs for the domain
        :cpu_time:        (int) the CPU time used in nanoseconds
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        name = instance['name']
        zone = self._get_zone_by_name(name)
        if zone is None:
            LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)")
                      % name)
            raise exception.InstanceNotFound(instance_id=name)

        return {
            'state':    self._get_state(zone),
            'max_mem':  self._get_max_mem(zone),
            'mem':      self._get_mem(zone),
            'num_cpu':  self._get_num_cpu(zone),
            'cpu_time': self._get_cpu_time(zone)
        }

    def get_num_instances(self):
        """Return the total number of virtual machines.

        Return the number of virtual machines that the hypervisor knows
        about.

        .. note::

            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.
        """
        return len(self.list_instances())

    def instance_exists(self, instance_id):
        """Checks existence of an instance on the host.

        :param instance_id: The ID / name of the instance to lookup

        Returns True if an instance with the supplied ID exists on
        the host, False otherwise.

        .. note::

            This implementation works for all drivers, but it is
            not particularly efficient. Maintainers of the virt drivers are
            encouraged to override this method with something more
            efficient.
        """
        return instance_id in self.list_instances()

    def _get_list_zone_object(self):
        """Return a list of all Solaris Zones objects via RAD."""
        return self._rad_instance.list_objects(zonemgr.Zone())

    def list_instances(self):
        """
        Return the names of all the instances known to the virtualization
        layer, as a list.
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        instances_list = []
        for zone in self._get_list_zone_object():
            instances_list.append(self._rad_instance.get_object(zone).name)

        return instances_list

    def list_instance_uuids(self):
        """
        Return the UUIDs of all the instances known to the virtualization
        layer, as a list.
        """
        raise NotImplementedError()

    def _fetch_image(self, context, instance):
        """Fetch an image using Glance given the instance's image_ref."""
        glancecache_dirname = CONF.glancecache_dirname
        fileutils.ensure_tree(glancecache_dirname)
        image = ''.join([glancecache_dirname, '/', instance['image_ref']])
        if os.path.exists(image):
            LOG.debug(_("Using existing, cached Glance image: id %s")
                      % instance['image_ref'])
            return image

        LOG.debug(_("Fetching new Glance image: id %s")
                  % instance['image_ref'])
        try:
            images.fetch(context, instance['image_ref'], image,
                         instance['user_id'], instance['project_id'])
        except Exception as reason:
            LOG.error(_("Unable to fetch Glance image: id %s: %s")
                      % (instance['image_ref'], reason))
            raise

        return image
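
    # Cached images live directly under CONF.glancecache_dirname, keyed by
    # image_ref; e.g. (sketch) $state_path/images/<image_ref>.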

    def _validate_image(self, image, instance):
        """Validate a glance image for compatibility with the instance"""
        # Skip if the image was already checked and confirmed as valid
        if instance['image_ref'] in self._validated_archives:
            return

        if self._install_engine is None:
            self._install_engine = InstallEngine(ARCHIVE_LOGFILE)

        try:
            init_ua_cp = InstantiateUnifiedArchive(instance['image_ref'],
                                                   image)
            init_ua_cp.execute()
        except Exception:
            reason = (_("Image query failed. Possibly invalid or corrupt. "
                        "Log file location: %s:%s")
                      % (self._uname[1], ARCHIVE_LOGFILE))
            LOG.error(reason)
            raise exception.ImageUnacceptable(image_id=instance['image_ref'],
                                              reason=reason)

        try:
            ua = self._install_engine.doc.volatile.get_first_child(
                class_type=UnifiedArchive)
            # Validate the image at this point to ensure:
            # - contains one deployable system
            deployables = ua.archive_objects
            if len(deployables) != 1:
                reason = (_('Image must contain only 1 deployable system'))
                raise exception.ImageUnacceptable(
                    image_id=instance['image_ref'],
                    reason=reason)
            # - matching architecture
            deployable_arch = deployables[0].system.arch
            compute_arch = platform.processor()
            if deployable_arch != compute_arch:
                reason = (_('Image architecture "%s" is incompatible with '
                            'this compute host architecture: "%s"')
                          % (deployable_arch, compute_arch))
                raise exception.ImageUnacceptable(
                    image_id=instance['image_ref'],
                    reason=reason)
            # - single root pool only
            streams = deployables[0].zfs_streams
            stream_pools = set(stream.zpool for stream in streams)
            if len(stream_pools) > 1:
                reason = (_('Image contains more than one zpool: "%s"')
                          % (stream_pools))
                raise exception.ImageUnacceptable(
                    image_id=instance['image_ref'],
                    reason=reason)
            # - looks like it's OK
            self._validated_archives.append(instance['image_ref'])
        finally:
            # Clear the reference to the UnifiedArchive object in the engine
            # data cache to avoid collision with the next checkpoint execution.
            self._install_engine.doc.volatile.delete_children(
                class_type=UnifiedArchive)

    def _suri_from_volume_info(self, connection_info):
        """Returns a suri(5) formatted string based on connection_info
           Currently supports local ZFS volume and iSCSI driver types.
        """
        driver_type = connection_info['driver_volume_type']
        if driver_type not in ['iscsi', 'local']:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        if driver_type == 'local':
            suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path']
        elif driver_type == 'iscsi':
            data = connection_info['data']
            # suri(5) format:
            #       iscsi://<host>[:<port>]/target.<IQN>,lun.<LUN>
            # Sample iSCSI connection data values:
            # target_portal: 192.168.1.244:3260
            # target_iqn: iqn.2010-10.org.openstack:volume-a89c.....
            # target_lun: 1
            suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'],
                                                    data['target_iqn'],
                                                    data['target_lun'])
            # TODO(npower): need to handle CHAP authentication also

        return suri
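
    # Example (sketch): with the sample iSCSI connection data shown above,
    # the resulting suri(5) string would be:
    #     iscsi://192.168.1.244:3260/target.iqn.2010-10.org.openstack:volume-a89c.....,lun.1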

    def _set_global_properties(self, name, extra_specs, brand):
        """Set Solaris Zone's global properties if supplied via flavor."""
        zone = self._get_zone_by_name(name)
        if zone is None:
            raise exception.InstanceNotFound(instance_id=name)

        # TODO(dcomay): Should figure this out via the brands themselves.
        zonecfg_items = [
            'bootargs',
            'brand',
            'hostid'
        ]
        if brand == ZONE_BRAND_SOLARIS:
            zonecfg_items.extend(
                ['file-mac-profile', 'fs-allowed', 'limitpriv'])

        with ZoneConfig(zone) as zc:
            for key, value in extra_specs.iteritems():
                # Ignore not-zonecfg-scoped brand properties.
                if not key.startswith('zonecfg:'):
                    continue
                _scope, prop = key.split(':', 1)
                # Ignore the 'brand' property if present.
                if prop == 'brand':
                    continue
                # Ignore but warn about unsupported zonecfg-scoped properties.
                if prop not in zonecfg_items:
                    LOG.warning(_("Ignoring unsupported zone property '%s' "
                                  "set on flavor for instance '%s'")
                                % (prop, name))
                    continue
                zc.setprop('global', prop, value)
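
    # Example (sketch): a flavor carrying extra_specs
    # {'zonecfg:bootargs': '-v'} results in zc.setprop('global', 'bootargs',
    # '-v'); the 'zonecfg:brand' key is skipped, and any key outside
    # zonecfg_items is warned about and ignored.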

    def _connect_boot_volume(self, context, instance, extra_specs):
        """Provision a (Cinder) volume service backed boot volume"""
        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
        connection_info = None
        try:
            vol = self._volume_api.create(
                context,
                instance['root_gb'],
                instance['display_name'] + "-rootzpool",
                "Boot volume for instance '%s' (%s)"
                % (instance['name'], instance['uuid']))
            # TODO(npower): Polling is what nova/compute/manager also does when
            # creating a new volume, so we do likewise here.
            while True:
                volume = self._volume_api.get(context, vol['id'])
                if volume['status'] != 'creating':
                    break
                greenthread.sleep(1)

        except Exception as reason:
            LOG.error(_("Unable to create root zpool volume for instance "
                        "'%s': %s") % (instance['name'], reason))
            raise

        instance_uuid = instance['uuid']
        # TODO(npower): Adequate for default boot device. We currently
        # ignore this value, but cinder gets stroppy about this if we set it to
        # None
        mountpoint = "c1d0"

        try:
            connector = self.get_volume_connector(instance)
            connection_info = self._volume_api.initialize_connection(context,
                                                                     volume,
                                                                     connector)
            # Check connection_info to determine if the provided volume is
            # local to this compute node. If it is, then don't use it for
            # Solaris branded zones in order to avoid a known ZFS deadlock
            # issue when using a zpool within another zpool on the same system.
            delete_boot_volume = False
            if brand == ZONE_BRAND_SOLARIS:
                driver_type = connection_info['driver_volume_type']
                if driver_type == 'local':
                    LOG.warning(_("Detected 'local' zvol driver volume type "
                                "from volume service, which should not be "
                                "used as a boot device for 'solaris' branded "
                                "zones."))
                    delete_boot_volume = True
                elif driver_type == 'iscsi':
                    # Check for a potential loopback iSCSI situation
                    data = connection_info['data']
                    target_portal = data['target_portal']
                    # Strip off the port number (eg. 127.0.0.1:3260)
                    host = target_portal.rsplit(':', 1)
                    # Strip any enclosing '[' and ']' brackets for
                    # IPV6 addresses.
                    target_host = host[0].strip('[]')

                    # Check if target_host is an IP or hostname matching the
                    # connector host or IP, which would mean the provisioned
                    # iSCSI LUN is on the same host as the instance.
                    if target_host in [connector['ip'], connector['host']]:
                        LOG.warning(_("iSCSI connection info from volume "
                                    "service indicates that the target is a "
                                    "local volume, which should not be used "
                                    "as a boot device for 'solaris' branded "
                                    "zones."))
                        delete_boot_volume = True
                else:
                    # Some other connection type that we don't understand.
                    # Let the zone use some local fallback instead.
                    LOG.warning(_("Unsupported volume driver type '%s' "
                                "cannot be used as a boot device for "
                                "'solaris' branded zones.") % driver_type)
                    delete_boot_volume = True

            if delete_boot_volume:
                LOG.warning(_("Volume '%s' is being discarded") % volume['id'])
                self._volume_api.delete(context, volume)
                return None

            # Notify Cinder DB of the volume attachment.
            self._volume_api.attach(context, volume, instance_uuid, mountpoint)
            values = {
                'instance_uuid': instance['uuid'],
                'connection_info': jsonutils.dumps(connection_info),
                # TODO(npower): device_name also ignored currently, but Cinder
                # breaks without it. Figure out a sane mapping scheme.
                'device_name': mountpoint,
                'delete_on_termination': True,
                'virtual_name': None,
                'snapshot_id': None,
                'volume_id': volume['id'],
                'volume_size': instance['root_gb'],
                'no_device': None}
            self._conductor_api.block_device_mapping_update_or_create(context,
                                                                      values)

        except Exception as reason:
            LOG.error(_("Unable to attach root zpool volume '%s' to instance "
                        "%s: %s") % (volume['id'], instance['name'], reason))
            self._volume_api.detach(context, volume)
            self._volume_api.delete(context, volume)
            raise

        return connection_info

    def _set_boot_device(self, name, connection_info, brand):
        """Set the boot device specified by connection_info"""
        zone = self._get_zone_by_name(name)
        if zone is None:
            raise exception.InstanceNotFound(instance_id=name)

        suri = self._suri_from_volume_info(connection_info)

        with ZoneConfig(zone) as zc:
            # ZOSS device configuration is different for the solaris-kz brand
            if brand == ZONE_BRAND_SOLARIS_KZ:
                zc.zone.setResourceProperties(
                    zonemgr.Resource(
                        "device",
                        [zonemgr.Property("bootpri", "0")]),
                    [zonemgr.Property("storage", suri)])
            else:
                zc.addresource(
                    "rootzpool",
                    [zonemgr.Property("storage", listvalue=[suri])])

    def _set_num_cpu(self, name, vcpus, brand):
        """Set number of VCPUs in a Solaris Zone configuration."""
        zone = self._get_zone_by_name(name)
        if zone is None:
            raise exception.InstanceNotFound(instance_id=name)

        # The Solaris Zone brand type is used to specify the type of
        # 'cpu' resource set in the Solaris Zone configuration.
        if brand == ZONE_BRAND_SOLARIS:
            vcpu_resource = 'capped-cpu'
        else:
            vcpu_resource = 'virtual-cpu'

        # TODO(dcomay): Until 17881862 is resolved, this should be turned into
        # an appropriate 'rctl' resource for the 'capped-cpu' case.
        with ZoneConfig(zone) as zc:
            zc.setprop(vcpu_resource, 'ncpus', str(vcpus))

    def _set_memory_cap(self, name, memory_mb, brand):
        """Set memory cap in a Solaris Zone configuration."""
        zone = self._get_zone_by_name(name)
        if zone is None:
            raise exception.InstanceNotFound(instance_id=name)

        # The Solaris Zone brand type is used to specify the type of
        # 'memory' cap set in the Solaris Zone configuration.
        if brand == ZONE_BRAND_SOLARIS:
            mem_resource = 'swap'
        else:
            mem_resource = 'physical'

        with ZoneConfig(zone) as zc:
            zc.setprop('capped-memory', mem_resource, '%dM' % memory_mb)
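
    # Example (sketch): _set_memory_cap(name, 2048, ZONE_BRAND_SOLARIS) caps
    # 'swap' at '2048M' in the zone's capped-memory resource; for any other
    # brand the 'physical' property is capped instead.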

    def _set_network(self, context, name, instance, network_info, brand,
                     sc_dir):
        """ add networking information to the zone.
        """
        zone = self._get_zone_by_name(name)
        if zone is None:
            raise exception.InstanceNotFound(instance_id=name)

        tenant_id = None
        for netid, network in enumerate(network_info):
            if tenant_id is None:
                tenant_id = network['network']['meta']['tenant_id']
            network_uuid = network['network']['id']
            port_uuid = network['id']
            ip = network['network']['subnets'][0]['ips'][0]['address']
            ip_version = network['network']['subnets'][0]['version']
            route = network['network']['subnets'][0]['gateway']['address']
            dns_list = network['network']['subnets'][0]['dns']
            nameservers = []
            for dns in dns_list:
                if dns['type'] == 'dns':
                    nameservers.append(dns['address'])

            with ZoneConfig(zone) as zc:
                if netid == 0:
                    zc.setprop('anet', 'configure-allowed-address', 'false')
                    zc.setprop('anet', 'evs', network_uuid)
                    zc.setprop('anet', 'vport', port_uuid)
                else:
                    zc.addresource(
                        'anet',
                        [zonemgr.Property('configure-allowed-address',
                                          'false'),
                         zonemgr.Property('evs', network_uuid),
                         zonemgr.Property('vport', port_uuid)])

                filter = [zonemgr.Property('vport', port_uuid)]
                if brand == ZONE_BRAND_SOLARIS:
                    linkname = lookup_resource_property(zc.zone, 'anet',
                                                        'linkname', filter)
                else:
                    id = lookup_resource_property(zc.zone, 'anet', 'id',
                                                  filter)
                    linkname = 'net%s' % id

            # create the required sysconfig file
            network_plugin = quantumv2.get_client(context)
            port = network_plugin.show_port(port_uuid)['port']
            subnet_uuid = port['fixed_ips'][0]['subnet_id']
            subnet = network_plugin.show_subnet(subnet_uuid)['subnet']

            if subnet['enable_dhcp']:
                tree = sysconfig.create_ncp_defaultfixed('dhcp', linkname,
                                                         netid, ip_version)
            else:
                tree = sysconfig.create_ncp_defaultfixed('static', linkname,
                                                         netid, ip_version, ip,
                                                         route, nameservers)

            fp = os.path.join(sc_dir, 'evs-network-%d.xml' % netid)
            sysconfig.create_sc_profile(fp, tree)

        if tenant_id is not None:
            # set the tenant id
            with ZoneConfig(zone) as zc:
                zc.setprop('global', 'tenant', tenant_id)

    def _verify_sysconfig(self, sc_dir, instance):
        """ verify the SC profile(s) passed in contain an entry for
        system/config-user to configure the root account.  If an SSH key is
        specified, configure root's profile to use it.

        """
        usercheck = lambda e: e.attrib.get('name') == 'system/config-user'
        hostcheck = lambda e: e.attrib.get('name') == 'system/identity'

        root_account_needed = True
        hostname_needed = True
        sshkey = instance.get('key_data')
        name = instance.get('display_name')

        # find all XML files in sc_dir
        for root, dirs, files in os.walk(sc_dir):
            for fname in [f for f in files if f.endswith(".xml")]:
                # Parse into a separate name; rebinding 'root' here would
                # break os.path.join(root, fname) for any subsequent XML
                # file in this directory.
                doc = etree.parse(os.path.join(root, fname))

                # look for config-user properties
                if filter(usercheck, doc.findall('service')):
                    # a service element was found for config-user.  Verify
                    # root's password is set, the admin account name is set and
                    # the admin's password is set
                    pgs = doc.iter('property_group')
                    for pg in pgs:
                        if pg.attrib.get('name') == 'root_account':
                            root_account_needed = False

                # look for identity properties
                if filter(hostcheck, doc.findall('service')):
                    for props in doc.iter('propval'):
                        if props.attrib.get('name') == 'nodename':
                            hostname_needed = False

        # Verify all of the requirements were met.  Create the required SMF
        # profile(s) if needed.
        if root_account_needed:
            fp = os.path.join(sc_dir, 'config-root.xml')

            if sshkey is not None:
                # set up the root account as 'normal' with no expiration and
                # an ssh key
                tree = sysconfig.create_default_root_account(sshkey=sshkey)
            else:
                # set up the root account as 'normal' but to expire immediately
                tree = sysconfig.create_default_root_account(expire='0')

            sysconfig.create_sc_profile(fp, tree)

        elif sshkey is not None:
            fp = os.path.join(sc_dir, 'config-root-ssh-keys.xml')
            tree = sysconfig.create_root_ssh_keys(sshkey)
            sysconfig.create_sc_profile(fp, tree)

        if hostname_needed and name is not None:
            fp = os.path.join(sc_dir, 'hostname.xml')
            sysconfig.create_sc_profile(fp, sysconfig.create_hostname(name))

    def _create_config(self, context, instance, network_info,
                       connection_info, extra_specs, sc_dir):
        """Create a new Solaris Zone configuration."""
        name = instance['name']
        if self._get_zone_by_name(name) is not None:
            raise exception.InstanceExists(name=name)

        # If unspecified, default zone brand is ZONE_BRAND_SOLARIS
        brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)
        template = ZONE_BRAND_TEMPLATE.get(brand)
        # TODO(dcomay): Detect capability via libv12n(3LIB) or virtinfo(1M).
        if template is None:
            msg = (_("Invalid brand '%s' specified for instance '%s'")
                   % (brand, name))
            raise exception.NovaException(msg)
   963 
       
   964         sc_profile = extra_specs.get('install:sc_profile')
       
   965         if sc_profile is not None:
       
   966             if os.path.isfile(sc_profile):
       
   967                 shutil.copy(sc_profile, sc_dir)
       
   968             elif os.path.isdir(sc_profile):
       
   969                 shutil.copytree(sc_profile, os.path.join(sc_dir, 'sysconfig'))
       
   970 
       
   971         self._verify_sysconfig(sc_dir, instance)
       
   972 
       
   973         zonemanager = self._rad_instance.get_object(zonemgr.ZoneManager())
       
   974         try:
       
   975             zonemanager.create(name, None, template)
       
   976             self._set_global_properties(name, extra_specs, brand)
       
   977             if connection_info:
       
   978                 self._set_boot_device(name, connection_info, brand)
       
   979             self._set_num_cpu(name, instance['vcpus'], brand)
       
   980             self._set_memory_cap(name, instance['memory_mb'], brand)
       
   981             self._set_network(context, name, instance, network_info, brand,
       
   982                               sc_dir)
       
   983         except Exception as reason:
       
   984             LOG.error(_("Unable to create configuration for instance '%s' via "
       
   985                         "zonemgr(3RAD): %s") % (name, reason))
       
   986             raise
       
   987 
       
   988     def _install(self, instance, image, extra_specs, sc_dir):
       
   989         """Install a new Solaris Zone root file system."""
       
   990         name = instance['name']
       
   991         zone = self._get_zone_by_name(name)
       
   992         if zone is None:
       
   993             raise exception.InstanceNotFound(instance_id=name)
       
   994 
       
   995         # log the zone's configuration
       
   996         with ZoneConfig(zone) as zc:
       
   997             LOG.debug("-" * 80)
       
   998             LOG.debug(zc.zone.exportConfig(True))
       
   999             LOG.debug("-" * 80)
       
  1000 
       
  1001         options = ['-a', image]
       
  1002 
       
  1003         if os.listdir(sc_dir):
       
  1004             # the directory isn't empty so pass it along to install
       
  1005             options.extend(['-c', sc_dir])
       
  1006 
       
  1007         try:
       
  1008             LOG.debug(_("installing instance '%s' (%s)") %
       
  1009                       (name, instance['display_name']))
       
  1010             zone.install(options=options)
       
  1011         except Exception as reason:
       
  1012             LOG.error(_("Unable to install root file system for instance '%s' "
       
  1013                         "via zonemgr(3RAD): %s") % (name, reason))
       
  1014             raise
       
  1015 
       
  1016         LOG.debug(_("installation of instance '%s' (%s) complete") %
       
  1017                   (name, instance['display_name']))
       
  1018 
       
  1019         if os.listdir(sc_dir):
       
  1020             # remove the sc_profile temp directory
       
  1021             shutil.rmtree(sc_dir)
       
  1022 
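           # For reference, the zone.install() call above corresponds
           # roughly to the following CLI invocation (a sketch only; the
           # driver acts solely through the zonemgr(3RAD) interface):
           #
           #   zoneadm -z <name> install -a <image> -c <sc_dir>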
       
  1023     def _power_on(self, instance):
       
  1024         """Power on a Solaris Zone."""
       
  1025         name = instance['name']
       
  1026         zone = self._get_zone_by_name(name)
       
  1027         if zone is None:
       
  1028             raise exception.InstanceNotFound(instance_id=name)
       
  1029 
       
  1030         try:
       
  1031             zone.boot()
       
  1032         except Exception as reason:
       
  1033             LOG.error(_("Unable to power on instance '%s' via zonemgr(3RAD): "
       
  1034                         "%s") % (name, reason))
       
  1035             raise exception.InstancePowerOnFailure(reason=reason)
       
  1036 
       
  1037     def _uninstall(self, instance):
       
  1038         """Uninstall an existing Solaris Zone root file system."""
       
  1039         name = instance['name']
       
  1040         zone = self._get_zone_by_name(name)
       
  1041         if zone is None:
       
  1042             raise exception.InstanceNotFound(instance_id=name)
       
  1043 
       
  1044         try:
       
  1045             zone.uninstall(['-F'])
       
  1046         except Exception as reason:
       
  1047             LOG.error(_("Unable to uninstall root file system for instance "
       
  1048                         "'%s' via zonemgr(3RAD): %s") % (name, reason))
       
  1049             raise
       
  1050 
       
  1051     def _delete_config(self, instance):
       
  1052         """Delete an existing Solaris Zone configuration."""
       
  1053         name = instance['name']
       
  1054         if self._get_zone_by_name(name) is None:
       
  1055             raise exception.InstanceNotFound(instance_id=name)
       
  1056 
       
  1057         zonemanager = self._rad_instance.get_object(zonemgr.ZoneManager())
       
  1058         try:
       
  1059             zonemanager.delete(name)
       
  1060         except Exception as reason:
       
  1061             LOG.error(_("Unable to delete configuration for instance '%s' via "
       
  1062                         "zonemgr(3RAD): %s") % (name, reason))
       
  1063             raise
       
  1064 
       
  1065     def spawn(self, context, instance, image_meta, injected_files,
       
  1066               admin_password, network_info=None, block_device_info=None):
       
  1067         """
       
  1068         Create a new instance/VM/domain on the virtualization platform.
       
  1069 
       
  1070         Once this successfully completes, the instance should be
       
  1071         running (power_state.RUNNING).
       
  1072 
       
  1073         If this fails, any partial instance should be completely
       
  1074         cleaned up, and the virtualization platform should be in the state
       
  1075         that it was before this call began.
       
  1076 
       
  1077         :param context: security context
       
  1078         :param instance: Instance object as returned by DB layer.
       
  1079                          This function should use the data there to guide
       
  1080                          the creation of the new instance.
       
  1081         :param image_meta: image object returned by nova.image.glance that
       
  1082                            defines the image from which to boot this instance
       
  1083         :param injected_files: User files to inject into instance.
       
  1084         :param admin_password: Administrator password to set in instance.
       
  1085         :param network_info:
       
  1086            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
       
  1087         :param block_device_info: Information about block devices to be
       
  1088                                   attached to the instance.
       
  1089         """
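               # Spawn sequence summary: fetch and validate the Glance
               # image, stage SC profiles in a temporary directory,
               # optionally attach a Cinder-backed boot volume, then
               # configure, install and boot the zone. Any install/boot
               # failure below rolls back by uninstalling the zone and
               # deleting its configuration.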
       
  1090         inst_type = self.virtapi.instance_type_get(
       
  1091             nova_context.get_admin_context(read_deleted='yes'),
       
  1092             instance['instance_type_id'])
       
  1093         extra_specs = inst_type['extra_specs'].copy()
       
  1094 
       
  1095         image = self._fetch_image(context, instance)
       
  1096         self._validate_image(image, instance)
       
  1097 
       
  1098         # create a new directory for SC profiles
       
  1099         sc_dir = tempfile.mkdtemp(prefix="nova-sysconfig-",
       
  1100                                   dir=CONF.state_path)
       
  1101         os.chmod(sc_dir, 0755)
       
  1102 
       
  1103         # Attempt to provision a (Cinder) volume service backed boot volume
       
  1104         connection_info = self._connect_boot_volume(context, instance,
       
  1105                                                     extra_specs)
       
  1106 
       
  1107         LOG.debug(_("creating zone configuration for '%s' (%s)") %
       
  1108                   (instance['name'], instance['display_name']))
       
  1109         self._create_config(context, instance, network_info,
       
  1110                             connection_info, extra_specs, sc_dir)
       
  1111         try:
       
  1112             self._install(instance, image, extra_specs, sc_dir)
       
  1113             self._power_on(instance)
       
  1114         except Exception as reason:
       
  1115             LOG.error(_("Unable to spawn instance '%s' via zonemgr(3RAD): %s")
       
  1116                       % (instance['name'], reason))
       
  1117             self._uninstall(instance)
       
  1118             self._delete_config(instance)
       
  1119             raise
       
  1120 
       
  1121     def _power_off(self, instance, halt_type):
       
  1122         """Power off a Solaris Zone."""
       
  1123         name = instance['name']
       
  1124         zone = self._get_zone_by_name(name)
       
  1125         if zone is None:
       
  1126             raise exception.InstanceNotFound(instance_id=name)
       
  1127 
       
  1128         try:
       
  1129             if halt_type == 'SOFT':
       
  1130                 zone.shutdown()
       
  1131             else:
       
  1132                 zone.halt()
       
  1133             return
       
  1134         except rad.client.ObjectError as reason:
       
  1135             result = reason.get_payload()
       
  1136             if result.code == zonemgr.ErrorCode.COMMAND_ERROR:
       
  1137                 LOG.warning(_("Ignoring command error returned while trying "
       
  1138                               "to power off instance '%s' via zonemgr(3RAD): "
       
  1139                               "%s") % (name, reason))
       
  1140                 return
       
  1141         except Exception as reason:
       
  1142             LOG.error(_("Unable to power off instance '%s' via zonemgr(3RAD): "
       
  1143                         "%s") % (name, reason))
       
  1144             raise exception.InstancePowerOffFailure(reason=reason)
       
  1145 
       
  1146     def destroy(self, instance, network_info, block_device_info=None,
       
  1147                 destroy_disks=True):
       
  1148         """Destroy (shutdown and delete) the specified instance.
       
  1149 
       
  1150         If the instance is not found (for example if networking failed), this
       
  1151         function should still succeed.  It's probably a good idea to log a
       
  1152         warning in that case.
       
  1153 
       
  1154         :param instance: Instance object as returned by DB layer.
       
  1155         :param network_info:
       
  1156            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
       
  1157         :param block_device_info: Information about block devices that should
       
  1158                                   be detached from the instance.
       
  1159         :param destroy_disks: Indicates if disks should be destroyed
       
  1160 
       
  1161         """
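               # Teardown sketch: a RUNNING zone is halted ('HARD' power
               # off), a SHUTDOWN (installed) zone is uninstalled, and once
               # the zone reports NOSTATE its configuration is deleted. The
               # state is re-read via _get_state() between steps, so a
               # partially destroyed zone is handled gracefully.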
       
  1162         # TODO(Vek): Need to pass context in for access to auth_token
       
  1163         name = instance['name']
       
  1164         zone = self._get_zone_by_name(name)
       
  1165         # If instance cannot be found, just return.
       
  1166         if zone is None:
       
  1167             LOG.warning(_("Unable to find instance '%s' via zonemgr(3RAD)")
       
  1168                         % name)
       
  1169             return
       
  1170 
       
  1171         try:
       
  1172             if self._get_state(zone) == power_state.RUNNING:
       
  1173                 self._power_off(instance, 'HARD')
       
  1174             if self._get_state(zone) == power_state.SHUTDOWN:
       
  1175                 self._uninstall(instance)
       
  1176             if self._get_state(zone) == power_state.NOSTATE:
       
  1177                 self._delete_config(instance)
       
  1178         except Exception as reason:
       
  1179             LOG.warning(_("Unable to destroy instance '%s' via zonemgr(3RAD): "
       
  1180                           "%s") % (name, reason))
       
  1181 
       
  1182     def reboot(self, context, instance, network_info, reboot_type,
       
  1183                block_device_info=None, bad_volumes_callback=None):
       
  1184         """Reboot the specified instance.
       
  1185 
       
  1186         After this is called successfully, the instance's state
       
  1187         goes back to power_state.RUNNING. The virtualization
       
  1188         platform should ensure that the reboot action has completed
       
  1189         successfully even in cases in which the underlying domain/vm
       
  1190         is paused or halted/stopped.
       
  1191 
       
  1192         :param instance: Instance object as returned by DB layer.
       
  1193         :param network_info:
       
  1194            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
       
  1195         :param reboot_type: Either a HARD or SOFT reboot
       
  1196         :param block_device_info: Info pertaining to attached volumes
       
  1197         :param bad_volumes_callback: Function to handle any bad volumes
       
  1198             encountered
       
  1199         """
       
  1200         name = instance['name']
       
  1201         zone = self._get_zone_by_name(name)
       
  1202         if zone is None:
       
  1203             raise exception.InstanceNotFound(instance_id=name)
       
  1204 
       
  1205         try:
       
  1206             if reboot_type == 'SOFT':
       
  1207                 zone.shutdown(['-r'])
       
  1208             else:
       
  1209                 zone.reboot()
       
  1210         except Exception as reason:
       
  1211             LOG.error(_("Unable to reboot instance '%s' via zonemgr(3RAD): %s")
       
  1212                       % (name, reason))
       
  1213             raise exception.InstanceRebootFailure(reason=reason)
       
  1214 
       
  1215     def get_console_pool_info(self, console_type):
       
  1216         # TODO(Vek): Need to pass context in for access to auth_token
       
  1217         raise NotImplementedError()
       
  1218 
       
  1219     def _get_console_output(self, instance):
       
  1220         """Builds a string containing the console output (capped at
       
  1221         MAX_CONSOLE_BYTES characters) by reassembling the log files
       
  1222         that Solaris Zones framework maintains for each zone.
       
  1223         """
       
  1224         console_str = ""
       
  1225         avail = MAX_CONSOLE_BYTES
       
  1226 
       
  1227         # Examine the log files in most-recently modified order, keeping
       
  1228         # track of the size of each file and of how many characters have
       
  1229         # been seen. If there are still characters left to incorporate,
       
  1230         # then the contents of the log file in question are prepended to
       
  1231         # the console string built so far. When the number of characters
       
  1232         # available has run out, the last fragment under consideration
       
  1233         # will likely begin within the middle of a line. As such, the
       
  1234         # start of the fragment up to the next newline is thrown away.
       
  1235         # The remainder constitutes the start of the resulting console
       
  1236         # output which is then prepended to the console string built so
       
  1237         # far and the result returned.
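               # Worked example of the accounting below (hypothetical sizes,
               # assuming MAX_CONSOLE_BYTES were 102400): with logs A
               # (newest, 100000 bytes) and B (5000 bytes), A is prepended
               # whole and 'avail' drops to 2400; B then drives 'avail'
               # negative, so only the last 2400 bytes of B (trimmed to the
               # next newline) are prepended before the loop breaks.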
       
  1238         logfile_pattern = '/var/log/zones/%s.console*' % instance['name']
       
  1239         logfiles = sorted(glob.glob(logfile_pattern), key=os.path.getmtime,
       
  1240                           reverse=True)
       
  1241         for logfile in logfiles:
  1242             size = os.path.getsize(logfile)
  1243             if size == 0:
  1244                 continue
  1245             avail -= size
  1246             with open(logfile, 'r') as log:
  1247                 if avail < 0:
  1248                     (fragment, unused) = utils.last_bytes(log, avail + size)
       
  1249                     remainder = fragment.find('\n') + 1
       
  1250                     console_str = fragment[remainder:] + console_str
       
  1251                     break
       
  1252                 fragment = ''
       
  1253                 for line in log.readlines():
       
  1254                     fragment += line
       
  1255                 console_str = fragment + console_str
       
  1256 
       
  1257         return console_str
       
  1258 
       
  1259     def get_console_output(self, instance):
       
  1260         # TODO(Vek): Need to pass context in for access to auth_token
       
  1261         return self._get_console_output(instance)
       
  1262 
       
  1263     def get_vnc_console(self, instance):
       
  1264         # TODO(Vek): Need to pass context in for access to auth_token
       
  1265         raise NotImplementedError()
       
  1266 
       
  1267     def get_spice_console(self, instance):
       
  1268         # TODO(Vek): Need to pass context in for access to auth_token
       
  1269         raise NotImplementedError()
       
  1270 
       
  1271     def _get_zone_diagnostics(self, zone):
       
  1272         """Return data about Solaris Zone diagnostics."""
       
  1273         if zone.id == -1:
       
  1274             return None
       
  1275 
       
  1276         diagnostics = {}
       
  1277         zid = str(zone.id)
  1278 
  1279         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', zid,
  1280                                              'lockedmem_zone_' + zid)
  1281         if kstat_data is not None:
  1282             diagnostics['lockedmem'] = kstat_data['usage']
  1283 
  1284         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', zid,
  1285                                              'nprocs_zone_' + zid)
  1286         if kstat_data is not None:
  1287             diagnostics['nprocs'] = kstat_data['usage']
  1288 
  1289         kstat_data = self._get_kstat_by_name('zone_caps', 'caps', zid,
  1290                                              'swapresv_zone_' + zid)
  1291         if kstat_data is not None:
  1292             diagnostics['swapresv'] = kstat_data['usage']
  1293 
  1294         kstat_data = self._get_kstat_by_name('zones', 'cpu', zid,
  1295                                              'sys_zone_aggr')
       
  1296         if kstat_data is not None:
       
  1297             for key in kstat_data.keys():
       
  1298                 if key not in ('class', 'crtime', 'snaptime'):
       
  1299                     diagnostics[key] = kstat_data[key]
       
  1300 
       
  1301         return diagnostics
       
  1302 
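           # Example (hypothetical zone id 3): the capped-resource kstats
           # consulted above would be lockedmem_zone_3, nprocs_zone_3 and
           # swapresv_zone_3, plus the aggregated per-zone CPU kstat
           # sys_zone_aggr, whose fields (minus class/crtime/snaptime) are
           # copied into the diagnostics dict verbatim.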
       
  1303     def get_diagnostics(self, instance):
       
  1304         """Return data about VM diagnostics."""
       
  1305         # TODO(Vek): Need to pass context in for access to auth_token
       
  1306         name = instance['name']
       
  1307         zone = self._get_zone_by_name(name)
       
  1308         if zone is None:
       
  1309             LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)")
       
  1310                       % name)
       
  1311             raise exception.InstanceNotFound(instance_id=name)
       
  1312 
       
  1313         return self._get_zone_diagnostics(zone)
       
  1314 
       
  1315     def get_all_bw_counters(self, instances):
       
  1316         """Return bandwidth usage counters for each interface on each
       
  1317            running VM"""
       
  1318         raise NotImplementedError()
       
  1319 
       
  1320     def get_all_volume_usage(self, context, compute_host_bdms):
       
  1321         """Return usage info for volumes attached to vms on
       
  1322            a given host"""
       
  1323         raise NotImplementedError()
       
  1324 
       
  1325     def get_host_ip_addr(self):
       
  1326         """
       
  1327         Retrieves the IP address of the compute host
       
  1328         """
       
  1329         # TODO(Vek): Need to pass context in for access to auth_token
       
  1330         return CONF.my_ip
       
  1331 
       
  1332     def attach_volume(self, connection_info, instance, mountpoint):
       
  1333         """Attach the disk to the instance at mountpoint using info."""
       
  1334         # TODO(npower): Apply mountpoint in a meaningful way to the zone
       
  1335         # (I don't think this is even possible for Solaris brand zones)
       
  1336         name = instance['name']
       
  1337         zone = self._get_zone_by_name(name)
       
  1338         if zone is None:
       
  1339             raise exception.InstanceNotFound(instance_id=name)
       
  1340 
       
  1341         zprop = lookup_resource_property_value(zone, "global", "brand",
       
  1342                                                ZONE_BRAND_SOLARIS_KZ)
       
  1343         if not zprop:
       
  1344             # Only Solaris Kernel zones are currently supported.
       
  1345             raise NotImplementedError()
       
  1346 
       
  1347         suri = self._suri_from_volume_info(connection_info)
       
  1348 
       
  1349         with ZoneConfig(zone) as zc:
       
  1350             zc.addresource("device", [zonemgr.Property("storage", suri)])
       
  1351 
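           # The ZoneConfig change above is roughly equivalent to this
           # zonecfg(1M) sequence (a sketch; the driver acts through
           # zonemgr(3RAD) instead):
           #
           #   zonecfg -z <name> "add device; set storage=<suri>; end"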
       
  1352     def detach_volume(self, connection_info, instance, mountpoint):
       
  1353         """Detach the disk attached to the instance."""
       
  1354         name = instance['name']
       
  1355         zone = self._get_zone_by_name(name)
       
  1356         if zone is None:
       
  1357             raise exception.InstanceNotFound(instance_id=name)
       
  1358 
       
  1359         zprop = lookup_resource_property_value(zone, "global", "brand",
       
  1360                                                ZONE_BRAND_SOLARIS_KZ)
       
  1361         if not zprop:
       
  1362             # Only Solaris Kernel zones are currently supported.
       
  1363             raise NotImplementedError()
       
  1364 
       
  1365         suri = self._suri_from_volume_info(connection_info)
       
  1366 
       
  1367         # Check if the property value exists before attempting removal.
       
  1368         prop = lookup_resource_property_value(zone, "device", "storage", suri)
       
  1369         if not prop:
       
  1370             LOG.warning(_("Storage resource '%s' is not attached to instance "
       
  1371                         "'%s'") % (suri, name))
       
  1372             return
       
  1373 
       
  1374         with ZoneConfig(zone) as zc:
       
  1375             zc.removeresources("device", [zonemgr.Property("storage", suri)])
       
  1376 
       
  1377     def attach_interface(self, instance, image_meta, network_info):
       
  1378         """Attach an interface to the instance."""
       
  1379         raise NotImplementedError()
       
  1380 
       
  1381     def detach_interface(self, instance, network_info):
       
  1382         """Detach an interface from the instance."""
       
  1383         raise NotImplementedError()
       
  1384 
       
  1385     def migrate_disk_and_power_off(self, context, instance, dest,
       
  1386                                    instance_type, network_info,
       
  1387                                    block_device_info=None):
       
  1388         """
       
  1389         Transfers the disk of a running instance in multiple phases, turning
       
  1390         off the instance before the end.
       
  1391         """
       
  1392         raise NotImplementedError()
       
  1393 
       
  1394     def snapshot(self, context, instance, image_id, update_task_state):
       
  1395         """
       
  1396         Snapshots the specified instance.
       
  1397 
       
  1398         :param context: security context
       
  1399         :param instance: Instance object as returned by DB layer.
       
  1400         :param image_id: Reference to a pre-created image that will
       
  1401                          hold the snapshot.
       
  1402         """
       
  1403         # Get original base image info
       
  1404         (base_service, base_id) = glance.get_remote_image_service(
       
  1405             context, instance['image_ref'])
       
  1406         try:
       
  1407             base = base_service.show(context, base_id)
       
  1408         except exception.ImageNotFound:
       
  1409             base = {}
       
  1410 
       
  1411         snapshot_service, snapshot_id = glance.get_remote_image_service(
       
  1412             context, image_id)
       
  1413 
       
  1414         # Build updated snapshot image metadata
       
  1415         snapshot = snapshot_service.show(context, snapshot_id)
       
  1416         metadata = {
       
  1417             'is_public': False,
       
  1418             'status': 'active',
       
  1419             'name': snapshot['name'],
       
  1420             'properties': {
       
  1421                 'image_location': 'snapshot',
       
  1422                 'image_state': 'available',
       
  1423                 'owner_id': instance['project_id'],
       
  1424             }
       
  1425         }
       
  1426         # Match architecture, hypervisor_type and vm_mode properties to base
       
  1427         # image.
       
  1428         for prop in ['architecture', 'hypervisor_type', 'vm_mode']:
       
  1429             if prop in base.get('properties', {}):
       
  1430                 base_prop = base['properties'][prop]
       
  1431                 metadata['properties'][prop] = base_prop
       
  1432 
       
  1433         # Set generic container and disk formats initially in case the glance
       
  1434         # service rejects unified archives (uar) and zfs in metadata
       
  1435         metadata['container_format'] = 'ovf'
       
  1436         metadata['disk_format'] = 'raw'
       
  1437 
       
  1438         update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
       
  1439         snapshot_directory = CONF.solariszones_snapshots_directory
       
  1440         fileutils.ensure_tree(snapshot_directory)
       
  1441         snapshot_name = uuid.uuid4().hex
       
  1442 
       
  1443         with utils.tempdir(dir=snapshot_directory) as tmpdir:
       
  1444             out_path = os.path.join(tmpdir, snapshot_name)
       
  1445 
       
  1446             # TODO(npower): archiveadm invocation needs --root-only arg
       
  1447             # passed once it is available. Assume the instance contains
       
  1448             # root pool only for now.
       
  1449             zone_name = instance['name']
       
  1450             utils.execute('/usr/sbin/archiveadm', 'create', '--root-only',
       
  1451                           '-z', zone_name, out_path)
       
  1452 
       
  1453             LOG.info(_("Snapshot extracted, beginning image upload"),
       
  1454                      instance=instance)
       
  1455             try:
       
  1456                 # Upload the archive image to the image service
       
  1457                 update_task_state(
  1458                     task_state=task_states.IMAGE_UPLOADING,
  1459                     expected_state=task_states.IMAGE_PENDING_UPLOAD)
       
  1460                 with open(out_path, 'r') as image_file:
       
  1461                     snapshot_service.update(context,
       
  1462                                             image_id,
       
  1463                                             metadata,
       
  1464                                             image_file)
       
  1465                     LOG.info(_("Snapshot image upload complete"),
       
  1466                              instance=instance)
       
  1467                 try:
       
  1468                     # Try to update the image metadata container and disk
       
  1469                     # formats more suitably for a unified archive if the
       
  1470                     # glance server recognises them.
       
  1471                     metadata['container_format'] = 'uar'
       
  1472                     metadata['disk_format'] = 'zfs'
       
  1473                     snapshot_service.update(context,
       
  1474                                             image_id,
       
  1475                                             metadata,
       
  1476                                             None)
       
  1477                 except exception.Invalid:
       
  1478                     LOG.warning(_("Image service rejected image metadata "
       
  1479                                   "container and disk formats 'uar' and "
       
  1480                                   "'zfs'. Using generic values 'ovf' and "
       
  1481                                   "'raw' as fallbacks."))
       
  1482             finally:
       
  1483                 # Delete the snapshot image file source
       
  1484                 os.unlink(out_path)
       
  1485 
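           # Metadata flow note for the snapshot above: the image is first
           # registered with the generic 'ovf'/'raw' formats so the upload
           # itself cannot fail on format validation; a second update then
           # attempts the more accurate 'uar'/'zfs' pair and, if Glance
           # rejects it, the generic values are simply kept.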
       
  1486     def finish_migration(self, context, migration, instance, disk_info,
       
  1487                          network_info, image_meta, resize_instance,
       
  1488                          block_device_info=None):
       
  1489         """Completes a resize, turning on the migrated instance
       
  1490 
       
  1491         :param network_info:
       
  1492            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
       
  1493         :param image_meta: image object returned by nova.image.glance that
       
  1494                            defines the image from which this instance
       
  1495                            was created
       
  1496         """
       
  1497         raise NotImplementedError()
       
  1498 
       
  1499     def confirm_migration(self, migration, instance, network_info):
       
  1500         """Confirms a resize, destroying the source VM."""
       
  1501         # TODO(Vek): Need to pass context in for access to auth_token
       
  1502         raise NotImplementedError()
       
  1503 
       
  1504     def finish_revert_migration(self, instance, network_info,
       
  1505                                 block_device_info=None):
       
  1506         """Finish reverting a resize, powering back on the instance."""
       
  1507         # TODO(Vek): Need to pass context in for access to auth_token
       
  1508         raise NotImplementedError()
       
  1509 
       
  1510     def pause(self, instance):
       
  1511         """Pause the specified instance."""
       
  1512         # TODO(Vek): Need to pass context in for access to auth_token
       
  1513         raise NotImplementedError()
       
  1514 
       
  1515     def unpause(self, instance):
       
  1516         """Unpause paused VM instance."""
       
  1517         # TODO(Vek): Need to pass context in for access to auth_token
       
  1518         raise NotImplementedError()
       
  1519 
       
  1520     def _suspend(self, instance):
       
  1521         """Suspend a Solaris Zone."""
       
  1522         name = instance['name']
       
  1523         zone = self._get_zone_by_name(name)
       
  1524         if zone is None:
       
  1525             raise exception.InstanceNotFound(instance_id=name)
       
  1526 
       
  1527         if self._uname[4] != 'i86pc':
       
  1528             # Only x86 platforms are currently supported.
       
  1529             raise NotImplementedError()
       
  1530 
       
  1531         zprop = lookup_resource_property_value(zone, "global", "brand",
       
  1532                                                ZONE_BRAND_SOLARIS_KZ)
       
  1533         if not zprop:
       
  1534             # Only Solaris Kernel zones are currently supported.
       
  1535             raise NotImplementedError()
       
  1536 
       
  1537         try:
       
  1538             zone.suspend()
       
  1539         except Exception as reason:
       
  1540             # TODO(dcomay): Try to recover in cases where zone has been
       
  1541             # resumed automatically.
       
  1542             LOG.error(_("Unable to suspend instance '%s' via zonemgr(3RAD): "
       
  1543                         "%s") % (name, reason))
       
  1544             raise exception.InstanceSuspendFailure(reason=reason)
       
  1545 
       
  1546     def suspend(self, instance):
       
  1547         """suspend the specified instance."""
       
  1548         # TODO(Vek): Need to pass context in for access to auth_token
       
  1549         self._suspend(instance)
       
  1550 
       
  1551     def resume(self, instance, network_info, block_device_info=None):
       
  1552         """resume the specified instance."""
       
  1553         # TODO(Vek): Need to pass context in for access to auth_token
       
  1554         try:
       
  1555             self._power_on(instance)
       
  1556         except Exception as reason:
       
  1557             raise exception.InstanceResumeFailure(reason=reason)
       
  1558 
       
  1559     def resume_state_on_host_boot(self, context, instance, network_info,
       
  1560                                   block_device_info=None):
       
  1561         """resume guest state when a host is booted."""
       
  1562         name = instance['name']
       
  1563         zone = self._get_zone_by_name(name)
       
  1564         if zone is None:
       
  1565             raise exception.InstanceNotFound(instance_id=name)
       
  1566 
       
  1567         # TODO(dcomay): Should reconcile with value of zone's autoboot
       
  1568         # property.
       
  1569         if self._get_state(zone) not in (power_state.CRASHED,
       
  1570                                          power_state.SHUTDOWN):
       
  1571             return
       
  1572 
       
  1573         self._power_on(instance)
       
  1574 
       
  1575     def rescue(self, context, instance, network_info, image_meta,
       
  1576                rescue_password):
       
  1577         """Rescue the specified instance."""
       
  1578         raise NotImplementedError()
       
  1579 
       
  1580     def unrescue(self, instance, network_info):
       
  1581         """Unrescue the specified instance."""
       
  1582         # TODO(Vek): Need to pass context in for access to auth_token
       
  1583         raise NotImplementedError()
       
  1584 
       
  1585     def power_off(self, instance):
       
  1586         """Power off the specified instance."""
       
  1587         self._power_off(instance, 'SOFT')
       
  1588 
       
  1589     def power_on(self, context, instance, network_info,
       
  1590                  block_device_info=None):
       
  1591         """Power on the specified instance."""
       
  1592         self._power_on(instance)
       
  1593 
       
  1594     def soft_delete(self, instance):
       
  1595         """Soft delete the specified instance."""
       
  1596         raise NotImplementedError()
       
  1597 
       
  1598     def restore(self, instance):
       
  1599         """Restore the specified instance."""
       
  1600         raise NotImplementedError()
       
  1601 
       
  1602     def _update_host_stats(self):
       
  1603         """Update currently known host stats."""
       
  1604         host_stats = {}
       
  1605         host_stats['vcpus'] = os.sysconf('SC_NPROCESSORS_ONLN')
       
  1606         pages = os.sysconf('SC_PHYS_PAGES')
       
  1607         host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024
       
  1608         host_stats['local_gb'] = 0
       
  1609 
       
  1610         # Account for any existing processor sets by looking at the
  1611         # number of CPUs not assigned to any processor sets.
       
  1612         kstat_data = self._get_kstat_by_name('misc', 'unix', '0', 'pset')
       
  1613         if kstat_data is not None:
       
  1614             host_stats['vcpus_used'] = \
       
  1615                 host_stats['vcpus'] - kstat_data['ncpus']
       
  1616         else:
       
  1617             host_stats['vcpus_used'] = 0
       
  1618 
       
  1619         # Subtract the number of free pages from the total to get the
       
  1620         # used.
       
  1621         kstat_data = self._get_kstat_by_name('pages', 'unix', '0',
       
  1622                                              'system_pages')
       
  1623         if kstat_data is not None:
       
  1624             host_stats['memory_mb_used'] = \
       
  1625                 self._pages_to_kb((pages - kstat_data['freemem'])) / 1024
       
  1626         else:
       
  1627             host_stats['memory_mb_used'] = 0
       
  1628 
       
  1629         host_stats['local_gb_used'] = 0
       
  1630         host_stats['hypervisor_type'] = 'solariszones'
       
  1631         host_stats['hypervisor_version'] = int(self._uname[2].replace('.', ''))
       
  1632         host_stats['hypervisor_hostname'] = self._uname[1]
       
  1633         if self._uname[4] == 'i86pc':
       
  1634             architecture = 'x86_64'
       
  1635         else:
       
  1636             architecture = 'sparc64'
       
  1637         host_stats['cpu_info'] = str({'arch': architecture})
       
  1638         host_stats['disk_available_least'] = 0
       
  1639 
       
  1640         supported_instances = [
       
  1641             (architecture, 'solariszones', 'zones')
       
  1642         ]
       
  1643         host_stats['supported_instances'] = supported_instances
       
  1644 
       
  1645         self._host_stats = host_stats
       
  1646 
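           # Example of the memory arithmetic above (assuming 4 KB pages
           # and that _pages_to_kb() converts a page count to kilobytes):
           # SC_PHYS_PAGES = 4194304 yields 16777216 KB, so
           # memory_mb = 16777216 / 1024 = 16384 MB (16 GB).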
       
  1647     def get_available_resource(self, nodename):
       
  1648         """Retrieve resource information.
       
  1649 
       
  1650         This method is called when nova-compute launches, and
       
  1651         as part of a periodic task
       
  1652 
       
  1653         :param nodename:
       
  1654             node which the caller want to get resources from
       
  1655             a driver that manages only one node can safely ignore this
       
  1656         :returns: Dictionary describing resources
       
  1657         """
       
  1658         self._update_host_stats()
       
  1659         host_stats = self._host_stats
       
  1660 
       
  1661         resources = {}
       
  1662         resources['vcpus'] = host_stats['vcpus']
       
  1663         resources['memory_mb'] = host_stats['memory_mb']
       
  1664         resources['local_gb'] = host_stats['local_gb']
       
  1665         resources['vcpus_used'] = host_stats['vcpus_used']
       
  1666         resources['memory_mb_used'] = host_stats['memory_mb_used']
       
  1667         resources['local_gb_used'] = host_stats['local_gb_used']
       
  1668         resources['hypervisor_type'] = host_stats['hypervisor_type']
       
  1669         resources['hypervisor_version'] = host_stats['hypervisor_version']
       
  1670         resources['hypervisor_hostname'] = host_stats['hypervisor_hostname']
       
  1671         resources['cpu_info'] = host_stats['cpu_info']
       
  1672         resources['disk_available_least'] = host_stats['disk_available_least']
       
  1673 
       
  1674         return resources
       
  1675 
       
  1676     def pre_live_migration(self, ctxt, instance_ref, block_device_info,
       
  1677                            network_info, disk_info, migrate_data=None):
       
  1678         """Prepare an instance for live migration
       
  1679 
       
  1680         :param ctxt: security context
       
  1681         :param instance_ref: instance object that will be migrated
       
  1682         :param block_device_info: instance block device information
       
  1683         :param network_info: instance network information
       
  1684         :param disk_info: instance disk information
       
  1685         :param migrate_data: implementation specific data dict.
       
  1686         """
       
  1687         raise NotImplementedError()
       
  1688 
       
  1689     def live_migration(self, ctxt, instance_ref, dest,
       
  1690                        post_method, recover_method, block_migration=False,
       
  1691                        migrate_data=None):
       
  1692         """Live migration of an instance to another host.
       
  1693 
       
  1694         :param ctxt: security context
  1695         :param instance_ref:
  1696             nova.db.sqlalchemy.models.Instance object
  1697             instance object that is migrated.
  1698         :param dest: destination host
  1699         :param post_method:
  1700             post operation method.
  1701             expected nova.compute.manager.post_live_migration.
  1702         :param recover_method:
  1703             recovery method when any exception occurs.
  1704             expected nova.compute.manager.recover_live_migration.
  1705         :param block_migration: if true, migrate VM disk.
  1706         :param migrate_data: implementation specific params.
       
  1707 
       
  1708         """
       
  1709         raise NotImplementedError()
       
  1710 
       
  1711     def post_live_migration_at_destination(self, ctxt, instance_ref,
       
  1712                                            network_info,
       
  1713                                            block_migration=False,
       
  1714                                            block_device_info=None):
       
  1715         """Post operation of live migration at destination host.
       
  1716 
       
  1717         :param ctxt: security context
       
  1718         :param instance_ref: instance object that is migrated
       
  1719         :param network_info: instance network information
       
  1720         :param block_migration: if true, post operation of block_migration.
       
  1721         """
       
  1722         raise NotImplementedError()
       
  1723 
       
  1724     def check_can_live_migrate_destination(self, ctxt, instance_ref,
       
  1725                                            src_compute_info, dst_compute_info,
       
  1726                                            block_migration=False,
       
  1727                                            disk_over_commit=False):
       
  1728         """Check if it is possible to execute live migration.
       
  1729 
       
  1730         This runs checks on the destination host, and then calls
       
  1731         back to the source host to check the results.
       
  1732 
       
  1733         :param ctxt: security context
       
  1734         :param instance_ref: nova.db.sqlalchemy.models.Instance
       
  1735         :param src_compute_info: Info about the sending machine
       
  1736         :param dst_compute_info: Info about the receiving machine
       
  1737         :param block_migration: if true, prepare for block migration
       
  1738         :param disk_over_commit: if true, allow disk over commit
       
  1739         """
       
  1740         raise NotImplementedError()
       
  1741 
       
  1742     def check_can_live_migrate_destination_cleanup(self, ctxt,
       
  1743                                                    dest_check_data):
       
  1744         """Do required cleanup on dest host after check_can_live_migrate calls
       
  1745 
       
  1746         :param ctxt: security context
       
  1747         :param dest_check_data: result of check_can_live_migrate_destination
       
  1748         """
       
  1749         raise NotImplementedError()
       
  1750 
       
  1751     def check_can_live_migrate_source(self, ctxt, instance_ref,
       
  1752                                       dest_check_data):
       
  1753         """Check if it is possible to execute live migration.
       
  1754 
       
  1755         This checks if the live migration can succeed, based on the
       
  1756         results from check_can_live_migrate_destination.
       
  1757 
       
  1758         :param context: security context
       
  1759         :param instance_ref: nova.db.sqlalchemy.models.Instance
       
  1760         :param dest_check_data: result of check_can_live_migrate_destination
       
  1761         """
       
  1762         raise NotImplementedError()
       
  1763 
       
  1764     def refresh_security_group_rules(self, security_group_id):
       
  1765         """This method is called after a change to security groups.
       
  1766 
       
  1767         All security groups and their associated rules live in the datastore,
       
  1768         and calling this method should apply the updated rules to instances
       
  1769         running the specified security group.
       
  1770 
       
  1771         An error should be raised if the operation cannot complete.
       
  1772 
       
  1773         """
       
  1774         # TODO(Vek): Need to pass context in for access to auth_token
       
  1775         raise NotImplementedError()
       
  1776 
       
  1777     def refresh_security_group_members(self, security_group_id):
       
  1778         """This method is called when a security group is added to an instance.
       
  1779 
       
  1780         This message is sent to the virtualization drivers on hosts that are
       
  1781         running an instance that belongs to a security group that has a rule
       
  1782         that references the security group identified by `security_group_id`.
       
  1783         It is the responsibility of this method to make sure any rules
       
  1784         that authorize traffic flow with members of the security group are
       
  1785         updated and any new members can communicate, and any removed members
       
  1786         cannot.
       
  1787 
       
  1788         Scenario:
       
  1789             * we are running on host 'H0' and we have an instance 'i-0'.
       
  1790             * instance 'i-0' is a member of security group 'speaks-b'
       
  1791             * group 'speaks-b' has an ingress rule that authorizes group 'b'
       
  1792             * another host 'H1' runs an instance 'i-1'
       
  1793             * instance 'i-1' is a member of security group 'b'
       
  1794 
       
  1795             When 'i-1' launches or terminates we will receive the message
       
  1796             to update members of group 'b', at which time we will make
       
  1797             any changes needed to the rules for instance 'i-0' to allow
       
  1798             or deny traffic coming from 'i-1', depending on if it is being
       
  1799             added or removed from the group.
       
  1800 
       
  1801         In this scenario, 'i-1' could just as easily have been running on our
       
  1802         host 'H0' and this method would still have been called.  The point was
       
  1803         that this method isn't called on the host where instances of that
       
  1804         group are running (as is the case with
       
  1805         :py:meth:`refresh_security_group_rules`) but is called where references
       
  1806         are made to authorizing those instances.
       
  1807 
       
  1808         An error should be raised if the operation cannot complete.
       
  1809 
       
  1810         """
       
  1811         # TODO(Vek): Need to pass context in for access to auth_token
       
  1812         raise NotImplementedError()
       
  1813 
       
  1814     def refresh_provider_fw_rules(self):
       
  1815         """This triggers a firewall update based on database changes.
       
  1816 
       
  1817         When this is called, rules have either been added or removed from the
       
  1818         datastore.  You can retrieve rules with
       
  1819         :py:meth:`nova.db.provider_fw_rule_get_all`.
       
  1820 
       
  1821         Provider rules take precedence over security group rules.  If an IP
       
  1822         would be allowed by a security group ingress rule, but blocked by
       
  1823         a provider rule, then packets from the IP are dropped.  This includes
       
  1824         intra-project traffic in the case of the allow_project_net_traffic
       
  1825         flag for the libvirt-derived classes.
       
  1826 
       
  1827         """
       
  1828         # TODO(Vek): Need to pass context in for access to auth_token
       
  1829         raise NotImplementedError()
       
  1830 
       
  1831     def reset_network(self, instance):
       
  1832         """reset networking for specified instance."""
       
  1833         # TODO(Vek): Need to pass context in for access to auth_token
       
  1834         pass
       
  1835 
       
  1836     def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
       
  1837         """Set up filtering rules and wait for their completion.
  1838 
  1839         To migrate an instance, filtering rules for the hypervisor
  1840         and firewalls must be in place on the destination host.
  1841         (We wait only for the hypervisor filtering rules, since
  1842         firewall rules can be set up faster.)
  1843 
  1844         Concretely, the methods below must be called:
  1845         - setup_basic_filtering (for nova-basic, etc.)
  1846         - prepare_instance_filter (for nova-instance-instance-xxx, etc.)
  1847 
  1848         to_xml may have to be called since it defines PROJNET and
  1849         PROJMASK, but libvirt migrates those values through
  1850         migrateToURI(), so it does not need to be called here.
  1851 
  1852         Don't use a thread for this method, since migration should not
  1853         start while the filtering rule setup operations are still in
  1854         progress.
       
  1855 
       
  1856         :params instance_ref: nova.db.sqlalchemy.models.Instance object
       
  1857 
       
  1858         """
       
  1859         # TODO(Vek): Need to pass context in for access to auth_token
       
  1860         raise NotImplementedError()
       
  1861 
       
  1862     def filter_defer_apply_on(self):
       
  1863         """Defer application of IPTables rules."""
       
  1864         pass
       
  1865 
       
  1866     def filter_defer_apply_off(self):
       
  1867         """Turn off deferral of IPTables rules and apply the rules now."""
       
  1868         pass
       
  1869 
       
  1870     def unfilter_instance(self, instance, network_info):
       
  1871         """Stop filtering instance."""
       
  1872         # TODO(Vek): Need to pass context in for access to auth_token
       
  1873         raise NotImplementedError()
       
  1874 
       
  1875     def set_admin_password(self, context, instance_id, new_pass=None):
       
  1876         """
       
  1877         Set the root password on the specified instance.
       
  1878 
       
  1879         The instance to update is identified by the instance_id
  1880         parameter, and the new root password is given by the new_pass
  1881         parameter.
       
  1882         """
       
  1883         raise NotImplementedError()
       
  1884 
       
  1885     def inject_file(self, instance, b64_path, b64_contents):
       
  1886         """
       
  1887         Writes a file on the specified instance.
       
  1888 
       
  1889         The first parameter is an instance of nova.compute.service.Instance,
       
  1890         and so the instance is being specified as instance.name. The second
       
  1891         parameter is the base64-encoded path to which the file is to be
       
  1892         written on the instance; the third is the contents of the file, also
       
  1893         base64-encoded.
       
  1894         """
       
  1895         # TODO(Vek): Need to pass context in for access to auth_token
       
  1896         raise NotImplementedError()
       
  1897 
       
  1898     def change_instance_metadata(self, context, instance, diff):
       
  1899         """
       
  1900         Applies a diff to the instance metadata.
       
  1901 
       
  1902         This is an optional driver method which is used to publish
       
  1903         changes to the instance's metadata to the hypervisor.  If the
       
  1904         hypervisor has no means of publishing the instance metadata to
       
  1905         the instance, then this method should not be implemented.
       
  1906         """
       
  1907         pass
       
  1908 
       
  1909     def inject_network_info(self, instance, nw_info):
       
  1910         """inject network info for specified instance."""
       
  1911         # TODO(Vek): Need to pass context in for access to auth_token
       
  1912         pass
       
  1913 
       
  1914     def poll_rebooting_instances(self, timeout, instances):
       
  1915         """Poll for rebooting instances
       
  1916 
       
  1917         :param timeout: the currently configured timeout for considering
       
  1918                         rebooting instances to be stuck
       
  1919         :param instances: instances that have been in rebooting state
       
  1920                           longer than the configured timeout
       
  1921         """
       
  1922         # TODO(Vek): Need to pass context in for access to auth_token
       
  1923         raise NotImplementedError()
       
  1924 
       
  1925     def host_power_action(self, host, action):
       
  1926         """Reboots, shuts down or powers up the host."""
       
  1927         raise NotImplementedError()
       
  1928 
       
  1929     def host_maintenance_mode(self, host, mode):
       
  1930         """Start or stop the host maintenance window. On start, it
  1931         triggers the evacuation of guest VMs."""
       
  1932         raise NotImplementedError()
       
  1933 
       
  1934     def set_host_enabled(self, host, enabled):
       
  1935         """Sets the specified host's ability to accept new instances."""
       
  1936         # TODO(Vek): Need to pass context in for access to auth_token
       
  1937         raise NotImplementedError()
       
  1938 
       
  1939     def get_host_uptime(self, host):
       
  1940         """Returns the result of calling "uptime" on the target host."""
       
  1941         # TODO(Vek): Need to pass context in for access to auth_token
       
  1942         return utils.execute('/usr/bin/uptime')[0]
       
  1943 
       
  1944     def plug_vifs(self, instance, network_info):
       
  1945         """Plug VIFs into networks."""
       
  1946         # TODO(Vek): Need to pass context in for access to auth_token
       
  1947         pass
       
  1948 
       
  1949     def unplug_vifs(self, instance, network_info):
       
  1950         """Unplug VIFs from networks."""
       
  1951         raise NotImplementedError()
       
  1952 
       
  1953     def get_host_stats(self, refresh=False):
       
  1954         """Return currently known host stats."""
       
  1955         if refresh:
       
  1956             self._update_host_stats()
       
  1957 
       
  1958         return self._host_stats
       
  1959 
       
  1960     def block_stats(self, instance_name, disk_id):
       
  1961         """
       
  1962         Return performance counters associated with the given disk_id on the
       
  1963         given instance_name.  These are returned as [rd_req, rd_bytes, wr_req,
       
  1964         wr_bytes, errs], where rd indicates read, wr indicates write, req is
       
  1965         the total number of I/O requests made, bytes is the total number of
       
  1966         bytes transferred, and errs is the number of requests held up due to a
       
  1967         full pipeline.
       
  1968 
       
  1969         All counters are long integers.
       
  1970 
       
  1971         This method is optional.  On some platforms (e.g. XenAPI) performance
       
  1972         statistics can be retrieved directly in aggregate form, without Nova
       
  1973         having to do the aggregation.  On those platforms, this method is
       
  1974         unused.
       
  1975 
       
  1976         Note that this function takes an instance name, not an Instance
       
  1977         """
       
  1978         raise NotImplementedError()
       
  1979 
       
  1980     def interface_stats(self, instance_name, iface_id):
       
  1981         """
       
  1982         Return performance counters associated with the given iface_id on
  1983         the given instance_name.  These are returned as [rx_bytes, rx_packets,
       
  1984         rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
       
  1985         indicates receive, tx indicates transmit, bytes and packets indicate
       
  1986         the total number of bytes or packets transferred, and errs and dropped
       
  1987         is the total number of packets failed / dropped.
       
  1988 
       
  1989         All counters are long integers.
       
  1990 
       
  1991         This method is optional.  On some platforms (e.g. XenAPI) performance
       
  1992         statistics can be retrieved directly in aggregate form, without Nova
       
  1993         having to do the aggregation.  On those platforms, this method is
       
  1994         unused.
       
  1995 
       
  1996         Note that this function takes an instance ID.
       
  1997         """
       
  1998         raise NotImplementedError()
       
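    # Hedged illustration only: interface_stats() mirrors block_stats()
    # but returns eight link counters.  A minimal sketch:
    #
    #     def interface_stats(self, instance_name, iface_id):
    #         # rx then tx: bytes, packets, errs, drop -- all longs
    #         return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]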
       
    def legacy_nwinfo(self):
        """True if the driver requires the legacy network_info format."""
        # TODO(tr3buchet): update all subclasses and remove this method and
        # related helpers.
        return False
       
    def macs_for_instance(self, instance):
        """What MAC addresses must this instance have?

        Some hypervisors (such as bare metal) cannot do freeform
        virtualisation of MAC addresses. This method allows drivers to return
        a set of MAC addresses that the instance is to have.
        allocate_for_instance will take this into consideration when
        provisioning networking for the instance.

        Mapping of MAC addresses to actual networks (or permitting them to be
        freeform) is up to the network implementation layer. For instance,
        with openflow switches, fixed MAC addresses can still be virtualised
        onto any L2 domain, with arbitrary VLANs etc., but regular switches
        require pre-configured MAC->network mappings that will match the
        actual configuration.

        Most hypervisors can use the default implementation, which returns
        None. Hypervisors with MAC limits should return a set of MAC
        addresses, which will be supplied to the allocate_for_instance call
        by the compute manager, and it is up to that call to ensure that all
        assigned network details are compatible with the set of MAC
        addresses.

        This is called during spawn_instance by the compute manager.

        :return: None, or a set of MAC addresses (e.g.
            set(['12:34:56:78:90:ab'])).  None means 'no constraints'; a set
            means 'these and only these MAC addresses'.
        """
        return None
       
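    # Hedged illustration (hypothetical subclass, not the default above):
    # a driver whose NICs have fixed MACs could constrain allocation:
    #
    #     def macs_for_instance(self, instance):
    #         # Only these addresses may be assigned to this instance.
    #         return set(['12:34:56:78:90:ab'])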
       
    def manage_image_cache(self, context, all_instances):
        """
        Manage the driver's local image cache.

        Some drivers choose to cache images for instances on disk. This
        method is an opportunity to do management of that cache which isn't
        directly related to other calls into the driver. The prime example is
        to clean the cache and remove images which are no longer of interest.
        """
        pass
       
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate."""
        # NOTE(jogo): Currently only used for XenAPI-Pool
        raise NotImplementedError()
       
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate."""
        raise NotImplementedError()
       
    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error=True):
        """Undo for Resource Pools."""
        raise NotImplementedError()
       
    def get_volume_connector(self, instance):
        """Get connector information for the instance for attaching to volumes.

        Connector information is a dictionary representing the IP address of
        the machine that will be making the connection, the name of the iSCSI
        initiator and the hostname of the machine as follows::

            {
                'ip': ip,
                'initiator': initiator,
                'host': hostname
            }
        """
        connector = {'ip': self.get_host_ip_addr(),
                     'host': CONF.host}
        if not self._initiator:
            self._initiator = self._get_iscsi_initiator()

        if self._initiator:
            connector['initiator'] = self._initiator
        else:
            LOG.warning(_("Could not determine iSCSI initiator name"),
                        instance=instance)

        return connector
       
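    # Hedged illustration; the values below are made-up examples of what
    # the returned connector might contain on a configured host:
    #
    #     {'ip': '192.168.0.10',
    #      'host': 'compute-1',
    #      'initiator': 'iqn.1986-03.com.sun:01:0a0b0c0d0e0f'}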
       
    def get_available_nodes(self):
        """Returns nodenames of all nodes managed by the compute service.

        This method is for multi compute-nodes support. If a driver supports
        multi compute-nodes, this method returns a list of nodenames managed
        by the service. Otherwise, this method should return
        [hypervisor_hostname].
        """
        stats = self.get_host_stats(refresh=True)
        if not isinstance(stats, list):
            stats = [stats]
        return [s['hypervisor_hostname'] for s in stats]
       
    def get_per_instance_usage(self):
        """Get information about instance resource usage.

        :returns: dict of nova uuid => dict of usage info
        """
        return {}
       
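    # Hedged illustration; the inner keys are assumptions for this sketch,
    # not a contract defined here:
    #
    #     {instance_uuid: {'memory_mb': 1024, 'uuid': instance_uuid}}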
       
    def instance_on_disk(self, instance):
        """Checks access of instance files on the host.

        :param instance: instance to lookup

        Returns True if the files of an instance with the supplied ID are
        accessible on the host, False otherwise.

        .. note::
            Used in rebuild for HA implementation and required for validation
            of access to instance shared disk files
        """
        return False
       
    def register_event_listener(self, callback):
        """Register a callback to receive events.

        Register a callback to receive asynchronous event
        notifications from hypervisors. The callback will
        be invoked with a single parameter, which will be
        an instance of the nova.virt.event.Event class.
        """
        self._compute_event_callback = callback
       
    def emit_event(self, event):
        """Dispatches an event to the compute manager.

        Invokes the event callback registered by the
        compute manager to dispatch the event. This
        must only be invoked from a green thread.
        """
        if not self._compute_event_callback:
            LOG.debug("Discarding event %s" % str(event))
            return

        if not isinstance(event, virtevent.Event):
            raise ValueError(
                _("Event must be an instance of nova.virt.event.Event"))

        try:
            LOG.debug("Emitting event %s" % str(event))
            self._compute_event_callback(event)
        except Exception as ex:
            LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
                      % locals())
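
    # Hedged usage illustration; names other than the two methods above
    # (manager.handle_events, instance_uuid) are assumptions:
    #
    #     driver.register_event_listener(manager.handle_events)
    #     driver.emit_event(virtevent.LifecycleEvent(
    #         instance_uuid, virtevent.EVENT_LIFECYCLE_STARTED))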