27 import shutil |
27 import shutil |
28 import tempfile |
28 import tempfile |
29 import uuid |
29 import uuid |
30 |
30 |
31 import rad.bindings.com.oracle.solaris.rad.kstat as kstat |
31 import rad.bindings.com.oracle.solaris.rad.kstat as kstat |
32 import rad.bindings.com.oracle.solaris.rad.zonesbridge as zonesbridge |
|
33 import rad.bindings.com.oracle.solaris.rad.zonemgr as zonemgr |
32 import rad.bindings.com.oracle.solaris.rad.zonemgr as zonemgr |
34 import rad.client |
33 import rad.client |
35 import rad.connect |
34 import rad.connect |
36 from solaris_install.archive.checkpoints import InstantiateUnifiedArchive |
35 from solaris_install.archive.checkpoints import InstantiateUnifiedArchive |
37 from solaris_install.archive import LOGFILE as ARCHIVE_LOGFILE |
36 from solaris_install.archive import LOGFILE as ARCHIVE_LOGFILE |
38 from solaris_install.archive import UnifiedArchive |
37 from solaris_install.archive import UnifiedArchive |
39 from solaris_install.engine import InstallEngine |
38 from solaris_install.engine import InstallEngine |
|
39 from solaris_install.target.size import Size |
40 |
40 |
41 from eventlet import greenthread |
41 from eventlet import greenthread |
42 from lxml import etree |
42 from lxml import etree |
43 from oslo.config import cfg |
43 from oslo.config import cfg |
44 |
44 |
45 from nova.compute import power_state |
45 from nova.compute import power_state |
46 from nova.compute import task_states |
46 from nova.compute import task_states |
47 from nova.compute import vm_mode |
47 from nova.compute import vm_states |
48 from nova import conductor |
48 from nova import conductor |
49 from nova import context as nova_context |
49 from nova import context as nova_context |
50 from nova import exception |
50 from nova import exception |
51 from nova.image import glance |
51 from nova.image import glance |
52 from nova.network import quantumv2 |
52 from nova.network import neutronv2 |
53 from nova.openstack.common import fileutils |
53 from nova.openstack.common import fileutils |
|
54 from nova.openstack.common.gettextutils import _ |
54 from nova.openstack.common import jsonutils |
55 from nova.openstack.common import jsonutils |
55 from nova.openstack.common import log as logging |
56 from nova.openstack.common import log as logging |
56 from nova import paths |
57 from nova.openstack.common import loopingcall |
|
58 from nova.openstack.common import processutils |
|
59 from nova.openstack.common import strutils |
57 from nova import utils |
60 from nova import utils |
58 from nova.virt import driver |
61 from nova.virt import driver |
59 from nova.virt import event as virtevent |
62 from nova.virt import event as virtevent |
60 from nova.virt import images |
63 from nova.virt import images |
61 from nova.virt.solariszones import sysconfig |
64 from nova.virt.solariszones import sysconfig |
147 except Exception: |
150 except Exception: |
148 raise |
151 raise |
149 |
152 |
150 |
153 |
151 class ZoneConfig(object): |
154 class ZoneConfig(object): |
152 """ ZoneConfig - context manager for access zone configurations. |
155 """ZoneConfig - context manager for access zone configurations. |
153 Automatically opens the configuration for a zone and commits any changes |
156 Automatically opens the configuration for a zone and commits any changes |
154 before exiting |
157 before exiting |
155 """ |
158 """ |
    def __init__(self, zone):
        """Initialize the context manager.

        :param zone: a zonemgr object representing either a kernel zone
                     or non-global zone.
        """
        self.zone = zone
        # Tracks whether editConfig() succeeded, so __exit__ knows whether
        # a cancel/commit of the configuration is required.
        self.editing = False
162 |
165 |
163 def __enter__(self): |
166 def __enter__(self): |
164 """ enables the editing of the zone. |
167 """enables the editing of the zone.""" |
165 """ |
|
166 try: |
168 try: |
167 self.zone.editConfig() |
169 self.zone.editConfig() |
168 self.editing = True |
170 self.editing = True |
169 return self |
171 return self |
170 except rad.client.ObjectError as err: |
172 except rad.client.ObjectError as err: |
171 LOG.error(_("Unable to initialize editing of instance '%s' via " |
173 LOG.error(_("Unable to initialize editing of instance '%s' via " |
172 "zonemgr(3RAD): %s") % (self.zone.name, err)) |
174 "zonemgr(3RAD): %s") % (self.zone.name, err)) |
173 raise |
175 raise |
174 |
176 |
175 def __exit__(self, exc_type, exc_val, exc_tb): |
177 def __exit__(self, exc_type, exc_val, exc_tb): |
176 """ looks for any kind of exception before exiting. If one is found, |
178 """looks for any kind of exception before exiting. If one is found, |
177 cancel any configuration changes and reraise the exception. If not, |
179 cancel any configuration changes and reraise the exception. If not, |
178 commit the new configuration. |
180 commit the new configuration. |
179 """ |
181 """ |
180 if exc_type is not None and self.editing: |
182 if exc_type is not None and self.editing: |
181 # We received some kind of exception. Cancel the config and raise. |
183 # We received some kind of exception. Cancel the config and raise. |
190 "instance '%s' via zonemgr(3RAD): %s") |
192 "instance '%s' via zonemgr(3RAD): %s") |
191 % (self.zone.name, err)) |
193 % (self.zone.name, err)) |
192 raise |
194 raise |
193 |
195 |
194 def setprop(self, resource, prop, value): |
196 def setprop(self, resource, prop, value): |
195 """ sets a property for an existing resource OR creates a new resource |
197 """sets a property for an existing resource OR creates a new resource |
196 with the given property(s). |
198 with the given property(s). |
197 """ |
199 """ |
198 current = lookup_resource_property(self.zone, resource, prop) |
200 current = lookup_resource_property(self.zone, resource, prop) |
199 if current is not None and current == value: |
201 if current is not None and current == value: |
200 # the value is already set |
202 # the value is already set |
281 |
282 |
282 def __init__(self, virtapi): |
283 def __init__(self, virtapi): |
283 self.virtapi = virtapi |
284 self.virtapi = virtapi |
284 self._compute_event_callback = None |
285 self._compute_event_callback = None |
285 self._conductor_api = conductor.API() |
286 self._conductor_api = conductor.API() |
|
287 self._fc_hbas = None |
|
288 self._fc_wwnns = None |
|
289 self._fc_wwpns = None |
286 self._host_stats = {} |
290 self._host_stats = {} |
287 self._initiator = None |
291 self._initiator = None |
288 self._install_engine = None |
292 self._install_engine = None |
289 self._pagesize = os.sysconf('SC_PAGESIZE') |
293 self._pagesize = os.sysconf('SC_PAGESIZE') |
290 self._uname = os.uname() |
294 self._uname = os.uname() |
310 % reason) |
314 % reason) |
311 raise exception.NovaException(msg) |
315 raise exception.NovaException(msg) |
312 |
316 |
313 def init_host(self, host): |
317 def init_host(self, host): |
314 """Initialize anything that is necessary for the driver to function, |
318 """Initialize anything that is necessary for the driver to function, |
315 including catching up with currently running VM's on the given host.""" |
319 including catching up with currently running VM's on the given host. |
316 # TODO(Vek): Need to pass context in for access to auth_token |
320 """ |
317 |
321 # TODO(Vek): Need to pass context in for access to auth_token |
318 self._init_rad() |
322 self._init_rad() |
|
323 |
|
324 def _get_fc_hbas(self): |
|
325 """Get Fibre Channel HBA information.""" |
|
326 if self._fc_hbas: |
|
327 return self._fc_hbas |
|
328 |
|
329 out = None |
|
330 try: |
|
331 out, err = utils.execute('/usr/sbin/fcinfo', 'hba-port') |
|
332 except processutils.ProcessExecutionError as err: |
|
333 return [] |
|
334 |
|
335 if out is None: |
|
336 raise RuntimeError(_("Cannot find any Fibre Channel HBAs")) |
|
337 |
|
338 hbas = [] |
|
339 hba = {} |
|
340 for line in out.splitlines(): |
|
341 line = line.strip() |
|
342 # Collect the following hba-port data: |
|
343 # 1: Port WWN |
|
344 # 2: State (online|offline) |
|
345 # 3: Node WWN |
|
346 if line.startswith("HBA Port WWN:"): |
|
347 # New HBA port entry |
|
348 hba = {} |
|
349 wwpn = line.split()[-1] |
|
350 hba['port_name'] = wwpn |
|
351 continue |
|
352 elif line.startswith("Port Mode:"): |
|
353 mode = line.split()[-1] |
|
354 # Skip Target mode ports |
|
355 if mode != 'Initiator': |
|
356 break |
|
357 elif line.startswith("State:"): |
|
358 state = line.split()[-1] |
|
359 hba['port_state'] = state |
|
360 continue |
|
361 elif line.startswith("Node WWN:"): |
|
362 wwnn = line.split()[-1] |
|
363 hba['node_name'] = wwnn |
|
364 continue |
|
365 if len(hba) == 3: |
|
366 hbas.append(hba) |
|
367 hba = {} |
|
368 self._fc_hbas = hbas |
|
369 return self._fc_hbas |
|
370 |
|
371 def _get_fc_wwnns(self): |
|
372 """Get Fibre Channel WWNNs from the system, if any.""" |
|
373 hbas = self._get_fc_hbas() |
|
374 |
|
375 wwnns = [] |
|
376 for hba in hbas: |
|
377 if hba['port_state'] == 'online': |
|
378 wwnn = hba['node_name'] |
|
379 wwnns.append(wwnn) |
|
380 return wwnns |
|
381 |
|
382 def _get_fc_wwpns(self): |
|
383 """Get Fibre Channel WWPNs from the system, if any.""" |
|
384 hbas = self._get_fc_hbas() |
|
385 |
|
386 wwpns = [] |
|
387 for hba in hbas: |
|
388 if hba['port_state'] == 'online': |
|
389 wwpn = hba['port_name'] |
|
390 wwpns.append(wwpn) |
|
391 return wwpns |
319 |
392 |
320 def _get_iscsi_initiator(self): |
393 def _get_iscsi_initiator(self): |
321 """ Return the iSCSI initiator node name IQN for this host """ |
394 """ Return the iSCSI initiator node name IQN for this host """ |
322 out, err = utils.execute('/usr/sbin/iscsiadm', 'list', |
395 out, err = utils.execute('/usr/sbin/iscsiadm', 'list', |
323 'initiator-node') |
396 'initiator-node') |
325 # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217 |
398 # Initiator node name: iqn.1986-03.com.sun:01:e00000000000.4f757217 |
326 initiator_name_line = out.splitlines()[0] |
399 initiator_name_line = out.splitlines()[0] |
327 initiator_iqn = initiator_name_line.rsplit(' ', 1)[1] |
400 initiator_iqn = initiator_name_line.rsplit(' ', 1)[1] |
328 return initiator_iqn |
401 return initiator_iqn |
329 |
402 |
|
403 def _get_zone_auto_install_state(self, zone_name): |
|
404 """Returns the SMF state of the auto-installer service, |
|
405 or None if auto-installer service is non-existent |
|
406 """ |
|
407 try: |
|
408 out, err = utils.execute('/usr/sbin/zlogin', '-S', zone_name, |
|
409 '/usr/bin/svcs', '-H', '-o', 'state', |
|
410 'auto-installer:default') |
|
411 return out.strip() |
|
412 except processutils.ProcessExecutionError as err: |
|
413 # No auto-installer instance most likely. |
|
414 return None |
|
415 |
330 def _get_zone_by_name(self, name): |
416 def _get_zone_by_name(self, name): |
331 """Return a Solaris Zones object via RAD by name.""" |
417 """Return a Solaris Zones object via RAD by name.""" |
332 try: |
418 try: |
333 zone = self._rad_instance.get_object( |
419 zone = self._rad_instance.get_object( |
334 zonemgr.Zone(), rad.client.ADRGlobPattern({'name': name})) |
420 zonemgr.Zone(), rad.client.ADRGlobPattern({'name': name})) |
335 except rad.client.NotFoundError: |
421 except rad.client.NotFoundError: |
336 return None |
422 return None |
337 except Exception: |
423 except Exception: |
338 raise |
424 raise |
339 |
|
340 return zone |
425 return zone |
341 |
426 |
342 def _get_state(self, zone): |
427 def _get_state(self, zone): |
343 """Return the running state, one of the power_state codes.""" |
428 """Return the running state, one of the power_state codes.""" |
344 return SOLARISZONES_POWER_STATE[zone.state] |
429 return SOLARISZONES_POWER_STATE[zone.state] |
349 |
434 |
350 def _get_max_mem(self, zone): |
435 def _get_max_mem(self, zone): |
351 """Return the maximum memory in KBytes allowed.""" |
436 """Return the maximum memory in KBytes allowed.""" |
352 max_mem = lookup_resource_property(zone, 'capped-memory', 'physical') |
437 max_mem = lookup_resource_property(zone, 'capped-memory', 'physical') |
353 if max_mem is not None: |
438 if max_mem is not None: |
354 return utils.to_bytes(max_mem) / 1024 |
439 return strutils.to_bytes(max_mem) / 1024 |
355 |
440 |
356 # If physical property in capped-memory doesn't exist, this may |
441 # If physical property in capped-memory doesn't exist, this may |
357 # represent a non-global zone so just return the system's total |
442 # represent a non-global zone so just return the system's total |
358 # memory. |
443 # memory. |
359 return self._pages_to_kb(os.sysconf('SC_PHYS_PAGES')) |
444 return self._pages_to_kb(os.sysconf('SC_PHYS_PAGES')) |
493 not particularly efficient. Maintainers of the virt drivers are |
576 not particularly efficient. Maintainers of the virt drivers are |
494 encouraged to override this method with something more |
577 encouraged to override this method with something more |
495 efficient. |
578 efficient. |
496 """ |
579 """ |
497 return instance_id in self.list_instances() |
580 return instance_id in self.list_instances() |
|
581 |
|
582 def estimate_instance_overhead(self, instance_info): |
|
583 """Estimate the virtualization overhead required to build an instance |
|
584 of the given flavor. |
|
585 |
|
586 Defaults to zero, drivers should override if per-instance overhead |
|
587 calculations are desired. |
|
588 |
|
589 :param instance_info: Instance/flavor to calculate overhead for. |
|
590 :returns: Dict of estimated overhead values. |
|
591 """ |
|
592 return {'memory_mb': 0} |
498 |
593 |
499 def _get_list_zone_object(self): |
594 def _get_list_zone_object(self): |
500 """Return a list of all Solaris Zones objects via RAD.""" |
595 """Return a list of all Solaris Zones objects via RAD.""" |
501 return self._rad_instance.list_objects(zonemgr.Zone()) |
596 return self._rad_instance.list_objects(zonemgr.Zone()) |
502 |
597 |
600 self._install_engine.doc.volatile.delete_children( |
694 self._install_engine.doc.volatile.delete_children( |
601 class_type=UnifiedArchive) |
695 class_type=UnifiedArchive) |
602 |
696 |
603 def _suri_from_volume_info(self, connection_info): |
697 def _suri_from_volume_info(self, connection_info): |
604 """Returns a suri(5) formatted string based on connection_info |
698 """Returns a suri(5) formatted string based on connection_info |
605 Currently supports local ZFS volume and iSCSI driver types. |
699 Currently supports local ZFS volume and iSCSI driver types. |
606 """ |
700 """ |
607 driver_type = connection_info['driver_volume_type'] |
701 driver_type = connection_info['driver_volume_type'] |
608 if driver_type not in ['iscsi', 'local']: |
702 if driver_type not in ['iscsi', 'fibre_channel', 'local']: |
609 raise exception.VolumeDriverNotFound(driver_type=driver_type) |
703 raise exception.VolumeDriverNotFound(driver_type=driver_type) |
610 if driver_type == 'local': |
704 if driver_type == 'local': |
611 suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path'] |
705 suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path'] |
612 elif driver_type == 'iscsi': |
706 elif driver_type == 'iscsi': |
613 data = connection_info['data'] |
707 data = connection_info['data'] |
619 # target_lun: 1 |
713 # target_lun: 1 |
620 suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'], |
714 suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'], |
621 data['target_iqn'], |
715 data['target_iqn'], |
622 data['target_lun']) |
716 data['target_lun']) |
623 # TODO(npower): need to handle CHAP authentication also |
717 # TODO(npower): need to handle CHAP authentication also |
624 |
718 elif driver_type == 'fibre_channel': |
|
719 data = connection_info['data'] |
|
720 target_wwn = data['target_wwn'] |
|
721 # Check for multiple target_wwn values in a list |
|
722 if isinstance(target_wwn, list): |
|
723 target_wwn = target_wwn[0] |
|
724 # Ensure there's a fibre channel HBA. |
|
725 hbas = self._get_fc_hbas() |
|
726 if not hbas: |
|
727 LOG.error(_("Cannot attach Fibre Channel volume '%s' because " |
|
728 "no Fibre Channel HBA initiators were found") |
|
729 % (target_wwn)) |
|
730 raise exception.InvalidVolume(reason="No host FC initiator") |
|
731 |
|
732 target_lun = data['target_lun'] |
|
733 # If the volume was exported just a few seconds previously then |
|
734 # it will probably not be visible to the local adapter yet. |
|
735 # Invoke 'fcinfo remote-port' on all local HBA ports to trigger |
|
736 # a refresh. |
|
737 for wwpn in self._get_fc_wwpns(): |
|
738 utils.execute('/usr/sbin/fcinfo', 'remote-port', |
|
739 '-p', wwpn) |
|
740 |
|
741 # Use suriadm(1M) to generate a Fibre Channel storage URI. |
|
742 try: |
|
743 out, err = utils.execute('/usr/sbin/suriadm', 'lookup-uri', |
|
744 '-p', 'target=naa.%s' % target_wwn, |
|
745 '-p', 'lun=%s' % target_lun) |
|
746 except processutils.ProcessExecutionError as err: |
|
747 LOG.error(_("Lookup failure of Fibre Channel volume '%s', lun " |
|
748 "%s: %s") % (target_wwn, target_lun, err.stderr)) |
|
749 raise |
|
750 |
|
751 lines = out.split('\n') |
|
752 # Use the long form SURI on the second output line. |
|
753 suri = lines[1].strip() |
625 return suri |
754 return suri |
626 |
755 |
627 def _set_global_properties(self, name, extra_specs, brand): |
756 def _set_global_properties(self, name, extra_specs, brand): |
628 """Set Solaris Zone's global properties if supplied via flavor.""" |
757 """Set Solaris Zone's global properties if supplied via flavor.""" |
629 zone = self._get_zone_by_name(name) |
758 zone = self._get_zone_by_name(name) |
675 if volume['status'] != 'creating': |
804 if volume['status'] != 'creating': |
676 break |
805 break |
677 greenthread.sleep(1) |
806 greenthread.sleep(1) |
678 |
807 |
679 except Exception as reason: |
808 except Exception as reason: |
680 LOG.error(_("Unable to create root zpool volume for instance '%s':" |
809 LOG.error(_("Unable to create root zpool volume for instance '%s'" |
681 "%s") % (instance['name'], reason)) |
810 ": %s") % (instance['name'], reason)) |
682 raise |
811 raise |
683 |
812 |
684 instance_uuid = instance['uuid'] |
813 instance_uuid = instance['uuid'] |
|
814 volume_id = volume['id'] |
685 # TODO(npower): Adequate for default boot device. We currently |
815 # TODO(npower): Adequate for default boot device. We currently |
686 # ignore this value, but cinder gets stroppy about this if we set it to |
816 # ignore this value, but cinder gets stroppy about this if we set it to |
687 # None |
817 # None |
688 mountpoint = "c1d0" |
818 mountpoint = "c1d0" |
689 |
819 |
690 try: |
820 try: |
691 connector = self.get_volume_connector(instance) |
821 connector = self.get_volume_connector(instance) |
692 connection_info = self._volume_api.initialize_connection(context, |
822 connection_info = self._volume_api.initialize_connection( |
693 volume, |
823 context, volume_id, connector) |
694 connector) |
|
695 # Check connection_info to determine if the provided volume is |
824 # Check connection_info to determine if the provided volume is |
696 # local to this compute node. If it is, then don't use it for |
825 # local to this compute node. If it is, then don't use it for |
697 # Solaris branded zones in order to avoid a know ZFS deadlock issue |
826 # Solaris branded zones in order to avoid a know ZFS deadlock issue |
698 # when using a zpool within another zpool on the same system. |
827 # when using a zpool within another zpool on the same system. |
699 delete_boot_volume = False |
828 delete_boot_volume = False |
723 "service indicates that the target is a " |
852 "service indicates that the target is a " |
724 "local volume, which should not be used " |
853 "local volume, which should not be used " |
725 "as a boot device for 'solaris' branded " |
854 "as a boot device for 'solaris' branded " |
726 "zones.")) |
855 "zones.")) |
727 delete_boot_volume = True |
856 delete_boot_volume = True |
728 else: |
857 # Assuming that fibre_channel is non-local |
|
858 elif driver_type != 'fibre_channel': |
729 # Some other connection type that we don't understand |
859 # Some other connection type that we don't understand |
730 # Let zone use some local fallback instead. |
860 # Let zone use some local fallback instead. |
731 LOG.warning(_("Unsupported volume driver type '%s' " |
861 LOG.warning(_("Unsupported volume driver type '%s' " |
732 "can not be used as a boot device for " |
862 "can not be used as a boot device for " |
733 "'solaris' branded zones.")) |
863 "'solaris' branded zones.")) |
734 delete_boot_volume = True |
864 delete_boot_volume = True |
735 |
865 |
736 if delete_boot_volume: |
866 if delete_boot_volume: |
737 LOG.warning(_("Volume '%s' is being discarded") % volume['id']) |
867 LOG.warning(_("Volume '%s' is being discarded") % volume['id']) |
738 self._volume_api.delete(context, volume) |
868 self._volume_api.delete(context, volume_id) |
739 return None |
869 return None |
740 |
870 |
741 # Notify Cinder DB of the volume attachment. |
871 # Notify Cinder DB of the volume attachment. |
742 self._volume_api.attach(context, volume, instance_uuid, mountpoint) |
872 self._volume_api.attach(context, volume_id, instance_uuid, |
|
873 mountpoint) |
743 values = { |
874 values = { |
744 'instance_uuid': instance['uuid'], |
875 'instance_uuid': instance['uuid'], |
745 'connection_info': jsonutils.dumps(connection_info), |
876 'connection_info': jsonutils.dumps(connection_info), |
746 # TODO(npower): device_name also ignored currently, but Cinder |
877 # TODO(npower): device_name also ignored currently, but Cinder |
747 # breaks without it. Figure out a sane mapping scheme. |
878 # breaks without it. Figure out a sane mapping scheme. |
748 'device_name': mountpoint, |
879 'device_name': mountpoint, |
749 'delete_on_termination': True, |
880 'delete_on_termination': True, |
750 'virtual_name': None, |
881 'virtual_name': None, |
751 'snapshot_id': None, |
882 'snapshot_id': None, |
752 'volume_id': volume['id'], |
883 'volume_id': volume_id, |
753 'volume_size': instance['root_gb'], |
884 'volume_size': instance['root_gb'], |
754 'no_device': None} |
885 'no_device': None} |
755 self._conductor_api.block_device_mapping_update_or_create(context, |
886 self._conductor_api.block_device_mapping_update_or_create(context, |
756 values) |
887 values) |
757 |
888 |
758 except Exception as reason: |
889 except Exception as reason: |
759 LOG.error(_("Unable to attach root zpool volume '%s' to instance " |
890 LOG.error(_("Unable to attach root zpool volume '%s' to instance " |
760 "%s: %s") % (volume['id'], instance['name'], reason)) |
891 "%s: %s") % (volume['id'], instance['name'], reason)) |
761 self._volume_api.detach(context, volume) |
892 self._volume_api.detach(context, volume_id) |
762 self._volume_api.delete(context, volume) |
893 self._volume_api.delete(context, volume_id) |
763 raise |
894 raise |
764 |
|
765 return connection_info |
895 return connection_info |
766 |
896 |
767 def _set_boot_device(self, name, connection_info, brand): |
897 def _set_boot_device(self, name, connection_info, brand): |
768 """Set the boot device specified by connection_info""" |
898 """Set the boot device specified by connection_info""" |
769 zone = self._get_zone_by_name(name) |
899 zone = self._get_zone_by_name(name) |
819 with ZoneConfig(zone) as zc: |
949 with ZoneConfig(zone) as zc: |
820 zc.setprop('capped-memory', mem_resource, '%dM' % memory_mb) |
950 zc.setprop('capped-memory', mem_resource, '%dM' % memory_mb) |
821 |
951 |
822 def _set_network(self, context, name, instance, network_info, brand, |
952 def _set_network(self, context, name, instance, network_info, brand, |
823 sc_dir): |
953 sc_dir): |
824 """ add networking information to the zone. |
954 """add networking information to the zone.""" |
825 """ |
|
826 zone = self._get_zone_by_name(name) |
955 zone = self._get_zone_by_name(name) |
827 if zone is None: |
956 if zone is None: |
828 raise exception.InstanceNotFound(instance_id=name) |
957 raise exception.InstanceNotFound(instance_id=name) |
829 |
958 |
830 tenant_id = None |
959 tenant_id = None |
863 id = lookup_resource_property(zc.zone, 'anet', 'id', |
992 id = lookup_resource_property(zc.zone, 'anet', 'id', |
864 filter) |
993 filter) |
865 linkname = 'net%s' % id |
994 linkname = 'net%s' % id |
866 |
995 |
867 # create the required sysconfig file |
996 # create the required sysconfig file |
868 network_plugin = quantumv2.get_client(context) |
997 network_plugin = neutronv2.get_client(context) |
869 port = network_plugin.show_port(port_uuid)['port'] |
998 port = network_plugin.show_port(port_uuid)['port'] |
870 subnet_uuid = port['fixed_ips'][0]['subnet_id'] |
999 subnet_uuid = port['fixed_ips'][0]['subnet_id'] |
871 subnet = network_plugin.show_subnet(subnet_uuid)['subnet'] |
1000 subnet = network_plugin.show_subnet(subnet_uuid)['subnet'] |
872 |
1001 |
873 if subnet['enable_dhcp']: |
1002 if subnet['enable_dhcp']: |
885 # set the tenant id |
1014 # set the tenant id |
886 with ZoneConfig(zone) as zc: |
1015 with ZoneConfig(zone) as zc: |
887 zc.setprop('global', 'tenant', tenant_id) |
1016 zc.setprop('global', 'tenant', tenant_id) |
888 |
1017 |
889 def _verify_sysconfig(self, sc_dir, instance): |
1018 def _verify_sysconfig(self, sc_dir, instance): |
890 """ verify the SC profile(s) passed in contain an entry for |
1019 """verify the SC profile(s) passed in contain an entry for |
891 system/config-user to configure the root account. If an SSH key is |
1020 system/config-user to configure the root account. If an SSH key is |
892 specified, configure root's profile to use it. |
1021 specified, configure root's profile to use it. |
893 |
|
894 """ |
1022 """ |
895 usercheck = lambda e: e.attrib.get('name') == 'system/config-user' |
1023 usercheck = lambda e: e.attrib.get('name') == 'system/config-user' |
896 hostcheck = lambda e: e.attrib.get('name') == 'system/identity' |
1024 hostcheck = lambda e: e.attrib.get('name') == 'system/identity' |
897 |
1025 |
898 root_account_needed = True |
1026 root_account_needed = True |
1101 os.chmod(sc_dir, 0755) |
1229 os.chmod(sc_dir, 0755) |
1102 |
1230 |
1103 # Attempt to provision a (Cinder) volume service backed boot volume |
1231 # Attempt to provision a (Cinder) volume service backed boot volume |
1104 connection_info = self._connect_boot_volume(context, instance, |
1232 connection_info = self._connect_boot_volume(context, instance, |
1105 extra_specs) |
1233 extra_specs) |
|
1234 name = instance['name'] |
|
1235 |
|
        def _ai_health_check(zone):
            """Periodic health check of a kernel zone's automated install.

            Invoked by a loopingcall.FixedIntervalLoopingCall while
            self._install() runs; raises loopingcall.LoopingCallDone() to
            stop the monitor once installation has finished, failed, or
            the instance is in an unexpected vm_state.
            """
            # TODO(npower) A hung kernel zone installation will not always
            # be detected by zoneadm in the host global zone, which locks
            # out other zoneadm commands.
            # Workaround:
            # Check the state of the auto-installer:default SMF service in
            # the kernel zone. If installation failed, it should be in the
            # 'maintenance' state. Unclog zoneadm by executing a shutdown
            # inside the kernel zone if that's the case.
            # Eventually we'll be able to pass a boot option to the zone
            # to have it automatically shutdown if the installation fails.
            if instance['vm_state'] == vm_states.BUILDING:
                if self._get_zone_auto_install_state(name) == 'maintenance':
                    # Poweroff the zone. This will cause the current call to
                    # self._install() to catch an exception and tear down
                    # the kernel zone.
                    LOG.error(_("Automated installation of instance '%s' "
                                "failed. Powering off the kernel zone '%s'.")
                              % (instance['display_name'], name))
                    try:
                        utils.execute('/usr/sbin/zlogin', '-S', name,
                                      '/usr/sbin/poweroff')
                    except processutils.ProcessExecutionError as err:
                        # poweroff pulls the rug from under zlogin, so ignore
                        # the anticipated error.
                        pass
                    finally:
                        # Always stop the monitor after a failed install.
                        raise loopingcall.LoopingCallDone()
                else:
                    # Looks like it installed OK
                    if zone.state == ZONE_STATE_INSTALLED:
                        LOG.debug(_("Kernel zone '%s' (%s) state: %s.")
                                  % (name, instance['display_name'],
                                     zone.state))
                        raise loopingcall.LoopingCallDone()
                    else:
                        # Still installing; keep the monitor running.
                        return
            else:
                # Can't imagine why we'd get here under normal circumstances
                LOG.warning(_("Unexpected vm_state during installation of "
                              "'%s' (%s): %s. Zone state: %s")
                            % (name, instance['display_name'],
                               instance['vm_state'], zone.state))
                raise loopingcall.LoopingCallDone()
1106 |
1280 |
1107 LOG.debug(_("creating zone configuration for '%s' (%s)") % |
1281 LOG.debug(_("creating zone configuration for '%s' (%s)") % |
1108 (instance['name'], instance['display_name'])) |
1282 (name, instance['display_name'])) |
1109 self._create_config(context, instance, network_info, |
1283 self._create_config(context, instance, network_info, |
1110 connection_info, extra_specs, sc_dir) |
1284 connection_info, extra_specs, sc_dir) |
1111 try: |
1285 try: |
1112 self._install(instance, image, extra_specs, sc_dir) |
1286 zone = self._get_zone_by_name(name) |
|
1287 is_kz = lookup_resource_property_value(zone, "global", "brand", |
|
1288 ZONE_BRAND_SOLARIS_KZ) |
|
1289 # Monitor kernel zone installation explicitly |
|
1290 if is_kz: |
|
1291 monitor = loopingcall.FixedIntervalLoopingCall( |
|
1292 _ai_health_check, zone) |
|
1293 monitor.start(interval=15, initial_delay=60) |
|
1294 self._install(instance, image, extra_specs, sc_dir) |
|
1295 monitor.wait() |
|
1296 else: |
|
1297 self._install(instance, image, extra_specs, sc_dir) |
1113 self._power_on(instance) |
1298 self._power_on(instance) |
1114 except Exception as reason: |
1299 except Exception as reason: |
1115 LOG.error(_("Unable to spawn instance '%s' via zonemgr(3RAD): %s") |
1300 LOG.error(_("Unable to spawn instance '%s' via zonemgr(3RAD): %s") |
1116 % (instance['name'], reason)) |
1301 % (name, reason)) |
1117 self._uninstall(instance) |
1302 self._uninstall(instance) |
1118 self._delete_config(instance) |
1303 self._delete_config(instance) |
1119 raise |
1304 raise |
1120 |
1305 |
1121 def _power_off(self, instance, halt_type): |
1306 def _power_off(self, instance, halt_type): |
1127 |
1312 |
1128 try: |
1313 try: |
1129 if halt_type == 'SOFT': |
1314 if halt_type == 'SOFT': |
1130 zone.shutdown() |
1315 zone.shutdown() |
1131 else: |
1316 else: |
1132 zone.halt() |
1317 # 'HARD' |
|
1318 # TODO(npower) See comments for _ai_health_check() for why |
|
1319 # it is sometimes necessary to poweroff from within the zone, |
|
1320 # until zoneadm and auto-install can perform this internally. |
|
1321 zprop = lookup_resource_property_value(zone, "global", "brand", |
|
1322 ZONE_BRAND_SOLARIS_KZ) |
|
1323 if zprop and self._get_zone_auto_install_state(name): |
|
1324 # Don't really care what state the install service is in. |
|
1325 # Just shut it down ASAP. |
|
1326 try: |
|
1327 utils.execute('/usr/sbin/zlogin', '-S', name, |
|
1328 '/usr/sbin/poweroff') |
|
1329 except processutils.ProcessExecutionError as err: |
|
1330 # Poweroff pulls the rug from under zlogin, so ignore |
|
1331 # the anticipated error. |
|
1332 return |
|
1333 else: |
|
1334 zone.halt() |
1133 return |
1335 return |
1134 except rad.client.ObjectError as reason: |
1336 except rad.client.ObjectError as reason: |
1135 result = reason.get_payload() |
1337 result = reason.get_payload() |
1136 if result.code == zonemgr.ErrorCode.COMMAND_ERROR: |
1338 if result.code == zonemgr.ErrorCode.COMMAND_ERROR: |
1137 LOG.warning(_("Ignoring command error returned while trying " |
1339 LOG.warning(_("Ignoring command error returned while trying " |
1142 LOG.error(_("Unable to power off instance '%s' via zonemgr(3RAD): " |
1344 LOG.error(_("Unable to power off instance '%s' via zonemgr(3RAD): " |
1143 "%s") % (name, reason)) |
1345 "%s") % (name, reason)) |
1144 raise exception.InstancePowerOffFailure(reason=reason) |
1346 raise exception.InstancePowerOffFailure(reason=reason) |
1145 |
1347 |
1146 def destroy(self, instance, network_info, block_device_info=None, |
1348 def destroy(self, instance, network_info, block_device_info=None, |
1147 destroy_disks=True): |
1349 destroy_disks=True, context=None): |
1148 """Destroy (shutdown and delete) the specified instance. |
1350 """Destroy (shutdown and delete) the specified instance. |
1149 |
1351 |
1150 If the instance is not found (for example if networking failed), this |
1352 If the instance is not found (for example if networking failed), this |
1151 function should still succeed. It's probably a good idea to log a |
1353 function should still succeed. It's probably a good idea to log a |
1152 warning in that case. |
1354 warning in that case. |
1307 zone = self._get_zone_by_name(name) |
1507 zone = self._get_zone_by_name(name) |
1308 if zone is None: |
1508 if zone is None: |
1309 LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)") |
1509 LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)") |
1310 % name) |
1510 % name) |
1311 raise exception.InstanceNotFound(instance_id=name) |
1511 raise exception.InstanceNotFound(instance_id=name) |
1312 |
|
1313 return self._get_zone_diagnostics(zone) |
1512 return self._get_zone_diagnostics(zone) |
1314 |
1513 |
1315 def get_all_bw_counters(self, instances): |
1514 def get_all_bw_counters(self, instances): |
1316 """Return bandwidth usage counters for each interface on each |
1515 """Return bandwidth usage counters for each interface on each |
1317 running VM""" |
1516 running VM. |
|
1517 """ |
1318 raise NotImplementedError() |
1518 raise NotImplementedError() |
1319 |
1519 |
1320 def get_all_volume_usage(self, context, compute_host_bdms): |
1520 def get_all_volume_usage(self, context, compute_host_bdms): |
1321 """Return usage info for volumes attached to vms on |
1521 """Return usage info for volumes attached to vms on |
1322 a given host""" |
1522 a given host. |
|
1523 """ |
1323 raise NotImplementedError() |
1524 raise NotImplementedError() |
1324 |
1525 |
1325 def get_host_ip_addr(self): |
1526 def get_host_ip_addr(self): |
1326 """ |
1527 """ |
1327 Retrieves the IP address of the dom0 |
1528 Retrieves the IP address of the dom0 |
1328 """ |
1529 """ |
1329 # TODO(Vek): Need to pass context in for access to auth_token |
1530 # TODO(Vek): Need to pass context in for access to auth_token |
1330 return CONF.my_ip |
1531 return CONF.my_ip |
1331 |
1532 |
1332 def attach_volume(self, connection_info, instance, mountpoint): |
1533 def attach_volume(self, context, connection_info, instance, mountpoint, |
|
1534 encryption=None): |
1333 """Attach the disk to the instance at mountpoint using info.""" |
1535 """Attach the disk to the instance at mountpoint using info.""" |
1334 # TODO(npower): Apply mountpoint in a meaningful way to the zone |
1536 # TODO(npower): Apply mountpoint in a meaningful way to the zone |
1335 # (I don't think this is even possible for Solaris brand zones) |
1537 # (I don't think this is even possible for Solaris brand zones) |
1336 name = instance['name'] |
1538 name = instance['name'] |
1337 zone = self._get_zone_by_name(name) |
1539 zone = self._get_zone_by_name(name) |
1347 suri = self._suri_from_volume_info(connection_info) |
1549 suri = self._suri_from_volume_info(connection_info) |
1348 |
1550 |
1349 with ZoneConfig(zone) as zc: |
1551 with ZoneConfig(zone) as zc: |
1350 zc.addresource("device", [zonemgr.Property("storage", suri)]) |
1552 zc.addresource("device", [zonemgr.Property("storage", suri)]) |
1351 |
1553 |
1352 def detach_volume(self, connection_info, instance, mountpoint): |
1554 def detach_volume(self, connection_info, instance, mountpoint, |
|
1555 encryption=None): |
1353 """Detach the disk attached to the instance.""" |
1556 """Detach the disk attached to the instance.""" |
1354 name = instance['name'] |
1557 name = instance['name'] |
1355 zone = self._get_zone_by_name(name) |
1558 zone = self._get_zone_by_name(name) |
1356 if zone is None: |
1559 if zone is None: |
1357 raise exception.InstanceNotFound(instance_id=name) |
1560 raise exception.InstanceNotFound(instance_id=name) |
1372 return |
1575 return |
1373 |
1576 |
1374 with ZoneConfig(zone) as zc: |
1577 with ZoneConfig(zone) as zc: |
1375 zc.removeresources("device", [zonemgr.Property("storage", suri)]) |
1578 zc.removeresources("device", [zonemgr.Property("storage", suri)]) |
1376 |
1579 |
1377 def attach_interface(self, instance, image_meta, network_info): |
1580 def swap_volume(self, old_connection_info, new_connection_info, |
|
1581 instance, mountpoint): |
|
1582 """Replace the disk attached to the instance.""" |
|
1583 raise NotImplementedError() |
|
1584 |
|
1585 def attach_interface(self, instance, image_meta, vif): |
1378 """Attach an interface to the instance.""" |
1586 """Attach an interface to the instance.""" |
1379 raise NotImplementedError() |
1587 raise NotImplementedError() |
1380 |
1588 |
1381 def detach_interface(self, instance, network_info): |
1589 def detach_interface(self, instance, vif): |
1382 """Detach an interface from the instance.""" |
1590 """Detach an interface from the instance.""" |
1383 raise NotImplementedError() |
1591 raise NotImplementedError() |
1384 |
1592 |
1385 def migrate_disk_and_power_off(self, context, instance, dest, |
1593 def migrate_disk_and_power_off(self, context, instance, dest, |
1386 instance_type, network_info, |
1594 instance_type, network_info, |
1387 block_device_info=None): |
1595 block_device_info=None): |
1388 """ |
1596 """ |
1389 Transfers the disk of a running instance in multiple phases, turning |
1597 Transfers the disk of a running instance in multiple phases, turning |
1390 off the instance before the end. |
1598 off the instance before the end. |
|
1599 """ |
|
1600 raise NotImplementedError() |
|
1601 |
|
1602 def live_snapshot(self, context, instance, image_id, update_task_state): |
|
1603 """ |
|
1604 Live-snapshots the specified instance (includes ram and proc state). |
|
1605 |
|
1606 :param context: security context |
|
1607 :param instance: Instance object as returned by DB layer. |
|
1608 :param image_id: Reference to a pre-created image that will |
|
1609 hold the snapshot. |
1391 """ |
1610 """ |
1392 raise NotImplementedError() |
1611 raise NotImplementedError() |
1393 |
1612 |
1394 def snapshot(self, context, instance, image_id, update_task_state): |
1613 def snapshot(self, context, instance, image_id, update_task_state): |
1395 """ |
1614 """ |
1483 # Delete the snapshot image file source |
1702 # Delete the snapshot image file source |
1484 os.unlink(out_path) |
1703 os.unlink(out_path) |
1485 |
1704 |
1486 def finish_migration(self, context, migration, instance, disk_info, |
1705 def finish_migration(self, context, migration, instance, disk_info, |
1487 network_info, image_meta, resize_instance, |
1706 network_info, image_meta, resize_instance, |
1488 block_device_info=None): |
1707 block_device_info=None, power_on=True): |
1489 """Completes a resize, turning on the migrated instance |
1708 """Completes a resize. |
1490 |
1709 |
|
1710 :param context: the context for the migration/resize |
|
1711 :param migration: the migrate/resize information |
|
1712 :param instance: the instance being migrated/resized |
|
1713 :param disk_info: the newly transferred disk information |
1491 :param network_info: |
1714 :param network_info: |
1492 :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` |
1715 :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` |
1493 :param image_meta: image object returned by nova.image.glance that |
1716 :param image_meta: image object returned by nova.image.glance that |
1494 defines the image from which this instance |
1717 defines the image from which this instance |
1495 was created |
1718 was created |
|
1719 :param resize_instance: True if the instance is being resized, |
|
1720 False otherwise |
|
1721 :param block_device_info: instance volume block device info |
|
1722 :param power_on: True if the instance should be powered on, False |
|
1723 otherwise |
1496 """ |
1724 """ |
1497 raise NotImplementedError() |
1725 raise NotImplementedError() |
1498 |
1726 |
1499 def confirm_migration(self, migration, instance, network_info): |
1727 def confirm_migration(self, migration, instance, network_info): |
1500 """Confirms a resize, destroying the source VM.""" |
1728 """Confirms a resize, destroying the source VM.""" |
1501 # TODO(Vek): Need to pass context in for access to auth_token |
1729 # TODO(Vek): Need to pass context in for access to auth_token |
1502 raise NotImplementedError() |
1730 raise NotImplementedError() |
1503 |
1731 |
1504 def finish_revert_migration(self, instance, network_info, |
1732 def finish_revert_migration(self, instance, network_info, |
1505 block_device_info=None): |
1733 block_device_info=None, power_on=True): |
1506 """Finish reverting a resize, powering back on the instance.""" |
1734 """ |
|
1735 Finish reverting a resize. |
|
1736 |
|
1737 :param instance: the instance being migrated/resized |
|
1738 :param network_info: |
|
1739 :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` |
|
1740 :param block_device_info: instance volume block device info |
|
1741 :param power_on: True if the instance should be powered on, False |
|
1742 otherwise |
|
1743 """ |
1507 # TODO(Vek): Need to pass context in for access to auth_token |
1744 # TODO(Vek): Need to pass context in for access to auth_token |
1508 raise NotImplementedError() |
1745 raise NotImplementedError() |
1509 |
1746 |
1510 def pause(self, instance): |
1747 def pause(self, instance): |
1511 """Pause the specified instance.""" |
1748 """Pause the specified instance.""" |
1515 def unpause(self, instance): |
1752 def unpause(self, instance): |
1516 """Unpause paused VM instance.""" |
1753 """Unpause paused VM instance.""" |
1517 # TODO(Vek): Need to pass context in for access to auth_token |
1754 # TODO(Vek): Need to pass context in for access to auth_token |
1518 raise NotImplementedError() |
1755 raise NotImplementedError() |
1519 |
1756 |
1520 def _suspend(self, instance): |
|
1521 """Suspend a Solaris Zone.""" |
|
1522 name = instance['name'] |
|
1523 zone = self._get_zone_by_name(name) |
|
1524 if zone is None: |
|
1525 raise exception.InstanceNotFound(instance_id=name) |
|
1526 |
|
1527 if self._uname[4] != 'i86pc': |
|
1528 # Only x86 platforms are currently supported. |
|
1529 raise NotImplementedError() |
|
1530 |
|
1531 zprop = lookup_resource_property_value(zone, "global", "brand", |
|
1532 ZONE_BRAND_SOLARIS_KZ) |
|
1533 if not zprop: |
|
1534 # Only Solaris Kernel zones are currently supported. |
|
1535 raise NotImplementedError() |
|
1536 |
|
1537 try: |
|
1538 zone.suspend() |
|
1539 except Exception as reason: |
|
1540 # TODO(dcomay): Try to recover in cases where zone has been |
|
1541 # resumed automatically. |
|
1542 LOG.error(_("Unable to suspend instance '%s' via zonemgr(3RAD): " |
|
1543 "%s") % (name, reason)) |
|
1544 raise exception.InstanceSuspendFailure(reason=reason) |
|
1545 |
|
1546 def suspend(self, instance): |
1757 def suspend(self, instance): |
1547 """suspend the specified instance.""" |
1758 """suspend the specified instance.""" |
1548 # TODO(Vek): Need to pass context in for access to auth_token |
1759 # TODO(Vek): Need to pass context in for access to auth_token |
1549 self._suspend(instance) |
1760 raise NotImplementedError() |
1550 |
1761 |
1551 def resume(self, instance, network_info, block_device_info=None): |
1762 def resume(self, context, instance, network_info, block_device_info=None): |
1552 """resume the specified instance.""" |
1763 """ |
1553 # TODO(Vek): Need to pass context in for access to auth_token |
1764 resume the specified instance. |
1554 try: |
1765 |
1555 self._power_on(instance) |
1766 :param context: the context for the resume |
1556 except Exception as reason: |
1767 :param instance: the instance being resumed |
1557 raise exception.InstanceResumeFailure(reason=reason) |
1768 :param network_info: |
|
1769 :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` |
|
1770 :param block_device_info: instance volume block device info |
|
1771 """ |
|
1772 raise NotImplementedError() |
1558 |
1773 |
1559 def resume_state_on_host_boot(self, context, instance, network_info, |
1774 def resume_state_on_host_boot(self, context, instance, network_info, |
1560 block_device_info=None): |
1775 block_device_info=None): |
1561 """resume guest state when a host is booted.""" |
1776 """resume guest state when a host is booted.""" |
1562 name = instance['name'] |
1777 name = instance['name'] |
1602 def _get_zpool_property(self, prop, zpool): |
1817 def _get_zpool_property(self, prop, zpool): |
1603 """Get the value of property from the zpool.""" |
1818 """Get the value of property from the zpool.""" |
1604 try: |
1819 try: |
1605 value = None |
1820 value = None |
1606 (out, _err) = utils.execute('/usr/sbin/zpool', 'get', prop, zpool) |
1821 (out, _err) = utils.execute('/usr/sbin/zpool', 'get', prop, zpool) |
1607 except exception.ProcessExecutionError as err: |
1822 except processutils.ProcessExecutionError as err: |
1608 LOG.error(_("Failed to get property '%s' from zpool '%s': %s") |
1823 LOG.error(_("Failed to get property '%s' from zpool '%s': %s") |
1609 % (prop, zpool, err.stderr)) |
1824 % (prop, zpool, err.stderr)) |
1610 return value |
1825 return value |
1611 |
1826 |
1612 zpool_prop = out.splitlines()[1].split() |
1827 zpool_prop = out.splitlines()[1].split() |
1613 if zpool_prop[1] == prop: |
1828 if zpool_prop[1] == prop: |
1614 value = zpool_prop[2] |
1829 value = zpool_prop[2] |
1615 |
|
1616 return value |
1830 return value |
1617 |
1831 |
1618 def _update_host_stats(self): |
1832 def _update_host_stats(self): |
1619 """Update currently known host stats.""" |
1833 """Update currently known host stats.""" |
1620 host_stats = {} |
1834 host_stats = {} |
1623 host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024 |
1837 host_stats['memory_mb'] = self._pages_to_kb(pages) / 1024 |
1624 |
1838 |
1625 out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/') |
1839 out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/') |
1626 root_zpool = out.split('/')[0] |
1840 root_zpool = out.split('/')[0] |
1627 size = self._get_zpool_property('size', root_zpool) |
1841 size = self._get_zpool_property('size', root_zpool) |
1628 if size is None: |
1842 if size is not None: |
|
1843 host_stats['local_gb'] = Size(size).get(Size.gb_units) |
|
1844 else: |
1629 host_stats['local_gb'] = 0 |
1845 host_stats['local_gb'] = 0 |
1630 else: |
|
1631 host_stats['local_gb'] = utils.to_bytes(size)/(1024 ** 3) |
|
1632 |
1846 |
1633 # Account for any existing processor sets by looking at the the |
1847 # Account for any existing processor sets by looking at the |
1634 # number of CPUs not assigned to any processor sets. |
1848 # number of CPUs not assigned to any processor sets. |
1635 kstat_data = self._get_kstat_by_name('misc', 'unix', '0', 'pset') |
1849 kstat_data = self._get_kstat_by_name('misc', 'unix', '0', 'pset') |
1636 if kstat_data is not None: |
1850 if kstat_data is not None: |
1642 # Subtract the number of free pages from the total to get the |
1856 # Subtract the number of free pages from the total to get the |
1643 # used. |
1857 # used. |
1644 kstat_data = self._get_kstat_by_name('pages', 'unix', '0', |
1858 kstat_data = self._get_kstat_by_name('pages', 'unix', '0', |
1645 'system_pages') |
1859 'system_pages') |
1646 if kstat_data is not None: |
1860 if kstat_data is not None: |
|
1861 free_ram_mb = self._pages_to_kb(kstat_data['freemem']) / 1024 |
1647 host_stats['memory_mb_used'] = \ |
1862 host_stats['memory_mb_used'] = \ |
1648 self._pages_to_kb((pages - kstat_data['freemem'])) / 1024 |
1863 host_stats['memory_mb'] - free_ram_mb |
1649 else: |
1864 else: |
1650 host_stats['memory_mb_used'] = 0 |
1865 host_stats['memory_mb_used'] = 0 |
1651 |
1866 |
1652 host_stats['local_gb_used'] = 0 |
1867 free = self._get_zpool_property('free', root_zpool) |
|
1868 if free is not None: |
|
1869 free_disk_gb = Size(free).get(Size.gb_units) |
|
1870 else: |
|
1871 free_disk_gb = 0 |
|
1872 host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb |
|
1873 |
1653 host_stats['hypervisor_type'] = 'solariszones' |
1874 host_stats['hypervisor_type'] = 'solariszones' |
1654 host_stats['hypervisor_version'] = int(self._uname[2].replace('.', '')) |
1875 host_stats['hypervisor_version'] = int(self._uname[2].replace('.', '')) |
1655 host_stats['hypervisor_hostname'] = self._uname[1] |
1876 host_stats['hypervisor_hostname'] = self._uname[1] |
|
1877 |
1656 if self._uname[4] == 'i86pc': |
1878 if self._uname[4] == 'i86pc': |
1657 architecture = 'x86_64' |
1879 architecture = 'x86_64' |
1658 else: |
1880 else: |
1659 architecture = 'sparc64' |
1881 architecture = 'sparc64' |
1660 host_stats['cpu_info'] = str({'arch': architecture}) |
1882 cpu_info = { |
|
1883 'arch': architecture |
|
1884 } |
|
1885 host_stats['cpu_info'] = jsonutils.dumps(cpu_info) |
|
1886 |
1661 host_stats['disk_available_least'] = 0 |
1887 host_stats['disk_available_least'] = 0 |
1662 |
1888 |
1663 supported_instances = [ |
1889 supported_instances = [ |
1664 (architecture, 'solariszones', 'zones') |
1890 (architecture, 'solariszones', 'solariszones') |
1665 ] |
1891 ] |
1666 host_stats['supported_instances'] = supported_instances |
1892 host_stats['supported_instances'] = \ |
|
1893 jsonutils.dumps(supported_instances) |
1667 |
1894 |
1668 self._host_stats = host_stats |
1895 self._host_stats = host_stats |
1669 |
1896 |
1670 def get_available_resource(self, nodename): |
1897 def get_available_resource(self, nodename): |
1671 """Retrieve resource information. |
1898 """Retrieve resource information. |
1672 |
1899 |
1673 This method is called when nova-compute launches, and |
1900 This method is called when nova-compute launches, and |
1674 as part of a periodic task |
1901 as part of a periodic task that records the results in the DB. |
1675 |
1902 |
1676 :param nodename: |
1903 :param nodename: |
1677 node which the caller want to get resources from |
1904 node which the caller want to get resources from |
1678 a driver that manages only one node can safely ignore this |
1905 a driver that manages only one node can safely ignore this |
1679 :returns: Dictionary describing resources |
1906 :returns: Dictionary describing resources |
1691 resources['hypervisor_type'] = host_stats['hypervisor_type'] |
1918 resources['hypervisor_type'] = host_stats['hypervisor_type'] |
1692 resources['hypervisor_version'] = host_stats['hypervisor_version'] |
1919 resources['hypervisor_version'] = host_stats['hypervisor_version'] |
1693 resources['hypervisor_hostname'] = host_stats['hypervisor_hostname'] |
1920 resources['hypervisor_hostname'] = host_stats['hypervisor_hostname'] |
1694 resources['cpu_info'] = host_stats['cpu_info'] |
1921 resources['cpu_info'] = host_stats['cpu_info'] |
1695 resources['disk_available_least'] = host_stats['disk_available_least'] |
1922 resources['disk_available_least'] = host_stats['disk_available_least'] |
1696 |
1923 resources['supported_instances'] = host_stats['supported_instances'] |
1697 return resources |
1924 return resources |
1698 |
1925 |
1699 def pre_live_migration(self, ctxt, instance_ref, block_device_info, |
1926 def pre_live_migration(self, ctxt, instance_ref, block_device_info, |
1700 network_info, disk_info, migrate_data=None): |
1927 network_info, disk_info, migrate_data=None): |
1701 """Prepare an instance for live migration |
1928 """Prepare an instance for live migration |
1729 :params migrate_data: implementation specific params. |
1956 :params migrate_data: implementation specific params. |
1730 |
1957 |
1731 """ |
1958 """ |
1732 raise NotImplementedError() |
1959 raise NotImplementedError() |
1733 |
1960 |
|
1961 def post_live_migration(self, ctxt, instance_ref, block_device_info): |
|
1962 """Post operation of live migration at source host. |
|
1963 |
|
1964 :param ctxt: security context |
|
1965 :instance_ref: instance object that was migrated |
|
1966 :block_device_info: instance block device information |
|
1967 """ |
|
1968 pass |
|
1969 |
1734 def post_live_migration_at_destination(self, ctxt, instance_ref, |
1970 def post_live_migration_at_destination(self, ctxt, instance_ref, |
1735 network_info, |
1971 network_info, |
1736 block_migration=False, |
1972 block_migration=False, |
1737 block_device_info=None): |
1973 block_device_info=None): |
1738 """Post operation of live migration at destination host. |
1974 """Post operation of live migration at destination host. |
1741 :param instance_ref: instance object that is migrated |
1977 :param instance_ref: instance object that is migrated |
1742 :param network_info: instance network information |
1978 :param network_info: instance network information |
1743 :param block_migration: if true, post operation of block_migration. |
1979 :param block_migration: if true, post operation of block_migration. |
1744 """ |
1980 """ |
1745 raise NotImplementedError() |
1981 raise NotImplementedError() |
|
1982 |
|
1983 def check_instance_shared_storage_local(self, ctxt, instance): |
|
1984 """Check if instance files located on shared storage. |
|
1985 |
|
1986 This runs check on the destination host, and then calls |
|
1987 back to the source host to check the results. |
|
1988 |
|
1989 :param ctxt: security context |
|
1990 :param instance: nova.db.sqlalchemy.models.Instance |
|
1991 """ |
|
1992 raise NotImplementedError() |
|
1993 |
|
1994 def check_instance_shared_storage_remote(self, ctxt, data): |
|
1995 """Check if instance files located on shared storage. |
|
1996 |
|
1997 :param context: security context |
|
1998 :param data: result of check_instance_shared_storage_local |
|
1999 """ |
|
2000 raise NotImplementedError() |
|
2001 |
|
2002 def check_instance_shared_storage_cleanup(self, ctxt, data): |
|
2003 """Do cleanup on host after check_instance_shared_storage calls |
|
2004 |
|
2005 :param ctxt: security context |
|
2006 :param data: result of check_instance_shared_storage_local |
|
2007 """ |
|
2008 pass |
1746 |
2009 |
1747 def check_can_live_migrate_destination(self, ctxt, instance_ref, |
2010 def check_can_live_migrate_destination(self, ctxt, instance_ref, |
1748 src_compute_info, dst_compute_info, |
2011 src_compute_info, dst_compute_info, |
1749 block_migration=False, |
2012 block_migration=False, |
1750 disk_over_commit=False): |
2013 disk_over_commit=False): |
1757 :param instance_ref: nova.db.sqlalchemy.models.Instance |
2020 :param instance_ref: nova.db.sqlalchemy.models.Instance |
1758 :param src_compute_info: Info about the sending machine |
2021 :param src_compute_info: Info about the sending machine |
1759 :param dst_compute_info: Info about the receiving machine |
2022 :param dst_compute_info: Info about the receiving machine |
1760 :param block_migration: if true, prepare for block migration |
2023 :param block_migration: if true, prepare for block migration |
1761 :param disk_over_commit: if true, allow disk over commit |
2024 :param disk_over_commit: if true, allow disk over commit |
|
2025 :returns: a dict containing migration info (hypervisor-dependent) |
1762 """ |
2026 """ |
1763 raise NotImplementedError() |
2027 raise NotImplementedError() |
1764 |
2028 |
1765 def check_can_live_migrate_destination_cleanup(self, ctxt, |
2029 def check_can_live_migrate_destination_cleanup(self, ctxt, |
1766 dest_check_data): |
2030 dest_check_data): |
1949 """Reboots, shuts down or powers up the host.""" |
2214 """Reboots, shuts down or powers up the host.""" |
1950 raise NotImplementedError() |
2215 raise NotImplementedError() |
1951 |
2216 |
1952 def host_maintenance_mode(self, host, mode): |
2217 def host_maintenance_mode(self, host, mode): |
1953 """Start/Stop host maintenance window. On start, it triggers |
2218 """Start/Stop host maintenance window. On start, it triggers |
1954 guest VMs evacuation.""" |
2219 guest VMs evacuation. |
|
2220 """ |
1955 raise NotImplementedError() |
2221 raise NotImplementedError() |
1956 |
2222 |
1957 def set_host_enabled(self, host, enabled): |
2223 def set_host_enabled(self, host, enabled): |
1958 """Sets the specified host's ability to accept new instances.""" |
2224 """Sets the specified host's ability to accept new instances.""" |
1959 # TODO(Vek): Need to pass context in for access to auth_token |
2225 # TODO(Vek): Need to pass context in for access to auth_token |
1972 def unplug_vifs(self, instance, network_info): |
2238 def unplug_vifs(self, instance, network_info): |
1973 """Unplug VIFs from networks.""" |
2239 """Unplug VIFs from networks.""" |
1974 raise NotImplementedError() |
2240 raise NotImplementedError() |
1975 |
2241 |
1976 def get_host_stats(self, refresh=False): |
2242 def get_host_stats(self, refresh=False): |
1977 """Return currently known host stats.""" |
2243 """Return currently known host stats. |
1978 if refresh: |
2244 |
|
2245 If the hypervisor supports pci passthrough, the returned |
|
2246 dictionary includes a key-value pair for it. |
|
2247 The key of pci passthrough device is "pci_passthrough_devices" |
|
2248 and the value is a json string for the list of assignable |
|
2249 pci devices. Each device is a dictionary, with mandatory |
|
2250 keys of 'address', 'vendor_id', 'product_id', 'dev_type', |
|
2251 'dev_id', 'label' and other optional device specific information. |
|
2252 |
|
2253 Refer to the objects/pci_device.py for more idea of these keys. |
|
2254 """ |
|
2255 if refresh or not self._host_stats: |
1979 self._update_host_stats() |
2256 self._update_host_stats() |
1980 |
|
1981 return self._host_stats |
2257 return self._host_stats |
1982 |
2258 |
1983 def block_stats(self, instance_name, disk_id): |
2259 def block_stats(self, instance_name, disk_id): |
1984 """ |
2260 """ |
1985 Return performance counters associated with the given disk_id on the |
2261 Return performance counters associated with the given disk_id on the |
2053 None means 'no constraints', a set means 'these and only these |
2323 None means 'no constraints', a set means 'these and only these |
2054 MAC addresses'. |
2324 MAC addresses'. |
2055 """ |
2325 """ |
2056 return None |
2326 return None |
2057 |
2327 |
|
2328 def dhcp_options_for_instance(self, instance): |
|
2329 """Get DHCP options for this instance. |
|
2330 |
|
2331 Some hypervisors (such as bare metal) require that instances boot from |
|
2332 the network, and manage their own TFTP service. This requires passing |
|
2333 the appropriate options out to the DHCP service. Most hypervisors can |
|
2334 use the default implementation which returns None. |
|
2335 |
|
2336 This is called during spawn_instance by the compute manager. |
|
2337 |
|
2338 Note that the format of the return value is specific to Quantum |
|
2339 client API. |
|
2340 |
|
2341 :return: None, or a set of DHCP options, eg: |
|
2342 [{'opt_name': 'bootfile-name', |
|
2343 'opt_value': '/tftpboot/path/to/config'}, |
|
2344 {'opt_name': 'server-ip-address', |
|
2345 'opt_value': '1.2.3.4'}, |
|
2346 {'opt_name': 'tftp-server', |
|
2347 'opt_value': '1.2.3.4'} |
|
2348 ] |
|
2349 """ |
|
2350 pass |
|
2351 |
2058 def manage_image_cache(self, context, all_instances): |
2352 def manage_image_cache(self, context, all_instances): |
2059 """ |
2353 """ |
2060 Manage the driver's local image cache. |
2354 Manage the driver's local image cache. |
2061 |
2355 |
2062 Some drivers chose to cache images for instances on disk. This method |
2356 Some drivers chose to cache images for instances on disk. This method |
2083 def get_volume_connector(self, instance): |
2377 def get_volume_connector(self, instance): |
2084 """Get connector information for the instance for attaching to volumes. |
2378 """Get connector information for the instance for attaching to volumes. |
2085 |
2379 |
2086 Connector information is a dictionary representing the ip of the |
2380 Connector information is a dictionary representing the ip of the |
2087 machine that will be making the connection, the name of the iscsi |
2381 machine that will be making the connection, the name of the iscsi |
2088 initiator and the hostname of the machine as follows:: |
2382 initiator, the WWPN and WWNN values of the Fibre Channel initiator, |
|
2383 and the hostname of the machine as follows: |
2089 |
2384 |
2090 { |
2385 { |
2091 'ip': ip, |
2386 'ip': ip, |
2092 'initiator': initiator, |
2387 'initiator': initiator, |
|
2388 'wwnns': wwnns, |
|
2389 'wwpns': wwpns, |
2093 'host': hostname |
2390 'host': hostname |
2094 } |
2391 } |
2095 """ |
2392 """ |
2096 connector = {'ip': self.get_host_ip_addr(), |
2393 connector = {'ip': self.get_host_ip_addr(), |
2097 'host': CONF.host} |
2394 'host': CONF.host} |
2102 connector['initiator'] = self._initiator |
2399 connector['initiator'] = self._initiator |
2103 else: |
2400 else: |
2104 LOG.warning(_("Could not determine iSCSI initiator name"), |
2401 LOG.warning(_("Could not determine iSCSI initiator name"), |
2105 instance=instance) |
2402 instance=instance) |
2106 |
2403 |
|
2404 if not self._fc_wwnns: |
|
2405 self._fc_wwnns = self._get_fc_wwnns() |
|
2406 if not self._fc_wwnns or len(self._fc_wwnns) == 0: |
|
2407 LOG.debug(_('Could not determine Fibre Channel ' |
|
2408 'World Wide Node Names'), |
|
2409 instance=instance) |
|
2410 |
|
2411 if not self._fc_wwpns: |
|
2412 self._fc_wwpns = self._get_fc_wwpns() |
|
2413 if not self._fc_wwpns or len(self._fc_wwpns) == 0: |
|
2414 LOG.debug(_('Could not determine Fibre channel ' |
|
2415 'World Wide Port Names'), |
|
2416 instance=instance) |
|
2417 |
|
2418 if self._fc_wwnns and self._fc_wwpns: |
|
2419 connector["wwnns"] = self._fc_wwnns |
|
2420 connector["wwpns"] = self._fc_wwpns |
2107 return connector |
2421 return connector |
2108 |
2422 |
2109 def get_available_nodes(self): |
2423 def get_available_nodes(self, refresh=False): |
2110 """Returns nodenames of all nodes managed by the compute service. |
2424 """Returns nodenames of all nodes managed by the compute service. |
2111 |
2425 |
2112 This method is for multi compute-nodes support. If a driver supports |
2426 This method is for multi compute-nodes support. If a driver supports |
2113 multi compute-nodes, this method returns a list of nodenames managed |
2427 multi compute-nodes, this method returns a list of nodenames managed |
2114 by the service. Otherwise, this method should return |
2428 by the service. Otherwise, this method should return |
2115 [hypervisor_hostname]. |
2429 [hypervisor_hostname]. |
2116 """ |
2430 """ |
2117 stats = self.get_host_stats(refresh=True) |
2431 stats = self.get_host_stats(refresh=refresh) |
2118 if not isinstance(stats, list): |
2432 if not isinstance(stats, list): |
2119 stats = [stats] |
2433 stats = [stats] |
2120 return [s['hypervisor_hostname'] for s in stats] |
2434 return [s['hypervisor_hostname'] for s in stats] |
2121 |
2435 |
|
2436 def node_is_available(self, nodename): |
|
2437 """Return whether this compute service manages a particular node.""" |
|
2438 if nodename in self.get_available_nodes(): |
|
2439 return True |
|
2440 # Refresh and check again. |
|
2441 return nodename in self.get_available_nodes(refresh=True) |
|
2442 |
2122 def get_per_instance_usage(self): |
2443 def get_per_instance_usage(self): |
2123 """Get information about instance resource usage. |
2444 """Get information about instance resource usage. |
2124 |
2445 |
2125 :returns: dict of nova uuid => dict of usage info |
2446 :returns: dict of nova uuid => dict of usage info |
2126 """ |
2447 """ |
2144 """Register a callback to receive events. |
2465 """Register a callback to receive events. |
2145 |
2466 |
2146 Register a callback to receive asynchronous event |
2467 Register a callback to receive asynchronous event |
2147 notifications from hypervisors. The callback will |
2468 notifications from hypervisors. The callback will |
2148 be invoked with a single parameter, which will be |
2469 be invoked with a single parameter, which will be |
2149 an instance of the nova.virt.event.Event class.""" |
2470 an instance of the nova.virt.event.Event class. |
|
2471 """ |
2150 |
2472 |
2151 self._compute_event_callback = callback |
2473 self._compute_event_callback = callback |
2152 |
2474 |
2153 def emit_event(self, event): |
2475 def emit_event(self, event): |
2154 """Dispatches an event to the compute manager. |
2476 """Dispatches an event to the compute manager. |
2155 |
2477 |
2156 Invokes the event callback registered by the |
2478 Invokes the event callback registered by the |
2157 compute manager to dispatch the event. This |
2479 compute manager to dispatch the event. This |
2158 must only be invoked from a green thread.""" |
2480 must only be invoked from a green thread. |
|
2481 """ |
2159 |
2482 |
2160 if not self._compute_event_callback: |
2483 if not self._compute_event_callback: |
2161 LOG.debug("Discarding event %s" % str(event)) |
2484 LOG.debug(_("Discarding event %s") % str(event)) |
2162 return |
2485 return |
2163 |
2486 |
2164 if not isinstance(event, virtevent.Event): |
2487 if not isinstance(event, virtevent.Event): |
2165 raise ValueError( |
2488 raise ValueError( |
2166 _("Event must be an instance of nova.virt.event.Event")) |
2489 _("Event must be an instance of nova.virt.event.Event")) |
2167 |
2490 |
2168 try: |
2491 try: |
2169 LOG.debug("Emitting event %s" % str(event)) |
2492 LOG.debug(_("Emitting event %s") % str(event)) |
2170 self._compute_event_callback(event) |
2493 self._compute_event_callback(event) |
2171 except Exception as ex: |
2494 except Exception as ex: |
2172 LOG.error(_("Exception dispatching event %(event)s: %(ex)s") |
2495 LOG.error(_("Exception dispatching event %(event)s: %(ex)s"), |
2173 % locals()) |
2496 {'event': event, 'ex': ex}) |
|
2497 |
|
2498 def delete_instance_files(self, instance): |
|
2499 """Delete any lingering instance files for an instance. |
|
2500 |
|
2501 :returns: True if the instance was deleted from disk, False otherwise. |
|
2502 """ |
|
2503 return True |
|
2504 |
|
2505 @property |
|
2506 def need_legacy_block_device_info(self): |
|
2507 """Tell the caller if the driver requires legacy block device info. |
|
2508 |
|
2509 Tell the caller weather we expect the legacy format of block |
|
2510 device info to be passed in to methods that expect it. |
|
2511 """ |
|
2512 return True |
|
2513 |
|
2514 def volume_snapshot_create(self, context, instance, volume_id, |
|
2515 create_info): |
|
2516 """ |
|
2517 Snapshots volumes attached to a specified instance. |
|
2518 |
|
2519 :param context: request context |
|
2520 :param instance: Instance object that has the volume attached |
|
2521 :param volume_id: Volume to be snapshotted |
|
2522 :param create_info: The data needed for nova to be able to attach |
|
2523 to the volume. This is the same data format returned by |
|
2524 Cinder's initialize_connection() API call. In the case of |
|
2525 doing a snapshot, it is the image file Cinder expects to be |
|
2526 used as the active disk after the snapshot operation has |
|
2527 completed. There may be other data included as well that is |
|
2528 needed for creating the snapshot. |
|
2529 """ |
|
2530 raise NotImplementedError() |
|
2531 |
|
2532 def volume_snapshot_delete(self, context, instance, volume_id, |
|
2533 snapshot_id, delete_info): |
|
2534 """ |
|
2535 Snapshots volumes attached to a specified instance. |
|
2536 |
|
2537 :param context: request context |
|
2538 :param instance: Instance object that has the volume attached |
|
2539 :param volume_id: Attached volume associated with the snapshot |
|
2540 :param snapshot_id: The snapshot to delete. |
|
2541 :param delete_info: Volume backend technology specific data needed to |
|
2542 be able to complete the snapshot. For example, in the case of |
|
2543 qcow2 backed snapshots, this would include the file being |
|
2544 merged, and the file being merged into (if appropriate). |
|
2545 """ |
|
2546 raise NotImplementedError() |
|
2547 |
|
2548 def default_root_device_name(self, instance, image_meta, root_bdm): |
|
2549 """Provide a default root device name for the driver.""" |
|
2550 raise NotImplementedError() |
|
2551 |
|
2552 def default_device_names_for_instance(self, instance, root_device_name, |
|
2553 *block_device_lists): |
|
2554 """Default the missing device names in the block device mapping.""" |
|
2555 raise NotImplementedError() |