33 import rad.client |
33 import rad.client |
34 import rad.connect |
34 import rad.connect |
35 from solaris_install.target.size import Size |
35 from solaris_install.target.size import Size |
36 |
36 |
37 from cinderclient import exceptions as cinder_exception |
37 from cinderclient import exceptions as cinder_exception |
|
38 from cinderclient.v1 import client as v1_client |
38 from eventlet import greenthread |
39 from eventlet import greenthread |
|
40 from keystoneclient import exceptions as keystone_exception |
39 from lxml import etree |
41 from lxml import etree |
|
42 from oslo_concurrency import processutils |
40 from oslo_config import cfg |
43 from oslo_config import cfg |
|
44 from oslo_log import log as logging |
|
45 from oslo_serialization import jsonutils |
|
46 from oslo_utils import excutils |
|
47 from oslo_utils import strutils |
41 from passlib.hash import sha256_crypt |
48 from passlib.hash import sha256_crypt |
42 |
49 |
43 from nova.api.metadata import password |
50 from nova.api.metadata import password |
44 from nova.compute import power_state |
51 from nova.compute import power_state |
45 from nova.compute import task_states |
52 from nova.compute import task_states |
47 from nova.console import type as ctype |
54 from nova.console import type as ctype |
48 from nova import conductor |
55 from nova import conductor |
49 from nova import context as nova_context |
56 from nova import context as nova_context |
50 from nova import crypto |
57 from nova import crypto |
51 from nova import exception |
58 from nova import exception |
52 from nova.i18n import _ |
59 from nova.i18n import _, _LE, _LI |
53 from nova.image import glance |
60 from nova.image import glance |
54 from nova.network import neutronv2 |
61 from nova.network.neutronv2 import api as neutronv2_api |
55 from nova import objects |
62 from nova import objects |
56 from nova.objects import flavor as flavor_obj |
63 from nova.objects import flavor as flavor_obj |
57 from nova.openstack.common import excutils |
|
58 from nova.openstack.common import fileutils |
64 from nova.openstack.common import fileutils |
59 from nova.openstack.common import jsonutils |
|
60 from nova.openstack.common import log as logging |
|
61 from nova.openstack.common import loopingcall |
|
62 from nova.openstack.common import processutils |
|
63 from nova.openstack.common import strutils |
|
64 from nova import utils |
65 from nova import utils |
65 from nova.virt import driver |
66 from nova.virt import driver |
66 from nova.virt import event as virtevent |
67 from nova.virt import event as virtevent |
|
68 from nova.virt import hardware |
67 from nova.virt import images |
69 from nova.virt import images |
68 from nova.virt.solariszones import sysconfig |
70 from nova.virt.solariszones import sysconfig |
69 from nova.volume.cinder import API |
71 from nova.volume.cinder import API |
70 from nova.volume.cinder import cinderclient |
72 from nova.volume.cinder import cinderclient |
71 from nova.volume.cinder import get_cinder_client_version |
73 from nova.volume.cinder import get_cinder_client_version |
240 :param availability_zone: zone:host where the volume is to be created |
242 :param availability_zone: zone:host where the volume is to be created |
241 :param source_volume: Volume object |
243 :param source_volume: Volume object |
242 |
244 |
243 Returns a volume object |
245 Returns a volume object |
244 """ |
246 """ |
|
247 client = cinderclient(context) |
|
248 |
245 if snapshot is not None: |
249 if snapshot is not None: |
246 snapshot_id = snapshot['id'] |
250 snapshot_id = snapshot['id'] |
247 else: |
251 else: |
248 snapshot_id = None |
252 snapshot_id = None |
249 |
253 |
259 availability_zone=availability_zone, |
263 availability_zone=availability_zone, |
260 metadata=metadata, |
264 metadata=metadata, |
261 imageRef=image_id, |
265 imageRef=image_id, |
262 source_volid=source_volid) |
266 source_volid=source_volid) |
263 |
267 |
264 version = get_cinder_client_version(context) |
268 if isinstance(client, v1_client.Client): |
265 if version == '1': |
|
266 kwargs['display_name'] = name |
269 kwargs['display_name'] = name |
267 kwargs['display_description'] = description |
270 kwargs['display_description'] = description |
268 elif version == '2': |
271 else: |
269 kwargs['name'] = name |
272 kwargs['name'] = name |
270 kwargs['description'] = description |
273 kwargs['description'] = description |
271 |
274 |
272 try: |
275 try: |
273 item = cinderclient(context).volumes.create(size, **kwargs) |
276 item = cinderclient(context).volumes.create(size, **kwargs) |
274 return _untranslate_volume_summary_view(context, item) |
277 return _untranslate_volume_summary_view(context, item) |
275 except cinder_exception.OverLimit: |
278 except cinder_exception.OverLimit: |
276 raise exception.OverQuota(overs='volumes') |
279 raise exception.OverQuota(overs='volumes') |
277 except cinder_exception.BadRequest as err: |
280 except (cinder_exception.BadRequest, |
278 raise exception.InvalidInput(reason=unicode(err)) |
281 keystone_exception.BadRequest) as reason: |
|
282 raise exception.InvalidInput(reason=reason) |
279 |
283 |
280 @translate_volume_exception |
284 @translate_volume_exception |
281 def update(self, context, volume_id, fields): |
285 def update(self, context, volume_id, fields): |
282 """Update the fields of a volume for example used to rename a volume |
286 """Update the fields of a volume for example used to rename a volume |
283 via a call to cinderclient |
287 via a call to cinderclient |
702 def get_info(self, instance): |
706 def get_info(self, instance): |
703 """Get the current status of an instance, by name (not ID!) |
707 """Get the current status of an instance, by name (not ID!) |
704 |
708 |
705 :param instance: nova.objects.instance.Instance object |
709 :param instance: nova.objects.instance.Instance object |
706 |
710 |
707 Returns a dict containing: |
711 Returns a InstanceInfo object |
708 |
|
709 :state: the running state, one of the power_state codes |
|
710 :max_mem: (int) the maximum memory in KBytes allowed |
|
711 :mem: (int) the memory in KBytes used by the domain |
|
712 :num_cpu: (int) the number of virtual CPUs for the domain |
|
713 :cpu_time: (int) the CPU time used in nanoseconds |
|
714 """ |
712 """ |
715 # TODO(Vek): Need to pass context in for access to auth_token |
713 # TODO(Vek): Need to pass context in for access to auth_token |
716 name = instance['name'] |
714 name = instance['name'] |
717 zone = self._get_zone_by_name(name) |
715 zone = self._get_zone_by_name(name) |
718 if zone is None: |
716 if zone is None: |
719 raise exception.InstanceNotFound(instance_id=name) |
717 raise exception.InstanceNotFound(instance_id=name) |
720 return { |
718 return hardware.InstanceInfo(state=self._get_state(zone), |
721 'state': self._get_state(zone), |
719 max_mem_kb=self._get_max_mem(zone), |
722 'max_mem': self._get_max_mem(zone), |
720 mem_kb=self._get_mem(zone), |
723 'mem': self._get_mem(zone), |
721 num_cpu=self._get_num_cpu(zone), |
724 'num_cpu': self._get_num_cpu(zone), |
722 cpu_time_ns=self._get_cpu_time(zone)) |
725 'cpu_time': self._get_cpu_time(zone) |
|
726 } |
|
727 |
723 |
728 def get_num_instances(self): |
724 def get_num_instances(self): |
729 """Return the total number of virtual machines. |
725 """Return the total number of virtual machines. |
730 |
726 |
731 Return the number of virtual machines that the hypervisor knows |
727 Return the number of virtual machines that the hypervisor knows |
889 # So the root device is not expected to be local so we can move |
885 # So the root device is not expected to be local so we can move |
890 # forward with building the zone. |
886 # forward with building the zone. |
891 if driver_type not in shared_storage: |
887 if driver_type not in shared_storage: |
892 msg = (_("Root device is not on shared storage for instance " |
888 msg = (_("Root device is not on shared storage for instance " |
893 "'%s'.") % instance['name']) |
889 "'%s'.") % instance['name']) |
|
890 |
894 raise exception.NovaException(msg) |
891 raise exception.NovaException(msg) |
895 |
892 |
896 if not recreate: |
893 if not recreate: |
897 self.destroy(context, instance, network_info, block_device_info) |
894 self.destroy(context, instance, network_info, block_device_info) |
898 if root_ci is not None: |
895 if root_ci is not None: |
899 self._volume_api.detach(context, root_ci['serial']) |
896 self._volume_api.detach(context, root_ci['serial']) |
900 self._volume_api.delete(context, root_ci['serial']) |
897 self._volume_api.delete(context, root_ci['serial']) |
901 |
|
902 # We need to clear the block device mapping for the root device |
|
903 bdmobj = objects.BlockDeviceMapping() |
|
904 bdm = bdmobj.get_by_volume_id(context, root_ci['serial']) |
|
905 bdm.destroy(context) |
|
906 |
898 |
907 instance.task_state = task_states.REBUILD_SPAWNING |
899 instance.task_state = task_states.REBUILD_SPAWNING |
908 instance.save( |
900 instance.save( |
909 expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) |
901 expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING]) |
910 |
902 |
986 return |
978 return |
987 |
979 |
988 try: |
980 try: |
989 ua = self._archive_manager.getArchive(image) |
981 ua = self._archive_manager.getArchive(image) |
990 except Exception as ex: |
982 except Exception as ex: |
991 reason = ex.get_payload().info |
983 reason = ex.get_payload().info |
992 raise exception.ImageUnacceptable( |
984 raise exception.ImageUnacceptable( |
993 image_id=instance['image_ref'], |
985 image_id=instance['image_ref'], |
994 reason=reason) |
986 reason=reason) |
995 |
987 |
996 # Validate the image at this point to ensure: |
988 # Validate the image at this point to ensure: |
997 # - contains one deployable system |
989 # - contains one deployable system |
998 deployables = ua.getArchivedSystems() |
990 deployables = ua.getArchivedSystems() |
999 if len(deployables) != 1: |
991 if len(deployables) != 1: |
1016 reason=reason) |
1008 reason=reason) |
1017 # - looks like it's OK |
1009 # - looks like it's OK |
1018 self._validated_archives.append(instance['image_ref']) |
1010 self._validated_archives.append(instance['image_ref']) |
1019 |
1011 |
1020 def _suri_from_volume_info(self, connection_info): |
1012 def _suri_from_volume_info(self, connection_info): |
1021 """Returns a suri(5) formatted string based on connection_info |
1013 """Returns a suri(5) formatted string based on connection_info. |
1022 Currently supports local ZFS volume and iSCSI driver types. |
1014 Currently supports local ZFS volume, NFS, Fibre Channel and iSCSI |
|
1015 driver types. |
1023 """ |
1016 """ |
1024 driver_type = connection_info['driver_volume_type'] |
1017 driver_type = connection_info['driver_volume_type'] |
1025 if driver_type not in ['iscsi', 'fibre_channel', 'local']: |
1018 if driver_type not in ['iscsi', 'fibre_channel', 'local', 'nfs']: |
1026 raise exception.VolumeDriverNotFound(driver_type=driver_type) |
1019 raise exception.VolumeDriverNotFound(driver_type=driver_type) |
1027 if driver_type == 'local': |
1020 if driver_type == 'local': |
1028 suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path'] |
1021 suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path'] |
1029 elif driver_type == 'iscsi': |
1022 elif driver_type == 'iscsi': |
1030 data = connection_info['data'] |
1023 data = connection_info['data'] |
1036 # target_lun: 1 |
1029 # target_lun: 1 |
1037 suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'], |
1030 suri = 'iscsi://%s/target.%s,lun.%d' % (data['target_portal'], |
1038 data['target_iqn'], |
1031 data['target_iqn'], |
1039 data['target_lun']) |
1032 data['target_lun']) |
1040 # TODO(npower): need to handle CHAP authentication also |
1033 # TODO(npower): need to handle CHAP authentication also |
|
1034 elif driver_type == 'nfs': |
|
1035 data = connection_info['data'] |
|
1036 suri = ( |
|
1037 'nfs://cinder:cinder@%s/%s' % |
|
1038 (data['export'].replace(':', ''), data['name']) |
|
1039 ) |
|
1040 |
1041 elif driver_type == 'fibre_channel': |
1041 elif driver_type == 'fibre_channel': |
1042 data = connection_info['data'] |
1042 data = connection_info['data'] |
1043 target_wwn = data['target_wwn'] |
1043 target_wwn = data['target_wwn'] |
1044 # Check for multiple target_wwn values in a list |
1044 # Check for multiple target_wwn values in a list |
1045 if isinstance(target_wwn, list): |
1045 if isinstance(target_wwn, list): |
1261 else: |
1261 else: |
1262 zc.removeresources("anet", [zonemgr.Property("id", "0")]) |
1262 zc.removeresources("anet", [zonemgr.Property("id", "0")]) |
1263 return |
1263 return |
1264 |
1264 |
1265 tenant_id = None |
1265 tenant_id = None |
1266 network_plugin = neutronv2.get_client(context) |
1266 network_plugin = neutronv2_api.get_client(context) |
1267 for netid, network in enumerate(network_info): |
1267 for netid, network in enumerate(network_info): |
1268 if tenant_id is None: |
1268 if tenant_id is None: |
1269 tenant_id = network['network']['meta']['tenant_id'] |
1269 tenant_id = network['network']['meta']['tenant_id'] |
1270 port_uuid = network['id'] |
1270 port_uuid = network['id'] |
1271 port = network_plugin.show_port(port_uuid)['port'] |
1271 port = network_plugin.show_port(port_uuid)['port'] |
1636 """Returns True if the instance has a zone VNC console SMF service""" |
1636 """Returns True if the instance has a zone VNC console SMF service""" |
1637 name = instance['name'] |
1637 name = instance['name'] |
1638 console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name |
1638 console_fmri = VNC_CONSOLE_BASE_FMRI + ':' + name |
1639 # TODO(npower): investigate using RAD instead of CLI invocation |
1639 # TODO(npower): investigate using RAD instead of CLI invocation |
1640 try: |
1640 try: |
1641 utils.execute('/usr/bin/svcs', '-H', '-o', 'state', |
1641 utils.execute('/usr/bin/svcs', '-H', '-o', 'state', console_fmri) |
1642 console_fmri) |
|
1643 return True |
1642 return True |
1644 except Exception: |
1643 except Exception: |
1645 return False |
1644 return False |
1646 |
1645 |
1647 def _install(self, instance, image, sc_dir): |
1646 def _install(self, instance, image, sc_dir): |
1807 finally: |
1806 finally: |
1808 # remove the sc_profile temp directory |
1807 # remove the sc_profile temp directory |
1809 shutil.rmtree(sc_dir) |
1808 shutil.rmtree(sc_dir) |
1810 |
1809 |
1811 if connection_info is not None: |
1810 if connection_info is not None: |
1812 bdm = objects.BlockDeviceMapping( |
1811 bdm_obj = objects.BlockDeviceMappingList() |
1813 source_type='volume', |
1812 # there's only one bdm for this instance at this point |
1814 destination_type='volume', |
1813 bdm = bdm_obj.get_by_instance_uuid(context, |
1815 instance_uuid=instance.uuid, |
1814 instance.uuid).objects[0] |
1816 volume_id=volume_id, |
1815 |
1817 connection_info=jsonutils.dumps(connection_info), |
1816 # update the required attributes |
1818 device_name=mountpoint, |
1817 bdm['connection_info'] = jsonutils.dumps(connection_info) |
1819 delete_on_termination=True, |
1818 bdm['source_type'] = 'volume' |
1820 volume_size=instance['root_gb']) |
1819 bdm['destination_type'] = 'volume' |
1821 bdm.create(context) |
1820 bdm['device_name'] = mountpoint |
|
1821 bdm['delete_on_termination'] = True |
|
1822 bdm['volume_id'] = volume_id |
|
1823 bdm['volume_size'] = instance['root_gb'] |
1822 bdm.save() |
1824 bdm.save() |
1823 |
1825 |
1824 def _power_off(self, instance, halt_type): |
1826 def _power_off(self, instance, halt_type): |
1825 """Power off a Solaris Zone.""" |
1827 """Power off a Solaris Zone.""" |
1826 name = instance['name'] |
1828 name = instance['name'] |
1851 block_device_info): |
1853 block_device_info): |
1852 """Reverts the zones configuration to pre-resize config |
1854 """Reverts the zones configuration to pre-resize config |
1853 """ |
1855 """ |
1854 self.power_off(instance) |
1856 self.power_off(instance) |
1855 |
1857 |
1856 inst_type = flavor_obj.Flavor.get_by_id( |
1858 extra_specs = self._get_extra_specs(instance) |
1857 nova_context.get_admin_context(read_deleted='yes'), |
|
1858 instance['instance_type_id']) |
|
1859 extra_specs = inst_type['extra_specs'].copy() |
|
1860 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
1859 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
1861 |
1860 |
1862 name = instance['name'] |
1861 name = instance['name'] |
1863 |
1862 |
1864 cpu = int(instance.system_metadata['old_instance_type_vcpus']) |
1863 self._set_num_cpu(name, instance.vcpus, brand) |
1865 mem = int(instance.system_metadata['old_instance_type_memory_mb']) |
1864 self._set_memory_cap(name, instance.memory_mb, brand) |
1866 |
1865 |
1867 self._set_num_cpu(name, cpu, brand) |
1866 rgb = instance.root_gb |
1868 self._set_memory_cap(name, mem, brand) |
|
1869 |
|
1870 rgb = int(instance.system_metadata['new_instance_type_root_gb']) |
|
1871 old_rvid = instance.system_metadata.get('old_instance_volid') |
1867 old_rvid = instance.system_metadata.get('old_instance_volid') |
1872 if old_rvid: |
1868 if old_rvid: |
1873 new_rvid = instance.system_metadata.get('new_instance_volid') |
1869 new_rvid = instance.system_metadata.get('new_instance_volid') |
1874 newvname = instance['display_name'] + "-" + self._rootzpool_suffix |
1870 newvname = instance['display_name'] + "-" + self._rootzpool_suffix |
1875 mount_dev = instance['root_device_name'] |
1871 mount_dev = instance['root_device_name'] |
1895 :param destroy_disks: Indicates if disks should be destroyed |
1891 :param destroy_disks: Indicates if disks should be destroyed |
1896 :param migrate_data: implementation specific params |
1892 :param migrate_data: implementation specific params |
1897 """ |
1893 """ |
1898 if (instance['task_state'] == task_states.RESIZE_REVERTING and |
1894 if (instance['task_state'] == task_states.RESIZE_REVERTING and |
1899 instance.system_metadata['old_vm_state'] == vm_states.RESIZED): |
1895 instance.system_metadata['old_vm_state'] == vm_states.RESIZED): |
1900 self._samehost_revert_resize(context, instance, network_info, |
|
1901 block_device_info) |
|
1902 return |
1896 return |
1903 |
1897 |
1904 # A destroy is issued for the original zone for an evac case. If |
1898 # A destroy is issued for the original zone for an evac case. If |
1905 # the evac fails we need to protect the zone from deletion when |
1899 # the evac fails we need to protect the zone from deletion when |
1906 # power comes back on. |
1900 # power comes back on. |
2339 waiting for it to shutdown |
2333 waiting for it to shutdown |
2340 """ |
2334 """ |
2341 LOG.debug("Starting migrate_disk_and_power_off", instance=instance) |
2335 LOG.debug("Starting migrate_disk_and_power_off", instance=instance) |
2342 |
2336 |
2343 samehost = (dest == self.get_host_ip_addr()) |
2337 samehost = (dest == self.get_host_ip_addr()) |
2344 inst_type = flavor_obj.Flavor.get_by_id( |
2338 if samehost: |
2345 nova_context.get_admin_context(read_deleted='yes'), |
2339 instance.system_metadata['resize_samehost'] = samehost |
2346 instance['instance_type_id']) |
2340 |
2347 extra_specs = inst_type['extra_specs'].copy() |
2341 extra_specs = self._get_extra_specs(instance) |
2348 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
2342 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
2349 if brand != ZONE_BRAND_SOLARIS_KZ and not samehost: |
2343 if brand != ZONE_BRAND_SOLARIS_KZ and not samehost: |
2350 reason = (_("'%s' branded zones do not currently support resize " |
2344 reason = (_("'%s' branded zones do not currently support resize " |
2351 "to a different host.") % brand) |
2345 "to a different host.") % brand) |
2352 raise exception.MigrationPreCheckError(reason=reason) |
2346 raise exception.MigrationPreCheckError(reason=reason) |
2354 if brand != flavor['extra_specs'].get('zonecfg:brand'): |
2348 if brand != flavor['extra_specs'].get('zonecfg:brand'): |
2355 reason = (_("Unable to change brand of zone during resize.")) |
2349 reason = (_("Unable to change brand of zone during resize.")) |
2356 raise exception.MigrationPreCheckError(reason=reason) |
2350 raise exception.MigrationPreCheckError(reason=reason) |
2357 |
2351 |
2358 orgb = instance['root_gb'] |
2352 orgb = instance['root_gb'] |
2359 nrgb = int(instance.system_metadata['new_instance_type_root_gb']) |
2353 nrgb = flavor.root_gb |
2360 if orgb > nrgb: |
2354 if orgb > nrgb: |
2361 msg = (_("Unable to resize to a smaller boot volume.")) |
2355 msg = (_("Unable to resize to a smaller boot volume.")) |
2362 raise exception.ResizeError(reason=msg) |
2356 raise exception.ResizeError(reason=msg) |
2363 |
2357 |
2364 self.power_off(instance, timeout, retry_interval) |
2358 self.power_off(instance, timeout, retry_interval) |
2449 'properties': { |
2443 'properties': { |
2450 'image_location': 'snapshot', |
2444 'image_location': 'snapshot', |
2451 'image_state': 'available', |
2445 'image_state': 'available', |
2452 'owner_id': instance['project_id'], |
2446 'owner_id': instance['project_id'], |
2453 'instance_uuid': instance['uuid'], |
2447 'instance_uuid': instance['uuid'], |
|
2448 'image_type': snapshot['properties']['image_type'], |
2454 } |
2449 } |
2455 } |
2450 } |
2456 # Match architecture, hypervisor_type and vm_mode properties to base |
2451 # Match architecture, hypervisor_type and vm_mode properties to base |
2457 # image. |
2452 # image. |
2458 for prop in ['architecture', 'hypervisor_type', 'vm_mode']: |
2453 for prop in ['architecture', 'hypervisor_type', 'vm_mode']: |
2459 if prop in base.get('properties', {}): |
2454 if prop in base.get('properties', {}): |
2460 base_prop = base['properties'][prop] |
2455 base_prop = base['properties'][prop] |
2461 metadata['properties'][prop] = base_prop |
2456 metadata['properties'][prop] = base_prop |
2462 |
2457 |
2463 # Set generic container and disk formats initially in case the glance |
2458 # Set generic container and disk formats initially in case the glance |
2464 # service rejects unified archives (uar) and zfs in metadata |
2459 # service rejects Unified Archives (uar) and ZFS in metadata. |
2465 metadata['container_format'] = 'ovf' |
2460 metadata['container_format'] = 'ovf' |
2466 metadata['disk_format'] = 'raw' |
2461 metadata['disk_format'] = 'raw' |
2467 |
2462 |
2468 update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) |
2463 update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) |
2469 snapshot_directory = CONF.solariszones_snapshots_directory |
2464 snapshot_directory = CONF.solariszones_snapshots_directory |
2578 |
2573 |
2579 samehost = (migration['dest_node'] == migration['source_node']) |
2574 samehost = (migration['dest_node'] == migration['source_node']) |
2580 if samehost: |
2575 if samehost: |
2581 instance.system_metadata['old_vm_state'] = vm_states.RESIZED |
2576 instance.system_metadata['old_vm_state'] = vm_states.RESIZED |
2582 |
2577 |
2583 inst_type = flavor_obj.Flavor.get_by_id( |
2578 extra_specs = self._get_extra_specs(instance) |
2584 nova_context.get_admin_context(read_deleted='yes'), |
|
2585 instance['instance_type_id']) |
|
2586 extra_specs = inst_type['extra_specs'].copy() |
|
2587 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
2579 brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS) |
2588 name = instance['name'] |
2580 name = instance['name'] |
2589 |
2581 |
2590 if disk_info: |
2582 if disk_info: |
2591 bmap = block_device_info.get('block_device_mapping') |
2583 bmap = block_device_info.get('block_device_mapping') |
2596 root_ci = entry['connection_info'] |
2588 root_ci = entry['connection_info'] |
2597 break |
2589 break |
2598 |
2590 |
2599 try: |
2591 try: |
2600 if samehost: |
2592 if samehost: |
2601 metadstr = 'new_instance_type_vcpus' |
2593 cpu = instance.vcpus |
2602 cpu = int(instance.system_metadata[metadstr]) |
2594 mem = instance.memory_mb |
2603 metadstr = 'new_instance_type_memory_mb' |
|
2604 mem = int(instance.system_metadata[metadstr]) |
|
2605 self._set_num_cpu(name, cpu, brand) |
2595 self._set_num_cpu(name, cpu, brand) |
2606 self._set_memory_cap(name, mem, brand) |
2596 self._set_memory_cap(name, mem, brand) |
2607 |
2597 |
2608 # Add the new disk to the volume if the size of the disk |
2598 # Add the new disk to the volume if the size of the disk |
2609 # changed |
2599 # changed |
2610 if disk_info: |
2600 if disk_info: |
2611 metadstr = 'new_instance_type_root_gb' |
2601 rgb = instance.root_gb |
2612 rgb = int(instance.system_metadata[metadstr]) |
|
2613 self._resize_disk_migration(context, instance, |
2602 self._resize_disk_migration(context, instance, |
2614 root_ci['serial'], |
2603 root_ci['serial'], |
2615 disk_info['id'], |
2604 disk_info['id'], |
2616 rgb, mount_dev) |
2605 rgb, mount_dev) |
2617 |
2606 |
2686 self._volume_api.update(context, new_rvid, |
2675 self._volume_api.update(context, new_rvid, |
2687 {'display_name': new_vname}) |
2676 {'display_name': new_vname}) |
2688 |
2677 |
2689 if not samehost: |
2678 if not samehost: |
2690 self.destroy(context, instance, network_info) |
2679 self.destroy(context, instance, network_info) |
|
2680 else: |
|
2681 del instance.system_metadata['resize_samehost'] |
2691 |
2682 |
2692 def _resize_disk_migration(self, context, instance, configured, |
2683 def _resize_disk_migration(self, context, instance, configured, |
2693 replacement, newvolumesz, mountdev, |
2684 replacement, newvolumesz, mountdev, |
2694 samehost=True): |
2685 samehost=True): |
2695 """Handles the zone root volume switch-over or simply |
2686 """Handles the zone root volume switch-over or simply |
2762 :param block_device_info: instance volume block device info |
2753 :param block_device_info: instance volume block device info |
2763 :param power_on: True if the instance should be powered on, False |
2754 :param power_on: True if the instance should be powered on, False |
2764 otherwise |
2755 otherwise |
2765 """ |
2756 """ |
2766 # If this is not a samehost migration then we need to re-attach the |
2757 # If this is not a samehost migration then we need to re-attach the |
2767 # original volume to the instance. If this was processed in the |
2758 # original volume to the instance. Otherwise we need to update the |
2768 # initial revert handling this work has already been done. |
2759 # original zone configuration. |
|
2760 samehost = instance.system_metadata.get('resize_samehost') |
|
2761 if samehost: |
|
2762 self._samehost_revert_resize(context, instance, network_info, |
|
2763 block_device_info) |
|
2764 del instance.system_metadata['resize_samehost'] |
|
2765 |
2769 old_rvid = instance.system_metadata.get('old_instance_volid') |
2766 old_rvid = instance.system_metadata.get('old_instance_volid') |
2770 if old_rvid: |
2767 if old_rvid: |
2771 connector = self.get_volume_connector(instance) |
2768 connector = self.get_volume_connector(instance) |
2772 connection_info = self._volume_api.initialize_connection(context, |
2769 connection_info = self._volume_api.initialize_connection(context, |
2773 old_rvid, |
2770 old_rvid, |
2811 :param instance: nova.objects.instance.Instance |
2808 :param instance: nova.objects.instance.Instance |
2812 """ |
2809 """ |
2813 # TODO(Vek): Need to pass context in for access to auth_token |
2810 # TODO(Vek): Need to pass context in for access to auth_token |
2814 raise NotImplementedError() |
2811 raise NotImplementedError() |
2815 |
2812 |
2816 def suspend(self, instance): |
2813 def suspend(self, context, instance): |
2817 """suspend the specified instance. |
2814 """suspend the specified instance. |
2818 |
2815 |
|
2816 :param context: the context for the suspend |
2819 :param instance: nova.objects.instance.Instance |
2817 :param instance: nova.objects.instance.Instance |
2820 """ |
2818 """ |
2821 # TODO(Vek): Need to pass context in for access to auth_token |
|
2822 name = instance['name'] |
2819 name = instance['name'] |
2823 zone = self._get_zone_by_name(name) |
2820 zone = self._get_zone_by_name(name) |
2824 if zone is None: |
2821 if zone is None: |
2825 raise exception.InstanceNotFound(instance_id=name) |
2822 raise exception.InstanceNotFound(instance_id=name) |
2826 |
2823 |
3214 |
3211 |
3215 This runs check on the destination host, and then calls |
3212 This runs check on the destination host, and then calls |
3216 back to the source host to check the results. |
3213 back to the source host to check the results. |
3217 |
3214 |
3218 :param context: security context |
3215 :param context: security context |
3219 :param instance: nova.db.sqlalchemy.models.Instance |
3216 :param instance: nova.objects.instance.Instance object |
3220 """ |
3217 """ |
3221 raise NotImplementedError() |
3218 raise NotImplementedError() |
3222 |
3219 |
3223 def check_instance_shared_storage_remote(self, context, data): |
3220 def check_instance_shared_storage_remote(self, context, data): |
3224 """Check if instance files located on shared storage. |
3221 """Check if instance files located on shared storage. |
3304 reason = (_("Instances with attached '%s' volumes are not " |
3301 reason = (_("Instances with attached '%s' volumes are not " |
3305 "currently supported.") % driver_type) |
3302 "currently supported.") % driver_type) |
3306 raise exception.MigrationPreCheckError(reason=reason) |
3303 raise exception.MigrationPreCheckError(reason=reason) |
3307 |
3304 |
3308 def check_can_live_migrate_source(self, context, instance, |
3305 def check_can_live_migrate_source(self, context, instance, |
3309 dest_check_data, block_device_info): |
3306 dest_check_data, block_device_info=None): |
3310 """Check if it is possible to execute live migration. |
3307 """Check if it is possible to execute live migration. |
3311 |
3308 |
3312 This checks if the live migration can succeed, based on the |
3309 This checks if the live migration can succeed, based on the |
3313 results from check_can_live_migrate_destination. |
3310 results from check_can_live_migrate_destination. |
3314 |
3311 |
3326 except Exception as ex: |
3323 except Exception as ex: |
3327 reason = zonemgr_strerror(ex) |
3324 reason = zonemgr_strerror(ex) |
3328 raise exception.MigrationPreCheckError(reason=reason) |
3325 raise exception.MigrationPreCheckError(reason=reason) |
3329 return dest_check_data |
3326 return dest_check_data |
3330 |
3327 |
3331 def get_instance_disk_info(self, instance_name, |
3328 def get_instance_disk_info(self, instance, |
3332 block_device_info=None): |
3329 block_device_info=None): |
3333 """Retrieve information about actual disk sizes of an instance. |
3330 """Retrieve information about actual disk sizes of an instance. |
3334 |
3331 |
3335 :param instance_name: |
3332 :param instance: nova.objects.Instance |
3336 name of a nova instance as returned by list_instances() |
|
3337 :param block_device_info: |
3333 :param block_device_info: |
3338 Optional; Can be used to filter out devices which are |
3334 Optional; Can be used to filter out devices which are |
3339 actually volumes. |
3335 actually volumes. |
3340 :return: |
3336 :return: |
3341 json strings with below format:: |
3337 json strings with below format:: |
3472 |
3468 |
def set_admin_password(self, instance, new_pass):
    """Set the root password on the specified instance.

    The password is hashed locally and pushed into the zone with
    ``passwd -p``, so the clear text never crosses the zone boundary.

    :param instance: nova.objects.instance.Instance
    :param new_pass: the new password (clear text)
    :raises: exception.InstanceNotFound if no zone matches the
             instance name
    :raises: exception.InstanceNotRunning if the zone exists but is
             not in the running state (zlogin requires a running zone)
    """
    name = instance['name']
    zone = self._get_zone_by_name(name)
    if zone is None:
        raise exception.InstanceNotFound(instance_id=name)

    if zone.state != ZONE_STATE_RUNNING:
        # passwd(1) can only be driven through zlogin while the zone
        # is up, so fail fast otherwise.
        raise exception.InstanceNotRunning(instance_id=name)

    # NOTE(review): the hash is wrapped in literal single quotes;
    # utils.execute() does not go through a shell, so passwd receives
    # those quote characters verbatim as part of the -p argument —
    # confirm this is intended.
    # The (out, err) result of execute() was previously captured but
    # never used, so it is deliberately discarded here.
    utils.execute('/usr/sbin/zlogin', '-S', name,
                  '/usr/bin/passwd', '-p',
                  "'%s'" % sha256_crypt.encrypt(new_pass))
3490 |
3486 |
3491 def inject_file(self, instance, b64_path, b64_contents): |
3487 def inject_file(self, instance, b64_path, b64_contents): |
3492 """Writes a file on the specified instance. |
3488 """Writes a file on the specified instance. |
3530 longer than the configured timeout |
3526 longer than the configured timeout |
3531 """ |
3527 """ |
3532 # TODO(Vek): Need to pass context in for access to auth_token |
3528 # TODO(Vek): Need to pass context in for access to auth_token |
3533 raise NotImplementedError() |
3529 raise NotImplementedError() |
3534 |
3530 |
def host_power_action(self, action):
    """Perform a power action (reboot, shut down, power up) on the host.

    This driver does not support host power actions.
    """
    raise NotImplementedError()
3538 |
3534 |
def host_maintenance_mode(self, host, mode):
    """Start or stop a host maintenance window.

    Starting maintenance triggers evacuation of guest VMs from the
    host. This driver does not implement maintenance mode.
    """
    raise NotImplementedError()
3544 |
3540 |
def set_host_enabled(self, enabled):
    """Enable or disable this host's ability to accept new instances.

    Not implemented by this driver.
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
3549 |
3545 |
def get_host_uptime(self):
    """Return the output of running "uptime" on this host."""
    # TODO(Vek): Need to pass context in for access to auth_token
    out, _err = utils.execute('/usr/bin/uptime')
    return out
3554 |
3550 |
3555 def plug_vifs(self, instance, network_info): |
3551 def plug_vifs(self, instance, network_info): |
3564 """Unplug VIFs from networks. |
3560 """Unplug VIFs from networks. |
3565 |
3561 |
3566 :param instance: nova.objects.instance.Instance |
3562 :param instance: nova.objects.instance.Instance |
3567 """ |
3563 """ |
3568 raise NotImplementedError() |
3564 raise NotImplementedError() |
3569 |
|
def get_host_stats(self, refresh=False):
    """Return the currently known host stats, refreshing if needed.

    The cached stats are rebuilt when the caller asks for a refresh or
    when nothing has been cached yet.

    If the hypervisor supports PCI passthrough, the returned dictionary
    carries a "pci_passthrough_devices" key whose value is a JSON
    string listing the assignable PCI devices; each device dict has the
    mandatory keys 'address', 'vendor_id', 'product_id', 'dev_type',
    'dev_id' and 'label', plus optional device-specific entries (see
    objects/pci_device.py for details).
    """
    stale = refresh or not self._host_stats
    if stale:
        self._update_host_stats()
    return self._host_stats
|
3586 |
3565 |
3587 def get_host_cpu_stats(self): |
3566 def get_host_cpu_stats(self): |
3588 """Get the currently known host CPU stats. |
3567 """Get the currently known host CPU stats. |
3589 |
3568 |
3590 :returns: a dict containing the CPU stat info, eg: |
3569 :returns: a dict containing the CPU stat info, eg: |
3604 long integers. |
3583 long integers. |
3605 |
3584 |
3606 """ |
3585 """ |
3607 raise NotImplementedError() |
3586 raise NotImplementedError() |
3608 |
3587 |
3609 def block_stats(self, instance_name, disk_id): |
3588 def block_stats(self, instance, disk_id): |
3610 """Return performance counters associated with the given disk_id on the |
3589 """Return performance counters associated with the given disk_id on the |
3611 given instance_name. These are returned as [rd_req, rd_bytes, wr_req, |
3590 given instance. These are returned as [rd_req, rd_bytes, wr_req, |
3612 wr_bytes, errs], where rd indicates read, wr indicates write, req is |
3591 wr_bytes, errs], where rd indicates read, wr indicates write, req is |
3613 the total number of I/O requests made, bytes is the total number of |
3592 the total number of I/O requests made, bytes is the total number of |
3614 bytes transferred, and errs is the number of requests held up due to a |
3593 bytes transferred, and errs is the number of requests held up due to a |
3615 full pipeline. |
3594 full pipeline. |
3616 |
|
3617 All counters are long integers. |
|
3618 |
|
3619 This method is optional. On some platforms (e.g. XenAPI) performance |
|
3620 statistics can be retrieved directly in aggregate form, without Nova |
|
3621 having to do the aggregation. On those platforms, this method is |
|
3622 unused. |
|
3623 |
|
3624 Note that this function takes an instance ID. |
|
3625 """ |
|
3626 raise NotImplementedError() |
|
3627 |
|
3628 def interface_stats(self, instance_name, iface_id): |
|
3629 """Return performance counters associated with the given iface_id |
|
3630 on the given instance_id. These are returned as [rx_bytes, rx_packets, |
|
3631 rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx |
|
3632 indicates receive, tx indicates transmit, bytes and packets indicate |
|
3633 the total number of bytes or packets transferred, and errs and dropped |
|
3634 is the total number of packets failed / dropped. |
|
3635 |
3595 |
3636 All counters are long integers. |
3596 All counters are long integers. |
3637 |
3597 |
3638 This method is optional. On some platforms (e.g. XenAPI) performance |
3598 This method is optional. On some platforms (e.g. XenAPI) performance |
3639 statistics can be retrieved directly in aggregate form, without Nova |
3599 statistics can be retrieved directly in aggregate form, without Nova |
3699 | {'opt_name': 'tftp-server', |
3659 | {'opt_name': 'tftp-server', |
3700 | 'opt_value': '1.2.3.4'} |
3660 | 'opt_value': '1.2.3.4'} |
3701 | ] |
3661 | ] |
3702 |
3662 |
3703 """ |
3663 """ |
3704 pass |
3664 return None |
3705 |
3665 |
def manage_image_cache(self, context, all_instances):
    """Hook for driver-local image cache maintenance.

    Drivers that cache instance images on disk use this periodic entry
    point to manage that cache — typically pruning images that are no
    longer of interest. This driver keeps no such cache, so the hook is
    a no-op.

    :param context: request context
    :param all_instances: nova.objects.instance.InstanceList
    """
    pass
3717 |
3677 |
3718 def add_to_aggregate(self, context, aggregate, host, **kwargs): |
3678 def add_to_aggregate(self, context, aggregate, host, **kwargs): |
3719 """Add a compute host to an aggregate.""" |
3679 """Add a compute host to an aggregate.""" |
3782 This method is for multi compute-nodes support. If a driver supports |
3742 This method is for multi compute-nodes support. If a driver supports |
3783 multi compute-nodes, this method returns a list of nodenames managed |
3743 multi compute-nodes, this method returns a list of nodenames managed |
3784 by the service. Otherwise, this method should return |
3744 by the service. Otherwise, this method should return |
3785 [hypervisor_hostname]. |
3745 [hypervisor_hostname]. |
3786 """ |
3746 """ |
3787 stats = self.get_host_stats(refresh=refresh) |
3747 if refresh or not self._host_stats: |
|
3748 self._update_host_stats() |
|
3749 stats = self._host_stats |
3788 if not isinstance(stats, list): |
3750 if not isinstance(stats, list): |
3789 stats = [stats] |
3751 stats = [stats] |
3790 return [s['hypervisor_hostname'] for s in stats] |
3752 return [s['hypervisor_hostname'] for s in stats] |
3791 |
3753 |
3792 def node_is_available(self, nodename): |
3754 def node_is_available(self, nodename): |
3867 |
3829 |
3868 try: |
3830 try: |
3869 LOG.debug("Emitting event %s", str(event)) |
3831 LOG.debug("Emitting event %s", str(event)) |
3870 self._compute_event_callback(event) |
3832 self._compute_event_callback(event) |
3871 except Exception as ex: |
3833 except Exception as ex: |
3872 LOG.error(_("Exception dispatching event %(event)s: %(ex)s"), |
3834 LOG.error(_LE("Exception dispatching event %(event)s: %(ex)s"), |
3873 {'event': event, 'ex': ex}) |
3835 {'event': event, 'ex': ex}) |
3874 |
3836 |
3875 def delete_instance_files(self, instance): |
3837 def delete_instance_files(self, instance): |
3876 """Delete any lingering instance files for an instance. |
3838 """Delete any lingering instance files for an instance. |
3877 |
3839 |
3949 # NOTE(jichenjc): Return False here so that every hypervisor |
3911 # NOTE(jichenjc): Return False here so that every hypervisor |
3950 # need to define their supported file system |
3912 # need to define their supported file system |
3951 # type and implement this function at their |
3913 # type and implement this function at their |
3952 # virt layer. |
3914 # virt layer. |
3953 return False |
3915 return False |
|
3916 |
|
def quiesce(self, context, instance, image_meta):
    """Quiesce the specified instance in preparation for snapshots.

    Per the driver contract, InstanceQuiesceNotSupported is raised when
    the instance cannot be quiesced, and NovaException for other
    failures such as an agent timeout. This driver does not implement
    quiescing.

    :param context: request context
    :param instance: nova.objects.instance.Instance to be quiesced
    :param image_meta: image object returned by nova.image.glance that
                       defines the image from which this instance
                       was created
    """
    raise NotImplementedError()
|
3931 |
|
def unquiesce(self, context, instance, image_meta):
    """Unquiesce the specified instance after snapshots complete.

    Per the driver contract, InstanceQuiesceNotSupported is raised when
    the instance does not support quiescing, and NovaException for
    other failures such as an agent timeout. This driver does not
    implement unquiescing.

    :param context: request context
    :param instance: nova.objects.instance.Instance to be unquiesced
    :param image_meta: image object returned by nova.image.glance that
                       defines the image from which this instance
                       was created
    """
    raise NotImplementedError()