|
1 # vim: tabstop=4 shiftwidth=4 softtabstop=4 |
|
2 # |
|
3 # Copyright 2012 Hewlett-Packard Development Company, L.P. |
|
4 # Copyright (c) 2012 NTT DOCOMO, INC. |
|
5 # Copyright 2014 International Business Machines Corporation |
|
6 # All Rights Reserved. |
|
7 # |
|
8 # Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. |
|
9 # |
|
10 # Licensed under the Apache License, Version 2.0 (the "License"); you may |
|
11 # not use this file except in compliance with the License. You may obtain |
|
12 # a copy of the License at |
|
13 # |
|
14 # http://www.apache.org/licenses/LICENSE-2.0 |
|
15 # |
|
16 # Unless required by applicable law or agreed to in writing, software |
|
17 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
|
18 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
|
19 # License for the specific language governing permissions and limitations |
|
20 # under the License. |
|
21 """ |
|
22 Solaris Driver and supporting meta-classes. |
|
23 """ |
|
24 |
|
25 import os |
|
26 import platform |
|
27 import re |
|
28 import select |
|
29 import shutil |
|
30 import socket |
|
31 from subprocess import Popen, PIPE |
|
32 import tempfile |
|
33 from threading import Thread |
|
34 import time |
|
35 import urllib2 |
|
36 from urlparse import urlparse |
|
37 |
|
38 from lockfile import LockFile, LockTimeout |
|
39 from oslo.config import cfg |
|
40 from pkg.fmri import is_valid_pkg_name |
|
41 from pkg.misc import valid_pub_prefix, valid_pub_url |
|
42 from scp import SCPClient |
|
43 |
|
44 from ironic.common import boot_devices, exception, images, keystone, states, \ |
|
45 utils |
|
46 from ironic.common.i18n import _, _LW |
|
47 from ironic.conductor import task_manager |
|
48 from ironic.conductor import utils as manager_utils |
|
49 from ironic.db import api as dbapi |
|
50 from ironic.drivers import base |
|
51 from ironic.drivers.modules import ipmitool |
|
52 from ironic.drivers import utils as driver_utils |
|
53 from ironic.openstack.common import log as logging |
|
54 from ironic.openstack.common import loopingcall, processutils |
|
55 |
|
# Cache the host OS name once at import time.  On non-Solaris hosts a
# unified archive (UAR) cannot be mounted via uafs, so tarfile is needed
# to read archive members directly (see _get_archive_iso_and_uuid).
PLATFORM = platform.system()
if PLATFORM != "SunOS":
    import tarfile
|
59 |
|
60 |
|
# Configuration options governing how the driver reaches and drives the
# Automated Install (AI) server over SSH.  Registered below under the
# 'ai' option group.
AI_OPTS = [
    cfg.StrOpt('server',
               default='None',
               help='Host name for AI Server.'),
    cfg.StrOpt('username',
               default='None',
               help='Username to ssh to AI Server.'),
    cfg.StrOpt('password',
               default='None',
               help='Password for user to ssh to AI Server.'),
    cfg.StrOpt('port',
               default='22',
               help='SSH port to use.'),
    cfg.StrOpt('timeout',
               default='10',
               help='SSH socket timeout value in seconds.'),
    cfg.StrOpt('deploy_interval',
               default='10',
               help='Interval in seconds to check AI deployment status.'),
    cfg.StrOpt('derived_manifest',
               default='file:///usr/lib/ironic/ironic-manifest.ksh',
               help='Derived Manifest used for deployment.'),
    cfg.StrOpt('ssh_key_file',
               default='None',
               help='SSH Filename to use.'),
    cfg.StrOpt('ssh_key_contents',
               default='None',
               help='Actual SSH Key contents to use.')
]
|
90 |
|
# Global authentication strategy option (registered without a group).
AUTH_OPTS = [
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Method to use for authentication: noauth or keystone.')
]

# Options for the local image cache used when fetching images/archives.
# Registered below under the 'solaris_ipmi' option group.
SOLARIS_IPMI_OPTS = [
    cfg.StrOpt('imagecache_dirname',
               default='/var/lib/ironic/images',
               help='Default path to image cache.'),
    cfg.StrOpt('imagecache_lock_timeout',
               default='60',
               help='Timeout to wait when attempting to lock refcount file.')
]
|
105 |
|
LOG = logging.getLogger(__name__)

# Register the option groups defined above with the global config object.
CONF = cfg.CONF
OPT_GROUP = cfg.OptGroup(name='ai',
                         title='Options for the Automated Install driver')
CONF.register_group(OPT_GROUP)
CONF.register_opts(AI_OPTS, OPT_GROUP)
CONF.register_opts(AUTH_OPTS)
SOLARIS_IPMI_GROUP = cfg.OptGroup(
    name="solaris_ipmi",
    title="Options defined in ironic.drivers.modules.solaris_ipmi")
CONF.register_group(SOLARIS_IPMI_GROUP)
CONF.register_opts(SOLARIS_IPMI_OPTS, SOLARIS_IPMI_GROUP)
|
119 |
|
# Node architectures the driver knows how to deploy.
VALID_ARCH = ['x86', 'SPARC']
# URI schemes accepted for archive_uri and other URI-valued properties.
VALID_ARCHIVE_SCHEMES = ["file", "http", "https", "glance"]
VALID_URI_SCHEMES = VALID_ARCHIVE_SCHEMES
DEFAULT_ARCHIVE_IMAGE_PATH = 'auto_install/manifest/default_archive.xml'
# Console markers emitted by the AI client during an install; scanned on
# the serial console to track deployment progress (see _check_deploy_state).
AI_STRING = "Automated Installation"
AI_SUCCESS_STRING = AI_STRING + " succeeded"
AI_FAILURE_STRING = AI_STRING + " failed"
AI_DEPLOY_STRING = AI_STRING + " started"
|
128 |
|
# driver_info properties every node using this driver must supply.
# Note: deliberately narrower than ipmitool.py's REQUIRED_PROPERTIES,
# which is why _parse_driver_info is duplicated locally.
REQUIRED_PROPERTIES = {
    'ipmi_address': _("IP address or hostname of the node. Required."),
    'ipmi_username': _("username to use for IPMI connection. Required."),
    'ipmi_password': _("password to use for IPMI connection. Required.")
}

# driver_info properties that may optionally be supplied.
OPTIONAL_PROPERTIES = {
    'ai_manifest': _("Automated install manifest to be used for provisioning. "
                     "Optional."),
    'ai_service': _("Automated Install service name to use. Optional."),
    'archive_uri': _("URI of archive to deploy. Optional."),
    'fmri': _("List of IPS package FMRIs to be installed. "
              "Required if publishers property is set."),
    'install_profiles': _("List of configuration profiles to be applied "
                          "to the installation environment during an install. "
                          "Optional."),
    'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
                       "\"dual\", \"no\". Optional."),
    'ipmi_local_address': _("local IPMB address for bridged requests. "
                            "Used only if ipmi_bridging is set "
                            "to \"single\" or \"dual\". Optional."),
    'ipmi_priv_level':
        _("privilege level; default is ADMINISTRATOR. "
          "One of %s. Optional.") % '. '.join(ipmitool.VALID_PRIV_LEVELS),
    'ipmi_target_address': _("destination address for bridged request. "
                             "Required only if ipmi_bridging is set "
                             "to \"single\" or \"dual\"."),
    'ipmi_target_channel': _("destination channel for bridged request. "
                             "Required only if ipmi_bridging is set to "
                             "\"single\" or \"dual\"."),
    'ipmi_transit_address': _("transit address for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'ipmi_transit_channel': _("transit channel for bridged request. Required "
                              "only if ipmi_bridging is set to \"dual\"."),
    'publishers': _("List of IPS publishers to install from, in the format "
                    "name@origin. Required if fmri property is set."),
    'sc_profiles': _("List of system configuration profiles to be applied "
                     "to an installed system. Optional.")
}

# Union of required and optional properties; what the driver advertises.
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)

# Per-BMC timestamp of the last ipmitool invocation, used to throttle
# commands to at most one per CONF.ipmi.min_command_interval seconds.
LAST_CMD_TIME = {}
# Lazily-probed ipmitool capability flags (None means "not yet probed").
TIMING_SUPPORT = None
SINGLE_BRIDGE_SUPPORT = None
DUAL_BRIDGE_SUPPORT = None
|
176 |
|
177 |
|
def _ssh_execute(ssh_obj, ssh_cmd, raise_exception=True, err_msg=None):
    """Execute a command via SSH.

    :param ssh_obj: paramiko.SSHClient, an active ssh connection
    :param ssh_cmd: Command to execute over SSH.
    :param raise_exception: Whether to raise exception or not
    :param err_msg: Custom error message to use
    :returns: tuple [stdout from command, returncode]
    :raises: SSHCommandFailed on an error from ssh, if specified to raise.
    """
    LOG.debug("_ssh_execute():ssh_cmd: %s" % (ssh_cmd))

    # Initialize stdout so a failed command with raise_exception=False
    # returns (None, 1) instead of hitting a NameError on the return.
    stdout = None
    returncode = 0
    try:
        stdout = processutils.ssh_execute(ssh_obj, ssh_cmd)[0]
    except Exception as err:
        LOG.debug(_("Cannot execute SSH cmd %(cmd)s. Reason: %(err)s.") %
                  {'cmd': ssh_cmd, 'err': err})
        returncode = 1
        if raise_exception:
            # Prefer the caller-supplied message when one was given.
            if err_msg:
                raise SolarisIPMIError(msg=err_msg)
            else:
                raise exception.SSHCommandFailed(cmd=ssh_cmd)

    return stdout, returncode
|
204 |
|
205 |
|
def _parse_driver_info(node):
    """Gets the parameters required for ipmitool to access the node.

    Copied from ironic/drivers/modules/ipmitool.py. No differences.
    Copied locally as REQUIRED_PROPERTIES differs from standard ipmitool.

    :param node: the Node of interest.
    :returns: dictionary of parameters.
    :raises: InvalidParameterValue when an invalid value is specified
    :raises: MissingParameterValue when a required ipmi parameter is missing.

    """
    LOG.debug("_parse_driver_info()")
    info = node.driver_info or {}
    bridging_types = ['single', 'dual']
    # All REQUIRED_PROPERTIES keys must be present and non-empty.
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(
            _("The following IPMI credentials are not supplied"
              " to IPMI driver: %s.") % missing_info)

    address = info.get('ipmi_address')
    username = info.get('ipmi_username')
    password = info.get('ipmi_password')
    port = info.get('ipmi_terminal_port')
    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
    bridging_type = info.get('ipmi_bridging', 'no')
    local_address = info.get('ipmi_local_address')
    transit_channel = info.get('ipmi_transit_channel')
    transit_address = info.get('ipmi_transit_address')
    target_channel = info.get('ipmi_target_channel')
    target_address = info.get('ipmi_target_address')

    # Terminal port, when given, must parse as an integer.
    if port:
        try:
            port = int(port)
        except ValueError:
            raise exception.InvalidParameterValue(_(
                "IPMI terminal port is not an integer."))

    # check if ipmi_bridging has proper value
    if bridging_type == 'no':
        # if bridging is not selected, then set all bridging params to None
        local_address = transit_channel = transit_address = \
            target_channel = target_address = None
    elif bridging_type in bridging_types:
        # check if the particular bridging option is supported on host
        if not ipmitool._is_option_supported('%s_bridge' % bridging_type):
            raise exception.InvalidParameterValue(_(
                "Value for ipmi_bridging is provided as %s, but IPMI "
                "bridging is not supported by the IPMI utility installed "
                "on host. Ensure ipmitool version is > 1.8.11"
            ) % bridging_type)

        # ensure that all the required parameters are provided
        params_undefined = [param for param, value in [
            ("ipmi_target_channel", target_channel),
            ('ipmi_target_address', target_address)] if value is None]
        if bridging_type == 'dual':
            # Dual bridging additionally needs the transit pair.
            params_undefined2 = [param for param, value in [
                ("ipmi_transit_channel", transit_channel),
                ('ipmi_transit_address', transit_address)
            ] if value is None]
            params_undefined.extend(params_undefined2)
        else:
            # if single bridging was selected, set dual bridge params to None
            transit_channel = transit_address = None

        # If the required parameters were not provided,
        # raise an exception
        if params_undefined:
            raise exception.MissingParameterValue(_(
                "%(param)s not provided") % {'param': params_undefined})
    else:
        raise exception.InvalidParameterValue(_(
            "Invalid value for ipmi_bridging: %(bridging_type)s,"
            " the valid value can be one of: %(bridging_types)s"
        ) % {'bridging_type': bridging_type,
             'bridging_types': bridging_types + ['no']})

    # Privilege level must be one ipmitool itself accepts.
    if priv_level not in ipmitool.VALID_PRIV_LEVELS:
        valid_priv_lvls = ', '.join(ipmitool.VALID_PRIV_LEVELS)
        raise exception.InvalidParameterValue(_(
            "Invalid privilege level value:%(priv_level)s, the valid value"
            " can be one of %(valid_levels)s") %
            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})

    return {
        'address': address,
        'username': username,
        'password': password,
        'port': port,
        'uuid': node.uuid,
        'priv_level': priv_level,
        'local_address': local_address,
        'transit_channel': transit_channel,
        'transit_address': transit_address,
        'target_channel': target_channel,
        'target_address': target_address
    }
|
306 |
|
307 |
|
def _exec_ipmitool(driver_info, command):
    """Execute the ipmitool command.

    This uses the lanplus interface to communicate with the BMC device driver.

    Copied from ironic/drivers/modules/ipmitool.py. Only one difference.
    ipmitool.py version expects a string of space separated commands, and
    it splits this into an list using 'space' as delimiter.
    This causes setting of bootmode script for SPARC network boot to fail.
    Solaris versions takes a list() as command paramater, and therefore
    we don't need to split.

    :param driver_info: the ipmitool parameters for accessing a node.
    :param command: list() : the ipmitool command to be executed.
    :returns: (stdout, stderr) from executing the command.
    :raises: PasswordFileFailedToCreate from creating or writing to the
             temporary file.
    :raises: processutils.ProcessExecutionError from executing the command.

    """
    LOG.debug("SolarisDeploy._exec_ipmitool:driver_info: '%s', "
              "command: '%s'" % (driver_info, command))
    # Base invocation: lanplus interface, target BMC, privilege level.
    args = ['/usr/sbin/ipmitool',
            '-I',
            'lanplus',
            '-H',
            driver_info['address'],
            '-L', driver_info.get('priv_level')
            ]

    if driver_info['username']:
        args.append('-U')
        args.append(driver_info['username'])

    # Append any bridging options resolved by _parse_driver_info.
    for name, option in ipmitool.BRIDGING_OPTIONS:
        if driver_info[name] is not None:
            args.append(option)
            args.append(driver_info[name])

    # specify retry timing more precisely, if supported
    if ipmitool._is_option_supported('timing'):
        num_tries = max(
            (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
        args.append('-R')
        args.append(str(num_tries))

        args.append('-N')
        args.append(str(CONF.ipmi.min_command_interval))

    # 'ipmitool' command will prompt password if there is no '-f' option,
    # we set it to '\0' to write a password file to support empty password
    with ipmitool._make_password_file(driver_info['password'] or '\0') \
            as pw_file:
        args.append('-f')
        args.append(pw_file)
        args = args + list(command)  # Append as a list don't split(" ")

        # NOTE(deva): ensure that no communications are sent to a BMC more
        #             often than once every min_command_interval seconds.
        time_till_next_poll = CONF.ipmi.min_command_interval - (
            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
        if time_till_next_poll > 0:
            time.sleep(time_till_next_poll)
        try:
            out, err = utils.execute(*args)
        finally:
            # Record the send time even on failure so throttling holds.
            LAST_CMD_TIME[driver_info['address']] = time.time()
        return out, err
|
376 |
|
377 |
|
def _get_node_architecture(node):
    """Query the node's BMC for its processor architecture.

    :param node: the Node of interest.
    :returns: SPARC or X86 depending on architecture discovered
    :raises: IPMIFailure if ipmitool command fails
    """
    LOG.debug("SolarisDeploy._get_node_architecture")
    cmd_args = ['sunoem', 'getval', '/System/Processors/architecture']
    ipmi_info = _parse_driver_info(node)
    try:
        output, _err = _exec_ipmitool(ipmi_info, cmd_args)
    except Exception:
        raise exception.IPMIFailure(cmd=cmd_args)

    LOG.debug("SolarisDeploy._get_node_architecture: arch: '%s'" % (output))

    # Scan the BMC reply for a known architecture marker; SPARC first,
    # matching the original precedence.
    for arch in ('SPARC', 'x86'):
        if arch in output:
            return arch
    raise SolarisIPMIError(msg="Unknown node architecture: %s" % (output))
|
401 |
|
402 |
|
def _check_deploy_state(task, node_uuid, deploy_thread):
    """ Check deployment state of a running install

    Check the deployment status for this node ideally this will be
    achieved via communicating with the AI Server and querying the
    telemetry data returned by the AI Client install to the AI Server.

    However until that is integrated we need to maintain a connection
    with the Serial Console of the node being installed and parse the
    output to the console made during an install.

    :param task: a TaskManager instance.
    :param node_uuid: UUID of the node being deployed.
    :param deploy_thread: Threaded class monitor deployment status
    :returns: Nothing, raises loopingcall.LoopingCallDone() once
        node deployment status is determined as done or failed.
    """
    LOG.debug("_check_deploy_state()")
    LOG.debug("_check_deploy_state() deploy_thread_state: %s" %
              (deploy_thread.state))

    # Get DB instance
    mydbapi = dbapi.get_instance()
    try:
        # Get current DB copy of node
        cur_node = mydbapi.get_node_by_uuid(node_uuid)
    except exception.NodeNotFound:
        LOG.info(_("During check_deploy_state, node %(node)s was not "
                   "found and presumed deleted by another process.") %
                 {'node': node_uuid})
        # Thread should have stopped already, but let's make sure.
        deploy_thread.stop()
        if deploy_thread.state in [states.DEPLOYING, states.DEPLOYWAIT]:
            # Update node with done/fail state
            if task.node:
                task.node.provision_state = states.DEPLOYFAIL
                task.node.last_error = "Failed to find node."
                task.node.target_provision_state = states.NOSTATE
                task.node.save()
        raise loopingcall.LoopingCallDone()
    except Exception as err:
        # BUG FIX: the original format spec "%(err)" was incomplete
        # (missing the trailing 's') and raised ValueError when this
        # handler ran, masking the real failure.
        LOG.info(_("During check_deploy_state, node %(node)s could "
                   "not be retrieved: %(err)s") %
                 {'node': node_uuid, 'err': err})
        # Thread should have stopped already, but lets make sure.
        deploy_thread.stop()
        if deploy_thread.state in [states.DEPLOYING, states.DEPLOYWAIT]:
            # Update node with done/fail state
            if task.node:
                task.node.last_error = "Failed to find node."
                task.node.provision_state = states.DEPLOYFAIL
                task.node.target_provision_state = states.NOSTATE
                task.node.save()
        raise loopingcall.LoopingCallDone()

    LOG.debug("_check_deploy_state().cur_node.target_provision_state: %s" %
              (cur_node.target_provision_state))

    if deploy_thread.state not in [states.DEPLOYING, states.DEPLOYWAIT]:
        LOG.debug("_check_deploy_state().done: %s" % (deploy_thread.state))
        # Node has completed deployment, success or failure

        # Thread should have stopped already, but lets make sure.
        deploy_thread.stop()

        # Update node with done/fail state
        if deploy_thread.state == states.DEPLOYDONE:
            cur_node.provision_state = states.ACTIVE
        elif deploy_thread.state == states.DEPLOYFAIL:
            cur_node.last_error = "Install failed; check install.log for " + \
                "more details."
            cur_node.provision_state = deploy_thread.state
        else:
            cur_node.provision_state = deploy_thread.state
        cur_node.target_provision_state = states.NOSTATE
        cur_node.save()

        # Raise LoopingCallDone to terminate deployment checking.
        raise loopingcall.LoopingCallDone()

    elif deploy_thread.state == states.DEPLOYING and \
            cur_node.provision_state != states.DEPLOYING:
        # Actual node deployment has initiated
        LOG.debug("_check_deploy_state().deploying: %s" %
                  (deploy_thread.state))
        cur_node.provision_state = states.DEPLOYING
        cur_node.save()

    elif cur_node.target_provision_state == states.NOSTATE:
        # Node was most likely deleted so end deployment completion checking
        LOG.debug("_check_deploy_state().deleted: %s" %
                  (cur_node.target_provision_state))
        deploy_thread.stop()
        raise loopingcall.LoopingCallDone()
|
496 |
|
497 |
|
def _url_exists(url):
    """Validate that the given HTTP URL is reachable.

    :param url: HTTP url
    :returns: boolean, True if reachable, otherwise False
    """
    LOG.debug("_url_exists: url: %s" % (url.strip()))
    try:
        # Close the response explicitly; the original bound it to an
        # unused variable and leaked the underlying socket.
        response = urllib2.urlopen(urllib2.Request(url))
        response.close()
        return True
    except Exception as err:
        LOG.debug(_("URL %s not reachable: %s") % (url, err))
        return False
|
511 |
|
512 |
|
def _image_refcount_acquire_lock(image_path):
    """Acquire a lock on reference count image file

    Loops until the lock is held.  On lock timeout, if the image file's
    size changed during the wait the image is presumed to be still
    downloading and we keep waiting; otherwise the lock is presumed
    stale, broken and re-acquired.

    :param image_path: Path to image file
    :returns: Acquired LockFile lock
    """
    LOG.debug("_image_refcount_acquire_lock: image_path: %s" % (image_path))
    ref_filename = image_path + ".ref"
    lock = LockFile(ref_filename)
    while not lock.i_am_locking():
        try:
            # Snapshot the image size before waiting so a timeout can
            # distinguish "still downloading" from "stale lock".
            if os.path.exists(image_path):
                image_size_1 = os.path.getsize(image_path)
            else:
                image_size_1 = 0
            lock.acquire(
                timeout=int(CONF.solaris_ipmi.imagecache_lock_timeout))
        except LockTimeout:
            # Check if image_path size has changed, due to still downloading
            if os.path.exists(image_path):
                image_size_2 = os.path.getsize(image_path)
            else:
                image_size_2 = 0

            if image_size_1 != image_size_2:
                LOG.debug("_image_refcount_acquire_lock: Image downloading...")
                continue
            else:
                # Assume lock is an old one, force it's removal
                LOG.debug("_image_refcount_acquire_lock: Breaking stale lock.")
                lock.break_lock()
                lock.acquire()

    return lock
|
547 |
|
548 |
|
def _image_refcount_adjust(image_path, count, release=True):
    """Adjust cached image file reference counter

    When the adjusted count drops to zero or below, both the refcount
    file and the cached image are removed (the lock is always released
    in that case, regardless of 'release').

    :param image_path: Path to image file
    :param count: Integer count value to adjust reference by
    :param release: Release the acquired lock or return it.
    :returns: Acquired lock
    :raises: SolarisIPMIError on a zero adjustment, a decrement of a
        non-existent refcount file, or failure to acquire the lock.
    """
    LOG.debug("_image_refcount_adjust: image_path: %s, "
              "count: %s" % (image_path, str(count)))

    if count == 0:
        # Adjusting by zero makes no sense just return
        err_msg = _("Zero reference count adjustment attempted "
                    "on file: %s") % (image_path)
        LOG.error(err_msg)
        raise SolarisIPMIError(msg=err_msg)

    ref_filename = image_path + ".ref"

    if not os.path.exists(ref_filename):
        if count < 0:
            # Cannot decrement reference on non-existent file
            err_msg = _("Negative reference count adjustment attempted on "
                        "non-existent file: %s") % (image_path)
            LOG.error(err_msg)
            raise SolarisIPMIError(msg=err_msg)

        # Create reference count file
        with open(ref_filename, "w") as fp:
            fp.write("0")

    # Acquire lock on refcount file
    lock = _image_refcount_acquire_lock(image_path)
    if lock is None:
        err_msg = _("Failed to acquire lock on image: %s") % (image_path)
        LOG.error(err_msg)
        raise SolarisIPMIError(msg=err_msg)

    with open(ref_filename, "r+") as fp:
        ref_count = fp.readline()
        if len(ref_count) == 0:
            # Treat an empty refcount file as holding one reference.
            ref_count = 1
        ref_count = str(int(ref_count) + count)

        # Check if reference count is zero if so remove
        # refcount file and image file
        if int(ref_count) <= 0:
            lock.release()
            os.remove(ref_filename)
            os.remove(image_path)
        else:
            fp.seek(0)
            fp.write(ref_count)
            # BUG FIX: truncate leftover bytes; without this a shorter
            # value overwriting a longer one corrupts the counter
            # (e.g. writing "9" over "10" left "90").
            fp.truncate()
            if release:
                lock.release()
    return lock
|
606 |
|
607 |
|
def _fetch_uri(task, uri):
    """Retrieve the specified URI to local temporary file

    Removal of locally fetched file is the responsibility of the
    caller.

    :param task: a TaskManager instance
    :param uri: URI of file to fetch.
    :returns: path to the locally cached/downloaded file
    """
    LOG.debug("SolarisDeploy._fetch_uri:uri: '%s'" % (uri))
    url = urlparse(uri)

    try:
        if url.scheme == "glance":
            # Glance images are cached under the image id (url.netloc).
            temp_uri = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
                                    url.netloc)

            # Check of image already in cache, retrieve if not
            if not os.path.isfile(temp_uri):
                try:
                    # Increment reference, creates refcount file and returns
                    # the acquired lock.
                    lock = _image_refcount_adjust(temp_uri, 1, release=False)

                    # Fetch URI from Glance into local file.
                    images.fetch(task.context, url.netloc, temp_uri)

                    # Release acquired lock now that file is retrieved
                    lock.release()

                except Exception as err:
                    LOG.error(_("Unable to fetch Glance image: id %s: %s")
                              % (url.netloc, err))
                    raise
            else:
                # Increase reference count for this image
                _image_refcount_adjust(temp_uri, 1)

        else:  # http/file scheme handled directly by curl
            if PLATFORM == "SunOS":
                # On Solaris: download to a fresh temp file every time;
                # no refcounting is done for these.
                _fd, temp_uri = tempfile.mkstemp(
                    dir=CONF.solaris_ipmi.imagecache_dirname)
                cmd = ["/usr/bin/curl", "-sS", "-o", temp_uri, uri]
                pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
                _stdout, err = pc.communicate()
                if pc.returncode != 0:
                    err_msg = _("Failed to retrieve image: %s") % err
                    raise SolarisIPMIError(msg=err_msg)
            else:  # Linux compat
                # Cache under a name derived from the URL path, with
                # refcounting like the glance case.
                temp_uri = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
                                        url.path.replace("/", ""))
                if not os.path.isfile(temp_uri):
                    try:
                        # Increment reference, creates refcount file and
                        # returns the acquired lock.
                        lock = _image_refcount_adjust(temp_uri, 1,
                                                      release=False)

                        # Actually fetch the image
                        cmd = ["/usr/bin/curl", "-sS", "-o", temp_uri, uri]
                        pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
                        _stdout, err = pc.communicate()
                        if pc.returncode != 0:
                            err_msg = _("Failed to retrieve image: %s") % err
                            raise SolarisIPMIError(msg=err_msg)

                        # Release acquired lock now that file is retrieved
                        lock.release()

                    except Exception as err:
                        LOG.error(_("Unable to fetch image: id %s: %s")
                                  % (url.netloc, err))
                        raise
                else:
                    # Increase reference count for this image
                    _image_refcount_adjust(temp_uri, 1)
    except Exception as err:
        # Only remove the temporary file if exception occurs
        # as noted above Caller is responsible for its removal
        LOG.error(_("Unable to fetch image: uri %s: %s") % (uri, err))
        if url.scheme == "glance":
            _image_refcount_adjust(temp_uri, -1)
        else:
            os.remove(temp_uri)
        raise

    return temp_uri
|
695 |
|
696 |
|
def _get_archive_iso_and_uuid(mount_dir, extract_iso=False):
    """Get ISO name and UUID

    Retrieved from mounted archive if on Solaris

    On non-Solaris systems we cannot mount a UAR so we need to parse the
    contents of the unified archive and extract ISO and UUID from
    cached UAR. In this scenario the caller is responsible for removing
    the extracted file.

    :param mount_dir: Location of locally mounted UAR or locally cached UAR
    :param extract_iso: Whether to extract ISO file to temp file
    :returns: Extracted ISO location and UUID
    """
    LOG.debug("SolarisDeploy._get_archive_iso_and_uuid:mount_dir: '%s'" %
              (mount_dir))
    uuid = None
    iso = None

    if PLATFORM == "SunOS":
        # UAR is mounted; the OVF directory names the .ovf (UUID) and
        # .iso members directly.
        ovf_dir = os.path.join(mount_dir, "OVF")

        for uar_file in os.listdir(ovf_dir):
            if uar_file.endswith('.ovf'):
                uuid = uar_file.split('.ovf')[0]
            elif uar_file.endswith('.iso'):
                iso = os.path.join(ovf_dir, uar_file)
    else:
        tf = tarfile.open(name=mount_dir)
        try:
            for ti in tf.getmembers():
                if ti.path.endswith('.ovf'):
                    uuid = ti.path.split('.ovf')[0]
                elif ti.path.endswith('.iso') and extract_iso:
                    # Create the scratch dir outside the try so a failed
                    # mkdtemp cannot cause a NameError in the cleanup
                    # (the original's bare except referenced temp_tar_dir
                    # even when mkdtemp itself raised).
                    temp_tar_dir = tempfile.mkdtemp(
                        dir=CONF.solaris_ipmi.imagecache_dirname)
                    try:
                        tf.extractall(path=temp_tar_dir, members=[ti])
                        iso = os.path.join(temp_tar_dir, ti.path)
                    except BaseException:
                        # Remove temp_tar_dir and contents, then re-raise.
                        shutil.rmtree(temp_tar_dir)
                        raise
        finally:
            # Always close the tar handle; the original leaked it.
            tf.close()

    return iso, uuid
|
742 |
|
743 |
|
def _mount_archive(task, archive_uri):
    """Mount a unified archive via uafs and return its mount point.

    :param task: a TaskManager instance
    :param archive_uri: URI of unified archive to mount
    :returns: tuple of (mount point path, local temp UAR path or None)
    """
    LOG.debug("SolarisDeploy._mount_archive:archive_uri: '%s'" %
              (archive_uri))

    # TODO(mattk):
    # Ideally mounting the http ISO directly is preferred.
    # However mount(1M), does not support auth_token
    # thus we must fetch the image locally and then mount the
    # local image.
    # Tried putting a proxy in place to intercept the mount(1M)
    # http request and adding an auth_token as it proceeds.
    # However mount(1M) launches a new SMF instance for each HTTP
    # mount request, and each SMF instance has a minimal environment
    # set, which does not include http_proxy, thus the custom local
    # proxy never gets invoked.
    # Would love to have a new mount(1M) option to accept either
    # a proxy e.g. -o proxy=<proxy> or to accept setting of http headers
    # e.g. -o http_header="X-Auth-Token: askdalksjdlakjsd"
    if urlparse(archive_uri).scheme == "glance":
        # Retrieve UAR to local temp file for mounting
        temp_uar = _fetch_uri(task, archive_uri)
        archive_mount = temp_uar
    else:
        # Non-glance URIs can be handed to mount(1M) directly.
        temp_uar = None
        archive_mount = archive_uri

    mount_dir = tempfile.mkdtemp(dir=CONF.solaris_ipmi.imagecache_dirname)

    mount_cmd = ["/usr/sbin/mount", "-F", "uvfs", "-o",
                 "archive=%s" % (archive_mount),
                 "/usr/lib/fs/uafs/uafs", mount_dir]
    LOG.debug("SolarisDeploy._mount_archive:cmd: '%s'" % (mount_cmd))
    proc = Popen(mount_cmd, stdout=PIPE, stderr=PIPE)
    _stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Mount failed: discard the empty mount point before raising.
        shutil.rmtree(mount_dir)
        raise SolarisIPMIError(
            msg=_("Failed to mount UAR %s: %s") % (archive_uri, stderr))

    return mount_dir, temp_uar
|
790 |
|
791 |
|
def _umount_archive(mount_dir, temp_uar):
    """Unmount a UAR and discard its mount-point directory.

    :param mount_dir: Path to mounted archive
    :param temp_uar: Path to glance local uar to remove
    """
    LOG.debug("SolarisDeploy._umount_archive:mount_dir: '%s', temp_uar: %s" %
              (mount_dir, temp_uar))

    umount_cmd = ["/usr/sbin/umount", mount_dir]
    proc = Popen(umount_cmd, stdout=PIPE, stderr=PIPE)
    _stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise SolarisIPMIError(
            msg=_("Failed to unmount UAR %s: %s") % (mount_dir, stderr))

    # Unmount succeeded; remove the now-empty mount point.
    shutil.rmtree(mount_dir)
|
809 |
|
810 |
|
def _get_archive_uuid(task):
    """Get the UUID of an archive

    :param task: a TaskManager instance
    :returns: UUID string for an archive otherwise raise exception
    :raises: SolarisIPMIError if the UUID cannot be extracted
    """
    LOG.debug("SolarisDeploy._get_archive_uuid")
    uuid = None
    archive_uri = task.node.driver_info['archive_uri']

    if PLATFORM == "SunOS":
        # Mount the archive and always unmount it again, whether or not
        # the ISO/UUID extraction succeeds (replaces the previous bare
        # "except:" cleanup-and-reraise duplication).
        mount_dir, temp_uar = _mount_archive(task, archive_uri)
        try:
            _iso, uuid = _get_archive_iso_and_uuid(mount_dir)
        finally:
            _umount_archive(mount_dir, temp_uar)
    else:
        temp_uar = _fetch_uri(task, archive_uri)
        try:
            _iso, uuid = _get_archive_iso_and_uuid(temp_uar)
        except Exception:
            # Drop our image-cache reference on failure before re-raising.
            _image_refcount_adjust(temp_uar, -1)
            raise

    if uuid is None:
        err_msg = _("Failed to extract UUID from UAR: %s") % archive_uri
        if PLATFORM != "SunOS":
            _image_refcount_adjust(temp_uar, -1)
        raise SolarisIPMIError(msg=err_msg)

    LOG.debug("SolarisDeploy._get_archive_uuid: uuid: %s" % (uuid))
    return uuid
|
845 |
|
846 |
|
def _validate_archive_uri(task):
    """Validate archive_uri for reachable, format, etc

    :param task: a TaskManager instance.
    :raises: InvalidParameterValue if invalid archive_uri
    """
    LOG.debug("SolarisDeploy._validate_archive_uri")
    archive_uri = task.node.driver_info['archive_uri']

    url = urlparse(archive_uri)

    if url.scheme not in VALID_ARCHIVE_SCHEMES:
        raise exception.InvalidParameterValue(_(
            "Unsupported archive scheme (%s) referenced in archive_uri (%s).")
            % (url.scheme, archive_uri))

    if not url.netloc and not url.path:
        raise exception.InvalidParameterValue(_(
            "Missing archive name in archive_uri (%s).") % (archive_uri))

    if url.scheme == "glance":
        # Glance schema only supported if using keystone authorization
        # otherwise ironic being used standalone
        if CONF.auth_strategy != "keystone":
            raise exception.InvalidParameterValue(_(
                "Glance scheme only supported when using Keystone (%s).")
                % (archive_uri))

        # Format : glance://<glance UUID>
        # When parsed by urlparse, Glance image uuid appears as netloc param
        if not url.netloc:
            raise exception.InvalidParameterValue(_(
                "Missing Glance image UUID archive_uri (%s).")
                % (archive_uri))

        # Validate glance image exists by attempting to get download size
        try:
            size = images.download_size(task.context, url.netloc)
            LOG.debug("Image %s size: %s" % (url.netloc, str(size)))
            if not size:
                raise exception.InvalidParameterValue(_(
                    "Glance image not found: %s") % (url.netloc))

        except exception.InvalidParameterValue:
            # Re-raise our own "image not found" error unchanged; previously
            # it was swallowed by the generic handler below and re-wrapped.
            raise
        except Exception as err:
            raise exception.InvalidParameterValue(_(
                "Failed to validate Glance image '%s': %s") %
                (url.netloc, err))

    elif url.scheme in ["http", "https"]:
        # Presuming client authentication using HTTPS is not being used.
        # Just a secure connection.
        # TODO(mattk): Do I need to support client side HTTPS authentication
        if not _url_exists(archive_uri):
            raise exception.InvalidParameterValue(_(
                "archive_uri does not exist (%s).") % (archive_uri))
    elif url.scheme == "file":
        # Normalize file://<netloc>/<path> into an absolute local path.
        file_path = os.path.join(os.sep,
                                 url.netloc.strip(os.sep),
                                 url.path.strip(os.sep))
        if not os.path.isfile(file_path):
            raise exception.InvalidParameterValue(_(
                "archive_uri does not exist (%s).") % (archive_uri))
|
909 |
|
910 |
|
def _format_archive_uri(task, archive_uri):
    """Format archive URL to be passed as boot argument to AI client

    Transformation of archive_uri is only required if URI scheme is glance.

    :param task: a TaskManager instance.
    :param archive_uri: URI path to unified archive
    :returns: Formatted archive URI, and auth_token if needed
    """
    LOG.debug("SolarisDeploy._format_archive_uri: archive_uri: %s" %
              (archive_uri))

    if not archive_uri:
        return None, None

    new_uri = archive_uri
    auth_token = None

    parsed = urlparse(archive_uri)
    if parsed.scheme == "glance":
        # Rewrite glance://<UUID> as a direct Glance REST API download
        # URL; the request's auth token accompanies it so the AI client
        # can fetch the image.
        new_uri = ("http://%s:%s/v2/images/%s/file"
                   % (CONF.glance.glance_host, CONF.glance.glance_port,
                      parsed.netloc))
        auth_token = task.context.auth_token

    return new_uri, auth_token
|
940 |
|
941 |
|
def _validate_ai_manifest(task):
    """Validate ai_manifest for format, etc

    driver_info/ai_manifest is used to specify a path to a single
    AI manifest to be used instead of the default derived script.
    e.g. http://path-to-manifest

    :param task: a TaskManager instance.
    :raises: InvalidParameterValue if invalid ai_manifest
    """
    LOG.debug("SolarisDeploy._validate_ai_manifest")
    # Delegate the actual checks to the common URI validator.
    _validate_uri(task, task.node.driver_info['ai_manifest'])
|
955 |
|
956 |
|
def _validate_profiles(task, profiles):
    """Validate profiles for format, etc

    Configuration profiles are specified as a plus(+) delimited list of paths
    e.g. http://path-to-profile+http://path-to-another-profile

    :param task: a TaskManager instance.
    :param profiles: Plus(+) delimited list of configuration profile
    :raises: InvalidParameterValue if invalid configuration profile
    """
    LOG.debug("SolarisDeploy._validate_profiles: %s" % (profiles))

    # Validate each non-empty plus-delimited element individually.
    for profile_uri in (seg.strip() for seg in profiles.split('+')):
        if profile_uri:
            _validate_uri(task, profile_uri)
|
974 |
|
975 |
|
def _validate_uri(task, uri):
    """Validate URI for AI Manifest or SC Profile

    :param task: a TaskManager instance.
    :param uri: URI to AI Manifest or SC profile
    :raises: InvalidParameterValue if invalid manifest/profile URI
    """
    LOG.debug("SolarisDeploy._validate_uri: URI: %s" % (uri))
    url = urlparse(uri)

    if url.scheme not in VALID_URI_SCHEMES:
        raise exception.InvalidParameterValue(_(
            "Unsupported uri scheme (%s) referenced"
            " in URI (%s).") % (url.scheme, uri))

    if not url.netloc and not url.path:
        raise exception.InvalidParameterValue(_(
            "Missing URI name (%s).") % (uri))

    if url.scheme in ["http", "https"]:
        # Presuming client authentication using HTTPS is not being used.
        # Just a secure connection.
        # TODO(mattk): Do I need to support client side HTTPS authentication
        if not _url_exists(uri):
            raise exception.InvalidParameterValue(_(
                "URI does not exist (%s).") % (uri))
        else:
            LOG.debug("SolarisDeploy._validate_uri: %s exists." %
                      (uri))
    elif url.scheme == "file":
        # Normalize file://<netloc>/<path> into an absolute local path.
        file_path = os.path.join(os.sep,
                                 url.netloc.strip(os.sep),
                                 url.path.strip(os.sep))
        if not os.path.isfile(file_path):
            raise exception.InvalidParameterValue(_(
                "URI does not exist (%s).") % (uri))
        else:
            # Log the uri (previously logged url.scheme, which produced
            # the unhelpful message "file exists.").
            LOG.debug("SolarisDeploy._validate_uri: %s exists." %
                      (uri))
    elif url.scheme == "glance":
        # Glance schema only supported if using keystone authorization
        # otherwise ironic being used standalone
        if CONF.auth_strategy != "keystone":
            raise exception.InvalidParameterValue(_(
                "Glance scheme only supported when using Keystone (%s).")
                % (uri))

        # Format : glance://<glance UUID>
        # When parsed by urlparse, Glance image uuid appears as netloc param
        if not url.netloc:
            raise exception.InvalidParameterValue(_(
                "Missing Glance image UUID for URI (%s).")
                % (uri))

        # Validate glance uri exists by attempting to get download size
        try:
            size = images.download_size(task.context, url.netloc)
            LOG.debug("Image %s size: %s" % (url.netloc, str(size)))
            if not size:
                raise exception.InvalidParameterValue(_(
                    "Glance image not found: %s") % (url.netloc))
            else:
                LOG.debug("SolarisDeploy._validate_uri: %s exists." %
                          (uri))

        except exception.InvalidParameterValue:
            # Re-raise our own "image not found" error unchanged; previously
            # it was swallowed by the generic handler below and re-wrapped.
            raise
        except Exception as err:
            raise exception.InvalidParameterValue(_(
                "Failed to validate Glance URI '%s': %s") %
                (url.netloc, err))
|
1045 |
|
1046 |
|
def _validate_fmri(task):
    """Validate fmri for format, etc

    driver_info/fmri is a plus(+) delimited list of IPS package
    FMRIs to be installed. e.g. pkg:/pkg1+pkg:/pkg2

    :param task: a TaskManager instance.
    :raises: InvalidParameterValue if invalid fmri
    """
    LOG.debug("SolarisDeploy._validate_fmri")
    fmri = task.node.driver_info['fmri']

    # Split fmri into list of possible packages.  A distinct loop variable
    # is used so the driver_info value is not shadowed mid-iteration.
    pkg_list = [pkg.strip() for pkg in fmri.split('+') if pkg.strip()]
    for pkg_fmri in pkg_list:
        _validate_fmri_format(pkg_fmri)
|
1063 |
|
1064 |
|
def _validate_fmri_format(fmri):
    """Validate FMRI for format.

    FMRI must not contain the publisher and must be of the format:

        pkg:/<package path>

    Note the fmri only contains a single forward slash.

    :param fmri: IPS FMRI
    :raises: InvalidParameterValue if invalid FMRI
    """
    LOG.debug("SolarisDeploy._validate_fmri_format: fmri: %s" % (fmri))
    url = urlparse(fmri)

    # Scheme must be "pkg", e.g. pkg:/system/library
    if url.scheme != "pkg":
        raise exception.InvalidParameterValue(_(
            "Unsupported IPS scheme (%s) referenced in fmri (%s).")
            % (url.scheme, fmri))

    # A netloc means the form pkg://publisher/... was used, which would
    # carry a publisher name; publishers are supplied separately.
    if url.netloc:
        raise exception.InvalidParameterValue(_(
            "Cannot specify publisher name in fmri (%s).") % (fmri))

    if not url.path:
        raise exception.InvalidParameterValue(_(
            "Missing IPS package name in fmri (%s).") % (fmri))
    else:
        # Validate package name
        if not is_valid_pkg_name(url.path.strip("/")):
            raise exception.InvalidParameterValue(_(
                "Malformed IPS package name in fmri (%s).") % (fmri))
|
1096 |
|
1097 |
|
def _validate_publishers(task):
    """Validate custom publisher name/origins for format

    publishers property is a plus(+) delimited list of IPS publishers
    to be installed from, in the format name@origin. e.g.
    solaris@http://pkg.oracle.com/solaris+extra@http://int.co.com/extras

    :param task: a TaskManager instance.
    :raises: InvalidParameterValue if invalid publisher
    """
    LOG.debug("SolarisDeploy._validate_publishers")
    pubs = task.node.driver_info['publishers']

    # Split publishers into list of name@origin publishers
    pub_list = [pub.strip() for pub in pubs.split('+') if pub.strip()]
    for pub in pub_list:
        # Split into name and origin.  partition() (unlike the previous
        # split('@', 1) unpacking) cannot raise when '@' is absent, so a
        # separator-less entry now yields the intended
        # InvalidParameterValue instead of an unhandled ValueError.
        name, sep, origin = pub.partition('@')
        if not sep or not name or not origin:
            raise exception.InvalidParameterValue(_(
                "Malformed IPS publisher must be of format "
                "name@origin (%s).") % (pub))

        if not valid_pub_prefix(name):
            raise exception.InvalidParameterValue(_(
                "Malformed IPS publisher name (%s).") % (name))

        if not valid_pub_url(origin):
            raise exception.InvalidParameterValue(_(
                "Malformed IPS publisher origin (%s).") % (origin))
|
1128 |
|
1129 |
|
def _fetch_and_create(task, obj_type, obj_name, obj_uri, aiservice, mac,
                      env=None):
    """Fetch manifest/profile and create on AI Server

    :param task: a TaskManager instance.
    :param obj_type: Type of AI object to create "manifest" or "profile"
    :param obj_name: manifest/profile name
    :param obj_uri: URI to manifest/profile to use
    :param aiservice: AI Service to create manifest/profile for
    :param mac: MAC address criteria to use
    :param env: Environment to apply profile to
    :raises: AICreateProfileFail or AICreateManifestFail
    """
    # Fetch URI to local file
    url = urlparse(obj_uri)
    temp_file = _fetch_uri(task, obj_uri)

    def _cleanup_local():
        """Release the locally fetched copy of obj_uri.

        Glance-sourced files are reference counted in the image cache;
        anything else is a plain temporary file.  (Previously this logic
        was duplicated on every exit path.)
        """
        if url.scheme == "glance":
            _image_refcount_adjust(temp_file, -1)
        else:
            os.remove(temp_file)

    try:
        # scp temp file to AI Server so installadm CLIs can consume it
        remote_file = os.path.join("/tmp", obj_name) + ".xml"
        aiservice.copy_remote_file(temp_file, remote_file)
    except Exception as err:
        LOG.error(_("Fetch and create failed for %s: name: %s: %s") %
                  (obj_type, obj_uri, err))
        _cleanup_local()
        raise

    try:
        if obj_type == "manifest":
            # Create AI Manifest
            aiservice.create_manifest(obj_name, remote_file, mac)
        elif obj_type == "profile":
            # Create AI Profile
            aiservice.create_profile(obj_name, remote_file, mac, env)

    except (AICreateManifestFail, AICreateProfileFail):
        aiservice.delete_remote_file(remote_file)
        _cleanup_local()
        raise

    # Remove local and remote temporary copies on success as well
    aiservice.delete_remote_file(remote_file)
    _cleanup_local()
|
1182 |
|
1183 |
|
class DeployStateChecker(Thread):
    """Thread that watches a node's serial console over SSH and tracks
    the Automated Install deployment state.

    The thread connects to the node's service processor, streams console
    output into a temp file, and scans it for the AI success/failure/
    in-progress marker strings, updating ``state`` accordingly.
    """

    def __init__(self, task):
        """Initialize the checker for a single node.

        :param task: a TaskManager instance whose node is being deployed
        """
        LOG.debug("DeployStateChecker.__init__()")
        Thread.__init__(self)

        self.task = task
        self.node = task.node
        # Last observed deployment state; starts as DEPLOYWAIT until the
        # console output indicates otherwise.
        self._state = states.DEPLOYWAIT
        # NOTE(review): ssh_connection is never assigned elsewhere in this
        # class as far as visible here — possibly vestigial.
        self.ssh_connection = None
        # Loop control flag for run(); cleared by stop().
        self.running = True

    @property
    def state(self):
        """Deployment state property"""
        return self._state

    def run(self):
        """Start the thread """
        LOG.debug("DeployStateChecker.run(): Connecting...")
        client = utils.ssh_connect(self._get_ssh_dict())
        channel = client.invoke_shell()
        # Timeout of 0.0 presumably makes channel reads non-blocking
        # (paramiko semantics) — recv then raises socket.timeout, which
        # is swallowed below. TODO confirm.
        channel.settimeout(0.0)
        channel.set_combine_stderr(True)

        # Continuously read stdout from console and parse
        # specifically for success/failure output
        while self.running:
            with tempfile.TemporaryFile(dir='/var/lib/ironic') as tf:
                while True:
                    rchans, _wchans, _echans = select.select([channel], [], [])
                    if channel in rchans:
                        try:
                            # Drain everything currently buffered on the
                            # channel into console_data.
                            console_data = ""
                            while channel.recv_ready():
                                console_data += channel.recv(1024)

                            # Zero bytes after select() reported readable
                            # is treated as EOF on the console stream.
                            if len(console_data) == 0:
                                tf.write("\n*** EOF\n")
                                # Confirm string to search for on success
                                if self._string_in_file(tf, AI_SUCCESS_STRING):
                                    self._state = states.DEPLOYDONE
                                else:
                                    # Didn't succeed so default to failure
                                    self._state = states.DEPLOYFAIL
                                self.stop()
                                break
                            tf.write(console_data)
                            tf.flush()

                            # Read input buffer for prompt
                            if re.search("->", console_data):
                                # Send console start command
                                channel.send("start -script SP/Console\n")

                            # Cater for Yes/No prompts always sending Yes
                            elif re.search("y/n", console_data):
                                channel.send("y\n")

                            # Confirm string to search for on success
                            elif self._string_in_file(tf, AI_SUCCESS_STRING):
                                LOG.debug("DeployStateChecker.run(): Done")
                                self._state = states.DEPLOYDONE
                                self.stop()
                                break

                            # Confirm string to search for on failure
                            elif self._string_in_file(tf, AI_FAILURE_STRING):
                                LOG.debug("DeployStateChecker.run(): FAIL")
                                self._state = states.DEPLOYFAIL
                                self.stop()
                                break

                            elif self._string_in_file(tf, AI_DEPLOY_STRING):
                                LOG.debug(
                                    "DeployStateChecker.run(): DEPLOYING")
                                self._state = states.DEPLOYING
                        except socket.timeout:
                            # Non-blocking read had no data; retry select.
                            pass

    def stop(self):
        """Stop the thread"""
        LOG.debug("DeployStateChecker.stop()")
        self.running = False

    def _string_in_file(self, fp, string):
        """Read all data from file checking for string presence

        :param fp: Open file pointer to read
        :param string: Specific string to check for
        :returns: boolean True of string present in file, False if not
        """
        found_string = False

        # Position read at start of file
        fp.seek(0)
        for line in fp:
            # NOTE: string is used as a regex pattern, not a literal.
            if re.search(string, line):
                found_string = True
                break

        # Return current read point to end of file for subsequent writes
        fp.seek(0, 2)
        return found_string

    def _get_ssh_dict(self):
        """Generate SSH Dictionary for SSH Connection via paramiko

        :returns: dictionary for paramiko connection
        """
        LOG.debug("DeployStateChecker._get_ssh_dict()")

        driver_info = _parse_driver_info(self.node)

        ssh_dict = {
            'host': driver_info.get('address'),
            'username': driver_info.get('username'),
            'port': driver_info.get('port', 22)
        }

        # Coerce the port to int when present; drop the key entirely if
        # driver_info explicitly carried port=None.
        if ssh_dict.get('port') is not None:
            ssh_dict['port'] = int(ssh_dict.get('port'))
        else:
            del ssh_dict['port']

        if driver_info['password']:
            ssh_dict['password'] = driver_info['password']

        LOG.debug("DeployStateChecker._get_ssh_dict():ssh_dict: %s" %
                  (ssh_dict))
        return ssh_dict
|
1317 |
|
1318 |
|
1319 class SolarisDeploy(base.DeployInterface): |
|
1320 """AI Deploy Interface """ |
|
1321 |
|
    def get_properties(self):
        """Return Solaris driver properties.

        :returns: COMMON_PROPERTIES, the dict of driver_info fields this
                  driver understands.
        """
        return COMMON_PROPERTIES
|
1325 |
|
    def validate(self, task):
        """Validate the driver-specific Node deployment info.

        Runs a sequence of checks, raising on the first failure:
        IPMI reachability, port/MAC presence, AI server configuration,
        authentication settings, and per-field driver_info validation.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue.
        :raises: MissingParameterValue.
        """
        LOG.debug("SolarisDeploy.validate()")
        LOG.debug(task.context.auth_token)

        # Validate IPMI credentials by getting node architecture
        try:
            _arch = _get_node_architecture(task.node)
        except Exception as err:
            # NOTE(review): _() is handed the exception object itself, not
            # a message string — verify this is intended.
            raise exception.InvalidParameterValue(_(err))

        if not driver_utils.get_node_mac_addresses(task):
            raise exception.InvalidParameterValue(
                _("Node %s does not have any port associated with it.") %
                (task.node.uuid))

        # Ensure server configured.  The literal string "None" is also
        # rejected — presumably guarding against stringified unset config.
        if not CONF.ai.server or CONF.ai.server == "None":
            raise exception.MissingParameterValue(
                _("AI Server not specified in configuration file."))

        # Ensure username configured
        if not CONF.ai.username or CONF.ai.username == "None":
            raise exception.MissingParameterValue(
                _("AI Server user not specified in configuration file."))

        # One of ssh_key_file / ssh_key_contents / password must be configured
        if ((not CONF.ai.password or CONF.ai.password == "None") and
            (not CONF.ai.ssh_key_file or CONF.ai.ssh_key_file == "None") and
            (not CONF.ai.ssh_key_contents or
             CONF.ai.ssh_key_contents == "None")):
            raise exception.MissingParameterValue(
                _("AI Server authentication not specified. One of password, "
                  "ssh_key_file and ssh_key_contents must be present in "
                  "configuration file."))

        # archive_uri, publishers or fmri are ignored if a ai_manifest is
        # defined. They should be contained within the custom manifest itself
        if (task.node.driver_info.get('ai_manifest') and
            (task.node.driver_info.get('archive_uri') or
             task.node.driver_info.get('publishers') or
             task.node.driver_info.get('fmri'))):
            raise exception.InvalidParameterValue(
                _("Custom Archive, Publishers or FMRI cannot be specified "
                  "when specifying a custom AI Manifest. They should be "
                  "contained within this custom AI Manifest."))

        # Ensure ai_service is valid if specified in driver
        if task.node.driver_info.get('ai_service'):
            aiservice = AIService(task,
                                  task.node.driver_info.get('ai_service'))
            if not aiservice.exists:
                raise exception.InvalidParameterValue(
                    _("AI Service %s does not exist.") % (aiservice.name))

        # Ensure node archive_uri is valid if specified
        if task.node.driver_info.get('archive_uri'):
            # Validate archive_uri for reachable, format, etc
            _validate_archive_uri(task)

        # Ensure custom publisher provided if FMRI provided
        if task.node.driver_info.get('fmri') and \
                not task.node.driver_info.get('publishers'):
            raise exception.MissingParameterValue(_(
                "Must specify custom publisher with custom fmri."))

        # Ensure node publishers are valid if specified
        if task.node.driver_info.get('publishers'):
            # Validate publishers for format, etc
            _validate_publishers(task)

        # Ensure node fmri is valid if specified
        if task.node.driver_info.get('fmri'):
            # Validate fmri for format, etc
            _validate_fmri(task)

        # Ensure node sc_profiles is valid if specified
        if task.node.driver_info.get('sc_profiles'):
            # Validate sc_profiles for format, etc
            _validate_profiles(task, task.node.driver_info.get('sc_profiles'))

        # Ensure node install_profiles is valid if specified
        if task.node.driver_info.get('install_profiles'):
            # Validate install_profiles for format, etc
            _validate_profiles(task,
                               task.node.driver_info.get('install_profiles'))

        # Ensure node manifest is valid of specified
        if task.node.driver_info.get('ai_manifest'):
            # Validate ai_manifest for format, etc
            _validate_ai_manifest(task)

        # Try to get the URL of the Ironic API
        try:
            # TODO(lucasagomes): Validate the format of the URL
            CONF.conductor.api_url or keystone.get_service_url()
        except (exception.CatalogFailure,
                exception.CatalogNotFound,
                exception.CatalogUnauthorized):
            raise exception.InvalidParameterValue(_(
                "Couldn't get the URL of the Ironic API service from the "
                "configuration file or Keystone catalog."))

        # Validate driver_info by parsing contents
        _parse_driver_info(task.node)
|
1435 |
|
1436 @task_manager.require_exclusive_lock |
|
1437 def deploy(self, task): |
|
1438 """Perform start deployment a node. |
|
1439 |
|
1440 For AI Deployment of x86 machines, we simply need to set the chassis |
|
1441 boot device to pxe and reboot the physical node. |
|
1442 |
|
1443 For AI Deployment of SPARC Machines we need to supply a boot script |
|
1444 indicating to perform a network DHCP boot. |
|
1445 |
|
1446 AI Server settings for this node, e.g. client, manifest, boot args |
|
1447 etc, will have been configured via prepare() method which is called |
|
1448 before deploy(). |
|
1449 |
|
1450 :param task: a TaskManager instance. |
|
1451 :returns: deploy state DEPLOYWAIT. |
|
1452 """ |
|
1453 LOG.debug("SolarisDeploy.deploy()") |
|
1454 |
|
1455 arch = _get_node_architecture(task.node) |
|
1456 |
|
1457 # Ensure persistence is false so net boot only occurs once |
|
1458 if arch == 'x86': |
|
1459 # Set boot device to PXE network boot |
|
1460 dev_cmd = 'pxe' |
|
1461 elif arch == 'SPARC': |
|
1462 # Set bootmode script to network DHCP |
|
1463 dev_cmd = 'wanboot' |
|
1464 else: |
|
1465 raise exception.InvalidParameterValue( |
|
1466 _("Invalid node architecture of '%s'.") % (arch)) |
|
1467 |
|
1468 manager_utils.node_set_boot_device(task, dev_cmd, |
|
1469 persistent=False) |
|
1470 manager_utils.node_power_action(task, states.REBOOT) |
|
1471 |
|
1472 deploy_thread = DeployStateChecker(task) |
|
1473 deploy_thread.start() |
|
1474 timer = loopingcall.FixedIntervalLoopingCall(_check_deploy_state, |
|
1475 task, task.node.uuid, |
|
1476 deploy_thread) |
|
1477 timer.start(interval=int(CONF.ai.deploy_interval)) |
|
1478 |
|
1479 return states.DEPLOYWAIT |
|
1480 |
|
1481 @task_manager.require_exclusive_lock |
|
1482 def tear_down(self, task): |
|
1483 """Tear down a previous deployment. |
|
1484 |
|
1485 Reset boot device or bootmode script and power off the node. |
|
1486 All actual clean-up is done in the clean_up() |
|
1487 method which should be called separately. |
|
1488 |
|
1489 :param task: a TaskManager instance. |
|
1490 :returns: deploy state DELETED. |
|
1491 """ |
|
1492 LOG.debug("SolarisDeploy.tear_down()") |
|
1493 manager_utils.node_set_boot_device(task, 'disk', |
|
1494 persistent=False) |
|
1495 manager_utils.node_power_action(task, states.POWER_OFF) |
|
1496 |
|
1497 return states.DELETED |
|
1498 |
|
1499 def prepare(self, task): |
|
1500 """Prepare the deployment environment for this node. |
|
1501 |
|
1502 1. Ensure Node's AI Service is specified and it exists |
|
1503 2. (Re)Create AI Clients for each port/Mac specified for this Node |
|
1504 3. (Re)Create AI Manifest for each port/Mac specified for this Node |
|
1505 with specific criteria of MAC address |
|
1506 |
|
1507 AI Service to use for installation is determined from |
|
1508 driver_info properties archive_uri or ai_service. archive_uri |
|
1509 takes precedence over ai_service. |
|
1510 |
|
1511 1. archive_uri specified. |
|
1512 Extract AI ISO from UAR and create a new AI service if service |
|
1513 for this ID does not exist. |
|
1514 2. ai_service specified |
|
1515 AI Service must exist. |
|
1516 3. archive_uri & ai_service not specified |
|
1517 Use default architecture specific service to perform IPS |
|
1518 install. |
|
1519 |
|
1520 :param task: a TaskManager instance. |
|
1521 """ |
|
1522 LOG.debug("SolarisDeploy.prepare()") |
|
1523 |
|
1524 ai_manifest = task.node.driver_info.get('ai_manifest', None) |
|
1525 ai_service = task.node.driver_info.get('ai_service', None) |
|
1526 arch = _get_node_architecture(task.node) |
|
1527 archive_uri = task.node.driver_info.get('archive_uri', None) |
|
1528 fmri = task.node.driver_info.get('fmri', None) |
|
1529 install_profiles = task.node.driver_info.get('install_profiles', None) |
|
1530 publishers = task.node.driver_info.get('publishers', None) |
|
1531 sc_profiles = task.node.driver_info.get('sc_profiles', None) |
|
1532 |
|
1533 # Ensure cache dir exists |
|
1534 if not os.path.exists(CONF.solaris_ipmi.imagecache_dirname): |
|
1535 os.makedirs(CONF.solaris_ipmi.imagecache_dirname) |
|
1536 |
|
1537 # archive_uri, publishers or fmri are ignored if a ai_manifest is |
|
1538 # defined. They should be contained within the custom manifest itself |
|
1539 if ((ai_manifest) and (archive_uri or publishers or fmri)): |
|
1540 raise exception.InvalidParameterValue( |
|
1541 _("Custom Archive, Publishers or FMRI cannot be specified " |
|
1542 "when specifying a custom AI Manifest. They should be " |
|
1543 "contained within this custom AI Manifest.")) |
|
1544 |
|
1545 # 1. Ensure Node's AI Service exists, if archive_uri then |
|
1546 # create a new service of UUID of archive does not already exist |
|
1547 if archive_uri: |
|
1548 # Validate archive_uri, format, reachable, etc |
|
1549 _validate_archive_uri(task) |
|
1550 |
|
1551 # Extract UUID from archive UAR and instantiate AIService |
|
1552 ai_service = _get_archive_uuid(task) |
|
1553 aiservice = AIService(task, ai_service) |
|
1554 |
|
1555 elif ai_service: |
|
1556 # Instantiate AIService object for this node/service |
|
1557 aiservice = AIService(task, ai_service) |
|
1558 else: |
|
1559 # IPS Install, ensure default architecture service exists |
|
1560 if arch == "x86": |
|
1561 ai_service = "default-i386" |
|
1562 elif arch == 'SPARC': |
|
1563 ai_service = "default-sparc" |
|
1564 else: |
|
1565 raise exception.InvalidParameterValue( |
|
1566 _("Invalid node architecture of '%s'.") % (arch)) |
|
1567 |
|
1568 # Instantiate AIService object for this node/service |
|
1569 aiservice = AIService(task, ai_service) |
|
1570 |
|
1571 # Check if AI Service exists, raise exception of not |
|
1572 if not aiservice.exists: |
|
1573 if archive_uri: |
|
1574 # Create this service |
|
1575 aiservice.create_service(archive_uri) |
|
1576 else: |
|
1577 raise exception.InvalidParameterValue( |
|
1578 _("AI Service %s does not exist.") % (aiservice.name)) |
|
1579 |
|
1580 # Ensure custom publisher provided if FMRI provided |
|
1581 if fmri and not publishers: |
|
1582 raise exception.InvalidParameterValue(_( |
|
1583 "Must specify custom publisher with custom fmri.")) |
|
1584 |
|
1585 # Ensure node publishers are valid if specified |
|
1586 if publishers: |
|
1587 # Validate publishers for format, etc |
|
1588 _validate_publishers(task) |
|
1589 |
|
1590 # Ensure node fmri is valid if specified |
|
1591 if fmri: |
|
1592 # Validate fmri, format, etc |
|
1593 _validate_fmri(task) |
|
1594 |
|
1595 # Ensure node sc_profiles is of valid format if specified |
|
1596 if sc_profiles: |
|
1597 # Validate sc_profiles for format, etc |
|
1598 _validate_profiles(task, sc_profiles) |
|
1599 |
|
1600 # Ensure node install_profiles is of valid format if specified |
|
1601 if install_profiles: |
|
1602 # Validate install_profiles for format, etc |
|
1603 _validate_profiles(task, install_profiles) |
|
1604 |
|
1605 # Ensure node ai_manifest is valid if specified |
|
1606 if ai_manifest: |
|
1607 # Validate ai_manifest for format, etc |
|
1608 _validate_ai_manifest(task) |
|
1609 |
|
1610 for mac in driver_utils.get_node_mac_addresses(task): |
|
1611 # 2. Recreate AI Clients for each port/Mac specified for this Node |
|
1612 # Check if AI Client exists for this service and if so remove it |
|
1613 if mac.lower() in aiservice.clients: |
|
1614 # Client exists remove it |
|
1615 aiservice.delete_client(mac) |
|
1616 |
|
1617 # Recreate new ai client for this mac address |
|
1618 new_uri, auth_token = _format_archive_uri(task, archive_uri) |
|
1619 aiservice.create_client(mac, arch, new_uri, auth_token, |
|
1620 publishers, fmri) |
|
1621 |
|
1622 # 3. (Re)Create AI Manifest for each port/Mac specified for this |
|
1623 # Node. Manifest name will be MAC address stripped of colons |
|
1624 manifest_name = mac.replace(':', '') |
|
1625 |
|
1626 # Check if AI Manifest exists for this service and if so remove it |
|
1627 if manifest_name in aiservice.manifests: |
|
1628 # Manifest exists remove it |
|
1629 aiservice.delete_manifest(manifest_name) |
|
1630 |
|
1631 # (Re)Create new ai Manifest for this mac address |
|
1632 # If ai_manifest is specified use it as the manifest otherwise |
|
1633 # use derived manifest script specified by aiservice. |
|
1634 if ai_manifest is not None: |
|
1635 # Fetch manifest locally, copy to AI Server so that |
|
1636 # installadm create-manifest CLI works. |
|
1637 _fetch_and_create(task, "manifest", manifest_name, ai_manifest, |
|
1638 aiservice, mac) |
|
1639 else: |
|
1640 _fetch_and_create(task, "manifest", manifest_name, |
|
1641 aiservice.derived_manifest, aiservice, mac) |
|
1642 |
|
1643 # 4. (Re)Create AI Profiles for each port/MAC specified for this |
|
1644 # Node, adding a new profile for each SC Profile specified. |
|
1645 # Profile Name will be MAC address prefix and counter suffix. |
|
1646 # e.g. AAEEBBCCFF66-1 |
|
1647 profile_prefix = mac.replace(':', '') + "-" |
|
1648 |
|
1649 # Remove all profiles associated with this MAC address and service |
|
1650 for profile_name in aiservice.profiles: |
|
1651 # Profile name starts with MAC address, assuming ironic |
|
1652 # created this profile so remove it. |
|
1653 if profile_prefix in profile_name: |
|
1654 aiservice.delete_profile(profile_name) |
|
1655 |
|
1656 # Process both sc_profiles and install_profiles filtering into |
|
1657 # unique list of profiles and environments to be applied to. |
|
1658 if install_profiles is not None: |
|
1659 ins_list = [prof.strip() for prof in |
|
1660 install_profiles.split('+') if prof.strip()] |
|
1661 else: |
|
1662 ins_list = [] |
|
1663 |
|
1664 prof_dict = dict(((uri, "install") for uri in ins_list)) |
|
1665 |
|
1666 if sc_profiles is not None: |
|
1667 sc_list = [prof.strip() for prof in sc_profiles.split('+') |
|
1668 if prof.strip()] |
|
1669 else: |
|
1670 sc_list = [] |
|
1671 |
|
1672 for profile in sc_list: |
|
1673 if profile in prof_dict: |
|
1674 prof_dict[profile] = "all" |
|
1675 else: |
|
1676 prof_dict[profile] = "system" |
|
1677 |
|
1678 profile_index = 0 |
|
1679 for profile_uri, profile_env in prof_dict.iteritems(): |
|
1680 profile_index += 1 |
|
1681 profile_name = profile_prefix + str(profile_index) |
|
1682 |
|
1683 # Fetch profile locally, copy to AI Server so that |
|
1684 # installadm create-profile CLI works. |
|
1685 _fetch_and_create(task, "profile", profile_name, profile_uri, |
|
1686 aiservice, mac, env=profile_env) |
|
1687 |
|
1688 # Ensure local copy of archive_uri is removed if not needed |
|
1689 if archive_uri: |
|
1690 url = urlparse(archive_uri) |
|
1691 if url.scheme == "glance": |
|
1692 temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname, |
|
1693 url.netloc) |
|
1694 _image_refcount_adjust(temp_uar, -1) |
|
1695 elif PLATFORM != "SunOS": |
|
1696 temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname, |
|
1697 url.path.replace("/", "")) |
|
1698 _image_refcount_adjust(temp_uar, -1) |
|
1699 |
|
1700 def clean_up(self, task): |
|
1701 """Clean up the deployment environment for this node. |
|
1702 |
|
1703 As node is being torn down we need to clean up specific |
|
1704 AI Clients and Manifests associated with MAC addresses |
|
1705 associated with this node. |
|
1706 |
|
1707 1. Delete AI Clients for each port/Mac specified for this Node |
|
1708 2. Delete AI Manifest for each port/Mac specified for this Node |
|
1709 |
|
1710 :param task: a TaskManager instance. |
|
1711 """ |
|
1712 LOG.debug("SolarisDeploy.clean_up()") |
|
1713 |
|
1714 ai_service = task.node.driver_info.get('ai_service', None) |
|
1715 arch = _get_node_architecture(task.node) |
|
1716 archive_uri = task.node.driver_info.get('archive_uri', None) |
|
1717 |
|
1718 # Instantiate AIService object for this node/service |
|
1719 if archive_uri: |
|
1720 aiservice = AIService(task, _get_archive_uuid(task)) |
|
1721 elif ai_service: |
|
1722 aiservice = AIService(task, ai_service) |
|
1723 else: |
|
1724 if arch == "x86": |
|
1725 ai_service = "default-i386" |
|
1726 elif arch == 'SPARC': |
|
1727 ai_service = "default-sparc" |
|
1728 else: |
|
1729 raise exception.InvalidParameterValue( |
|
1730 _("Invalid node architecture of '%s'.") % (arch)) |
|
1731 aiservice = AIService(task, ai_service) |
|
1732 |
|
1733 # Check if AI Service exists, log message if already removed |
|
1734 if not aiservice.exists: |
|
1735 # There is nothing to clean up as service removed |
|
1736 LOG.info(_("AI Service %s already removed.") % (aiservice.name)) |
|
1737 else: |
|
1738 for mac in driver_utils.get_node_mac_addresses(task): |
|
1739 # 1. Delete AI Client for this MAC Address |
|
1740 if mac.lower() in aiservice.clients: |
|
1741 aiservice.delete_client(mac) |
|
1742 |
|
1743 # 2. Delete AI Manifest for this MAC Address |
|
1744 manifest_name = mac.replace(':', '') |
|
1745 if manifest_name in aiservice.manifests: |
|
1746 aiservice.delete_manifest(manifest_name) |
|
1747 |
|
1748 # 3. Remove AI Profiles for this MAC Address |
|
1749 profile_prefix = mac.replace(':', '') + "-" |
|
1750 |
|
1751 # Remove all profiles associated with this MAC address |
|
1752 for profile_name in aiservice.profiles: |
|
1753 if profile_prefix in profile_name: |
|
1754 aiservice.delete_profile(profile_name) |
|
1755 |
|
1756 # Ensure local copy of archive_uri is removed if not needed |
|
1757 if archive_uri: |
|
1758 url = urlparse(archive_uri) |
|
1759 if url.scheme == "glance": |
|
1760 temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname, |
|
1761 url.netloc) |
|
1762 _image_refcount_adjust(temp_uar, -1) |
|
1763 elif PLATFORM != "SunOS": |
|
1764 temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname, |
|
1765 url.path.replace("/", "")) |
|
1766 _image_refcount_adjust(temp_uar, -1) |
|
1767 |
|
1768 def take_over(self, _task): |
|
1769 """Take over management of this task's node from a dead conductor.""" |
|
1770 """ TODO(mattk): Determine if this is required""" |
|
1771 LOG.debug("SolarisDeploy.take_over()") |
|
1772 |
|
1773 |
|
class SolarisManagement(base.ManagementInterface):
    """Management class for solaris nodes."""

    def get_properties(self):
        """Return Solaris driver properties.

        :returns: dictionary of <property name>:<property description>
        """
        return COMMON_PROPERTIES

    def __init__(self):
        """Verify a usable ipmitool binary is available.

        :raises: DriverLoadError if ipmitool (with timing and bridging
            option support) cannot be located on the system path.
        """
        try:
            ipmitool._check_option_support(['timing', 'single_bridge',
                                            'dual_bridge'])
        except OSError:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to locate usable ipmitool command in "
                         "the system path when checking ipmitool version"))

    def validate(self, task):
        """Check that 'driver_info' contains IPMI credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: MissingParameterValue if a required parameter is missing.
        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self, task=None):
        """Get a list of the supported boot devices.

        :param task: a task from TaskManager. If None, the union of
            devices for all architectures is returned.
        :returns: A list with the supported boot devices defined
            in :mod:`ironic.common.boot_devices`.
        :raises: InvalidParameterValue if the node architecture is
            neither 'x86' nor 'SPARC'.
        """
        if task is None:
            return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
                    boot_devices.BIOS, boot_devices.SAFE]
        else:
            # Get architecture of node and return supported boot devices
            arch = _get_node_architecture(task.node)
            if arch == 'x86':
                return [boot_devices.PXE, boot_devices.DISK,
                        boot_devices.CDROM, boot_devices.BIOS,
                        boot_devices.SAFE]
            elif arch == 'SPARC':
                # SPARC has no PXE; network boot is via wanboot instead.
                return [boot_devices.DISK, 'wanboot']
            else:
                raise exception.InvalidParameterValue(
                    _("Invalid node architecture of '%s'.") % (arch))

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device for the task's node.

        Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
                       :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
                           persist to all future boots, False if not.
                           Default: False.
        :raises: InvalidParameterValue if an invalid boot device is specified
        :raises: MissingParameterValue if required ipmi parameters are missing.
        :raises: IPMIFailure on an error from ipmitool.

        """
        LOG.debug("SolarisManagement.set_boot_device: %s" % device)

        arch = _get_node_architecture(task.node)
        archive_uri = task.node.driver_info.get('archive_uri')
        publishers = task.node.driver_info.get('publishers')
        fmri = task.node.driver_info.get('fmri')

        if arch == 'x86':
            if device not in self.get_supported_boot_devices(task=task):
                raise exception.InvalidParameterValue(_(
                    "Invalid boot device %s specified.") % device)
            cmd = ["chassis", "bootdev", device]
            if persistent:
                # BUG FIX: previously this concatenated the string
                # " options=persistent" onto the list with '+', raising
                # TypeError whenever persistent=True. Append the option
                # as its own argument instead.
                cmd.append("options=persistent")
        elif arch == 'SPARC':
            # SPARC boots via an ILOM bootmode script: network DHCP
            # (wanboot install) or local disk.
            if device == 'wanboot':
                boot_cmd = 'set /HOST/bootmode script="'
                script_str = 'boot net:dhcp - install'
                if archive_uri:
                    new_uri, auth_token = _format_archive_uri(task,
                                                              archive_uri)
                    script_str += ' archive_uri=%s' % (new_uri)

                    if auth_token is not None:
                        # Add auth_token to boot arg, AI archive transfer will
                        # use this by setting X-Auth-Token header when using
                        # curl to retrieve archive from glance.
                        script_str += ' auth_token=%s' % \
                            (task.context.auth_token)

                if publishers:
                    pub_list = [pub.strip() for pub in publishers.split('+')
                                if pub.strip()]
                    script_str += ' publishers=%s' % ('+'.join(pub_list))

                if fmri:
                    pkg_list = [pkg.strip() for pkg in fmri.split('+')
                                if pkg.strip()]
                    script_str += ' fmri=%s' % ('+'.join(pkg_list))

                # bootmode script property has a size restriction of 255
                # characters raise error if this is breached.
                if len(script_str) > 255:
                    raise exception.InvalidParameterValue(_(
                        "SPARC firmware bootmode script length exceeds 255:"
                        " %s") % script_str)
                boot_cmd += script_str + '"'
                cmd = ['sunoem', 'cli', boot_cmd]
            elif device == 'disk':
                # Clearing the bootmode script reverts to booting from disk.
                cmd = ['sunoem', 'cli',
                       'set /HOST/bootmode script=""']
            else:
                raise exception.InvalidParameterValue(_(
                    "Invalid boot device %s specified.") % (device))
        else:
            raise exception.InvalidParameterValue(
                _("Invalid node architecture of '%s'.") % (arch))

        driver_info = _parse_driver_info(task.node)
        try:
            _out, _err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as err:
            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'],
                         'cmd': cmd, 'error': err})
            raise exception.IPMIFailure(cmd=cmd)

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node.

        :param task: a task from TaskManager.
        :raises: InvalidParameterValue if required IPMI parameters
            are missing.
        :raises: IPMIFailure on an error from ipmitool.
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: a dictionary containing:

            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.

        """
        LOG.debug("SolarisManagement.get_boot_device")
        arch = _get_node_architecture(task.node)
        driver_info = _parse_driver_info(task.node)
        response = {'boot_device': None, 'persistent': None}

        if arch == 'x86':
            # Boot parameter 5 holds the x86 boot device selector.
            cmd = ["chassis", "bootparam", "get", "5"]
        elif arch == 'SPARC':
            # SPARC boot device is implied by the ILOM bootmode script.
            cmd = ['sunoem', 'getval', '/HOST/bootmode/script']
        else:
            raise exception.InvalidParameterValue(
                _("Invalid node architecture of '%s'.") % (arch))

        try:
            out, _err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as err:
            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
                            'when executing "ipmitool %(cmd)s". '
                            'Error: %(error)s'),
                        {'node': driver_info['uuid'],
                         'cmd': cmd, 'error': err})
            raise exception.IPMIFailure(cmd=cmd)

        if arch == 'x86':
            # Map ipmitool's textual selector back onto ironic devices.
            re_obj = re.search('Boot Device Selector : (.+)?\n', out)
            if re_obj:
                boot_selector = re_obj.groups('')[0]
                if 'PXE' in boot_selector:
                    response['boot_device'] = boot_devices.PXE
                elif 'Hard-Drive' in boot_selector:
                    if 'Safe-Mode' in boot_selector:
                        response['boot_device'] = boot_devices.SAFE
                    else:
                        response['boot_device'] = boot_devices.DISK
                elif 'BIOS' in boot_selector:
                    response['boot_device'] = boot_devices.BIOS
                elif 'CD/DVD' in boot_selector:
                    response['boot_device'] = boot_devices.CDROM

            response['persistent'] = 'Options apply to all future boots' in out
        elif arch == 'SPARC':
            # A "net:dhcp" bootmode script means wanboot; anything else
            # (including an empty script) means boot from disk.
            if "net:dhcp" in out:
                response['boot_device'] = 'wanboot'
            else:
                response['boot_device'] = 'disk'
        LOG.debug(response)
        return response

    def get_sensors_data(self, task):
        """Get sensors data.

        :param task: a TaskManager instance.
        :raises: FailedToGetSensorData when getting the sensor data fails.
        :raises: FailedToParseSensorData when parsing sensor data fails.
        :raises: InvalidParameterValue if required ipmi parameters are missing
        :raises: MissingParameterValue if a required parameter is missing.
        :returns: returns a dict of sensor data group by sensor type.

        """
        driver_info = _parse_driver_info(task.node)
        # with '-v' option, we can get the entire sensor data including the
        # extended sensor informations
        cmd = "-v sdr"
        try:
            out, _err = _exec_ipmitool(driver_info, cmd)
        except (exception.PasswordFileFailedToCreate,
                processutils.ProcessExecutionError) as err:
            raise exception.FailedToGetSensorData(node=task.node.uuid,
                                                  error=err)

        return ipmitool._parse_ipmi_sensors_data(task.node, out)
|
2007 |
|
2008 |
|
2009 class AIService(): |
|
2010 """AI Service""" |
|
2011 |
|
2012 def __init__(self, task, name): |
|
2013 """Initialize AIService object |
|
2014 |
|
2015 :param task: a TaskManager instance |
|
2016 :param name: AI Service name |
|
2017 """ |
|
2018 LOG.debug("AIService.__init__()") |
|
2019 self.task = task |
|
2020 self.name = name |
|
2021 self._clients = list() |
|
2022 self._image_path = None |
|
2023 self._manifests = list() |
|
2024 self._profiles = list() |
|
2025 self._ssh_obj = None |
|
2026 self._derived_manifest = None |
|
2027 |
|
2028 @property |
|
2029 def ssh_obj(self): |
|
2030 """paramiko.SSHClient active connection""" |
|
2031 LOG.debug("AIService.ssh_obj") |
|
2032 if self._ssh_obj is None: |
|
2033 self._ssh_obj = self._get_ssh_connection() |
|
2034 return self._ssh_obj |
|
2035 |
|
2036 @property |
|
2037 def manifests(self): |
|
2038 """list() of manifest names for this service""" |
|
2039 LOG.debug("AIService.manifests") |
|
2040 if not self._manifests: |
|
2041 self._manifests = self._get_manifest_names() |
|
2042 return self._manifests |
|
2043 |
|
2044 @property |
|
2045 def profiles(self): |
|
2046 """list() of profile names for this service""" |
|
2047 LOG.debug("AIService.profiles") |
|
2048 if not self._profiles: |
|
2049 self._profiles = self._get_profile_names() |
|
2050 return self._profiles |
|
2051 |
|
2052 @property |
|
2053 def clients(self): |
|
2054 """list() of all client names(mac addresses) On AI Server""" |
|
2055 LOG.debug("AIService.clients") |
|
2056 if not self._clients: |
|
2057 self._clients = self._get_all_client_names() |
|
2058 return self._clients |
|
2059 |
|
2060 @property |
|
2061 def exists(self): |
|
2062 """True/False indicator of this service exists of not""" |
|
2063 LOG.debug("AIService.exists") |
|
2064 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -n " + self.name |
|
2065 try: |
|
2066 stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2067 except Exception as _err: |
|
2068 return False |
|
2069 |
|
2070 if self.name != self._parse_service_name(stdout): |
|
2071 return False |
|
2072 else: |
|
2073 return True |
|
2074 |
|
2075 @property |
|
2076 def image_path(self): |
|
2077 """image_path for this service""" |
|
2078 LOG.debug("AIService.image_path") |
|
2079 if self._image_path is None: |
|
2080 self._image_path = self._get_image_path() |
|
2081 return self._image_path |
|
2082 |
|
2083 @property |
|
2084 def derived_manifest(self): |
|
2085 """Access default derived manifest URI""" |
|
2086 LOG.debug("AIService.derived_manifest") |
|
2087 if not self._derived_manifest: |
|
2088 self._derived_manifest = CONF.ai.derived_manifest |
|
2089 return self._derived_manifest |
|
2090 |
|
    def create_service(self, archive_uri):
        """Create a new AI Service for this object

        Fetches the Unified Archive referenced by archive_uri, copies the
        AI install ISO it contains to the AI server and runs
        'installadm create-service' against it. On success self.name is
        switched to the archive's UUID and all cached client/manifest/
        profile lists are reset.

        :param archive_uri: archive_uri to create service from
        :raises: AICreateServiceFail if 'installadm create-service' fails
        """

        LOG.debug("AIService.create_service(): %s" % (self.name))

        # On Solaris the archive can be lofi-mounted in place; elsewhere it
        # must be downloaded and have the ISO extracted from it.
        if PLATFORM == "SunOS":
            # 1. Fetch archive
            mount_dir, temp_uar = _mount_archive(self.task, archive_uri)
            iso, uuid = _get_archive_iso_and_uuid(mount_dir)
        else:
            # 1. Fetch archive and Extract ISO file
            temp_uar = _fetch_uri(self.task, archive_uri)
            iso, uuid = _get_archive_iso_and_uuid(temp_uar, extract_iso=True)

        # 2. scp AI ISO from archive to AI Server
        remote_iso = os.path.join("/tmp", uuid) + ".iso"
        try:
            self.copy_remote_file(iso, remote_iso)
        # NOTE(review): bare except is deliberate here - clean up the
        # mount/extraction artifacts for any failure, then re-raise.
        except:
            if PLATFORM == "SunOS":
                _umount_archive(mount_dir, temp_uar)
                if urlparse(archive_uri).scheme == "glance":
                    _image_refcount_adjust(temp_uar, -1)
            else:
                shutil.rmtree(os.path.dirname(iso))
                _image_refcount_adjust(temp_uar, -1)
            raise

        if PLATFORM != "SunOS":
            # Remove temp extracted ISO file
            shutil.rmtree(os.path.dirname(iso))

        # 3. Create a new AI Service
        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-service " + \
            " -y -n " + uuid + " -s " + remote_iso

        try:
            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
            # Service now exists under the archive's UUID; adopt that name
            # and drop stale caches.
            self.name = uuid
            self._clients = []
            self._manifests = []
            self._profiles = []

        except Exception as _err:
            self.delete_remote_file(remote_iso)
            if PLATFORM == "SunOS":
                _umount_archive(mount_dir, temp_uar)
            else:
                _image_refcount_adjust(temp_uar, -1)
            raise AICreateServiceFail(
                _("Failed to create AI Service %s") % (uuid))

        # 4. Remove copy of AI ISO on AI Server
        self.delete_remote_file(remote_iso)

        if PLATFORM == "SunOS":
            # 5. Unmount UAR
            _umount_archive(mount_dir, temp_uar)

        # 6. Decrement reference count for image
        # temp_uar may be None when no locally cached copy was made.
        if temp_uar is not None:
            _image_refcount_adjust(temp_uar, -1)
|
2156 |
|
2157 def delete_service(self): |
|
2158 """Delete the current AI Service""" |
|
2159 LOG.debug("AIService.delete_service():name: %s" % (self.name)) |
|
2160 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-service" + \ |
|
2161 " -r -y -n " + self.name |
|
2162 |
|
2163 try: |
|
2164 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2165 except Exception as _err: |
|
2166 raise AIDeleteServiceFail( |
|
2167 _("Failed to delete AI Service %s") % (self.name)) |
|
2168 |
|
    def create_client(self, mac, arch, archive_uri, auth_token,
                      publishers, fmri):
        """Create a client associated with this service

        Runs 'installadm create-client' on the AI server; for x86 clients
        the install parameters are passed as boot arguments, and the
        generated GRUB menu is customized to boot immediately.

        :param mac: MAC Address of client to create
        :param arch: Machine architecture for this node
        :param archive_uri: URI of archive to install node from
        :param auth_token: Authorization token for glance UAR retrieval
        :param publishers: IPS publishers list in name@origin format
        :param fmri: IPS package FMRIs to install
        :returns: Nothing exception raised if deletion fails
        :raises: AICreateClientFail if client creation or x86 grub menu
            customization fails
        """
        LOG.debug("AIService.create_client():mac: %s" % (mac))
        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-client -e " + \
            mac + " -n " + self.name

        # Add specific boot arguments for 'x86' clients only
        # (SPARC passes these via the ILOM bootmode script instead).
        if arch == 'x86':
            ai_cmd += " -b install=true,console=ttya"

            if archive_uri:
                ai_cmd += ",archive_uri=%s" % (archive_uri)

            if auth_token:
                # Consumed by the AI archive transfer as an X-Auth-Token
                # header when pulling the archive from glance.
                ai_cmd += ",auth_token=%s" % (auth_token)

            if publishers:
                # '+'-separated publisher list, whitespace trimmed.
                pub_list = [pub.strip() for pub in publishers.split('+')
                            if pub.strip()]
                ai_cmd += ",publishers='%s'" % ('+'.join(pub_list))

            if fmri:
                # '+'-separated package FMRI list, whitespace trimmed.
                pkg_list = [pkg.strip() for pkg in fmri.split('+')
                            if pkg.strip()]
                ai_cmd += ",fmri='%s'" % ('+'.join(pkg_list))

        try:
            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
        except Exception as _err:
            raise AICreateClientFail(_("Failed to create AI Client %s") %
                                     (mac))

        # If arch x86 customize grub reducing grub menu timeout to 0
        if arch == 'x86':
            custom_grub = "/tmp/%s.grub" % (mac)
            # Export the generated grub menu, rewrite the timeout inline.
            ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm export -e " + \
                mac + " -G | /usr/bin/sed -e 's/timeout=30/timeout=0/'" + \
                " > %s" % (custom_grub)
            try:
                _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
            except Exception as _err:
                raise AICreateClientFail(
                    _("Failed to create custom grub menu for %s.") % (mac))

            # Install the customized menu for this client.
            ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm set-client -e " + \
                mac + " -G %s" % (custom_grub)
            try:
                _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
            except Exception as _err:
                raise AICreateClientFail(
                    _("Failed to customize AI Client %s grub menu.") % (mac))

            # Temporary menu file is no longer needed on the AI server.
            self.delete_remote_file(custom_grub)

        # Refresh cached client list to include the new client.
        self._clients = self._get_all_client_names()
|
2234 |
|
2235 def delete_client(self, mac): |
|
2236 """Delete a specific client regardless of service association |
|
2237 |
|
2238 :param mac: MAC Address of client to remove |
|
2239 :returns: Nothing exception raised if deletion fails |
|
2240 """ |
|
2241 LOG.debug("AIService.delete_client():mac: %s" % (mac)) |
|
2242 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-client -e " + mac |
|
2243 try: |
|
2244 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2245 except Exception as _err: |
|
2246 raise AIDeleteClientFail(_("Failed to delete AI Client %s") % |
|
2247 (mac)) |
|
2248 |
|
2249 # update list of clients for this service |
|
2250 self._clients = self._get_all_client_names() |
|
2251 |
|
2252 def create_manifest(self, manifest_name, manifest_path, mac): |
|
2253 """Create a manifest associated with this service |
|
2254 |
|
2255 :param manifest_name: manifest_name to create |
|
2256 :param manifest_path: path to manifest file to use |
|
2257 :param mac: MAC address to add as criteria |
|
2258 :returns: Nothing exception raised if creation fails |
|
2259 """ |
|
2260 LOG.debug("AIService.create_manifest():manifest_name: " |
|
2261 "'%s', manifest_path: '%s', mac: '%s'" % |
|
2262 (manifest_name, manifest_path, mac)) |
|
2263 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-manifest -n " + \ |
|
2264 self.name + " -m " + manifest_name + " -f " + manifest_path + \ |
|
2265 " -c mac=" + mac |
|
2266 try: |
|
2267 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2268 except Exception as _err: |
|
2269 raise AICreateManifestFail(_("Failed to create AI Manifest %s.") % |
|
2270 (manifest_name)) |
|
2271 |
|
2272 # Update list of manifests for this service |
|
2273 self._manifests = self._get_manifest_names() |
|
2274 |
|
2275 def delete_manifest(self, manifest_name): |
|
2276 """Delete a specific manifest |
|
2277 |
|
2278 :param manifest_name: name of manifest to remove |
|
2279 :returns: Nothing exception raised if deletion fails |
|
2280 """ |
|
2281 LOG.debug("AIService.delete_manifest():manifest_name: %s" % |
|
2282 (manifest_name)) |
|
2283 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-manifest -m " + \ |
|
2284 manifest_name + " -n " + self.name |
|
2285 try: |
|
2286 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2287 except Exception as _err: |
|
2288 raise AIDeleteManifestFail(_("Failed to delete AI Manifest %s") % |
|
2289 (manifest_name)) |
|
2290 |
|
2291 # Update list of manifests for this service |
|
2292 self._manifests = self._get_manifest_names() |
|
2293 |
|
2294 def create_profile(self, profile_name, profile_path, mac, env): |
|
2295 """Create a profile associated with this service |
|
2296 |
|
2297 :param profile)_name: profile name to create |
|
2298 :param profile_path: path to profile file to use |
|
2299 :param mac: MAC address to add as criteria |
|
2300 :param env: Environment to apply profile to |
|
2301 :returns: Nothing exception raised if creation fails |
|
2302 """ |
|
2303 LOG.debug("AIService.create_profile():profile_name: " |
|
2304 "'%s', profile_path: '%s', mac: '%s'" % |
|
2305 (profile_name, profile_path, mac)) |
|
2306 |
|
2307 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-profile -n " + \ |
|
2308 self.name + " -p " + profile_name + " -f " + profile_path + \ |
|
2309 " -c mac=" + mac |
|
2310 |
|
2311 if env is not None: |
|
2312 ai_cmd = ai_cmd + " -e " + env |
|
2313 |
|
2314 try: |
|
2315 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2316 except Exception as _err: |
|
2317 raise AICreateProfileFail(_("Failed to create AI Profile %s.") % |
|
2318 (profile_name)) |
|
2319 |
|
2320 # Update list of profiles for this service |
|
2321 self._profiles = self._get_profile_names() |
|
2322 |
|
2323 def delete_profile(self, profile_name): |
|
2324 """Delete a specific profile |
|
2325 |
|
2326 :param profile_name: name of profile to remove |
|
2327 :returns: Nothing exception raised if deletion fails |
|
2328 """ |
|
2329 LOG.debug("AIService.delete_profile():profile_name: %s" % |
|
2330 (profile_name)) |
|
2331 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-profile -p " + \ |
|
2332 profile_name + " -n " + self.name |
|
2333 try: |
|
2334 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2335 except Exception as _err: |
|
2336 raise AIDeleteProfileFail(_("Failed to delete AI Profile %s") % |
|
2337 (profile_name)) |
|
2338 |
|
2339 # Update list of profiles for this service |
|
2340 self._profiles = self._get_profile_names() |
|
2341 |
|
2342 def copy_remote_file(self, local, remote): |
|
2343 """Using scp copy local file to remote location |
|
2344 |
|
2345 :param local: Local file path to copy |
|
2346 :param remote: Remote file path to copy to |
|
2347 :returns: Nothing, exception raised on failure |
|
2348 """ |
|
2349 LOG.debug("AIService.copy_remote_file():local: %s, remote: %s" % |
|
2350 (local, remote)) |
|
2351 try: |
|
2352 scp = SCPClient(self.ssh_obj.get_transport()) |
|
2353 scp.put(local, remote) |
|
2354 except Exception as err: |
|
2355 err_msg = _("Failed to copy file to remote server: %s") % err |
|
2356 raise SolarisIPMIError(msg=err_msg) |
|
2357 |
|
2358 def delete_remote_file(self, path): |
|
2359 """Remove remote file in AI Server |
|
2360 |
|
2361 :param path: Path of remote file to remove |
|
2362 :return: Nothing exception raised on failure |
|
2363 """ |
|
2364 LOG.debug("AIService.delete_remote_file():path: %s" % |
|
2365 (path)) |
|
2366 |
|
2367 ai_cmd = "/usr/bin/rm -f " + path |
|
2368 try: |
|
2369 _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2370 except Exception as err: |
|
2371 err_msg = _("Failed to delete remote file: %s") % err |
|
2372 raise SolarisIPMIError(msg=err_msg) |
|
2373 |
|
2374 def _get_image_path(self): |
|
2375 """Retrieve image_path for this service |
|
2376 |
|
2377 :returns: image_path property |
|
2378 """ |
|
2379 LOG.debug("AIService._get_image_path()") |
|
2380 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -vn " + self.name |
|
2381 stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd) |
|
2382 |
|
2383 for line in stdout.splitlines(): |
|
2384 words = line.split() |
|
2385 if len(words) > 2 and words[0] == "Image" and words[1] == "Path": |
|
2386 image_path = words[-1] |
|
2387 LOG.debug("AIService._get_image_path():image_path: %s" % (image_path)) |
|
2388 return image_path |
|
2389 |
|
2390 def _parse_client(self, list_out): |
|
2391 """Return service name and client from installadm list -e output |
|
2392 |
|
2393 :param list_out: stdout from installadm list -e |
|
2394 :returns: Service Name and MAC Address |
|
2395 """ |
|
2396 LOG.debug("AIService._parse_client():list_out: %s" % (list_out)) |
|
2397 lines = list_out.splitlines() |
|
2398 service_name = None |
|
2399 client_name = None |
|
2400 |
|
2401 if len(lines[2].split()[0]) > 0: |
|
2402 service_name = lines[2].split()[0] |
|
2403 |
|
2404 if len(lines[2].split()[1]) > 0: |
|
2405 client_name = lines[2].split()[1] |
|
2406 |
|
2407 LOG.debug("AIService._parse_client():service_name: %s" % |
|
2408 (service_name)) |
|
2409 LOG.debug("AIService._parse_client():client_name: %s" % (client_name)) |
|
2410 return service_name, client_name |
|
2411 |
|
2412 def _parse_service_name(self, list_out): |
|
2413 """Given installadm list -n output, parse out service name |
|
2414 |
|
2415 :param list_out: stdout from installadm list -n |
|
2416 :returns: Service Name |
|
2417 """ |
|
2418 LOG.debug("AIService._parse_service_name():list_out: %s" % (list_out)) |
|
2419 service_name = None |
|
2420 |
|
2421 lines = list_out.splitlines() |
|
2422 if len(lines[2].split()[0]) > 0: |
|
2423 service_name = lines[2].split()[0] |
|
2424 |
|
2425 LOG.debug("AIService._parse_service_name():service_name: %s" % |
|
2426 (service_name)) |
|
2427 return service_name |
|
2428 |
|
2429 def _get_ssh_connection(self): |
|
2430 """Returns an SSH client connected to a node. |
|
2431 |
|
2432 :returns: paramiko.SSHClient, an active ssh connection. |
|
2433 """ |
|
2434 LOG.debug("AIService._get_ssh_connection()") |
|
2435 return utils.ssh_connect(self._get_ssh_dict()) |
|
2436 |
|
2437 def _get_ssh_dict(self): |
|
2438 """Generate SSH Dictionary for SSH Connection via paramiko |
|
2439 |
|
2440 :returns: dictionary for paramiko connection |
|
2441 """ |
|
2442 LOG.debug("AIService._get_ssh_dict()") |
|
2443 if not CONF.ai.server or not CONF.ai.username: |
|
2444 raise exception.InvalidParameterValue(_( |
|
2445 "SSH server and username must be set.")) |
|
2446 |
|
2447 ssh_dict = { |
|
2448 'host': CONF.ai.server, |
|
2449 'username': CONF.ai.username, |
|
2450 'port': int(CONF.ai.port), |
|
2451 'timeout': int(CONF.ai.timeout) |
|
2452 } |
|
2453 |
|
2454 key_contents = key_filename = password = None |
|
2455 if CONF.ai.ssh_key_contents and CONF.ai.ssh_key_contents != "None": |
|
2456 key_contents = CONF.ai.ssh_key_contents |
|
2457 if CONF.ai.ssh_key_file and CONF.ai.ssh_key_file != "None": |
|
2458 key_filename = CONF.ai.ssh_key_file |
|
2459 if CONF.ai.password and CONF.ai.password != "None": |
|
2460 password = CONF.ai.password |
|
2461 |
|
2462 if len(filter(None, (key_filename, key_contents))) != 1: |
|
2463 raise exception.InvalidParameterValue(_( |
|
2464 "SSH requires one and only one of " |
|
2465 "ssh_key_file or ssh_key_contents to be set.")) |
|
2466 if password: |
|
2467 ssh_dict['password'] = password |
|
2468 |
|
2469 if key_contents: |
|
2470 ssh_dict['key_contents'] = key_contents |
|
2471 else: |
|
2472 if not os.path.isfile(key_filename): |
|
2473 raise exception.InvalidParameterValue(_( |
|
2474 "SSH key file %s not found.") % key_filename) |
|
2475 ssh_dict['key_filename'] = key_filename |
|
2476 LOG.debug("AIService._get_ssh_dict():ssh_dict: %s" % (ssh_dict)) |
|
2477 return ssh_dict |
|
2478 |
|
2479 def _get_manifest_names(self): |
|
2480 """Get a list of manifest names for this service |
|
2481 |
|
2482 :returns: list() of manifest names |
|
2483 """ |
|
2484 LOG.debug("AIService._get_manifest_names()") |
|
2485 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -mn " + self.name |
|
2486 stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd, |
|
2487 err_msg=_("Failed to retrieve manifests" |
|
2488 " for service %s") % (self.name)) |
|
2489 return self._parse_names(stdout) |
|
2490 |
|
2491 def _get_profile_names(self): |
|
2492 """Get a list of profile names for this service |
|
2493 |
|
2494 :returns: list() of profile names |
|
2495 """ |
|
2496 LOG.debug("AIService._get_profile_names()") |
|
2497 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -pn " + self.name |
|
2498 stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd, |
|
2499 err_msg=_("Failed to retrieve profiles for " |
|
2500 "service %s") % (self.name)) |
|
2501 return self._parse_names(stdout) |
|
2502 |
|
2503 def _get_all_client_names(self): |
|
2504 """Get a list of client names for this service |
|
2505 |
|
2506 :returns: list() of client/mac names |
|
2507 """ |
|
2508 LOG.debug("AIService._get_all_client_names()") |
|
2509 ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -c" |
|
2510 stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd, |
|
2511 err_msg=_("Failed to retrieve clients for " |
|
2512 "service %s") % (self.name)) |
|
2513 # Store client names all in lower case |
|
2514 return [client.lower() for client in self._parse_names(stdout)] |
|
2515 |
|
2516 def _parse_names(self, list_out): |
|
2517 """Parse client/manifest/profile names from installadm list output |
|
2518 |
|
2519 Note: when we convert to using RAD, parsing installadm CLI output |
|
2520 will not be required, as API will return a list of names. |
|
2521 |
|
2522 :param list_out: stdout from installadm list -c or -mn or -pn |
|
2523 :returns: a list of client/manifest/profile names |
|
2524 """ |
|
2525 LOG.debug("AIService._parse_names():list_out: %s" % |
|
2526 (list_out)) |
|
2527 names = [] |
|
2528 lines = list_out.splitlines() |
|
2529 |
|
2530 # Get index into string for client/manifest/profile names |
|
2531 # client/manifest/profile names are all in 2nd column of output |
|
2532 if len(lines) > 1: |
|
2533 col_start = lines[1].index(" --") |
|
2534 |
|
2535 for line in range(2, len(lines)): |
|
2536 names.append(lines[line][col_start:].split()[0]) |
|
2537 |
|
2538 LOG.debug("AIService._parse_names():names: %s" % (names)) |
|
2539 return names |
|
2540 |
|
2541 |
|
2542 # Custom Exceptions |
|
class AICreateServiceFail(exception.IronicException):
    """Raised when creation of an AI service fails."""
|
2546 |
|
2547 |
|
class AIDeleteServiceFail(exception.IronicException):
    """Raised when deletion of an AI service fails."""
|
2551 |
|
2552 |
|
class AICreateClientFail(exception.IronicException):
    """Raised when creation of an AI client fails."""
|
2556 |
|
2557 |
|
class AIDeleteClientFail(exception.IronicException):
    """Raised when deletion of an AI client fails."""
|
2561 |
|
2562 |
|
class AICreateManifestFail(exception.IronicException):
    """Raised when creation of an AI manifest fails."""
|
2566 |
|
2567 |
|
class AIDeleteManifestFail(exception.IronicException):
    """Raised when deletion of an AI manifest fails."""
|
2571 |
|
2572 |
|
class AICreateProfileFail(exception.IronicException):
    """Raised when creation of an AI profile fails."""
|
2576 |
|
2577 |
|
class AIDeleteProfileFail(exception.IronicException):
    """Raised when deletion of an AI profile fails."""
|
2581 |
|
2582 |
|
class SolarisIPMIError(exception.IronicException):
    """Generic Solaris IPMI driver exception.

    The message is supplied verbatim by the raiser via the 'msg' kwarg.
    """
    message = _("%(msg)s")