PSARC 2015/172 OpenStack Ironic (OpenStack Bare Metal Provisioning Service) s11-update
author Matt Keenan <matt.keenan@oracle.com>
Fri, 19 Jun 2015 09:35:02 +0100
branch s11-update
changeset 4508 d8924d870370
parent 4506 e5c1a87858fd
child 4509 3be1d3091fe2
PSARC 2015/172 OpenStack Ironic (OpenStack Bare Metal Provisioning Service)
PSARC 2015/070 pecan - Lightweight Python web-framework
PSARC 2015/071 PyCA Python Cryptography
PSARC 2015/170 OpenStack client for Ironic (Bare Metal Provisioning)
PSARC 2015/171 scp - python secure copy
PSARC 2015/196 singledispatch - Single-dispatch generic functions for Python
PSARC 2015/197 logutils - Set of handlers for standard Python logging library
PSARC 2015/198 Support for enumerations in Python 2.6 and 2.7
PSARC 2015/250 paramiko - SSHv2 protocol implementation in Python
20547142 Request to integrate Ironic into userland
17502639 The Python paramiko module should be added to Userland
20172780 The Python module scp should be added to Userland
20180376 The Python module ironicclient should be added to Userland
20182588 The Python module pecan should be added to Userland
20465525 The PyCA cryptography module should be added to Userland
20904396 The Python module singledispatch should be added to Userland
20904413 The Python module logutils should be added to Userland
20917993 The Python enum34 module should be added to Userland
components/openstack/common/openstack.p5m
components/openstack/ironic/Makefile
components/openstack/ironic/files/drivers/modules/solaris_ipmitool.py
components/openstack/ironic/files/drivers/solaris.py
components/openstack/ironic/files/ironic-api
components/openstack/ironic/files/ironic-api.xml
components/openstack/ironic/files/ironic-conductor
components/openstack/ironic/files/ironic-conductor.xml
components/openstack/ironic/files/ironic-db
components/openstack/ironic/files/ironic-db.xml
components/openstack/ironic/files/ironic-keystone-setup.sh
components/openstack/ironic/files/ironic-manifest.ksh
components/openstack/ironic/files/ironic.auth_attr
components/openstack/ironic/files/ironic.conf
components/openstack/ironic/files/ironic.exec_attr
components/openstack/ironic/files/ironic.prof_attr
components/openstack/ironic/files/ironic.user_attr
components/openstack/ironic/ironic.p5m
components/openstack/ironic/patches/01-requirements.patch
components/openstack/ironic/patches/02-driver-entry.patch
components/openstack/ironic/patches/03-boot-device.patch
components/python/cryptography/Makefile
components/python/cryptography/cryptography-PYVER.p5m
components/python/enum/Makefile
components/python/enum/enum-PYVER.p5m
components/python/ironicclient/Makefile
components/python/ironicclient/ironicclient-PYVER.p5m
components/python/ironicclient/patches/01-boot-device-wanboot.patch
components/python/logutils/Makefile
components/python/logutils/logutils-PYVER.p5m
components/python/paramiko/Makefile
components/python/paramiko/paramiko-PYVER.p5m
components/python/paramiko/patches/01-nopycrypto.patch
components/python/paramiko/patches/02-socket-timeout.patch
components/python/pecan/Makefile
components/python/pecan/pecan-PYVER.p5m
components/python/scp/Makefile
components/python/scp/scp-PYVER.p5m
components/python/singledispatch/Makefile
components/python/singledispatch/singledispatch-PYVER.p5m
components/python/singledispatch/singledispatch.license
--- a/components/openstack/common/openstack.p5m	Thu Jun 18 12:17:42 2015 -0700
+++ b/components/openstack/common/openstack.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -43,6 +43,7 @@
 depend type=group fmri=cloud/openstack/glance
 depend type=group fmri=cloud/openstack/heat
 depend type=group fmri=cloud/openstack/horizon
+depend type=group fmri=cloud/openstack/ironic
 depend type=group fmri=cloud/openstack/keystone
 depend type=group fmri=cloud/openstack/neutron
 depend type=group fmri=cloud/openstack/nova
@@ -54,6 +55,7 @@
 depend type=group fmri=library/python/cinderclient
 depend type=group fmri=library/python/glanceclient
 depend type=group fmri=library/python/heatclient
+depend type=group fmri=library/python/ironicclient
 depend type=group fmri=library/python/keystoneclient
 depend type=group fmri=library/python/neutronclient
 depend type=group fmri=library/python/novaclient
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,98 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		ironic
+COMPONENT_CODENAME=	juno
+COMPONENT_VERSION=	2014.2.1
+COMPONENT_BE_VERSION=	2014.2
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:9dae1e5e030741382e75f6da9ec34af1b8e8d50940d158fb14f4d427a01c0bff
+COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
+COMPONENT_PROJECT_URL=	http://www.openstack.org/
+COMPONENT_BUGDB=	service/ironic
+IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
+
+TPNO=			22203
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+# Since this is an app, and doesn't provide any public library interfaces, we
+# only need to deliver one version.  The manifest is parameterized, though.
+PYTHON_VERSIONS=	2.6
+
+PKG_MACROS +=		COMPONENT_BE_VERSION=$(COMPONENT_BE_VERSION)
+PKG_MACROS +=		PYVER=$(PYTHON_VERSIONS)
+PKG_MACROS +=		PYV=$(shell echo $(PYTHON_VERSIONS) | tr -d .)
+
+#
+# ironic-api and ironic-conductor depend on ironic-db so copy all of the
+# service manifests into the proto directory for pkgdepend(1) to find.
+#
+COMPONENT_POST_INSTALL_ACTION += \
+	($(MKDIR) $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
+	 $(CP) \
+	     files/ironic-api.xml \
+	     files/ironic-conductor.xml \
+	     files/ironic-db.xml \
+	     $(PROTO_DIR)/lib/svc/manifest/application/openstack/; \
+	 $(MKDIR) $(PROTO_DIR)$(PYTHON_LIB)/ironic/drivers/modules; \
+	 $(CP) \
+	     files/drivers/solaris.py \
+	     $(PROTO_DIR)$(PYTHON_LIB)/ironic/drivers/; \
+	 $(CP) \
+	     files/drivers/modules/solaris_ipmitool.py \
+	     $(PROTO_DIR)$(PYTHON_LIB)/ironic/drivers/modules/; \
+	 $(PYTHON) -m compileall $(PROTO_DIR)/$(PYTHON_VENDOR_PACKAGES))
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+#
+# Tests require:
+# hacking, coverage>=3.6, discover, fixtures, oslo.test, psycopg2,
+# python-ironicclient, python-subunit, testrepository, testtools,
+# sphinx, sphinxcontrib, oslo.sphinx
+# which have not yet been integrated.
+#
+test:		$(NO_TESTS)
+
+
+REQUIRED_PACKAGES += install/installadm
+REQUIRED_PACKAGES += shell/ksh93
+REQUIRED_PACKAGES += system/core-os
+REQUIRED_PACKAGES += system/file-system/uafs
+REQUIRED_PACKAGES += system/file-system/uvfs
+REQUIRED_PACKAGES += system/management/ipmitool
+REQUIRED_PACKAGES += web/curl
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/drivers/modules/solaris_ipmitool.py	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,2587 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2014 International Business Machines Corporation
+# All Rights Reserved.
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Solaris Driver and supporting meta-classes.
+"""
+
+import os
+import platform
+import re
+import select
+import shutil
+import socket
+from subprocess import Popen, PIPE
+import tempfile
+from threading import Thread
+import time
+import urllib2
+from urlparse import urlparse
+
+from lockfile import LockFile, LockTimeout
+from oslo.config import cfg
+from scp import SCPClient
+
+from ironic.common import boot_devices, exception, images, keystone, states, \
+    utils
+from ironic.common.i18n import _, _LW
+from ironic.conductor import task_manager
+from ironic.conductor import utils as manager_utils
+from ironic.db import api as dbapi
+from ironic.drivers import base
+from ironic.drivers.modules import ipmitool
+from ironic.drivers import utils as driver_utils
+from ironic.openstack.common import log as logging
+from ironic.openstack.common import loopingcall, processutils
+
+PLATFORM = platform.system()
+if PLATFORM != "SunOS":
+    import tarfile
+else:
+    from pkg.fmri import is_valid_pkg_name
+    from pkg.misc import valid_pub_prefix, valid_pub_url
+
+
+AI_OPTS = [
+    cfg.StrOpt('server',
+               default='None',
+               help='Host name for AI Server.'),
+    cfg.StrOpt('username',
+               default='None',
+               help='Username to ssh to AI Server.'),
+    cfg.StrOpt('password',
+               default='None',
+               help='Password for user to ssh to AI Server.'),
+    cfg.StrOpt('port',
+               default='22',
+               help='SSH port to use.'),
+    cfg.StrOpt('timeout',
+               default='10',
+               help='SSH socket timeout value in seconds.'),
+    cfg.StrOpt('deploy_interval',
+               default='10',
+               help='Interval in seconds to check AI deployment status.'),
+    cfg.StrOpt('derived_manifest',
+               default='file:///usr/lib/ironic/ironic-manifest.ksh',
+               help='Derived Manifest used for deployment.'),
+    cfg.StrOpt('ssh_key_file',
+               default='None',
+               help='SSH Filename to use.'),
+    cfg.StrOpt('ssh_key_contents',
+               default='None',
+               help='Actual SSH Key contents to use.')
+    ]
+
+AUTH_OPTS = [
+    cfg.StrOpt('auth_strategy',
+               default='keystone',
+               help='Method to use for authentication: noauth or keystone.')
+    ]
+
+SOLARIS_IPMI_OPTS = [
+    cfg.StrOpt('imagecache_dirname',
+               default='/var/lib/ironic/images',
+               help='Default path to image cache.'),
+    cfg.StrOpt('imagecache_lock_timeout',
+               default='60',
+               help='Timeout to wait when attempting to lock refcount file.')
+]
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+OPT_GROUP = cfg.OptGroup(name='ai',
+                         title='Options for the Automated Install driver')
+CONF.register_group(OPT_GROUP)
+CONF.register_opts(AI_OPTS, OPT_GROUP)
+CONF.register_opts(AUTH_OPTS)
+SOLARIS_IPMI_GROUP = cfg.OptGroup(
+    name="solaris_ipmi",
+    title="Options defined in ironic.drivers.modules.solaris_ipmi")
+CONF.register_group(SOLARIS_IPMI_GROUP)
+CONF.register_opts(SOLARIS_IPMI_OPTS, SOLARIS_IPMI_GROUP)
+
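+# The registrations above correspond to an ironic.conf layout along
+# these lines (an illustrative sketch only; the host and values shown
+# are hypothetical, not part of this change):
+#
+#   [ai]
+#   server = ai-server.example.com
+#   username = aiuser
+#   ssh_key_file = /var/lib/ironic/ai_key
+#   derived_manifest = file:///usr/lib/ironic/ironic-manifest.ksh
+#
+#   [solaris_ipmi]
+#   imagecache_dirname = /var/lib/ironic/images
+#   imagecache_lock_timeout = 60
+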
+VALID_ARCH = ['x86', 'SPARC']
+VALID_ARCHIVE_SCHEMES = ["file", "http", "https", "glance"]
+VALID_URI_SCHEMES = VALID_ARCHIVE_SCHEMES
+DEFAULT_ARCHIVE_IMAGE_PATH = 'auto_install/manifest/default_archive.xml'
+AI_STRING = "Automated Installation"
+AI_SUCCESS_STRING = AI_STRING + " succeeded"
+AI_FAILURE_STRING = AI_STRING + " failed"
+AI_DEPLOY_STRING = AI_STRING + " started"
+
+REQUIRED_PROPERTIES = {
+    'ipmi_address': _("IP address or hostname of the node. Required."),
+    'ipmi_username': _("username to use for IPMI connection. Required."),
+    'ipmi_password': _("password to use for IPMI connection. Required.")
+}
+
+OPTIONAL_PROPERTIES = {
+    'ai_manifest': _("Automated install manifest to be used for provisioning. "
+                     "Optional."),
+    'ai_service': _("Automated Install service name to use. Optional."),
+    'archive_uri': _("URI of archive to deploy. Optional."),
+    'fmri': _("List of IPS package FMRIs to be installed. "
+              "Required if publishers property is set."),
+    'install_profiles': _("List of configuration profiles to be applied "
+                          "to the installation environment during an install. "
+                          "Optional."),
+    'ipmi_bridging': _("bridging_type; default is \"no\". One of \"single\", "
+                       "\"dual\", \"no\". Optional."),
+    'ipmi_local_address': _("local IPMB address for bridged requests. "
+                            "Used only if ipmi_bridging is set "
+                            "to \"single\" or \"dual\". Optional."),
+    'ipmi_priv_level':
+        _("privilege level; default is ADMINISTRATOR. "
+          "One of %s. Optional.") % '. '.join(ipmitool.VALID_PRIV_LEVELS),
+    'ipmi_target_address': _("destination address for bridged request. "
+                             "Required only if ipmi_bridging is set "
+                             "to \"single\" or \"dual\"."),
+    'ipmi_target_channel': _("destination channel for bridged request. "
+                             "Required only if ipmi_bridging is set to "
+                             "\"single\" or \"dual\"."),
+    'ipmi_transit_address': _("transit address for bridged request. Required "
+                              "only if ipmi_bridging is set to \"dual\"."),
+    'ipmi_transit_channel': _("transit channel for bridged request. Required "
+                              "only if ipmi_bridging is set to \"dual\"."),
+    'publishers': _("List of IPS publishers to install from, in the format "
+                    "name@origin. Required if fmri property is set."),
+    'sc_profiles': _("List of system configuration profiles to be applied "
+                     "to an installed system. Optional.")
+}
+
+COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
+COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
+
+LAST_CMD_TIME = {}
+TIMING_SUPPORT = None
+SINGLE_BRIDGE_SUPPORT = None
+DUAL_BRIDGE_SUPPORT = None
+
+
+def _ssh_execute(ssh_obj, ssh_cmd, raise_exception=True, err_msg=None):
+    """Execute a command via SSH.
+
+    :param ssh_obj: paramiko.SSHClient, an active ssh connection
+    :param ssh_cmd: Command to execute over SSH.
+    :param raise_exception: Whether to raise an exception or not
+    :param err_msg: Custom error message to use
+    :returns: tuple [stdout from command, returncode]
+    :raises: SSHCommandFailed on an error from ssh, if specified to raise.
+    """
+    LOG.debug("_ssh_execute():ssh_cmd: %s" % (ssh_cmd))
+
+    returncode = 0
+    try:
+        stdout = processutils.ssh_execute(ssh_obj, ssh_cmd)[0]
+    except Exception as err:
+        LOG.debug(_("Cannot execute SSH cmd %(cmd)s. Reason: %(err)s.") %
+                  {'cmd': ssh_cmd, 'err': err})
+        returncode = 1
+        if raise_exception:
+            if err_msg:
+                raise SolarisIPMIError(msg=err_msg)
+            else:
+                raise exception.SSHCommandFailed(cmd=ssh_cmd)
+
+    return stdout, returncode
+
+
+def _parse_driver_info(node):
+    """Gets the parameters required for ipmitool to access the node.
+
+    Copied from ironic/drivers/modules/ipmitool.py. No differences.
+    Copied locally as REQUIRED_PROPERTIES differs from standard ipmitool.
+
+    :param node: the Node of interest.
+    :returns: dictionary of parameters.
+    :raises: InvalidParameterValue when an invalid value is specified
+    :raises: MissingParameterValue when a required ipmi parameter is missing.
+
+    """
+    LOG.debug("_parse_driver_info()")
+    info = node.driver_info or {}
+    bridging_types = ['single', 'dual']
+    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
+    if missing_info:
+        raise exception.MissingParameterValue(
+            _("The following IPMI credentials are not supplied"
+              " to IPMI driver: %s.") % missing_info)
+
+    address = info.get('ipmi_address')
+    username = info.get('ipmi_username')
+    password = info.get('ipmi_password')
+    port = info.get('ipmi_terminal_port')
+    priv_level = info.get('ipmi_priv_level', 'ADMINISTRATOR')
+    bridging_type = info.get('ipmi_bridging', 'no')
+    local_address = info.get('ipmi_local_address')
+    transit_channel = info.get('ipmi_transit_channel')
+    transit_address = info.get('ipmi_transit_address')
+    target_channel = info.get('ipmi_target_channel')
+    target_address = info.get('ipmi_target_address')
+
+    if port:
+        try:
+            port = int(port)
+        except ValueError:
+            raise exception.InvalidParameterValue(_(
+                "IPMI terminal port is not an integer."))
+
+    # check if ipmi_bridging has proper value
+    if bridging_type == 'no':
+        # if bridging is not selected, then set all bridging params to None
+        local_address = transit_channel = transit_address = \
+            target_channel = target_address = None
+    elif bridging_type in bridging_types:
+        # check if the particular bridging option is supported on host
+        if not ipmitool._is_option_supported('%s_bridge' % bridging_type):
+            raise exception.InvalidParameterValue(_(
+                "Value for ipmi_bridging is provided as %s, but IPMI "
+                "bridging is not supported by the IPMI utility installed "
+                "on host. Ensure ipmitool version is > 1.8.11"
+            ) % bridging_type)
+
+        # ensure that all the required parameters are provided
+        params_undefined = [param for param, value in [
+            ("ipmi_target_channel", target_channel),
+            ('ipmi_target_address', target_address)] if value is None]
+        if bridging_type == 'dual':
+            params_undefined2 = [param for param, value in [
+                ("ipmi_transit_channel", transit_channel),
+                ('ipmi_transit_address', transit_address)
+            ] if value is None]
+            params_undefined.extend(params_undefined2)
+        else:
+            # if single bridging was selected, set dual bridge params to None
+            transit_channel = transit_address = None
+
+        # If the required parameters were not provided,
+        # raise an exception
+        if params_undefined:
+            raise exception.MissingParameterValue(_(
+                "%(param)s not provided") % {'param': params_undefined})
+    else:
+        raise exception.InvalidParameterValue(_(
+            "Invalid value for ipmi_bridging: %(bridging_type)s,"
+            " the valid value can be one of: %(bridging_types)s"
+        ) % {'bridging_type': bridging_type,
+             'bridging_types': bridging_types + ['no']})
+
+    if priv_level not in ipmitool.VALID_PRIV_LEVELS:
+        valid_priv_lvls = ', '.join(ipmitool.VALID_PRIV_LEVELS)
+        raise exception.InvalidParameterValue(_(
+            "Invalid privilege level value:%(priv_level)s, the valid value"
+            " can be one of %(valid_levels)s") %
+            {'priv_level': priv_level, 'valid_levels': valid_priv_lvls})
+
+    return {
+        'address': address,
+        'username': username,
+        'password': password,
+        'port': port,
+        'uuid': node.uuid,
+        'priv_level': priv_level,
+        'local_address': local_address,
+        'transit_channel': transit_channel,
+        'transit_address': transit_address,
+        'target_channel': target_channel,
+        'target_address': target_address
+        }
+
+
+def _exec_ipmitool(driver_info, command):
+    """Execute the ipmitool command.
+
+    This uses the lanplus interface to communicate with the BMC device driver.
+
+    Copied from ironic/drivers/modules/ipmitool.py with one difference:
+    the ipmitool.py version expects a string of space-separated arguments
+    and splits it into a list using space as the delimiter, which causes
+    setting the bootmode script for SPARC network boot to fail. The
+    Solaris version takes a list() as the command parameter, so no
+    splitting is needed.
+
+    :param driver_info: the ipmitool parameters for accessing a node.
+    :param command: list() : the ipmitool command to be executed.
+    :returns: (stdout, stderr) from executing the command.
+    :raises: PasswordFileFailedToCreate from creating or writing to the
+             temporary file.
+    :raises: processutils.ProcessExecutionError from executing the command.
+
+    """
+    LOG.debug("SolarisDeploy._exec_ipmitool:driver_info: '%s', "
+              "command: '%s'" % (driver_info, command))
+    args = ['/usr/sbin/ipmitool',
+            '-I',
+            'lanplus',
+            '-H',
+            driver_info['address'],
+            '-L', driver_info.get('priv_level')
+            ]
+
+    if driver_info['username']:
+        args.append('-U')
+        args.append(driver_info['username'])
+
+    for name, option in ipmitool.BRIDGING_OPTIONS:
+        if driver_info[name] is not None:
+            args.append(option)
+            args.append(driver_info[name])
+
+    # specify retry timing more precisely, if supported
+    if ipmitool._is_option_supported('timing'):
+        num_tries = max(
+            (CONF.ipmi.retry_timeout // CONF.ipmi.min_command_interval), 1)
+        args.append('-R')
+        args.append(str(num_tries))
+
+        args.append('-N')
+        args.append(str(CONF.ipmi.min_command_interval))
+
+    # 'ipmitool' command will prompt password if there is no '-f' option,
+    # we set it to '\0' to write a password file to support empty password
+    with ipmitool._make_password_file(driver_info['password'] or '\0') \
+            as pw_file:
+        args.append('-f')
+        args.append(pw_file)
+        args = args + list(command)  # Append as a list; don't split(" ")
+
+        # NOTE(deva): ensure that no communications are sent to a BMC more
+        #             often than once every min_command_interval seconds.
+        time_till_next_poll = CONF.ipmi.min_command_interval - (
+            time.time() - LAST_CMD_TIME.get(driver_info['address'], 0))
+        if time_till_next_poll > 0:
+            time.sleep(time_till_next_poll)
+        try:
+            out, err = utils.execute(*args)
+        finally:
+            LAST_CMD_TIME[driver_info['address']] = time.time()
+        return out, err
+
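+# A minimal usage sketch for _exec_ipmitool (illustrative only, not part
+# of the driver); note the command is passed as a list, per the docstring:
+#
+#   driver_info = _parse_driver_info(task.node)
+#   out, err = _exec_ipmitool(driver_info,
+#                             ['chassis', 'bootdev', 'disk'])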
+
+def _get_node_architecture(node):
+    """Queries the node for architecture type
+
+    :param node: the Node of interest.
+    :returns: 'SPARC' or 'x86' depending on the architecture discovered
+    :raises: IPMIFailure if ipmitool command fails
+    """
+    LOG.debug("SolarisDeploy._get_node_architecture")
+    ipmi_cmd_args = ['sunoem', 'getval', '/System/Processors/architecture']
+    driver_info = _parse_driver_info(node)
+    try:
+        out, _err = _exec_ipmitool(driver_info, ipmi_cmd_args)
+    except Exception:
+        raise exception.IPMIFailure(cmd=ipmi_cmd_args)
+
+    LOG.debug("SolarisDeploy._get_node_architecture: arch: '%s'" % (out))
+
+    if 'SPARC' in out:
+        return 'SPARC'
+    elif 'x86' in out:
+        return 'x86'
+    else:
+        raise SolarisIPMIError(msg="Unknown node architecture: %s" % (out))
+
+
+def _check_deploy_state(task, node_uuid, deploy_thread):
+    """ Check deployment state of a running install
+
+    Check the deployment status for this node. Ideally this will be
+    achieved by communicating with the AI Server and querying the
+    telemetry data returned by the AI Client install to the AI Server.
+
+    However, until that is integrated, we need to maintain a connection
+    with the serial console of the node being installed and parse the
+    output made to the console during an install.
+
+    :param task: a TaskManager instance.
+    :param node_uuid: UUID of the node being deployed.
+    :param deploy_thread: Thread monitoring the deployment status.
+    :returns: Nothing, raises loopingcall.LoopingCallDone() once
+        node deployment status is determined as done or failed.
+    """
+    LOG.debug("_check_deploy_state()")
+    LOG.debug("_check_deploy_state() deploy_thread_state: %s" %
+              (deploy_thread.state))
+
+    # Get DB instance
+    mydbapi = dbapi.get_instance()
+    try:
+        # Get current DB copy of node
+        cur_node = mydbapi.get_node_by_uuid(node_uuid)
+    except exception.NodeNotFound:
+        LOG.info(_("During check_deploy_state, node %(node)s was not "
+                   "found and presumed deleted by another process.") %
+                 {'node': node_uuid})
+        # Thread should have stopped already, but let's make sure.
+        deploy_thread.stop()
+        if deploy_thread.state in [states.DEPLOYING, states.DEPLOYWAIT]:
+            # Update node with done/fail state
+            if task.node:
+                task.node.provision_state = states.DEPLOYFAIL
+                task.node.last_error = "Failed to find node."
+                task.node.target_provision_state = states.NOSTATE
+                task.node.save()
+        raise loopingcall.LoopingCallDone()
+    except Exception as err:
+        LOG.info(_("During check_deploy_state, node %(node)s could "
+                   "not be retrieved: %(err)") %
+                 {'node': node_uuid, 'err': err})
+        # Thread should have stopped already, but let's make sure.
+        deploy_thread.stop()
+        if deploy_thread.state in [states.DEPLOYING, states.DEPLOYWAIT]:
+            # Update node with done/fail state
+            if task.node:
+                task.node.last_error = "Failed to find node."
+                task.node.provision_state = states.DEPLOYFAIL
+                task.node.target_provision_state = states.NOSTATE
+                task.node.save()
+        raise loopingcall.LoopingCallDone()
+
+    LOG.debug("_check_deploy_state().cur_node.target_provision_state: %s" %
+              (cur_node.target_provision_state))
+
+    if deploy_thread.state not in [states.DEPLOYING, states.DEPLOYWAIT]:
+        LOG.debug("_check_deploy_state().done: %s" % (deploy_thread.state))
+        # Node has completed deployment, success or failure
+
+        # Thread should have stopped already, but let's make sure.
+        deploy_thread.stop()
+
+        # Update node with done/fail state
+        if deploy_thread.state == states.DEPLOYDONE:
+            cur_node.provision_state = states.ACTIVE
+        elif deploy_thread.state == states.DEPLOYFAIL:
+            cur_node.last_error = "Install failed; check install.log for " + \
+                                  "more details."
+            cur_node.provision_state = deploy_thread.state
+        else:
+            cur_node.provision_state = deploy_thread.state
+        cur_node.target_provision_state = states.NOSTATE
+        cur_node.save()
+
+        # Raise LoopingCallDone to terminate deployment checking.
+        raise loopingcall.LoopingCallDone()
+
+    elif deploy_thread.state == states.DEPLOYING and \
+            cur_node.provision_state != states.DEPLOYING:
+        # Actual node deployment has initiated
+        LOG.debug("_check_deploy_state().deploying: %s" %
+                  (deploy_thread.state))
+        cur_node.provision_state = states.DEPLOYING
+        cur_node.save()
+
+    elif cur_node.target_provision_state == states.NOSTATE:
+        # Node was most likely deleted so end deployment completion checking
+        LOG.debug("_check_deploy_state().deleted: %s" %
+                  (cur_node.target_provision_state))
+        deploy_thread.stop()
+        raise loopingcall.LoopingCallDone()
+
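+# _check_deploy_state is intended to be invoked periodically; a minimal
+# wiring sketch (assuming the oslo FixedIntervalLoopingCall API imported
+# above; the interval value is hypothetical):
+#
+#   checker = DeployStateChecker(task)
+#   checker.start()
+#   timer = loopingcall.FixedIntervalLoopingCall(
+#       _check_deploy_state, task, task.node.uuid, checker)
+#   timer.start(interval=int(CONF.ai.deploy_interval))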
+
+def _url_exists(url):
+    """Validate specific exists
+
+    :param url: HTTP url
+    :returns: boolean, True of exists, otherwise False
+    """
+    LOG.debug("_url_exists: url: %s" % (url.strip()))
+    try:
+        _open_url = urllib2.urlopen(urllib2.Request(url))
+        return True
+    except Exception as err:
+        LOG.debug(_("URL %s not reachable: %s") % (url, err))
+        return False
+
+
+def _image_refcount_acquire_lock(image_path):
+    """Acquire a lock on reference count image file
+
+    :param image_path: Path to image file
+    :returns: Acquired LockFile lock
+    """
+    LOG.debug("_image_refcount_acquire_lock: image_path: %s" % (image_path))
+    ref_filename = image_path + ".ref"
+    lock = LockFile(ref_filename)
+    while not lock.i_am_locking():
+        try:
+            if os.path.exists(image_path):
+                image_size_1 = os.path.getsize(image_path)
+            else:
+                image_size_1 = 0
+            lock.acquire(
+                timeout=int(CONF.solaris_ipmi.imagecache_lock_timeout))
+        except LockTimeout:
+            # Check if image_path size has changed, due to still downloading
+            if os.path.exists(image_path):
+                image_size_2 = os.path.getsize(image_path)
+            else:
+                image_size_2 = 0
+
+            if image_size_1 != image_size_2:
+                LOG.debug("_image_refcount_acquire_lock: Image downloading...")
+                continue
+            else:
+                # Assume lock is an old one; force its removal
+                LOG.debug("_image_refcount_acquire_lock: Breaking stale lock.")
+                lock.break_lock()
+                lock.acquire()
+
+    return lock
+
+
+def _image_refcount_adjust(image_path, count, release=True):
+    """Adjust cached image file reference counter
+
+    :param image_path: Path to image file
+    :param count: Integer count value to adjust reference by
+    :param release: Release the acquired lock or return it.
+    :returns: Acquired lock
+    """
+    LOG.debug("_image_refcount_adjust: image_path: %s, "
+              "count: %s" % (image_path, str(count)))
+
+    if count == 0:
+        # Adjusting by zero makes no sense; treat it as an error
+        err_msg = _("Zero reference count adjustment attempted "
+                    "on file: %s") % (image_path)
+        LOG.error(err_msg)
+        raise SolarisIPMIError(msg=err_msg)
+
+    ref_filename = image_path + ".ref"
+
+    if not os.path.exists(ref_filename):
+        if count < 0:
+            # Cannot decrement reference on non-existent file
+            err_msg = _("Negative reference count adjustment attempted on "
+                        "non-existent file: %s") % (image_path)
+            LOG.error(err_msg)
+            raise SolarisIPMIError(msg=err_msg)
+
+        # Create reference count file
+        with open(ref_filename, "w") as fp:
+            fp.write("0")
+
+    # Acquire lock on refcount file
+    lock = _image_refcount_acquire_lock(image_path)
+    if lock is None:
+        err_msg = _("Failed to acquire lock on image: %s") % (image_path)
+        LOG.error(err_msg)
+        raise SolarisIPMIError(msg=err_msg)
+
+    with open(ref_filename, "r+") as fp:
+        ref_count = fp.readline()
+        if len(ref_count) == 0:
+            ref_count = 1
+        ref_count = str(int(ref_count) + count)
+
+        # Check if reference count is zero if so remove
+        # refcount file and image file
+        if int(ref_count) <= 0:
+            lock.release()
+            os.remove(ref_filename)
+            os.remove(image_path)
+        else:
+            fp.seek(0)
+            fp.write(ref_count)
+            if release:
+                lock.release()
+    return lock
+
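+# The two helpers above implement a simple reference count protocol for
+# cached images: an "<image>.ref" file holds the count and a LockFile
+# serialises updates. A typical consumer pattern (illustrative only;
+# fetch_into is a hypothetical download step):
+#
+#   lock = _image_refcount_adjust(image_path, 1, release=False)
+#   try:
+#       fetch_into(image_path)
+#   finally:
+#       lock.release()
+#   ...
+#   _image_refcount_adjust(image_path, -1)   # refcount file and image
+#                                            # removed at zero references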
+
+def _fetch_uri(task, uri):
+    """Retrieve the specified URI to local temporary file
+
+    Removal of locally fetched file is the responsibility of the
+    caller.
+
+    :param task: a TaskManager instance
+    :param uri: URI of file to fetch.
+    """
+    LOG.debug("SolarisDeploy._fetch_uri:uri: '%s'" % (uri))
+    url = urlparse(uri)
+
+    try:
+        if url.scheme == "glance":
+            temp_uri = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                    url.netloc)
+
+            # Check if image already in cache, retrieve if not
+            if not os.path.isfile(temp_uri):
+                try:
+                    # Increment reference, creates refcount file and returns
+                    # the acquired lock.
+                    lock = _image_refcount_adjust(temp_uri, 1, release=False)
+
+                    # Fetch URI from Glance into local file.
+                    images.fetch(task.context, url.netloc, temp_uri)
+
+                    # Release acquired lock now that file is retrieved
+                    lock.release()
+
+                except Exception as err:
+                    LOG.error(_("Unable to fetch Glance image: id %s: %s")
+                              % (url.netloc, err))
+                    raise
+            else:
+                # Increase reference count for this image
+                _image_refcount_adjust(temp_uri, 1)
+
+        else:   # http/file scheme handled directly by curl
+            if PLATFORM == "SunOS":
+                _fd, temp_uri = tempfile.mkstemp(
+                    dir=CONF.solaris_ipmi.imagecache_dirname)
+                cmd = ["/usr/bin/curl", "-sS", "-o", temp_uri, uri]
+                pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+                _stdout, err = pc.communicate()
+                if pc.returncode != 0:
+                    err_msg = _("Failed to retrieve image: %s") % err
+                    raise SolarisIPMIError(msg=err_msg)
+            else:  # Linux compat
+                temp_uri = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                        url.path.replace("/", ""))
+                if not os.path.isfile(temp_uri):
+                    try:
+                        # Increment reference, creates refcount file and
+                        # returns the acquired lock.
+                        lock = _image_refcount_adjust(temp_uri, 1,
+                                                      release=False)
+
+                        # Actually fetch the image
+                        cmd = ["/usr/bin/curl", "-sS", "-o", temp_uri, uri]
+                        pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+                        _stdout, err = pc.communicate()
+                        if pc.returncode != 0:
+                            err_msg = _("Failed to retrieve image: %s") % err
+                            raise SolarisIPMIError(msg=err_msg)
+
+                        # Release acquired lock now that file is retrieved
+                        lock.release()
+
+                    except Exception as err:
+                        LOG.error(_("Unable to fetch image: id %s: %s")
+                                  % (url.netloc, err))
+                        raise
+                else:
+                    # Increase reference count for this image
+                    _image_refcount_adjust(temp_uri, 1)
+    except Exception as err:
+        # Only remove the temporary file if exception occurs
+        # as noted above Caller is responsible for its removal
+        LOG.error(_("Unable to fetch image: uri %s: %s") % (uri, err))
+        if url.scheme == "glance":
+            _image_refcount_adjust(temp_uri, -1)
+        else:
+            os.remove(temp_uri)
+        raise
+
+    return temp_uri
+
+
+def _get_archive_iso_and_uuid(mount_dir, extract_iso=False):
+    """Get ISO name and UUID
+
+    Retrieved from mounted archive if on Solaris
+
+    On non-Solaris systems we cannot mount a UAR so we need to parse the
+    contents of the unified archive and extract ISO and UUID from
+    cached UAR. In this scenario the caller is responsible for removing
+    the extracted file.
+
+    :param mount_dir: Location of locally mounted UAR or locally cached UAR
+    :param extract_iso: Whether to extract ISO file to temp file
+    :returns: Extracted ISO location and UUID
+    """
+    LOG.debug("SolarisDeploy._get_archive_iso_and_uuid:mount_dir: '%s'" %
+              (mount_dir))
+    uuid = None
+    iso = None
+
+    if PLATFORM == "SunOS":
+        ovf_dir = os.path.join(mount_dir, "OVF")
+
+        for uar_file in os.listdir(ovf_dir):
+            if uar_file.endswith('.ovf'):
+                uuid = uar_file.split('.ovf')[0]
+            elif uar_file.endswith('.iso'):
+                iso = os.path.join(ovf_dir, uar_file)
+    else:
+        tf = tarfile.open(name=mount_dir)
+
+        for ti in tf.getmembers():
+            if ti.path.endswith('.ovf'):
+                uuid = ti.path.split('.ovf')[0]
+            elif ti.path.endswith('.iso') and extract_iso:
+                try:
+                    temp_tar_dir = tempfile.mkdtemp(
+                        dir=CONF.solaris_ipmi.imagecache_dirname)
+                    tf.extractall(path=temp_tar_dir, members=[ti])
+                    iso = os.path.join(temp_tar_dir, ti.path)
+                except:
+                    # Remove temp_tar_dir and contents
+                    shutil.rmtree(temp_tar_dir)
+                    raise
+
+    return iso, uuid
+
+
+def _mount_archive(task, archive_uri):
+    """Mount a unified archive
+
+    :param task: a TaskManager instance
+    :param archive_uri: URI of unified archive to mount
+    :returns: Path to mounted unified archive, and path to the local
+        temporary UAR (None if mounted directly)
+    """
+    LOG.debug("SolarisDeploy._mount_archive:archive_uri: '%s'" %
+              (archive_uri))
+
+    if urlparse(archive_uri).scheme == "glance":
+        # TODO(mattk):
+        # Ideally mounting the http ISO directly is preferred.
+        # However mount(1M), does not support auth_token
+        # thus we must fetch the image locally and then mount the
+        # local image.
+        # Tried putting a proxy in place to intercept the mount(1M)
+        # http request and adding an auth_token as it proceeds.
+        # However mount(1M) launches a new SMF instance for each HTTP
+        # mount request, and each SMF instance has a minimal environment
+        # set, which does not include http_proxy, thus the custom local
+        # proxy never gets invoked.
+        # Would love to have a new mount(1M) option to accept either
+        # a proxy e.g. -o proxy=<proxy> or to accept setting of http headers
+        # e.g. -o http_header="X-Auth-Token: askdalksjdlakjsd"
+
+        # Retrieve UAR to local temp file for mounting
+        temp_uar = _fetch_uri(task, archive_uri)
+        archive_mount = temp_uar
+    else:
+        # Can mount archive directly
+        temp_uar = None
+        archive_mount = archive_uri
+
+    mount_dir = tempfile.mkdtemp(dir=CONF.solaris_ipmi.imagecache_dirname)
+
+    cmd = ["/usr/sbin/mount", "-F", "uvfs", "-o",
+           "archive=%s" % (archive_mount), "/usr/lib/fs/uafs/uafs", mount_dir]
+    LOG.debug("SolarisDeploy._mount_archive:cmd: '%s'" % (cmd))
+    pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    _stdout, err = pc.communicate()
+    if pc.returncode != 0:
+        err_msg = _("Failed to mount UAR %s: %s") % (archive_uri, err)
+        shutil.rmtree(mount_dir)
+        raise SolarisIPMIError(msg=err_msg)
+
+    return mount_dir, temp_uar
+
+
+def _umount_archive(mount_dir, temp_uar):
+    """ Unmount archive and remove mount point directory
+
+    :param mount_dir: Path to mounted archive
+    :param temp_uar: Path to glance local uar to remove
+    """
+    LOG.debug("SolarisDeploy._umount_archive:mount_dir: '%s', temp_uar: %s" %
+              (mount_dir, temp_uar))
+
+    cmd = ["/usr/sbin/umount", mount_dir]
+    pc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    _stdout, err = pc.communicate()
+    if pc.returncode != 0:
+        err_msg = _("Failed to unmount UAR %s: %s") % (mount_dir, err)
+        raise SolarisIPMIError(msg=err_msg)
+
+    shutil.rmtree(mount_dir)
+
+
+def _get_archive_uuid(task):
+    """Get the UUID of an archive
+
+    :param task: a TaskManager instance
+    :returns: UUID string for an archive otherwise raise exception
+    """
+    LOG.debug("SolarisDeploy._get_archive_uuid")
+    uuid = None
+    archive_uri = task.node.driver_info['archive_uri']
+
+    if PLATFORM == "SunOS":
+        mount_dir, temp_uar = _mount_archive(task, archive_uri)
+        try:
+            _iso, uuid = _get_archive_iso_and_uuid(mount_dir)
+        except:
+            _umount_archive(mount_dir, temp_uar)
+            raise
+        _umount_archive(mount_dir, temp_uar)
+    else:
+        temp_uar = _fetch_uri(task, archive_uri)
+        try:
+            _iso, uuid = _get_archive_iso_and_uuid(temp_uar)
+        except:
+            _image_refcount_adjust(temp_uar, -1)
+            raise
+
+    if uuid is None:
+        err_msg = _("Failed to extract UUID from UAR: %s") % archive_uri
+        if PLATFORM != "SunOS":
+            _image_refcount_adjust(temp_uar, -1)
+        raise SolarisIPMIError(msg=err_msg)
+
+    LOG.debug("SolarisDeploy._get_archive_uuid: uuid: %s" % (uuid))
+    return uuid
+
+
+def _validate_archive_uri(task):
+    """Validate archive_uri for reachable, format, etc
+
+    :param task: a TaskManager instance.
+    :raises: InvalidParameterValie if invalid archive_uri
+    """
+    LOG.debug("SolarisDeploy._validate_archive_uri")
+    archive_uri = task.node.driver_info['archive_uri']
+
+    url = urlparse(archive_uri)
+
+    if url.scheme not in VALID_ARCHIVE_SCHEMES:
+        raise exception.InvalidParameterValue(_(
+            "Unsupported archive scheme (%s) referenced in archive_uri (%s).")
+            % (url.scheme, archive_uri))
+
+    if not url.netloc and not url.path:
+        raise exception.InvalidParameterValue(_(
+            "Missing archive name in archive_uri (%s).") % (archive_uri))
+
+    if url.scheme == "glance":
+        # The glance scheme is only supported if keystone authorization
+        # is in use; otherwise ironic is being used standalone
+        if CONF.auth_strategy != "keystone":
+            raise exception.InvalidParameterValue(_(
+                "Glance scheme only supported when using Keystone (%s).")
+                % (archive_uri))
+
+        # Format : glance://<glance UUID>
+        # When parsed by urlparse, Glance image uuid appears as netloc param
+        if not url.netloc:
+            raise exception.InvalidParameterValue(_(
+                "Missing Glance image UUID archive_uri (%s).")
+                % (archive_uri))
+
+        # Validate glance image exists by attempting to get download size
+        try:
+            size = images.download_size(task.context, url.netloc)
+            LOG.debug("Image %s size: %s" % (url.netloc, str(size)))
+            if not size:
+                raise exception.InvalidParameterValue(_(
+                    "Glance image not found: %s") % (url.netloc))
+
+        except Exception as err:
+            raise exception.InvalidParameterValue(_(
+                "Failed to validate Glance image '%s': %s") %
+                (url.netloc, err))
+
+    elif url.scheme in ["http", "https"]:
+        # Presuming client authentication using HTTPS is not being used.
+        # Just a secure connection.
+        # TODO(mattk): Do I need to support client side HTTPS authentication
+        if not _url_exists(archive_uri):
+            raise exception.InvalidParameterValue(_(
+                "archive_uri does not exist (%s).") % (archive_uri))
+    elif url.scheme == "file":
+        file_path = os.path.join(os.sep,
+                                 url.netloc.strip(os.sep),
+                                 url.path.strip(os.sep))
+        if not os.path.isfile(file_path):
+            raise exception.InvalidParameterValue(_(
+                "archive_uri does not exist (%s).") % (archive_uri))
+
+
+def _format_archive_uri(task, archive_uri):
+    """Format archive URL to be passed as boot argument to AI client
+
+    Transformation of archive_uri is only required if URI scheme is glance.
+
+    :param task: a TaskManager instance.
+    :param archive_uri: URI path to unified archive
+    :returns: Formatted archive URI, and auth_token if needed
+    """
+    LOG.debug("SolarisDeploy._format_archive_uri: archive_uri: %s" %
+              (archive_uri))
+    if archive_uri:
+        url = urlparse(archive_uri)
+
+        if url.scheme == "glance":
+            # Transform uri from glance://<UUID> to
+            # direct glance URL glance://<GLANCE_REST_API>/<UUID>
+            new_uri = "http://%s:%s/v2/images/%s/file" % \
+                (CONF.glance.glance_host, CONF.glance.glance_port,
+                 url.netloc)
+            auth_token = task.context.auth_token
+        else:
+            new_uri = archive_uri
+            auth_token = None
+    else:
+        new_uri = None
+        auth_token = None
+
+    return new_uri, auth_token
+
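+# For example, with glance_host 198.51.100.10 and glance_port 9292
+# (hypothetical values), _format_archive_uri would transform
+#   glance://3fa85f64-5717-4562-b3fc-2c963f66afa6
+# into
+#   http://198.51.100.10:9292/v2/images/3fa85f64-5717-4562-b3fc-2c963f66afa6/file
+# and return the task context's auth_token alongside it.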
+
+def _validate_ai_manifest(task):
+    """Validate ai_manifest for format, etc
+
+    driver_info/ai_manifest is used to specify a path to a single
+    AI manifest to be used instead of the default derived script.
+    e.g. http://path-to-manifest
+
+    :param task: a TaskManager instance.
+    :raises: InvalidParameterValue if invalid ai_manifest
+    """
+    LOG.debug("SolarisDeploy._validate_ai_manifest")
+    ai_manifest = task.node.driver_info['ai_manifest']
+    _validate_uri(task, ai_manifest)
+
+
+def _validate_profiles(task, profiles):
+    """Validate profiles for format, etc
+
+    Configuration profiles are specified as a plus(+) delimited list of paths
+    e.g. http://path-to-profile+http://path-to-another-profile
+
+    :param task: a TaskManager instance.
+    :param profiles: Plus(+) delimited list of configuration profiles
+    :raises: InvalidParameterValue if invalid configuration profile
+    """
+    LOG.debug("SolarisDeploy._validate_profiles: %s" % (profiles))
+
+    # Split profiles into list of paths@environment elements
+    prof_list = [prof.strip() for prof in profiles.split('+') if prof.strip()]
+
+    for profile in prof_list:
+        _validate_uri(task, profile)
+
+
+def _validate_uri(task, uri):
+    """Validate URI for AI Manifest or SC Profile
+
+    :param task: a TaskManager instance.
+    :param uri: URI to AI Manifest or SC profile
+    :raises: InvalidParameterValue if invalid manifest/profile URI
+    """
+    LOG.debug("SolarisDeploy._validate_uri: URI: %s" % (uri))
+    url = urlparse(uri)
+
+    if url.scheme not in VALID_URI_SCHEMES:
+        raise exception.InvalidParameterValue(_(
+            "Unsupported uri scheme (%s) referenced"
+            " in URI (%s).") % (url.scheme, uri))
+
+    if not url.netloc and not url.path:
+        raise exception.InvalidParameterValue(_(
+            "Missing URI name (%s).") % (uri))
+
+    if url.scheme in ["http", "https"]:
+        # Presuming client authentication using HTTPS is not being used.
+        # Just a secure connection.
+        # TODO(mattk): Do I need to support client side HTTPS authentication
+        if not _url_exists(uri):
+            raise exception.InvalidParameterValue(_(
+                "URI does not exist (%s).") % (uri))
+        else:
+            LOG.debug("SolarisDeploy._validate_uri: %s exists." %
+                      (uri))
+    elif url.scheme == "file":
+        file_path = os.path.join(os.sep,
+                                 url.netloc.strip(os.sep),
+                                 url.path.strip(os.sep))
+        if not os.path.isfile(file_path):
+            raise exception.InvalidParameterValue(_(
+                "URI does not exist (%s).") % (uri))
+        else:
+            LOG.debug("SolarisDeploy._validate_uri: %s exists." %
+                      (url.scheme))
+    elif url.scheme == "glance":
+        # The glance scheme is only supported if keystone authorization
+        # is in use; otherwise ironic is being used standalone
+        if CONF.auth_strategy != "keystone":
+            raise exception.InvalidParameterValue(_(
+                "Glance scheme only supported when using Keystone (%s).")
+                % (uri))
+
+        # Format : glance://<glance UUID>
+        # When parsed by urlparse, Glance image uuid appears as netloc param
+        if not url.netloc:
+            raise exception.InvalidParameterValue(_(
+                "Missing Glance image UUID for URI (%s).")
+                % (uri))
+
+        # Validate glance uri exists by attempting to get download size
+        try:
+            size = images.download_size(task.context, url.netloc)
+            LOG.debug("Image %s size: %s" % (url.netloc, str(size)))
+            if not size:
+                raise exception.InvalidParameterValue(_(
+                    "Glance image not found: %s") % (url.netloc))
+            else:
+                LOG.debug("SolarisDeploy._validate_uri: %s exists." %
+                          (uri))
+
+        except Exception as err:
+            raise exception.InvalidParameterValue(_(
+                "Failed to validate Glance URI '%s': %s") %
+                (url.netloc, err))
+
+
+def _validate_fmri(task):
+    """Validate fmri for format, etc
+
+    driver_info/fmri is a plus(+) delimited list of IPS package
+    FMRIs to be installed. e.g. pkg:/pkg1+pkg:/pkg2
+
+    :param task: a TaskManager instance.
+    :raises: InvalidParameterValue if invalid fmri
+    """
+    LOG.debug("SolarisDeploy._validate_fmri")
+    fmri = task.node.driver_info['fmri']
+
+    # Split fmri into list of possible packages
+    pkg_list = [pkg.strip() for pkg in fmri.split('+') if pkg.strip()]
+    for fmri in pkg_list:
+        _validate_fmri_format(fmri)
+
+
+def _validate_fmri_format(fmri):
+    """Validate FMRI for format
+    FMRI must not contain the publisher and must be of the format:
+
+            pkg:/<package path>
+
+    Note the fmri contains only a single forward slash.
+
+    :param fmri: IPS FMRI
+    :raises: InvalidParameterValue if invalid FMRI
+    """
+    LOG.debug("SolarisDeploy._validate_fmri_format: fmri: %s" % (fmri))
+    url = urlparse(fmri)
+
+    if url.scheme != "pkg":
+        raise exception.InvalidParameterValue(_(
+            "Unsupported IPS scheme (%s) referenced in fmri (%s).")
+            % (url.scheme, fmri))
+
+    if url.netloc:
+        raise exception.InvalidParameterValue(_(
+            "Cannot specify publisher name in fmri (%s).") % (fmri))
+
+    if not url.path:
+        raise exception.InvalidParameterValue(_(
+            "Missing IPS package name in fmri (%s).") % (fmri))
+    elif PLATFORM == "SunOS":
+        # Validate package name
+        if not is_valid_pkg_name(url.path.strip("/")):
+            raise exception.InvalidParameterValue(_(
+                "Malformed IPS package name in fmri (%s).") % (fmri))
+
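+# Examples of FMRI values against _validate_fmri_format (illustrative):
+#
+#   pkg:/system/management/ipmitool    # valid: one slash, no publisher
+#   pkg://solaris/system/core-os       # rejected: publisher specified
+#   http://pkg.oracle.com/core-os      # rejected: scheme is not "pkg"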
+
+def _validate_publishers(task):
+    """Validate custom publisher name/origins for format
+
+    publishers property is a plus(+) delimited list of IPS publishers
+    to be installed from, in the format name@origin. e.g.
+        solaris@http://pkg.oracle.com/solaris+extra@http://int.co.com/extras
+
+    :param task: a TaskManager instance.
+    :raises: InvalidParameterValue if invalid publisher
+    """
+    LOG.debug("SolarisDeploy._validate_publishers")
+    pubs = task.node.driver_info['publishers']
+
+    # Split publishers into list of name@origin publishers
+    pub_list = [pub.strip() for pub in pubs.split('+') if pub.strip()]
+    for pub in pub_list:
+        # Split into name origin
+        name, origin = pub.split('@', 1)
+        if not name or not origin:
+            raise exception.InvalidParameterValue(_(
+                "Malformed IPS publisher must be of format "
+                "name@origin (%s).") % (pub))
+
+        if PLATFORM == "SunOS":
+            if not valid_pub_prefix(name):
+                raise exception.InvalidParameterValue(_(
+                    "Malformed IPS publisher name (%s).") % (name))
+
+            if not valid_pub_url(origin):
+                raise exception.InvalidParameterValue(_(
+                    "Malformed IPS publisher origin (%s).") % (origin))
+
+
+def _fetch_and_create(task, obj_type, obj_name, obj_uri, aiservice, mac,
+                      env=None):
+    """Fetch manifest/profile and create on AI Server
+
+    :param task: a TaskManager instance.
+    :param obj_type: Type of AI object to create "manifest" or "profile"
+    :param obj_name: manifest/profile name
+    :param obj_uri: URI to manifest/profile to use
+    :param aiservice: AI Service to create manifest/profile for
+    :param mac: MAC address criteria to use
+    :param env: Environment to apply profile to
+    :raises: AICreateProfileFail or AICreateManifestFail
+    """
+    # Fetch URI to local file
+    url = urlparse(obj_uri)
+    temp_file = _fetch_uri(task, obj_uri)
+
+    try:
+        # scp temp file to AI Server
+        remote_file = os.path.join("/tmp", obj_name) + ".xml"
+        aiservice.copy_remote_file(temp_file, remote_file)
+    except Exception as err:
+        LOG.error(_("Fetch and create failed for %s: name: %s: %s") %
+                  (obj_type, obj_uri, err))
+        if url.scheme == "glance":
+            _image_refcount_adjust(temp_file, -1)
+        else:
+            os.remove(temp_file)
+        raise
+
+    try:
+        if obj_type == "manifest":
+            # Create AI Manifest
+            aiservice.create_manifest(obj_name, remote_file, mac)
+        elif obj_type == "profile":
+            # Create AI Profile
+            aiservice.create_profile(obj_name, remote_file, mac, env)
+
+    except (AICreateManifestFail, AICreateProfileFail) as _err:
+        aiservice.delete_remote_file(remote_file)
+        if url.scheme == "glance":
+            _image_refcount_adjust(temp_file, -1)
+        else:
+            os.remove(temp_file)
+        raise
+
+    # Remove local and remote temporary profiles
+    aiservice.delete_remote_file(remote_file)
+    if url.scheme == "glance":
+        _image_refcount_adjust(temp_file, -1)
+    else:
+        os.remove(temp_file)
+
+
+class DeployStateChecker(Thread):
+    """Thread class to check for deployment completion"""
+
+    def __init__(self, task):
+        """Init method for thread class"""
+        LOG.debug("DeployStateChecker.__init__()")
+        Thread.__init__(self)
+
+        self.task = task
+        self.node = task.node
+        self._state = states.DEPLOYWAIT
+        self.ssh_connection = None
+        self.running = True
+
+    @property
+    def state(self):
+        """Deployment state property"""
+        return self._state
+
+    def run(self):
+        """Start the thread """
+        LOG.debug("DeployStateChecker.run(): Connecting...")
+        client = utils.ssh_connect(self._get_ssh_dict())
+        channel = client.invoke_shell()
+        channel.settimeout(0.0)
+        channel.set_combine_stderr(True)
+
+        # Continuously read stdout from console and parse
+        # specifically for success/failure output
+        while self.running:
+            with tempfile.TemporaryFile(dir='/var/lib/ironic') as tf:
+                while True:
+                    rchans, _wchans, _echans = select.select([channel], [], [])
+                    if channel in rchans:
+                        try:
+                            console_data = ""
+                            while channel.recv_ready():
+                                console_data += channel.recv(1024)
+
+                            if len(console_data) == 0:
+                                tf.write("\n*** EOF\n")
+                                # Confirm string to search for on success
+                                if self._string_in_file(tf, AI_SUCCESS_STRING):
+                                    self._state = states.DEPLOYDONE
+                                else:
+                                    # Didn't succeed so default to failure
+                                    self._state = states.DEPLOYFAIL
+                                self.stop()
+                                break
+                            tf.write(console_data)
+                            tf.flush()
+
+                            # Read input buffer for prompt
+                            if re.search("->", console_data):
+                                # Send console start command
+                                channel.send("start -script SP/Console\n")
+
+                            # Cater for Yes/No prompts always sending Yes
+                            elif re.search("y/n", console_data):
+                                channel.send("y\n")
+
+                            # Confirm string to search for on success
+                            elif self._string_in_file(tf, AI_SUCCESS_STRING):
+                                LOG.debug("DeployStateChecker.run(): Done")
+                                self._state = states.DEPLOYDONE
+                                self.stop()
+                                break
+
+                            # Confirm string to search for on failure
+                            elif self._string_in_file(tf, AI_FAILURE_STRING):
+                                LOG.debug("DeployStateChecker.run(): FAIL")
+                                self._state = states.DEPLOYFAIL
+                                self.stop()
+                                break
+
+                            elif self._string_in_file(tf, AI_DEPLOY_STRING):
+                                LOG.debug(
+                                    "DeployStateChecker.run(): DEPLOYING")
+                                self._state = states.DEPLOYING
+                        except socket.timeout:
+                            pass
+
+    def stop(self):
+        """Stop the thread"""
+        LOG.debug("DeployStateChecker.stop()")
+        self.running = False
+
+    def _string_in_file(self, fp, string):
+        """Read all data from file checking for string presence
+
+        :param fp: Open file pointer to read
+        :param string: Specific string to check for
+        :returns: boolean True if string present in file, False if not
+        """
+        found_string = False
+
+        # Position read at start of file
+        fp.seek(0)
+        for line in fp:
+            if re.search(string, line):
+                found_string = True
+                break
+
+        # Return current read point to end of file for subsequent writes
+        fp.seek(0, 2)
+        return found_string
+
+    def _get_ssh_dict(self):
+        """Generate SSH Dictionary for SSH Connection via paramiko
+
+        :returns: dictionary for paramiko connection
+        """
+        LOG.debug("DeployStateChecker._get_ssh_dict()")
+
+        driver_info = _parse_driver_info(self.node)
+
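+        # Build the paramiko connection dictionary from the node's
+        # driver_info, e.g. {'host': '10.0.0.5', 'username': 'root',
+        # 'port': 22} (illustrative values).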
+        ssh_dict = {
+            'host': driver_info.get('address'),
+            'username': driver_info.get('username'),
+            'port': driver_info.get('port', 22)
+            }
+
+        if ssh_dict.get('port') is not None:
+            ssh_dict['port'] = int(ssh_dict.get('port'))
+        else:
+            del ssh_dict['port']
+
+        if driver_info['password']:
+            ssh_dict['password'] = driver_info['password']
+
+        LOG.debug("DeployStateChecker._get_ssh_dict():ssh_dict: %s" %
+                  (ssh_dict))
+        return ssh_dict
+
+
+class SolarisDeploy(base.DeployInterface):
+    """AI Deploy Interface """
+
+    def get_properties(self):
+        """Return Solaris driver properties"""
+        return COMMON_PROPERTIES
+
+    def validate(self, task):
+        """Validate the driver-specific Node deployment info.
+
+        :param task: a task from TaskManager.
+        :raises: InvalidParameterValue.
+        :raises: MissingParameterValue.
+        """
+        LOG.debug("SolarisDeploy.validate()")
+        LOG.debug(task.context.auth_token)
+
+        # Validate IPMI credentials by getting node architecture
+        try:
+            _arch = _get_node_architecture(task.node)
+        except Exception as err:
+            raise exception.InvalidParameterValue(_("%s") % err)
+
+        if not driver_utils.get_node_mac_addresses(task):
+            raise exception.InvalidParameterValue(
+                _("Node %s does not have any port associated with it.") %
+                (task.node.uuid))
+
+        # Ensure server configured
+        if not CONF.ai.server or CONF.ai.server == "None":
+            raise exception.MissingParameterValue(
+                _("AI Server not specified in configuration file."))
+
+        # Ensure username configured
+        if not CONF.ai.username or CONF.ai.username == "None":
+            raise exception.MissingParameterValue(
+                _("AI Server user not specified in configuration file."))
+
+        # One of ssh_key_file / ssh_key_contents / password must be configured
+        if ((not CONF.ai.password or CONF.ai.password == "None") and
+            (not CONF.ai.ssh_key_file or CONF.ai.ssh_key_file == "None") and
+            (not CONF.ai.ssh_key_contents or
+                CONF.ai.ssh_key_contents == "None")):
+            raise exception.MissingParameterValue(
+                _("AI Server authentication not specified. One of password, "
+                  "ssh_key_file and ssh_key_contents must be present in "
+                  "configuration file."))
+
+        # archive_uri, publishers or fmri cannot be specified if an
+        # ai_manifest is defined. They should be contained within the custom
+        # manifest itself.
+        if (task.node.driver_info.get('ai_manifest') and
+            (task.node.driver_info.get('archive_uri') or
+            task.node.driver_info.get('publishers') or
+                task.node.driver_info.get('fmri'))):
+            raise exception.InvalidParameterValue(
+                _("Custom Archive, Publishers or FMRI cannot be specified "
+                  "when specifying a custom AI Manifest. They should be "
+                  "contained within this custom AI Manifest."))
+
+        # Ensure ai_service is valid if specified in driver
+        if task.node.driver_info.get('ai_service'):
+            aiservice = AIService(task,
+                                  task.node.driver_info.get('ai_service'))
+            if not aiservice.exists:
+                raise exception.InvalidParameterValue(
+                    _("AI Service %s does not exist.") % (aiservice.name))
+
+        # Ensure node archive_uri is valid if specified
+        if task.node.driver_info.get('archive_uri'):
+            # Validate archive_uri for reachable, format, etc
+            _validate_archive_uri(task)
+
+        # Ensure custom publisher provided if FMRI provided
+        if task.node.driver_info.get('fmri') and \
+                not task.node.driver_info.get('publishers'):
+            raise exception.MissingParameterValue(_(
+                "Must specify custom publisher with custom fmri."))
+
+        # Ensure node publishers are valid if specified
+        if task.node.driver_info.get('publishers'):
+            # Validate publishers for format, etc
+            _validate_publishers(task)
+
+        # Ensure node fmri is valid if specified
+        if task.node.driver_info.get('fmri'):
+            # Validate fmri for format, etc
+            _validate_fmri(task)
+
+        # Ensure node sc_profiles is valid if specified
+        if task.node.driver_info.get('sc_profiles'):
+            # Validate sc_profiles for format, etc
+            _validate_profiles(task, task.node.driver_info.get('sc_profiles'))
+
+        # Ensure node install_profiles is valid if specified
+        if task.node.driver_info.get('install_profiles'):
+            # Validate install_profiles for format, etc
+            _validate_profiles(task,
+                               task.node.driver_info.get('install_profiles'))
+
+        # Ensure node manifest is valid if specified
+        if task.node.driver_info.get('ai_manifest'):
+            # Validate ai_manifest for format, etc
+            _validate_ai_manifest(task)
+
+        # Try to get the URL of the Ironic API
+        try:
+            CONF.conductor.api_url or keystone.get_service_url()
+        except (exception.CatalogFailure,
+                exception.CatalogNotFound,
+                exception.CatalogUnauthorized):
+            raise exception.InvalidParameterValue(_(
+                "Couldn't get the URL of the Ironic API service from the "
+                "configuration file or Keystone catalog."))
+
+        # Validate driver_info by parsing contents
+        _parse_driver_info(task.node)
+
+    @task_manager.require_exclusive_lock
+    def deploy(self, task):
+        """Perform start deployment a node.
+
+        For AI Deployment of x86 machines, we simply need to set the chassis
+        boot device to pxe and reboot the physical node.
+
+        For AI Deployment of SPARC Machines we need to supply a boot script
+        indicating to perform a network DHCP boot.
+
+        AI Server settings for this node, e.g. client, manifest, boot args
+        etc, will have been configured via prepare() method which is called
+        before deploy().
+
+        :param task: a TaskManager instance.
+        :returns: deploy state DEPLOYWAIT.
+        """
+        LOG.debug("SolarisDeploy.deploy()")
+
+        arch = _get_node_architecture(task.node)
+
+        # Ensure persistence is false so net boot only occurs once
+        if arch == 'x86':
+            # Set boot device to PXE network boot
+            dev_cmd = 'pxe'
+        elif arch == 'SPARC':
+            # Set bootmode script to network DHCP
+            dev_cmd = 'wanboot'
+        else:
+            raise exception.InvalidParameterValue(
+                _("Invalid node architecture of '%s'.") % (arch))
+
+        manager_utils.node_set_boot_device(task, dev_cmd,
+                                           persistent=False)
+        manager_utils.node_power_action(task, states.REBOOT)
+
+        deploy_thread = DeployStateChecker(task)
+        deploy_thread.start()
+        timer = loopingcall.FixedIntervalLoopingCall(_check_deploy_state,
+                                                     task, task.node.uuid,
+                                                     deploy_thread)
+        timer.start(interval=int(CONF.ai.deploy_interval))
+
+        return states.DEPLOYWAIT
+
+    @task_manager.require_exclusive_lock
+    def tear_down(self, task):
+        """Tear down a previous deployment.
+
+        Reset boot device or bootmode script and power off the node.
+        All actual clean-up is done in the clean_up()
+        method which should be called separately.
+
+        :param task: a TaskManager instance.
+        :returns: deploy state DELETED.
+        """
+        LOG.debug("SolarisDeploy.tear_down()")
+        manager_utils.node_set_boot_device(task, 'disk',
+                                           persistent=False)
+        manager_utils.node_power_action(task, states.POWER_OFF)
+
+        return states.DELETED
+
+    def prepare(self, task):
+        """Prepare the deployment environment for this node.
+
+        1. Ensure Node's AI Service is specified and that it exists
+        2. (Re)Create AI Clients for each port/MAC specified for this Node
+        3. (Re)Create AI Manifests for each port/MAC specified for this Node,
+           with the MAC address as specific criteria
+        4. (Re)Create AI Profiles for each port/MAC specified for this Node,
+           one per SC/install profile supplied
+
+        AI Service to use for installation is determined from
+        driver_info properties archive_uri or ai_service. archive_uri
+        takes precedence over ai_service.
+
+        1. archive_uri specified.
+            Extract AI ISO from UAR and create a new AI service if service
+            for this ID does not exist.
+        2. ai_service specified
+            AI Service must exist.
+        3. archive_uri & ai_service not specified
+            Use default architecture specific service to perform IPS
+            install.
+
+        :param task: a TaskManager instance.
+        """
+        LOG.debug("SolarisDeploy.prepare()")
+
+        ai_manifest = task.node.driver_info.get('ai_manifest', None)
+        ai_service = task.node.driver_info.get('ai_service', None)
+        arch = _get_node_architecture(task.node)
+        archive_uri = task.node.driver_info.get('archive_uri', None)
+        fmri = task.node.driver_info.get('fmri', None)
+        install_profiles = task.node.driver_info.get('install_profiles', None)
+        publishers = task.node.driver_info.get('publishers', None)
+        sc_profiles = task.node.driver_info.get('sc_profiles', None)
+
+        # Ensure cache dir exists
+        if not os.path.exists(CONF.solaris_ipmi.imagecache_dirname):
+            os.makedirs(CONF.solaris_ipmi.imagecache_dirname)
+
+        # archive_uri, publishers or fmri cannot be specified if an
+        # ai_manifest is defined. They should be contained within the custom
+        # manifest itself.
+        if ai_manifest and (archive_uri or publishers or fmri):
+            raise exception.InvalidParameterValue(
+                _("Custom Archive, Publishers or FMRI cannot be specified "
+                  "when specifying a custom AI Manifest. They should be "
+                  "contained within this custom AI Manifest."))
+
+        # 1. Ensure Node's AI Service exists; if archive_uri is given,
+        #    create a new service if one named for the archive's UUID does
+        #    not already exist
+        if archive_uri:
+            # Validate archive_uri, format, reachable, etc
+            _validate_archive_uri(task)
+
+            # Extract UUID from archive UAR and instantiate AIService
+            ai_service = _get_archive_uuid(task)
+            aiservice = AIService(task, ai_service)
+
+        elif ai_service:
+            # Instantiate AIService object for this node/service
+            aiservice = AIService(task, ai_service)
+        else:
+            # IPS Install, ensure default architecture service exists
+            if arch == "x86":
+                ai_service = "default-i386"
+            elif arch == 'SPARC':
+                ai_service = "default-sparc"
+            else:
+                raise exception.InvalidParameterValue(
+                    _("Invalid node architecture of '%s'.") % (arch))
+
+            # Instantiate AIService object for this node/service
+            aiservice = AIService(task, ai_service)
+
+        # Check if AI Service exists, raise exception if not
+        if not aiservice.exists:
+            if archive_uri:
+                # Create this service
+                aiservice.create_service(archive_uri)
+            else:
+                raise exception.InvalidParameterValue(
+                    _("AI Service %s does not exist.") % (aiservice.name))
+
+        # Ensure custom publisher provided if FMRI provided
+        if fmri and not publishers:
+            raise exception.InvalidParameterValue(_(
+                "Must specify custom publisher with custom fmri."))
+
+        # Ensure node publishers are valid if specified
+        if publishers:
+            # Validate publishers for format, etc
+            _validate_publishers(task)
+
+        # Ensure node fmri is valid if specified
+        if fmri:
+            # Validate fmri, format, etc
+            _validate_fmri(task)
+
+        # Ensure node sc_profiles is of valid format if specified
+        if sc_profiles:
+            # Validate sc_profiles for format, etc
+            _validate_profiles(task, sc_profiles)
+
+        # Ensure node install_profiles is of valid format if specified
+        if install_profiles:
+            # Validate install_profiles for format, etc
+            _validate_profiles(task, install_profiles)
+
+        # Ensure node ai_manifest is valid if specified
+        if ai_manifest:
+            # Validate ai_manifest for format, etc
+            _validate_ai_manifest(task)
+
+        for mac in driver_utils.get_node_mac_addresses(task):
+            # 2. Recreate AI Clients for each port/Mac specified for this Node
+            # Check if AI Client exists for this service and if so remove it
+            if mac.lower() in aiservice.clients:
+                # Client exists remove it
+                aiservice.delete_client(mac)
+
+            # Recreate new ai client for this mac address
+            new_uri, auth_token = _format_archive_uri(task, archive_uri)
+            aiservice.create_client(mac, arch, new_uri, auth_token,
+                                    publishers, fmri)
+
+            # 3. (Re)Create AI Manifest for each port/Mac specified for this
+            #    Node. Manifest name will be MAC address stripped of colons
+            manifest_name = mac.replace(':', '')
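+            # e.g. 'aa:ee:bb:cc:ff:66' becomes manifest name 'aaeebbccff66'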
+
+            # Check if AI Manifest exists for this service and if so remove it
+            if manifest_name in aiservice.manifests:
+                # Manifest exists remove it
+                aiservice.delete_manifest(manifest_name)
+
+            # (Re)Create new ai Manifest for this mac address
+            # If ai_manifest is specified use it as the manifest otherwise
+            # use derived manifest script specified by aiservice.
+            if ai_manifest is not None:
+                # Fetch manifest locally, copy to AI Server so that
+                # installadm create-manifest CLI works.
+                _fetch_and_create(task, "manifest", manifest_name, ai_manifest,
+                                  aiservice, mac)
+            else:
+                _fetch_and_create(task, "manifest", manifest_name,
+                                  aiservice.derived_manifest, aiservice, mac)
+
+            # 4. (Re)Create AI Profiles for each port/MAC specified for this
+            #   Node, adding a new profile for each SC Profile specified.
+            #   Profile Name will be MAC address prefix and counter suffix.
+            #   e.g. AAEEBBCCFF66-1
+            profile_prefix = mac.replace(':', '') + "-"
+
+            # Remove all profiles associated with this MAC address and service
+            for profile_name in aiservice.profiles:
+                # Profile name starts with MAC address, assuming ironic
+                # created this profile so remove it.
+                if profile_prefix in profile_name:
+                    aiservice.delete_profile(profile_name)
+
+            # Process both sc_profiles and install_profiles filtering into
+            # unique list of profiles and environments to be applied to.
+            if install_profiles is not None:
+                ins_list = [prof.strip() for prof in
+                            install_profiles.split('+') if prof.strip()]
+            else:
+                ins_list = []
+
+            prof_dict = dict(((uri, "install") for uri in ins_list))
+
+            if sc_profiles is not None:
+                sc_list = [prof.strip() for prof in sc_profiles.split('+')
+                           if prof.strip()]
+            else:
+                sc_list = []
+
+            for profile in sc_list:
+                if profile in prof_dict:
+                    prof_dict[profile] = "all"
+                else:
+                    prof_dict[profile] = "system"
+
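+            # At this point prof_dict maps each profile URI to its target
+            # environment. Illustrative result (hypothetical URIs):
+            # install_profiles 'http://a.xml+http://b.xml' with sc_profiles
+            # 'http://b.xml' yields
+            # {'http://a.xml': 'install', 'http://b.xml': 'all'}.
+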
+            profile_index = 0
+            for profile_uri, profile_env in prof_dict.iteritems():
+                profile_index += 1
+                profile_name = profile_prefix + str(profile_index)
+
+                # Fetch profile locally, copy to AI Server so that
+                # installadm create-profile CLI works.
+                _fetch_and_create(task, "profile", profile_name, profile_uri,
+                                  aiservice, mac, env=profile_env)
+
+        # Ensure local copy of archive_uri is removed if not needed
+        if archive_uri:
+            url = urlparse(archive_uri)
+            if url.scheme == "glance":
+                temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                        url.netloc)
+                _image_refcount_adjust(temp_uar, -1)
+            elif PLATFORM != "SunOS":
+                temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                        url.path.replace("/", ""))
+                _image_refcount_adjust(temp_uar, -1)
+
+    def clean_up(self, task):
+        """Clean up the deployment environment for this node.
+
+        As the node is being torn down, we need to clean up the
+        AI Clients, Manifests and Profiles associated with this
+        node's MAC addresses.
+
+        1. Delete AI Clients for each port/MAC specified for this Node
+        2. Delete AI Manifests for each port/MAC specified for this Node
+        3. Delete AI Profiles for each port/MAC specified for this Node
+
+        :param task: a TaskManager instance.
+        """
+        LOG.debug("SolarisDeploy.clean_up()")
+
+        ai_service = task.node.driver_info.get('ai_service', None)
+        arch = _get_node_architecture(task.node)
+        archive_uri = task.node.driver_info.get('archive_uri', None)
+
+        # Instantiate AIService object for this node/service
+        if archive_uri:
+            aiservice = AIService(task, _get_archive_uuid(task))
+        elif ai_service:
+            aiservice = AIService(task, ai_service)
+        else:
+            if arch == "x86":
+                ai_service = "default-i386"
+            elif arch == 'SPARC':
+                ai_service = "default-sparc"
+            else:
+                raise exception.InvalidParameterValue(
+                    _("Invalid node architecture of '%s'.") % (arch))
+            aiservice = AIService(task, ai_service)
+
+        # Check if AI Service exists, log message if already removed
+        if not aiservice.exists:
+            # There is nothing to clean up as service removed
+            LOG.info(_("AI Service %s already removed.") % (aiservice.name))
+        else:
+            for mac in driver_utils.get_node_mac_addresses(task):
+                # 1. Delete AI Client for this MAC Address
+                if mac.lower() in aiservice.clients:
+                    aiservice.delete_client(mac)
+
+                # 2. Delete AI Manifest for this MAC Address
+                manifest_name = mac.replace(':', '')
+                if manifest_name in aiservice.manifests:
+                    aiservice.delete_manifest(manifest_name)
+
+                # 3. Remove AI Profiles for this MAC Address
+                profile_prefix = mac.replace(':', '') + "-"
+
+                # Remove all profiles associated with this MAC address
+                for profile_name in aiservice.profiles:
+                    if profile_prefix in profile_name:
+                        aiservice.delete_profile(profile_name)
+
+        # Ensure local copy of archive_uri is removed if not needed
+        if archive_uri:
+            url = urlparse(archive_uri)
+            if url.scheme == "glance":
+                temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                        url.netloc)
+                _image_refcount_adjust(temp_uar, -1)
+            elif PLATFORM != "SunOS":
+                temp_uar = os.path.join(CONF.solaris_ipmi.imagecache_dirname,
+                                        url.path.replace("/", ""))
+                _image_refcount_adjust(temp_uar, -1)
+
+    def take_over(self, _task):
+        """Take over management of this task's node from a dead conductor.
+
+        TODO(mattk): Determine if this is required.
+        """
+        LOG.debug("SolarisDeploy.take_over()")
+
+
+class SolarisManagement(base.ManagementInterface):
+    """Management class for solaris nodes."""
+
+    def get_properties(self):
+        """Return Solaris driver properties"""
+        return COMMON_PROPERTIES
+
+    def __init__(self):
+        try:
+            ipmitool._check_option_support(['timing', 'single_bridge',
+                                            'dual_bridge'])
+        except OSError:
+            raise exception.DriverLoadError(
+                driver=self.__class__.__name__,
+                reason=_("Unable to locate usable ipmitool command in "
+                         "the system path when checking ipmitool version"))
+
+    def validate(self, task):
+        """Check that 'driver_info' contains IPMI credentials.
+
+        Validates whether the 'driver_info' property of the supplied
+        task's node contains the required credentials information.
+
+        :param task: a task from TaskManager.
+        :raises: InvalidParameterValue if required IPMI parameters
+            are missing.
+        :raises: MissingParameterValue if a required parameter is missing.
+
+        """
+        _parse_driver_info(task.node)
+
+    def get_supported_boot_devices(self, task=None):
+        """Get a list of the supported boot devices.
+
+        :param task: a task from TaskManager.
+        :returns: A list with the supported boot devices defined
+                  in :mod:`ironic.common.boot_devices`.
+
+        """
+        if task is None:
+            return [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM,
+                    boot_devices.BIOS, boot_devices.SAFE]
+        else:
+            # Get architecture of node and return supported boot devices
+            arch = _get_node_architecture(task.node)
+            if arch == 'x86':
+                return [boot_devices.PXE, boot_devices.DISK,
+                        boot_devices.CDROM, boot_devices.BIOS,
+                        boot_devices.SAFE]
+            elif arch == 'SPARC':
+                return [boot_devices.DISK, 'wanboot']
+            else:
+                raise exception.InvalidParameterValue(
+                    _("Invalid node architecture of '%s'.") % (arch))
+
+    @task_manager.require_exclusive_lock
+    def set_boot_device(self, task, device, persistent=False):
+        """Set the boot device for the task's node.
+
+        Set the boot device to use on next reboot of the node.
+
+        :param task: a task from TaskManager.
+        :param device: the boot device, one of
+                       :mod:`ironic.common.boot_devices`.
+        :param persistent: Boolean value. True if the boot device will
+                           persist to all future boots, False if not.
+                           Default: False.
+        :raises: InvalidParameterValue if an invalid boot device is specified
+        :raises: MissingParameterValue if required ipmi parameters are missing.
+        :raises: IPMIFailure on an error from ipmitool.
+
+        """
+        LOG.debug("SolarisManagement.set_boot_device: %s" % device)
+
+        arch = _get_node_architecture(task.node)
+        archive_uri = task.node.driver_info.get('archive_uri')
+        publishers = task.node.driver_info.get('publishers')
+        fmri = task.node.driver_info.get('fmri')
+
+        if arch == 'x86':
+            if device not in self.get_supported_boot_devices(task=task):
+                raise exception.InvalidParameterValue(_(
+                    "Invalid boot device %s specified.") % device)
+            cmd = ["chassis", "bootdev", device]
+            if persistent:
+                cmd.append("options=persistent")
+        elif arch == 'SPARC':
+            # Set bootmode script to network DHCP or disk
+            if device == 'wanboot':
+                boot_cmd = 'set /HOST/bootmode script="'
+                script_str = 'boot net:dhcp - install'
+                if archive_uri:
+                    new_uri, auth_token = _format_archive_uri(task,
+                                                              archive_uri)
+                    script_str += ' archive_uri=%s' % (new_uri)
+
+                    if auth_token is not None:
+                        # Add auth_token to boot arg, AI archive transfer will
+                        # use this by setting X-Auth-Token header when using
+                        # curl to retrieve archive from glance.
+                        script_str += ' auth_token=%s' % (auth_token)
+
+                if publishers:
+                    pub_list = [pub.strip() for pub in publishers.split('+')
+                                if pub.strip()]
+                    script_str += ' publishers=%s' % ('+'.join(pub_list))
+
+                if fmri:
+                    pkg_list = [pkg.strip() for pkg in fmri.split('+')
+                                if pkg.strip()]
+                    script_str += ' fmri=%s' % ('+'.join(pkg_list))
+
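+                # Illustrative assembled script_str (hypothetical values):
+                # 'boot net:dhcp - install archive_uri=http://svr/img.uar
+                #  publishers=solaris@http://pkg.example.com fmri=pkg:/grp'
+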
+                # bootmode script property has a size restriction of 255
+                # characters raise error if this is breached.
+                if len(script_str) > 255:
+                    raise exception.InvalidParameterValue(_(
+                        "SPARC firmware bootmode script length exceeds 255:"
+                        " %s") % script_str)
+                boot_cmd += script_str + '"'
+                cmd = ['sunoem', 'cli', boot_cmd]
+            elif device == 'disk':
+                cmd = ['sunoem', 'cli',
+                       'set /HOST/bootmode script=""']
+            else:
+                raise exception.InvalidParameterValue(_(
+                    "Invalid boot device %s specified.") % (device))
+        else:
+            raise exception.InvalidParameterValue(
+                _("Invalid node architecture of '%s'.") % (arch))
+
+        driver_info = _parse_driver_info(task.node)
+        try:
+            _out, _err = _exec_ipmitool(driver_info, cmd)
+        except (exception.PasswordFileFailedToCreate,
+                processutils.ProcessExecutionError) as err:
+            LOG.warning(_LW('IPMI set boot device failed for node %(node)s '
+                            'when executing "ipmitool %(cmd)s". '
+                            'Error: %(error)s'),
+                        {'node': driver_info['uuid'],
+                         'cmd': cmd, 'error': err})
+            raise exception.IPMIFailure(cmd=cmd)
+
+    def get_boot_device(self, task):
+        """Get the current boot device for the task's node.
+
+        Returns the current boot device of the node.
+
+        :param task: a task from TaskManager.
+        :raises: InvalidParameterValue if required IPMI parameters
+            are missing.
+        :raises: IPMIFailure on an error from ipmitool.
+        :raises: MissingParameterValue if a required parameter is missing.
+        :returns: a dictionary containing:
+
+            :boot_device: the boot device, one of
+                :mod:`ironic.common.boot_devices` or None if it is unknown.
+            :persistent: Whether the boot device will persist to all
+                future boots or not, None if it is unknown.
+
+        """
+        LOG.debug("SolarisManagement.get_boot_device")
+        arch = _get_node_architecture(task.node)
+        driver_info = _parse_driver_info(task.node)
+        response = {'boot_device': None, 'persistent': None}
+
+        if arch == 'x86':
+            cmd = ["chassis", "bootparam", "get", "5"]
+        elif arch == 'SPARC':
+            cmd = ['sunoem', 'getval', '/HOST/bootmode/script']
+        else:
+            raise exception.InvalidParameterValue(
+                _("Invalid node architecture of '%s'.") % (arch))
+
+        try:
+            out, _err = _exec_ipmitool(driver_info, cmd)
+        except (exception.PasswordFileFailedToCreate,
+                processutils.ProcessExecutionError) as err:
+            LOG.warning(_LW('IPMI get boot device failed for node %(node)s '
+                            'when executing "ipmitool %(cmd)s". '
+                            'Error: %(error)s'),
+                        {'node': driver_info['uuid'],
+                         'cmd': cmd, 'error': err})
+            raise exception.IPMIFailure(cmd=cmd)
+
+        if arch == 'x86':
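+            # 'chassis bootparam get 5' output typically contains a line
+            # such as 'Boot Device Selector : Force PXE' (illustrative),
+            # which the regex below matches.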
+            re_obj = re.search('Boot Device Selector : (.+)?\n', out)
+            if re_obj:
+                boot_selector = re_obj.groups('')[0]
+                if 'PXE' in boot_selector:
+                    response['boot_device'] = boot_devices.PXE
+                elif 'Hard-Drive' in boot_selector:
+                    if 'Safe-Mode' in boot_selector:
+                        response['boot_device'] = boot_devices.SAFE
+                    else:
+                        response['boot_device'] = boot_devices.DISK
+                elif 'BIOS' in boot_selector:
+                    response['boot_device'] = boot_devices.BIOS
+                elif 'CD/DVD' in boot_selector:
+                    response['boot_device'] = boot_devices.CDROM
+
+            response['persistent'] = 'Options apply to all future boots' in out
+        elif arch == 'SPARC':
+            if "net:dhcp" in out:
+                response['boot_device'] = 'wanboot'
+            else:
+                response['boot_device'] = 'disk'
+        LOG.debug(response)
+        return response
+
+    def get_sensors_data(self, task):
+        """Get sensors data.
+
+        :param task: a TaskManager instance.
+        :raises: FailedToGetSensorData when getting the sensor data fails.
+        :raises: FailedToParseSensorData when parsing sensor data fails.
+        :raises: InvalidParameterValue if required ipmi parameters are missing
+        :raises: MissingParameterValue if a required parameter is missing.
+        :returns: returns a dict of sensor data group by sensor type.
+
+        """
+        driver_info = _parse_driver_info(task.node)
+        # with '-v' option, we can get the entire sensor data including the
+        # extended sensor information
+        cmd = "-v sdr"
+        try:
+            out, _err = _exec_ipmitool(driver_info, cmd)
+        except (exception.PasswordFileFailedToCreate,
+                processutils.ProcessExecutionError) as err:
+            raise exception.FailedToGetSensorData(node=task.node.uuid,
+                                                  error=err)
+
+        return ipmitool._parse_ipmi_sensors_data(task.node, out)
+
+
+class AIService(object):
+    """AI Service"""
+
+    def __init__(self, task, name):
+        """Initialize AIService object
+
+        :param task: a TaskManager instance
+        :param name: AI Service name
+        """
+        LOG.debug("AIService.__init__()")
+        self.task = task
+        self.name = name
+        self._clients = list()
+        self._image_path = None
+        self._manifests = list()
+        self._profiles = list()
+        self._ssh_obj = None
+        self._derived_manifest = None
+
+    @property
+    def ssh_obj(self):
+        """paramiko.SSHClient active connection"""
+        LOG.debug("AIService.ssh_obj")
+        if self._ssh_obj is None:
+            self._ssh_obj = self._get_ssh_connection()
+        return self._ssh_obj
+
+    @property
+    def manifests(self):
+        """list() of manifest names for this service"""
+        LOG.debug("AIService.manifests")
+        if not self._manifests:
+            self._manifests = self._get_manifest_names()
+        return self._manifests
+
+    @property
+    def profiles(self):
+        """list() of profile names for this service"""
+        LOG.debug("AIService.profiles")
+        if not self._profiles:
+            self._profiles = self._get_profile_names()
+        return self._profiles
+
+    @property
+    def clients(self):
+        """list() of all client names(mac addresses) On AI Server"""
+        LOG.debug("AIService.clients")
+        if not self._clients:
+            self._clients = self._get_all_client_names()
+        return self._clients
+
+    @property
+    def exists(self):
+        """True/False indicator of this service exists of not"""
+        LOG.debug("AIService.exists")
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -n " + self.name
+        try:
+            stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            return False
+
+        if self.name != self._parse_service_name(stdout):
+            return False
+        else:
+            return True
+
+    @property
+    def image_path(self):
+        """image_path for this service"""
+        LOG.debug("AIService.image_path")
+        if self._image_path is None:
+            self._image_path = self._get_image_path()
+        return self._image_path
+
+    @property
+    def derived_manifest(self):
+        """Access default derived manifest URI"""
+        LOG.debug("AIService.derived_manifest")
+        if not self._derived_manifest:
+            self._derived_manifest = CONF.ai.derived_manifest
+        return self._derived_manifest
+
+    def create_service(self, archive_uri):
+        """Create a new AI Service for this object
+
+        :param archive_uri: archive_uri to create service from
+        """
+
+        LOG.debug("AIService.create_service(): %s" % (self.name))
+
+        if PLATFORM == "SunOS":
+            # 1. Fetch archive
+            mount_dir, temp_uar = _mount_archive(self.task, archive_uri)
+            iso, uuid = _get_archive_iso_and_uuid(mount_dir)
+        else:
+            # 1. Fetch archive and Extract ISO file
+            temp_uar = _fetch_uri(self.task, archive_uri)
+            iso, uuid = _get_archive_iso_and_uuid(temp_uar, extract_iso=True)
+
+        # 2. scp AI ISO from archive to AI Server
+        remote_iso = os.path.join("/tmp", uuid) + ".iso"
+        try:
+            self.copy_remote_file(iso, remote_iso)
+        except Exception:
+            if PLATFORM == "SunOS":
+                _umount_archive(mount_dir, temp_uar)
+                if urlparse(archive_uri).scheme == "glance":
+                    _image_refcount_adjust(temp_uar, -1)
+            else:
+                shutil.rmtree(os.path.dirname(iso))
+                _image_refcount_adjust(temp_uar, -1)
+            raise
+
+        if PLATFORM != "SunOS":
+            # Remove temp extracted ISO file
+            shutil.rmtree(os.path.dirname(iso))
+
+        # 3. Create a new AI Service
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-service " + \
+            " -y -n " + uuid + " -s " + remote_iso
+
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+            self.name = uuid
+            self._clients = []
+            self._manifests = []
+            self._profiles = []
+
+        except Exception as _err:
+            self.delete_remote_file(remote_iso)
+            if PLATFORM == "SunOS":
+                _umount_archive(mount_dir, temp_uar)
+            else:
+                _image_refcount_adjust(temp_uar, -1)
+            raise AICreateServiceFail(
+                _("Failed to create AI Service %s") % (uuid))
+
+        # 4. Remove copy of AI ISO on AI Server
+        self.delete_remote_file(remote_iso)
+
+        if PLATFORM == "SunOS":
+            # 5. Unmount UAR
+            _umount_archive(mount_dir, temp_uar)
+
+        # 6. Decrement reference count for image
+        if temp_uar is not None:
+            _image_refcount_adjust(temp_uar, -1)
+
+    def delete_service(self):
+        """Delete the current AI Service"""
+        LOG.debug("AIService.delete_service():name: %s" % (self.name))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-service" + \
+            " -r -y -n " + self.name
+
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AIDeleteServiceFail(
+                _("Failed to delete AI Service %s") % (self.name))
+
+    def create_client(self, mac, arch, archive_uri, auth_token,
+                      publishers, fmri):
+        """Create a client associated with this service
+
+        :param mac: MAC Address of client to create
+        :param arch: Machine architecture for this node
+        :param archive_uri: URI of archive to install node from
+        :param auth_token: Authorization token for glance UAR retrieval
+        :param publishers: IPS publishers list in name@origin format
+        :param fmri: IPS package FMRIs to install
+        :returns: Nothing; an exception is raised if creation fails
+        """
+        LOG.debug("AIService.create_client():mac: %s" % (mac))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-client -e " + \
+            mac + " -n " + self.name
+
+        # Add specific boot arguments for 'x86' clients only
+        if arch == 'x86':
+            ai_cmd += " -b install=true,console=ttya"
+
+            if archive_uri:
+                ai_cmd += ",archive_uri=%s" % (archive_uri)
+
+            if auth_token:
+                ai_cmd += ",auth_token=%s" % (auth_token)
+
+            if publishers:
+                pub_list = [pub.strip() for pub in publishers.split('+')
+                            if pub.strip()]
+                ai_cmd += ",publishers='%s'" % ('+'.join(pub_list))
+
+            if fmri:
+                pkg_list = [pkg.strip() for pkg in fmri.split('+')
+                            if pkg.strip()]
+                ai_cmd += ",fmri='%s'" % ('+'.join(pkg_list))
+
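+        # Illustrative final command for an x86 node (hypothetical values):
+        # installadm create-client -e aa:ee:bb:cc:ff:66 -n default-i386 \
+        #     -b install=true,console=ttya,archive_uri=http://svr/img.uar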
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AICreateClientFail(_("Failed to create AI Client %s") %
+                                     (mac))
+
+        # If arch is x86, customize grub, reducing the grub menu timeout to 0
+        if arch == 'x86':
+            custom_grub = "/tmp/%s.grub" % (mac)
+            ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm export -e " + \
+                mac + " -G | /usr/bin/sed -e 's/timeout=30/timeout=0/'" + \
+                " > %s" % (custom_grub)
+            try:
+                _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+            except Exception as _err:
+                raise AICreateClientFail(
+                    _("Failed to create custom grub menu for %s.") % (mac))
+
+            ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm set-client -e " + \
+                mac + " -G %s" % (custom_grub)
+            try:
+                _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+            except Exception as _err:
+                raise AICreateClientFail(
+                    _("Failed to customize AI Client %s grub menu.") % (mac))
+
+            self.delete_remote_file(custom_grub)
+
+        self._clients = self._get_all_client_names()
+
+    def delete_client(self, mac):
+        """Delete a specific client regardless of service association
+
+        :param mac: MAC Address of client to remove
+        :returns: Nothing; an exception is raised if deletion fails
+        """
+        LOG.debug("AIService.delete_client():mac: %s" % (mac))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-client -e " + mac
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AIDeleteClientFail(_("Failed to delete AI Client %s") %
+                                     (mac))
+
+        # update list of clients for this service
+        self._clients = self._get_all_client_names()
+
+    def create_manifest(self, manifest_name, manifest_path, mac):
+        """Create a manifest associated with this service
+
+        :param manifest_name: manifest_name to create
+        :param manifest_path: path to manifest file to use
+        :param mac: MAC address to add as criteria
+        :returns: Nothing; an exception is raised if creation fails
+        """
+        LOG.debug("AIService.create_manifest():manifest_name: "
+                  "'%s', manifest_path: '%s', mac: '%s'" %
+                  (manifest_name, manifest_path, mac))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-manifest -n " + \
+            self.name + " -m " + manifest_name + " -f " + manifest_path + \
+            " -c mac=" + mac
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AICreateManifestFail(_("Failed to create AI Manifest %s.") %
+                                       (manifest_name))
+
+        # Update list of manifests for this service
+        self._manifests = self._get_manifest_names()
+
+    def delete_manifest(self, manifest_name):
+        """Delete a specific manifest
+
+        :param manifest_name: name of manifest to remove
+        :returns: Nothing; an exception is raised if deletion fails
+        """
+        LOG.debug("AIService.delete_manifest():manifest_name: %s" %
+                  (manifest_name))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-manifest -m " + \
+            manifest_name + " -n " + self.name
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AIDeleteManifestFail(_("Failed to delete AI Manifest %s") %
+                                       (manifest_name))
+
+        # Update list of manifests for this service
+        self._manifests = self._get_manifest_names()
+
+    def create_profile(self, profile_name, profile_path, mac, env):
+        """Create a profile associated with this service
+
+        :param profile_name: profile name to create
+        :param profile_path: path to profile file to use
+        :param mac: MAC address to add as criteria
+        :param env: Environment to apply profile to
+        :returns: Nothing; an exception is raised if creation fails
+        """
+        LOG.debug("AIService.create_profile():profile_name: "
+                  "'%s', profile_path: '%s', mac: '%s'" %
+                  (profile_name, profile_path, mac))
+
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm create-profile -n " + \
+            self.name + " -p " + profile_name + " -f " + profile_path + \
+            " -c mac=" + mac
+
+        if env is not None:
+            ai_cmd = ai_cmd + " -e " + env
+
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AICreateProfileFail(_("Failed to create AI Profile %s.") %
+                                      (profile_name))
+
+        # Update list of profiles for this service
+        self._profiles = self._get_profile_names()
+
+    def delete_profile(self, profile_name):
+        """Delete a specific profile
+
+        :param profile_name: name of profile to remove
+        :returns: Nothing; an exception is raised if deletion fails
+        """
+        LOG.debug("AIService.delete_profile():profile_name: %s" %
+                  (profile_name))
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm delete-profile -p " + \
+            profile_name + " -n " + self.name
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as _err:
+            raise AIDeleteProfileFail(_("Failed to delete AI Profile %s") %
+                                      (profile_name))
+
+        # Update list of profiles for this service
+        self._profiles = self._get_profile_names()
+
+    def copy_remote_file(self, local, remote):
+        """Using scp copy local file to remote location
+
+        :param local: Local file path to copy
+        :param remote: Remote file path to copy to
+        :returns: Nothing, exception raised on failure
+        """
+        LOG.debug("AIService.copy_remote_file():local: %s, remote: %s" %
+                  (local, remote))
+        try:
+            scp = SCPClient(self.ssh_obj.get_transport())
+            scp.put(local, remote)
+        except Exception as err:
+            err_msg = _("Failed to copy file to remote server: %s") % err
+            raise SolarisIPMIError(msg=err_msg)
+
+    def delete_remote_file(self, path):
+        """Remove remote file in AI Server
+
+        :param path: Path of remote file to remove
+        :returns: Nothing; an exception is raised on failure
+        """
+        LOG.debug("AIService.delete_remote_file():path: %s" %
+                  (path))
+
+        ai_cmd = "/usr/bin/rm -f " + path
+        try:
+            _stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+        except Exception as err:
+            err_msg = _("Failed to delete remote file: %s") % err
+            raise SolarisIPMIError(msg=err_msg)
+
+    def _get_image_path(self):
+        """Retrieve image_path for this service
+
+        :returns: image_path property
+        """
+        LOG.debug("AIService._get_image_path()")
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -vn " + self.name
+        stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd)
+
+        image_path = None
+        for line in stdout.splitlines():
+            words = line.split()
+            if len(words) > 2 and words[0] == "Image" and words[1] == "Path":
+                image_path = words[-1]
+        LOG.debug("AIService._get_image_path():image_path: %s" % (image_path))
+        return image_path
+
+    def _parse_client(self, list_out):
+        """Return service name and client from installadm list -e output
+
+        :param list_out: stdout from installadm list -e
+        :returns: Service Name and MAC Address
+        """
+        LOG.debug("AIService._parse_client():list_out: %s" % (list_out))
+        lines = list_out.splitlines()
+        service_name = None
+        client_name = None
+
+        if len(lines[2].split()[0]) > 0:
+            service_name = lines[2].split()[0]
+
+        if len(lines[2].split()[1]) > 0:
+            client_name = lines[2].split()[1]
+
+        LOG.debug("AIService._parse_client():service_name: %s" %
+                  (service_name))
+        LOG.debug("AIService._parse_client():client_name: %s" % (client_name))
+        return service_name, client_name
+
+    def _parse_service_name(self, list_out):
+        """Given installadm list -n output, parse out service name
+
+        :param list_out: stdout from installadm list -n
+        :returns: Service Name
+        """
+        LOG.debug("AIService._parse_service_name():list_out: %s" % (list_out))
+        service_name = None
+
+        lines = list_out.splitlines()
+        if len(lines[2].split()[0]) > 0:
+            service_name = lines[2].split()[0]
+
+        LOG.debug("AIService._parse_service_name():service_name: %s" %
+                  (service_name))
+        return service_name
+
+    def _get_ssh_connection(self):
+        """Returns an SSH client connected to a node.
+
+        :returns: paramiko.SSHClient, an active ssh connection.
+        """
+        LOG.debug("AIService._get_ssh_connection()")
+        return utils.ssh_connect(self._get_ssh_dict())
+
+    def _get_ssh_dict(self):
+        """Generate SSH Dictionary for SSH Connection via paramiko
+
+        :returns: dictionary for paramiko connection
+        """
+        LOG.debug("AIService._get_ssh_dict()")
+        if not CONF.ai.server or not CONF.ai.username:
+            raise exception.InvalidParameterValue(_(
+                "SSH server and username must be set."))
+
+        ssh_dict = {
+            'host': CONF.ai.server,
+            'username': CONF.ai.username,
+            'port': int(CONF.ai.port),
+            'timeout': int(CONF.ai.timeout)
+            }
+
+        key_contents = key_filename = password = None
+        if CONF.ai.ssh_key_contents and CONF.ai.ssh_key_contents != "None":
+            key_contents = CONF.ai.ssh_key_contents
+        if CONF.ai.ssh_key_file and CONF.ai.ssh_key_file != "None":
+            key_filename = CONF.ai.ssh_key_file
+        if CONF.ai.password and CONF.ai.password != "None":
+            password = CONF.ai.password
+
+        if len(filter(None, (key_filename, key_contents))) > 1:
+            raise exception.InvalidParameterValue(_(
+                "SSH requires at most one of "
+                "ssh_key_file or ssh_key_contents to be set."))
+        if not (password or key_filename or key_contents):
+            raise exception.InvalidParameterValue(_(
+                "SSH requires one of password, ssh_key_file or "
+                "ssh_key_contents to be set."))
+        if password:
+            ssh_dict['password'] = password
+
+        if key_contents:
+            ssh_dict['key_contents'] = key_contents
+        elif key_filename:
+            if not os.path.isfile(key_filename):
+                raise exception.InvalidParameterValue(_(
+                    "SSH key file %s not found.") % key_filename)
+            ssh_dict['key_filename'] = key_filename
+        LOG.debug("AIService._get_ssh_dict():ssh_dict: %s" % (ssh_dict))
+        return ssh_dict
+
+    def _get_manifest_names(self):
+        """Get a list of manifest names for this service
+
+        :returns: list() of manifest names
+        """
+        LOG.debug("AIService._get_manifest_names()")
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -mn " + self.name
+        stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd,
+                                   err_msg=_("Failed to retrieve manifests"
+                                             " for service %s") % (self.name))
+        return self._parse_names(stdout)
+
+    def _get_profile_names(self):
+        """Get a list of profile names for this service
+
+        :returns: list() of profile names
+        """
+        LOG.debug("AIService._get_profile_names()")
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -pn " + self.name
+        stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd,
+                                   err_msg=_("Failed to retrieve profiles for "
+                                             "service %s") % (self.name))
+        return self._parse_names(stdout)
+
+    def _get_all_client_names(self):
+        """Get a list of client names for this service
+
+        :returns: list() of client/mac names
+        """
+        LOG.debug("AIService._get_all_client_names()")
+        ai_cmd = "/usr/bin/pfexec /usr/sbin/installadm list -c"
+        stdout, _rc = _ssh_execute(self.ssh_obj, ai_cmd,
+                                   err_msg=_("Failed to retrieve clients for "
+                                             "service %s") % (self.name))
+        # Store client names all in lower case
+        return [client.lower() for client in self._parse_names(stdout)]
+
+    def _parse_names(self, list_out):
+        """Parse client/manifest/profile names from installadm list output
+
+        Note: when we convert to using RAD, parsing installadm CLI output
+            will not be required, as API will return a list of names.
+
+        :param list_out: stdout from installadm list -c or -mn or -pn
+        :returns: a list of client/manifest/profile names
+        """
+        LOG.debug("AIService._parse_names():list_out: %s" %
+                  (list_out))
+        names = []
+        lines = list_out.splitlines()
+
+        # Get index into string for client/manifest/profile names
+        # client/manifest/profile names are all in 2nd column of output
+        if len(lines) > 1:
+            col_start = lines[1].index(" --")
+
+            for line in range(2, len(lines)):
+                names.append(lines[line][col_start:].split()[0])
+
+        LOG.debug("AIService._parse_names():names: %s" % (names))
+        return names
+
+
+# Custom Exceptions
+class AICreateServiceFail(exception.IronicException):
+    """Exception type for AI Service creation failure"""
+    pass
+
+
+class AIDeleteServiceFail(exception.IronicException):
+    """Exception type for AI Service deletion failure"""
+    pass
+
+
+class AICreateClientFail(exception.IronicException):
+    """Exception type for AI Client creation failure"""
+    pass
+
+
+class AIDeleteClientFail(exception.IronicException):
+    """Exception type for AI Client deletion failure"""
+    pass
+
+
+class AICreateManifestFail(exception.IronicException):
+    """Exception type for AI Manifest creation failure"""
+    pass
+
+
+class AIDeleteManifestFail(exception.IronicException):
+    """Exception type for AI Manifest deletion failure"""
+    pass
+
+
+class AICreateProfileFail(exception.IronicException):
+    """Exception type for AI Profile creation failure"""
+    pass
+
+
+class AIDeleteProfileFail(exception.IronicException):
+    """Exception type for AI Profile deletion failure"""
+    pass
+
+
+class SolarisIPMIError(exception.IronicException):
+    """Generic Solaris IPMI driver exception"""
+    message = _("%(msg)s")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/drivers/solaris.py	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Solaris Driver and supporting meta-classes.
+"""
+
+from ironic.drivers import base
+from ironic.drivers.modules import ipmitool
+from ironic.drivers.modules import solaris_ipmitool
+from ironic.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class SolarisAndIPMIToolDriver(base.BaseDriver):
+    """Solaris + IPMITool driver.
+
+    This driver implements the 'core' functionality, combining
+    :class:`ironic.drivers.ipmi.IPMI` for power on/off and reboot with
+    :class:`ironic.drivers.solaris` for image deployment. Implementations are
+    in those respective classes; this class is merely the glue between them.
+    """
+
+    def __init__(self):
+        LOG.debug(_("Loading Solaris And IPMI Tool Driver"))
+        self.power = ipmitool.IPMIPower()
+        self.deploy = solaris_ipmitool.SolarisDeploy()
+        self.console = None   # Not implemented yet
+        self.rescue = None    # Not implemented yet
+        self.management = solaris_ipmitool.SolarisManagement()
+        self.vendor = None    # No VendorSpecific methods yet
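
This class only takes effect once it is registered under the "ironic.drivers"
entry-point namespace (the role of the accompanying 02-driver-entry.patch) and
listed in enabled_drivers. A hedged sketch of inspecting that registration
with setuptools:

    # Sketch: enumerate drivers registered under the "ironic.drivers"
    # entry-point namespace; "solaris" must appear here for the
    # enabled_drivers=solaris setting in ironic.conf to resolve.
    import pkg_resources

    for ep in pkg_resources.iter_entry_points('ironic.drivers'):
        print ep.name, '->', ep.module_name
        if ep.name == 'solaris':
            driver_cls = ep.load()   # SolarisAndIPMIToolDriver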
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-api	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,39 @@
+#!/usr/bin/python2.6
+
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+import smf_include
+
+
+def start():
+    ironic_conf = sys.argv[2]
+
+    # verify config path is valid
+    if not os.path.exists(ironic_conf) or not os.access(ironic_conf, os.R_OK):
+        print >> sys.stderr, '%s does not exist or is not readable' % \
+            ironic_conf
+        return smf_include.SMF_EXIT_ERR_CONFIG
+
+    # Initiate ironic-api service
+    cmd_str = "/usr/lib/ironic/ironic-api --config-file %s " \
+        % (ironic_conf)
+    smf_include.smf_subprocess(cmd_str)
+
+if __name__ == "__main__":
+    os.putenv("LC_ALL", "C")
+    smf_include.smf_main()
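
The argv layout this script relies on comes from the SMF manifest that
follows: the method is invoked as
"/lib/svc/method/ironic-api %m %{config/config_path}". A minimal sketch of
that contract and the same readability check:

    # Sketch: SMF expands %m to the method name ("start") and
    # %{config/config_path} to the manifest property value, so
    # sys.argv[1] is the method and sys.argv[2] is the config path.
    import os
    import sys

    def check_config(path):
        # mirrors the validation in start()
        return os.path.exists(path) and os.access(path, os.R_OK)

    if __name__ == '__main__':
        method, conf = sys.argv[1], sys.argv[2]
        print '%s: %s readable: %s' % (method, conf, check_config(conf))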
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-api.xml	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,89 @@
+<?xml version="1.0" ?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+ NOTE:  This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade.  Make customizations in a different
+ file.
+-->
+<service_bundle type="manifest" name="ironic-api">
+
+  <service version="1" type="service"
+    name="application/openstack/ironic/ironic-api">
+
+    <dependency name='multiuser' grouping='require_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/milestone/multi-user:default' />
+    </dependency>
+
+    <!-- create a dependency on the ironic-db service ensuring the database
+         is created/synced for all other services. -->
+    <dependency name='ironic_db' grouping='optional_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/application/openstack/ironic/ironic-db'/>
+    </dependency>
+
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <dependency name='rabbitmq' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/application/rabbitmq:default'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
+    <exec_method timeout_seconds="60" type="method" name="start"
+      exec="/lib/svc/method/ironic-api %m %{config/config_path}">
+      <method_context>
+        <method_credential user='ironic' group='ironic' />
+      </method_context>
+    </exec_method>
+    <exec_method timeout_seconds="60" type="method" name="stop"
+      exec=":kill"/>
+
+    <instance name='default' enabled='false'>
+      <!-- to start/stop/refresh the service -->
+      <property_group name='general' type='framework'>
+        <propval name='action_authorization' type='astring'
+                 value='solaris.smf.manage.ironic' />
+        <propval name='value_authorization' type='astring'
+                 value='solaris.smf.value.ironic' />
+      </property_group>
+      <property_group name='config' type='application'>
+        <propval name='config_path' type='astring'
+          value='/etc/ironic/ironic.conf'/>
+      </property_group>
+    </instance>
+
+    <template>
+      <common_name>
+        <loctext xml:lang="C">
+          OpenStack Ironic API Service
+        </loctext>
+      </common_name>
+      <description>
+        <loctext xml:lang="C">
+          ironic-api is a server daemon that provides the Ironic API
+          service, enabling bare metal deployment for OpenStack.
+        </loctext>
+      </description>
+    </template>
+  </service>
+</service_bundle>
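
Because config_path is an SMF property, the instance can be repointed at an
alternate configuration without editing this manifest. A sketch using
svccfg/svcadm; the alternate config path is hypothetical:

    # Sketch: override config/config_path on the default instance, then
    # refresh and enable it.
    import subprocess

    FMRI = 'svc:/application/openstack/ironic/ironic-api:default'
    subprocess.check_call(['/usr/sbin/svccfg', '-s', FMRI,
                           'setprop config/config_path = astring: '
                           '/etc/ironic/ironic-test.conf'])
    subprocess.check_call(['/usr/sbin/svcadm', 'refresh', FMRI])
    subprocess.check_call(['/usr/sbin/svcadm', 'enable', FMRI])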
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-conductor	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,39 @@
+#!/usr/bin/python2.6
+
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import sys
+
+import smf_include
+
+
+def start():
+    ironic_conf = sys.argv[2]
+
+    # verify config path is valid
+    if not os.path.exists(ironic_conf) or not os.access(ironic_conf, os.R_OK):
+        print >> sys.stderr, '%s does not exist or is not readable' % \
+            ironic_conf
+        return smf_include.SMF_EXIT_ERR_CONFIG
+
+    # Initiate ironic-conductor service
+    cmd_str = "/usr/bin/pfexec /usr/lib/ironic/ironic-conductor " + \
+              "--config-file %s " % (ironic_conf)
+    smf_include.smf_subprocess(cmd_str)
+
+if __name__ == "__main__":
+    os.putenv("LC_ALL", "C")
+    smf_include.smf_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-conductor.xml	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,90 @@
+<?xml version="1.0" ?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+ NOTE:  This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade.  Make customizations in a different
+ file.
+-->
+<service_bundle type="manifest" name="ironic-conductor">
+
+  <service version="1" type="service"
+    name="application/openstack/ironic/ironic-conductor">
+
+    <dependency name='multiuser' grouping='require_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/milestone/multi-user:default' />
+    </dependency>
+
+    <!-- create a dependency on the ironic-db service ensuring the database
+         is created/synced for all other services. -->
+    <dependency name='ironic_db' grouping='optional_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/application/openstack/ironic/ironic-db'/>
+    </dependency>
+
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <dependency name='rabbitmq' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/application/rabbitmq:default'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
+    <exec_method timeout_seconds="60" type="method" name="start"
+      exec="/lib/svc/method/ironic-conductor %m %{config/config_path}">
+      <method_context>
+        <method_credential user='ironic' group='ironic' />
+      </method_context>
+    </exec_method>
+    <exec_method timeout_seconds="60" type="method" name="stop"
+      exec=":kill"/>
+
+    <instance name='default' enabled='false'>
+      <!-- to start/stop/refresh the service -->
+      <property_group name='general' type='framework'>
+        <propval name='action_authorization' type='astring'
+                 value='solaris.smf.manage.ironic' />
+        <propval name='value_authorization' type='astring'
+                 value='solaris.smf.value.ironic' />
+      </property_group>
+      <property_group name='config' type='application'>
+        <propval name='config_path' type='astring'
+          value='/etc/ironic/ironic.conf'/>
+      </property_group>
+    </instance>
+
+    <template>
+      <common_name>
+        <loctext xml:lang="C">
+          OpenStack Ironic Conductor Service
+        </loctext>
+      </common_name>
+      <description>
+        <loctext xml:lang="C">
+          ironic-conductor is a server daemon that provides the Ironic
+          Conductor service, enabling bare metal deployment for
+          OpenStack.
+        </loctext>
+      </description>
+    </template>
+  </service>
+</service_bundle>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-db	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,61 @@
+#!/usr/bin/python2.6
+
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+from subprocess import Popen, PIPE
+import sys
+
+import smf_include
+
+
+def start():
+    ironic_conf = sys.argv[2]
+
+    # verify config path is valid
+    if not os.path.exists(ironic_conf) or not os.access(ironic_conf, os.R_OK):
+        print >> sys.stderr, '%s does not exist or is not readable' % \
+            ironic_conf
+        return smf_include.SMF_EXIT_ERR_CONFIG
+
+    # Create/Sync Ironic database
+    cmd = ["/usr/bin/ironic-dbsync", "--config-file", ironic_conf,
+           "create_schema"]
+    proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
+    _out, error = proc.communicate()
+
+    if proc.returncode != 0:
+        if "DbMigrationError" in error:
+            # Attempted to create already existing database
+            # Attempt to upgrade instead
+
+            cmd = ["/usr/bin/ironic-dbsync", "--config-file", ironic_conf,
+                   "upgrade"]
+            proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
+            _out, error = proc.communicate()
+            if proc.returncode != 0:
+                print >> sys.stderr, \
+                    'Error executing ironic-dbsync upgrade: %s' % error
+                return smf_include.SMF_EXIT_ERR_FATAL
+        else:
+            print >> sys.stderr, \
+                'Error executing ironic-dbsync create_schema: %s' % error
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    return smf_include.SMF_EXIT_OK
+
+if __name__ == "__main__":
+    os.putenv("LC_ALL", "C")
+    smf_include.smf_main()
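
The create-then-upgrade fallback above can also be driven by hand; a compact
sketch of the same flow:

    # Sketch: replicate the dbsync fallback used by this method script.
    from subprocess import PIPE, Popen

    def dbsync(command, conf='/etc/ironic/ironic.conf'):
        proc = Popen(['/usr/bin/ironic-dbsync', '--config-file', conf,
                      command], stdout=PIPE, stderr=PIPE)
        _out, err = proc.communicate()
        return proc.returncode, err

    rc, err = dbsync('create_schema')
    if rc != 0 and 'DbMigrationError' in err:
        # schema already exists; bring it up to date instead
        rc, err = dbsync('upgrade')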
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-db.xml	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,86 @@
+<?xml version="1.0" ?>
+<!DOCTYPE service_bundle SYSTEM '/usr/share/lib/xml/dtd/service_bundle.dtd.1'>
+<!--
+ Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+ NOTE:  This service manifest is not editable; its contents will
+ be overwritten by package or patch operations, including
+ operating system upgrade.  Make customizations in a different
+ file.
+-->
+<service_bundle type="manifest" name="ironic-db">
+
+  <service version="1" type="service"
+    name="application/openstack/ironic/ironic-db">
+
+    <dependency name='multiuser' grouping='require_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/milestone/multi-user:default' />
+    </dependency>
+
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <dependency name='mysql' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/application/database/mysql'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
+    <exec_method timeout_seconds="60" type="method" name="start"
+      exec="/lib/svc/method/ironic-db %m %{config/config_path}">
+      <method_context>
+        <method_credential user='ironic' group='ironic' />
+      </method_context>
+    </exec_method>
+    <exec_method timeout_seconds="60" type="method" name="stop"
+      exec=":true"/>
+
+    <property_group type="framework" name="startd">
+      <propval type="astring" name="duration" value="transient"/>
+    </property_group>
+
+    <instance name='default' enabled='false'>
+      <!-- to start/stop/refresh the service -->
+      <property_group name='general' type='framework'>
+        <propval name='action_authorization' type='astring'
+                 value='solaris.smf.manage.ironic' />
+        <propval name='value_authorization' type='astring'
+                 value='solaris.smf.value.ironic' />
+      </property_group>
+      <property_group name='config' type='application'>
+        <propval name='config_path' type='astring'
+          value='/etc/ironic/ironic.conf'/>
+      </property_group>
+    </instance>
+
+    <template>
+      <common_name>
+        <loctext xml:lang="C">
+          OpenStack Ironic Database Creation Service
+        </loctext>
+      </common_name>
+      <description>
+        <loctext xml:lang="C">
+          ironic-db is a transient service to create the Ironic database if
+          required.
+        </loctext>
+      </description>
+    </template>
+  </service>
+</service_bundle>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-keystone-setup.sh	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+
+# Copyright 2013 OpenStack Foundation
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Sample initial data for Keystone ironic setup using python-keystoneclient
+#
+# Creates ironic user and user-role, then creates ironic keystone service.
+# Finally creates the keystone endpoint if required.
+#
+# If any keystone components already exist, they are removed before being
+# recreated.
+#
+# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment
+# variable. Use this with the Catalog Templated backend.
+#
+# Tenant               User      Roles
+# -------------------------------------------------------
+# service              ironic    admin
+
+# By default, passwords used are those in the OpenStack Install and Deploy
+# Manual. One can override these (publicly known, and hence, insecure) passwords
+# by setting the appropriate environment variables. A common default password
+# can be set via the "SERVICE_PASSWORD" environment variable.
+
+PATH=/usr/bin
+
+IRONIC_PASSWORD=${IRONIC_PASSWORD:-${SERVICE_PASSWORD:-ironic}}
+
+CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost}
+CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost}
+CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost}
+
+IRONIC_PUBLIC_ADDRESS=${IRONIC_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
+IRONIC_ADMIN_ADDRESS=${IRONIC_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
+IRONIC_INTERNAL_ADDRESS=${IRONIC_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
+
+export OS_AUTH_URL="http://localhost:5000/v2.0"
+export OS_USERNAME="admin"
+export OS_PASSWORD="secrete"
+export OS_TENANT_NAME="demo"
+
+function get_id () {
+    echo `"$@" | grep ' id ' | awk '{print $4}'`
+}
+
+function get_role_id () {
+    echo `"$@" | grep ' admin ' | awk '{print $2}'`
+}
+
+function get_endpoint_id () {
+    echo `"$@" | grep $KEYSTONE_SERVICE | awk '{print $2}'`
+}
+
+#
+# Service tenant
+#
+SERVICE_TENANT=$(get_id keystone tenant-get service)
+
+#
+# Admin Role
+#
+ADMIN_ROLE=$(get_role_id keystone user-role-list)
+
+
+#
+# Ironic User
+#
+IRONIC_USER=$(get_id keystone user-get ironic 2> /dev/null)
+if ! [[ -z "$IRONIC_USER" ]]; then
+  keystone user-role-remove --user=ironic \
+                            --role=admin \
+                            --tenant=service
+  keystone user-delete ironic
+fi
+IRONIC_USER=$(get_id keystone user-create --name=ironic \
+                                          --pass="${IRONIC_PASSWORD}")
+keystone user-role-add --user-id $IRONIC_USER \
+                       --role-id $ADMIN_ROLE \
+                       --tenant-id $SERVICE_TENANT
+
+#
+# Ironic service
+#
+KEYSTONE_SERVICE=$(get_id keystone service-get ironic 2> /dev/null)
+if ! [[ -z "$KEYSTONE_SERVICE" ]]; then
+  KEYSTONE_ENDPOINT=$(get_endpoint_id keystone endpoint-list)
+  keystone endpoint-delete $KEYSTONE_ENDPOINT
+  keystone service-delete ironic
+fi
+
+KEYSTONE_SERVICE=$(get_id \
+keystone service-create --name=ironic \
+                        --type=baremetal \
+                        --description="Ironic Bare Metal Provisioning Service")
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+    keystone endpoint-create --region RegionOne --service-id $KEYSTONE_SERVICE \
+        --publicurl "http://$IRONIC_PUBLIC_ADDRESS:6385" \
+        --adminurl "http://$IRONIC_ADMIN_ADDRESS:6385" \
+        --internalurl "http://$IRONIC_INTERNAL_ADDRESS:6385"
+fi
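
The same setup can be scripted against the python-keystoneclient v2.0 API
that the CLI above wraps; a hedged sketch, with credentials and URLs
mirroring the (insecure) defaults in this script:

    # Sketch: create the ironic service and endpoint via keystoneclient.
    # Credentials/URLs mirror the script defaults and are not secure.
    from keystoneclient.v2_0 import client

    keystone = client.Client(username='admin', password='secrete',
                             tenant_name='demo',
                             auth_url='http://localhost:5000/v2.0')
    service = keystone.services.create(
        name='ironic', service_type='baremetal',
        description='Ironic Bare Metal Provisioning Service')
    keystone.endpoints.create(region='RegionOne', service_id=service.id,
                              publicurl='http://localhost:6385',
                              adminurl='http://localhost:6385',
                              internalurl='http://localhost:6385')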
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic-manifest.ksh	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,459 @@
+#!/bin/ksh93
+
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
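
These [ai] options parameterize the driver's SSH connection to the AI server
(paramiko enters Userland in this same changeset). A rough sketch of one
plausible mapping onto a paramiko connection; the mapping and the host/user
values are assumptions, not lifted from the driver:

    # Sketch: assumed mapping of [ai] options to a paramiko connection.
    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('ai-server.example.com',   # server (placeholder)
                port=22,                   # port
                username='aiuser',         # username (placeholder)
                password=None,             # password (or key passphrase)
                key_filename=None,         # ssh_key_file
                timeout=10)                # timeout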
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# The default AI manifest is a Derived Manifest (DM) script. The script
+# creates a temp file from XML, loads that file in as the manifest, then
+# replaces the source with the specific archive specified for this client.
+
+SCRIPT_SUCCESS=0
+SOLPKG="entire"
+DEFAULT_SERVER_PKG="pkg:/group/system/solaris-large-server"
+TMPFILE=`/usr/bin/mktemp /tmp/default.xml.XXXXXX`
+
+#
+# create_xml_file
+# Create xml tmp file from here document. The contents of the
+# here document are inserted during installadm create-service.
+#
+function create_xml_file
+{
+  # Create xml tmp file using this AI Clients default.xml as a base
+  /usr/bin/cat /usr/share/auto_install/default.xml > $TMPFILE
+}
+
+#
+# error_handler
+# Error handling function
+#
+function error_handler
+{
+  exit $?
+}
+
+#
+# load_xml
+# Load the default manifest from previously created tmp file
+#
+function load_xml
+{
+  # load the default manifest
+  trap error_handler ERR
+  /usr/bin/aimanifest load $TMPFILE
+  trap - ERR
+}
+
+#
+# validate_entire
+# Update the manifest entry of the Solaris consolidation package
+# so that the build of Solaris being installed is the same as
+# that running on the client.
+# Remove all packages other than the Solaris consolidation package.
+#
+function validate_entire
+{
+  # Override SI_SYSPKG using exact version from the client
+  SI_SYSPKG=$(/usr/bin/pkg info ${SOLPKG} | /usr/bin/nawk '/FMRI:/{print $2}')
+  if [ -z "${SI_SYSPKG}" ]; then
+    echo "'${SOLPKG}' package not found on system"
+    echo "Unable to constrain Solaris version being installed."
+    SI_SYSPKG="pkg:/$SOLPKG"
+  fi
+
+  # Get IPS software_data path
+  software_data=$(/usr/bin/aimanifest get -r \
+    /auto_install/ai_instance/software[@type="IPS"]/software_data[@action="install"] \
+    2> /dev/null)
+  software_data_path=($(echo ${software_data} | /usr/bin/nawk '{print $2}'))
+
+  # Clear out packages adding back just $SOLPKG
+  echo "Removing all packages"
+  /usr/bin/aimanifest delete ${software_data_path}/name
+  echo "Adding Solaris consolidation package ${SI_SYSPKG}"
+  /usr/bin/aimanifest add ${software_data_path}/name ${SI_SYSPKG}
+}
+
+#
+# Validate that one of the solaris-small-server or solaris-large-server
+# packages is present in the manifest, adding the default
+# solaris-large-server if neither is present.
+function validate_server_pkgs
+{
+  # Get list of pkgs to install from the manifest
+  pkgs=$(/usr/bin/aimanifest get -r \
+    /auto_install/ai_instance/software[@type="IPS"]/software_data[@action="install"]/name \
+    2> /dev/null)
+  # array will be formatted as:  <pkg>  <aimanifest path>...
+  array=($(echo ${pkgs} | /usr/bin/nawk 'BEGIN{FS="\n"} {print $NF}'))
+
+  idx=0
+  pkg_found=0
+  while [ $idx -lt ${#array[@]} ]; do
+    pkgname=${array[$idx]}
+    path=${array[$idx+1]}
+
+    # check if pkgname is large|small server
+    echo $pkgname | /usr/bin/egrep -s "solaris-(large|small)-server"
+    if [ $? -eq 0 ]; then
+      pkg_found=1
+      break
+    fi
+    # look at next pkg
+    (( idx=idx+2 ))
+  done
+
+  if [ $pkg_found -eq 0 ]; then
+    # Add solaris-large-server
+    # Get IPS software_data path
+    software_data=$(/usr/bin/aimanifest get -r \
+      /auto_install/ai_instance/software[@type="IPS"]/software_data[@action="install"] \
+      2> /dev/null)
+    software_data_path=($(echo ${software_data} | /usr/bin/nawk '{print $2}'))
+
+    echo "Adding default server package ${DEFAULT_SERVER_PKG}"
+    /usr/bin/aimanifest add ${software_data_path}/name ${DEFAULT_SERVER_PKG}
+  fi
+}
+
+#
+# add_publishers
+# Add publishers to the software source section
+#
+function add_publishers
+{
+  publishers=$1
+
+  # publishers is a plus(+) delimited list; split it into an array
+  OLDIFS=$IFS
+  IFS="+"
+  set -A pub_list ${publishers}
+  IFS=$OLDIFS
+
+  # Get IPS software paths
+  software=$(/usr/bin/aimanifest get -r \
+    /auto_install/ai_instance/software[@type="IPS"] \
+    2> /dev/null)
+  software_path=($(echo ${software} | /usr/bin/nawk '{print $2}'))
+
+  software_source=$(/usr/bin/aimanifest get -r \
+    ${software_path}/source 2> /dev/null)
+  software_source_path=($(echo ${software_source} | /usr/bin/nawk '{print $2}'))
+
+  software_publishers=$(/usr/bin/aimanifest get -r \
+    ${software_source_path}/publisher 2> /dev/null)
+
+  # Save list existing publisher names and associated origins
+  pub_array=($(echo ${software_publishers} | /usr/bin/nawk \
+    'BEGIN{FS="\n"} {print $NF}'))
+  pub_idx=0
+  idx1=0
+  while [ $idx1 -lt ${#pub_array[@]} ]; do
+    pub=${pub_array[$idx1+1]}
+    pub_name=$(/usr/bin/aimanifest get ${pub}@name 2> /dev/null)
+    origins=$(/usr/bin/aimanifest get -r ${pub}/origin 2> /dev/null)
+
+    publisher=${pub_name}
+
+    idx2=0
+    origin_array=($(echo ${origins} | /usr/bin/nawk \
+      'BEGIN{FS="\n"} {print $NF}'))
+    while [ $idx2 -lt ${#origin_array[@]} ]; do
+      origin=${origin_array[$idx2+1]}
+      origin_name=$(/usr/bin/aimanifest get ${origin}@name 2> /dev/null)
+      publisher="${publisher}+${origin_name}"
+      (( idx2=idx2+2 ))
+    done
+
+    saved_pubs[$pub_idx]=${publisher}
+    (( pub_idx=pub_idx+1 ))
+    (( idx1=idx1+2 ))
+  done
+
+  if [[ -n ${software_source_path} ]]; then
+    # Delete source and start afresh
+    /usr/bin/aimanifest delete ${software_source_path}
+  fi
+
+  # Cycle through each custom publisher, adding in custom ordering.
+  # If the publisher already existed, add the custom origin before the
+  # original origins.
+  idx1=0
+  software_source_path=
+  while [ $idx1 -lt ${#pub_list[@]} ]; do
+    echo ${pub_list[$idx1]} | IFS='@' read publisher_name publisher_origin
+
+    # Add specified publisher
+    echo "Adding custom publisher ${publisher_name} @ ${publisher_origin}"
+    if [ -z "${software_source_path}" ]; then
+      publisher_path=$(/usr/bin/aimanifest add -r \
+        ${software_path}/source/publisher@name ${publisher_name} 2> /dev/null)
+      /usr/bin/aimanifest add ${publisher_path}/origin@name ${publisher_origin}
+
+      software_source=$(/usr/bin/aimanifest get -r \
+        ${software_path}/source 2> /dev/null)
+      software_source_path=($(echo ${software_source} | /usr/bin/nawk \
+        '{print $2}'))
+    else
+      publisher_path=$(/usr/bin/aimanifest add -r \
+        ${software_source_path}/publisher@name ${publisher_name} 2> /dev/null)
+      /usr/bin/aimanifest add ${publisher_path}/origin@name ${publisher_origin}
+    fi
+
+    # Check if this publisher is already in the manifest and add back its
+    # original origins
+    idx2=0
+    while [ $idx2 -lt ${#saved_pubs[@]} ]; do
+      saved_publisher=${saved_pubs[$idx2]}
+
+      # saved_publisher is a plus(+) delimited list; the first item is the
+      # publisher name and the remaining items are origins. Split it into
+      # an array.
+      OLDIFS=$IFS
+      IFS="+"
+      set -A saved_pub_details ${saved_publisher}
+      IFS=$OLDIFS
+
+      pub_name=${saved_pub_details[0]}
+
+      if [ ${pub_name} == ${publisher_name} ]; then
+        # Add all saved origins for this publisher
+        idx3=1
+        while [ $idx3 -lt ${#saved_pub_details[@]} ]; do
+          saved_origin=${saved_pub_details[idx3]}
+          echo "Adding extra saved origin ${saved_origin}"
+          /usr/bin/aimanifest add ${publisher_path}/origin@name ${saved_origin}
+          (( idx3=idx3+1 ))
+        done
+        saved_pubs[idx2]="PROCESSED"
+        break
+      else
+        # look at next publisher
+        (( idx2=idx2+1 ))
+        continue
+      fi
+    done
+
+    (( idx1=idx1+1 ))
+  done
+
+  # Cycle through saved publishers adding back unprocessed ones
+  idx1=0
+  while [ $idx1 -lt ${#saved_pubs[@]} ]; do
+    saved_publisher=${saved_pubs[$idx1]}
+
+    OLDIFS=$IFS
+    IFS="+"
+    set -A saved_pub_details ${saved_publisher}
+    IFS=$OLDIFS
+
+    if [ ${saved_pub_details[0]} != "PROCESSED" ]; then
+      # Add specified publisher
+      echo "Adding saved publisher ${saved_pub_details[0]}"
+      publisher_path=$(/usr/bin/aimanifest add -r \
+        ${software_source_path}/publisher@name ${saved_pub_details[0]} \
+        2> /dev/null)
+
+      # Add all saved origins for this publisher
+      idx2=1
+      while [ $idx2 -lt ${#saved_pub_details[@]} ]; do
+          saved_origin=${saved_pub_details[idx2]}
+          echo "Adding saved origin ${saved_origin}"
+          /usr/bin/aimanifest add ${publisher_path}/origin@name ${saved_origin}
+          (( idx2=idx2+1 ))
+      done
+    fi
+    (( idx1=idx1+1 ))
+  done
+}
+
+#
+# add_fmri
+# Add specific fmri to be installed to software_data section
+#
+function add_fmri
+{
+  fmri=$1
+
+  # fmri is a plus(+) delimited list; split it into an array
+  OLDIFS=$IFS
+  IFS="+"
+  set -A pkg_list ${fmri}
+  IFS=$OLDIFS
+
+  # Get IPS software_data path
+  software_data=$(/usr/bin/aimanifest get -r \
+    /auto_install/ai_instance/software[@type="IPS"]/software_data[@action="install"] \
+    2> /dev/null)
+
+  # Get software_data path
+  software_data_path=($(echo ${software_data} | /usr/bin/nawk '{print $2}'))
+
+  # Get list of pkgs to install from the manifest
+  # array will be formatted as:  <pkg>  <aimanifest path>...
+  pkgs=($(/usr/bin/aimanifest get -r ${software_data_path}/name 2> /dev/null | /usr/bin/tr '\n' ' '))
+  set -A array ${pkgs}
+
+  # Cycle through each fmri package and add if not in manifest
+  idx1=0
+  while [ $idx1 -lt ${#pkg_list[@]} ]; do
+    new_pkg=${pkg_list[$idx1]}
+
+    found=0
+    idx2=0
+    while [ $idx2 -lt ${#array[@]} ]; do
+      pkgname=${array[$idx2]}
+      path=${array[$idx2+1]}
+      echo ${new_pkg} | /usr/bin/egrep -s ${pkgname}
+      if [ $? -ne 0 ]; then
+        # Not found; try comparing the next package
+        (( idx2=idx2+2 ))
+        continue
+      else
+        # Package already in manifest; can break out of this loop
+        echo "Package ${new_pkg} already present in manifest."
+        found=1
+        break
+      fi
+    done
+
+    # Package not found, so add to manifest
+    if [ ${found} -eq 0 ]; then
+      echo "Adding package ${new_pkg} to manifest."
+      /usr/bin/aimanifest add ${software_data_path}/name ${new_pkg}
+    fi
+
+    (( idx1=idx1+1 ))
+  done
+}
+
+########################################
+# main
+########################################
+# Create xml tmp file, then use aimanifest(1M) to load the
+# file and update the Solaris version to install.
+if [ -z "$TMPFILE" ]; then
+  echo "Error: Unable to create temporary manifest file"
+  exit 1
+fi
+create_xml_file
+load_xml
+#
+# Process bootargs
+#
+if [[ ${SI_ARCH} = sparc ]]; then
+  ARCHIVE_URI=$(/usr/sbin/prtconf -vp | /usr/bin/nawk \
+    '/bootargs.*archive_uri=/{n=split($0,a,"archive_uri=");split(a[2],b);split(b[1],c,"'\''");print c[1]}')
+  AUTH_TOKEN=$(/usr/sbin/prtconf -vp | /usr/bin/nawk \
+    '/bootargs.*auth_token=/{n=split($0,a,"auth_token=");split(a[2],b);split(b[1],c,"'\''");print c[1]}')
+  PUBLISHERS=$(/usr/sbin/prtconf -vp | /usr/bin/nawk \
+    '/bootargs.*publishers=/{n=split($0,a,"publishers=");split(a[2],b);split(b[1],c,"'\''");print c[1]}')
+  FMRI=$(/usr/sbin/prtconf -vp | /usr/bin/nawk \
+    '/bootargs.*fmri=/{n=split($0,a,"fmri=");split(a[2],b);split(b[1],c,"'\''");print c[1]}')
+else
+  ARCHIVE_URI=$(/usr/sbin/devprop -s archive_uri)
+  AUTH_TOKEN=$(/usr/sbin/devprop -s auth_token)
+  PUBLISHERS=$(/usr/sbin/devprop -s publishers)
+  FMRI=$(/usr/sbin/devprop -s fmri)
+fi
+
+if [[ -n "$ARCHIVE_URI" ]]; then
+  # Save list of default facets for re-insertion if required
+  if [[ -n "$PUBLISHERS" || -n "$FMRI" ]]; then
+    # Get IPS software paths
+    software=$(/usr/bin/aimanifest get -r \
+      /auto_install/ai_instance/software[@type="IPS"] \
+      2> /dev/null)
+    software_path=($(echo ${software} | /usr/bin/nawk '{print $2}'))
+
+    facets=$(/usr/bin/aimanifest get -r \
+      ${software_path}/destination/image/facet 2> /dev/null)
+    facet_array=($(echo ${facets} | /usr/bin/nawk \
+      'BEGIN{FS="\n"} {print $NF}'))
+
+    facet_idx=0
+    idx1=0
+    while [ $idx1 -lt ${#facet_array[@]} ]; do
+      facet=${facet_array[$idx1]}
+      facet_path=${facet_array[$idx1+1]}
+      facet_set=$(/usr/bin/aimanifest get ${facet_path}@set 2> /dev/null)
+      saved_facets[$facet_idx]="${facet_set}+${facet}"
+      (( facet_idx=facet_idx+1 ))
+      (( idx1=idx1+2 ))
+    done
+  fi
+
+  # Replace package software section with archive
+  /usr/bin/aimanifest delete software
+  swpath=$(/usr/bin/aimanifest add -r /auto_install/ai_instance/software@type ARCHIVE)
+  file=$(/usr/bin/aimanifest add -r $swpath/source/file@uri $ARCHIVE_URI)
+  if [[ -n "$AUTH_TOKEN" ]]; then
+    /usr/bin/aimanifest add $file/credentials/http_auth_token $AUTH_TOKEN
+  fi
+  inspath=$(/usr/bin/aimanifest add -r $swpath/software_data@action install)
+  /usr/bin/aimanifest add $inspath/name global
+fi
+
+if [[ -n "$PUBLISHERS" || -n "$FMRI" ]]; then
+  if [[ -n "$ARCHIVE_URI" ]]; then
+    # All software was removed, so add back stub IPS software
+    swpath=$(/usr/bin/aimanifest add -r /auto_install/ai_instance/software@type IPS)
+    swdatapath=$(/usr/bin/aimanifest add -r $swpath/software_data@action install)
+
+    # Add back list of default facets
+    echo "Adding back facets"
+    idx1=0
+    while [ $idx1 -lt ${#saved_facets[@]} ]; do
+      saved_facet=${saved_facets[$idx1]}
+
+      # saved_facet is a plus(+) delimited two-value string; the first
+      # item is the set value and the 2nd is the actual facet
+      OLDIFS=$IFS
+      IFS="+"
+      set -A saved_facet_details ${saved_facet}
+      IFS=$OLDIFS
+
+      facet_set=${saved_facet_details[0]}
+      facet=${saved_facet_details[1]}
+
+      facet_path=$(/usr/bin/aimanifest add -r ${swpath}/destination/image/facet ${facet})
+      /usr/bin/aimanifest set ${facet_path}@set ${facet_set}
+      (( idx1=idx1+1 ))
+    done
+  else
+    # Ensure the version of $SOLPKG is correct, and remove all packages
+    # other than $SOLPKG
+    validate_entire
+  fi
+
+  if [[ -n "$PUBLISHERS" ]]; then
+    # Add specific publishers to software source
+    add_publishers $PUBLISHERS
+  fi
+
+  if [[ -n "$FMRI" ]]; then
+    # Add specific FMRI to package set to install
+    add_fmri $FMRI
+  fi
+
+  # Minimum package set is entire plus one of solaris-small-server or
+  # solaris-large-server.
+  # Ensure one of the server packages is present in the manifest.
+  if [[ -z "$ARCHIVE_URI" ]]; then
+    validate_server_pkgs
+  fi
+fi
+
+# Ensure the manifest sets auto_reboot
+/usr/bin/aimanifest set -r /auto_install/ai_instance@auto_reboot true
+
+exit $SCRIPT_SUCCESS
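
The publishers boot argument consumed above packs "name@origin" pairs into
one plus-delimited string. The equivalent split in Python, with an
illustrative value:

    # Sketch: split the plus(+) delimited publishers bootarg, as
    # add_publishers does above. The example value is illustrative.
    publishers = ('solaris@http://pkg.example.com/solaris'
                  '+extra@http://pkg.example.com/extra')

    for entry in publishers.split('+'):
        name, origin = entry.split('@', 1)
        print name, '=>', origin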
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic.auth_attr	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,2 @@
+solaris.smf.manage.ironic:RO::Manage OpenStack Ironic Service States::
+solaris.smf.value.ironic:RO::Change Values of OpenStack Ironic Properties::
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic.conf	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,1308 @@
+[DEFAULT]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use.  Version 1 is what was
+# originally used by impl_qpid.  Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ironic
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in ironic.netconf
+#
+
+# IP address of this host. (string value)
+#my_ip=10.0.0.1
+
+# Use IPv6. (boolean value)
+#use_ipv6=false
+
+
+#
+# Options defined in ironic.api.app
+#
+
+# Method to use for authentication: noauth or keystone.
+# (string value)
+#auth_strategy=keystone
+
+
+#
+# Options defined in ironic.common.driver_factory
+#
+
+# Specify the list of drivers to load during service
+# initialization. Missing drivers, or drivers which fail to
+# initialize, will prevent the conductor service from
+# starting. The option default is a recommended set of
+# production-oriented drivers. A complete list of drivers
+# present on your system may be found by enumerating the
+# "ironic.drivers" entrypoint. An example may be found in the
+# developer documentation online. (list value)
+enabled_drivers=solaris
+
+
+#
+# Options defined in ironic.common.exception
+#
+
+# Make exception message format errors fatal. (boolean value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in ironic.common.hash_ring
+#
+
+# Exponent to determine number of hash partitions to use when
+# distributing load across conductors. Larger values will
+# result in more even distribution of load and less load when
+# rebalancing the ring, but more memory usage. Number of
+# partitions per conductor is (2^hash_partition_exponent).
+# This determines the granularity of rebalancing: given 10
+# hosts, and an exponent of 2, there are 40 partitions in
+# the ring. A few thousand partitions should make rebalancing
+# smooth in most cases. The default is suitable for up to a
+# few hundred conductors. Too many partitions have a CPU
+# impact. (integer value)
+#hash_partition_exponent=5
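
Spelled out, the arithmetic in the comment above:

    # Partitions per conductor is 2^hash_partition_exponent; the ring
    # total scales with the number of conductors (values from the text).
    hosts = 10
    hash_partition_exponent = 2
    per_conductor = 2 ** hash_partition_exponent   # 4
    total_partitions = hosts * per_conductor       # 40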
+
+# [Experimental Feature] Number of hosts to map onto each hash
+# partition. Setting this to more than one will cause
+# additional conductor services to prepare deployment
+# environments and potentially allow the Ironic cluster to
+# recover more quickly if a conductor instance is terminated.
+# (integer value)
+#hash_distribution_replicas=1
+
+
+#
+# Options defined in ironic.common.images
+#
+
+# Force backing images to raw format. (boolean value)
+#force_raw_images=true
+
+# Path to isolinux binary file. (string value)
+#isolinux_bin=/usr/lib/syslinux/isolinux.bin
+
+# Template file for isolinux configuration file. (string
+# value)
+#isolinux_config_template=$pybasedir/common/isolinux_config.template
+
+
+#
+# Options defined in ironic.common.paths
+#
+
+# Directory where the ironic python module is installed.
+# (string value)
+pybasedir=/usr/lib/python2.7/vendor-packages/ironic
+
+# Directory where ironic binaries are installed. (string
+# value)
+bindir=/usr/lib/ironic
+
+# Top-level directory for maintaining ironic's state. (string
+# value)
+state_path=/var/lib/ironic
+
+
+#
+# Options defined in ironic.common.policy
+#
+
+# JSON file representing policy. (string value)
+#policy_file=policy.json
+
+# Rule checked when requested rule is not found. (string
+# value)
+#policy_default_rule=default
+
+
+#
+# Options defined in ironic.common.service
+#
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval=60
+
+# Name of this node.  This can be an opaque identifier.  It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address. (string
+# value)
+#host=ironic
+
+
+#
+# Options defined in ironic.common.utils
+#
+
+# Path to the rootwrap configuration file to use for running
+# commands as root. (string value)
+#rootwrap_config=/etc/ironic/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string
+# value)
+#tempdir=<None>
+
+
+#
+# Options defined in ironic.drivers.modules.image_cache
+#
+
+# Run image downloads and raw format conversions in parallel.
+# (boolean value)
+#parallel_image_downloads=false
+
+
+#
+# Options defined in ironic.drivers.modules.solaris_ipmitool
+#
+
+# Method to use for authentication: noauth or keystone.
+# (string value)
+#auth_strategy=keystone
+
+
+#
+# Options defined in ironic.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers.  The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in ironic.openstack.common.lockutils
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in ironic.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated.  Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in ironic.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+[agent]
+
+#
+# Options defined in ironic.drivers.modules.agent
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#agent_pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration. (string value)
+#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template
+
+# Neutron bootfile DHCP parameter. (string value)
+#agent_pxe_bootfile_name=pxelinux.0
+
+# Maximum interval (in seconds) for agent heartbeats. (integer
+# value)
+#heartbeat_timeout=300
+
+
+#
+# Options defined in ironic.drivers.modules.agent_client
+#
+
+# API version to use for communicating with the ramdisk agent.
+# (string value)
+#agent_api_version=v1
+
+
+[ai]
+
+#
+# Options defined in ironic.drivers.modules.solaris_ipmitool
+#
+
+# Host name for AI Server. (string value)
+server=%AI_SERVER%
+
+# Username to ssh to AI Server. (string value)
+username=%AI_USERNAME%
+
+# Password for user to ssh to AI Server. (string value)
+# If ssh_key_file or ssh_key_contents are set, this config setting is
+# used to provide the passphrase if required. If an encrypted key is
+# used, set this to the passphrase.
+#password=<None>
+
+# SSH port to use. (string value)
+#port=22
+
+# SSH socket timeout value in seconds. (string value)
+#timeout=10
+
+# Interval in seconds to check AI deployment status. (string
+# value)
+#deploy_interval=10
+
+# Derived Manifest used for deployment. (string value)
+#derived_manifest=file:///usr/lib/ironic/ironic-manifest.ksh
+
+# SSH key filename to use. (string value)
+#ssh_key_file=<None>
+
+# Actual SSH Key contents to use. (string value)
+#ssh_key_contents=<None>
+
+
+[api]
+
+#
+# Options defined in ironic.api
+#
+
+# The listen IP for the Ironic API server. (string value)
+#host_ip=0.0.0.0
+
+# The port for the Ironic API server. (integer value)
+#port=6385
+
+# The maximum number of items returned in a single response
+# from a collection resource. (integer value)
+#max_limit=1000
+
+
+[conductor]
+
+#
+# Options defined in ironic.conductor.manager
+#
+
+# URL of Ironic API service. If not set ironic can get the
+# current value from the keystone service catalog. (string
+# value)
+#api_url=<None>
+
+# Seconds between conductor heart beats. (integer value)
+#heartbeat_interval=10
+
+# Maximum time (in seconds) since the last check-in of a
+# conductor. (integer value)
+#heartbeat_timeout=60
+
+# Interval between syncing the node power state to the
+# database, in seconds. (integer value)
+#sync_power_state_interval=60
+
+# Interval between checks of provision timeouts, in seconds.
+# (integer value)
+#check_provision_state_interval=60
+
+# Timeout (seconds) for waiting callback from deploy ramdisk.
+# 0 - unlimited. (integer value)
+#deploy_callback_timeout=1800
+
+# During sync_power_state, should the hardware power state be
+# set to the state recorded in the database (True) or should
+# the database be updated based on the hardware state (False).
+# (boolean value)
+#force_power_state_during_sync=true
+
+# During sync_power_state failures, limit the number of times
+# Ironic should try syncing the hardware node power state with
+# the node power state in DB (integer value)
+#power_state_sync_max_retries=3
+
+# Maximum number of worker threads that can be started
+# simultaneously by a periodic task. Should be less than RPC
+# thread pool size. (integer value)
+#periodic_max_workers=8
+
+# The size of the workers greenthread pool. (integer value)
+#workers_pool_size=100
+
+# Number of attempts to grab a node lock. (integer value)
+#node_locked_retry_attempts=3
+
+# Seconds to sleep between node lock attempts. (integer value)
+#node_locked_retry_interval=1
+
+# Enable sending sensor data message via the notification bus
+# (boolean value)
+#send_sensor_data=false
+
+# Seconds between conductor sending sensor data message to
+# ceilometer via the notification bus. (integer value)
+#send_sensor_data_interval=600
+
+# List of comma separated metric types which need to be sent
+# to Ceilometer. The default value, "ALL", is a special value
+# meaning send all the sensor data. (list value)
+#send_sensor_data_types=ALL
+
+# When conductors join or leave the cluster, existing
+# conductors may need to update any persistent local state as
+# nodes are moved around the cluster. This option controls how
+# often, in seconds, each conductor will check for nodes that
+# it should "take over". Set it to a negative value to disable
+# the check entirely. (integer value)
+#sync_local_state_interval=180
+
+
+[console]
+
+#
+# Options defined in ironic.drivers.modules.console_utils
+#
+
+# Path to serial console terminal program (string value)
+#terminal=shellinaboxd
+
+# Directory containing the terminal SSL cert(PEM) for serial
+# console access (string value)
+#terminal_cert_dir=<None>
+
+# Directory for holding terminal pid files. If not specified,
+# the temporary directory will be used. (string value)
+#terminal_pid_dir=<None>
+
+# Time interval (in seconds) for checking the status of
+# console subprocess. (integer value)
+#subprocess_checking_interval=1
+
+# Time (in seconds) to wait for the console subprocess to
+# start. (integer value)
+#subprocess_timeout=10
+
+
+[database]
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=<None>
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between database connection retries. (integer value)
+#db_retry_interval=1
+
+# If True, increases the interval between database connection
+# retries up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# database connection retries. (integer value)
+#db_max_retry_interval=10
+
+# Maximum database connection retries before error is raised.
+# Set to -1 to specify an infinite retry count. (integer
+# value)
+#db_max_retries=20
+
+
+#
+# Options defined in ironic.db.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+[dhcp]
+
+#
+# Options defined in ironic.common.dhcp_factory
+#
+
+# DHCP provider to use. "neutron" uses Neutron, and "none"
+# uses a no-op provider. (string value)
+#dhcp_provider=neutron
+
+
+[disk_partitioner]
+
+#
+# Options defined in ironic.common.disk_partitioner
+#
+
+# After Ironic has completed creating the partition table, it
+# continues to check for activity on the attached iSCSI device
+# at this interval prior to copying the image to the node, in
+# seconds (integer value)
+#check_device_interval=1
+
+# The maximum number of times to check that the device is not
+# accessed by another process. If the device is still busy
+# after that, the disk partitioning will be treated as having
+# failed. (integer value)
+#check_device_max_retries=20
+
+
+[glance]
+
+#
+# Options defined in ironic.common.glance_service.v2.image_service
+#
+
+# A list of URL schemes that can be downloaded directly via
+# the direct_url.  Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+# The secret token given to Swift to allow temporary URL
+# downloads. Required for temporary URLs. (string value)
+#swift_temp_url_key=<None>
+
+# The length of time in seconds that the temporary URL will be
+# valid for. Defaults to 20 minutes. If some deploys get a 401
+# response code when trying to download from the temporary
+# URL, try raising this duration. (integer value)
+#swift_temp_url_duration=1200
+
+# The "endpoint" (scheme, hostname, optional port) for the
+# Swift URL of the form
+# "endpoint_url/api_version/account/container/object_id". Do
+# not include trailing "/". For example, use
+# "https://swift.example.com". Required for temporary URLs.
+# (string value)
+#swift_endpoint_url=<None>
+
+# The Swift API version to create a temporary URL for.
+# Defaults to "v1". Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_api_version=v1
+
+# The account that Glance uses to communicate with Swift. The
+# format is "AUTH_uuid". "uuid" is the UUID for the account
+# configured in the glance-api.conf. Required for temporary
+# URLs. For example:
+# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary
+# URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_account=<None>
+
+# The Swift container Glance is configured to store its images
+# in. Defaults to "glance", which is the default in glance-
+# api.conf. Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_container=glance
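+# Putting the options above together, a generated temporary URL
+# takes the form (hypothetical values):
+# https://swift.example.com/v1/AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30/
+# glance/<object_id>?temp_url_sig=<sig>&temp_url_expires=<timestamp>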
+
+
+#
+# Options defined in ironic.common.image_service
+#
+
+# Default glance hostname or IP address. (string value)
+#glance_host=$my_ip
+
+# Default glance port. (integer value)
+#glance_port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
+
+# A list of the glance api servers available to ironic. Prefix
+# with https:// for SSL-based glance API servers. Format is
+# [hostname|IP]:port. (string value)
+#glance_api_servers=<None>
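+# Example (hypothetical address): glance_api_servers=192.0.2.10:9292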
+
+# Allow insecure SSL (https) requests to glance. (boolean
+# value)
+#glance_api_insecure=false
+
+# Number of retries when downloading an image from glance.
+# (integer value)
+#glance_num_retries=0
+
+# Authentication strategy to use when connecting to glance.
+# (string value)
+#auth_strategy=keystone
+
+
+[ilo]
+
+#
+# Options defined in ironic.drivers.modules.ilo.common
+#
+
+# Timeout (in seconds) for iLO operations (integer value)
+#client_timeout=60
+
+# Port to be used for iLO operations (integer value)
+#client_port=443
+
+# The Swift iLO container to store data. (string value)
+#swift_ilo_container=ironic_ilo_container
+
+# Amount of time in seconds for Swift objects to auto-expire.
+# (integer value)
+#swift_object_expiry_timeout=900
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.power
+#
+
+# Number of times a power operation needs to be retried
+# (integer value)
+#power_retry=6
+
+# Amount of time in seconds to wait in between power
+# operations (integer value)
+#power_wait=2
+
+
+[ipmi]
+
+#
+# Options defined in ironic.drivers.modules.ipminative
+#
+
+# Maximum time in seconds to retry IPMI operations. (integer
+# value)
+#retry_timeout=60
+
+# Minimum time, in seconds, between IPMI operations sent to a
+# server. There is a risk with some hardware that setting this
+# too low may cause the BMC to crash. Recommended setting is 5
+# seconds. (integer value)
+#min_command_interval=5
+
+
+[keystone_authtoken]
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete public Identity API endpoint (string value)
+#auth_uri=<None>
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+#identity_uri=<None>
+
+# API version of the admin Identity API endpoint (string
+# value)
+#auth_version=<None>
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=<None>
+
+# How many times to reconnect when communicating with the
+# Identity API server. (integer value)
+#http_request_max_retries=3
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Keystone account username (string value)
+admin_user=%SERVICE_USER%
+
+# Keystone account password (string value)
+admin_password=%SERVICE_PASSWORD%
+
+# Keystone service account tenant name to validate user tokens
+# (string value)
+admin_tenant_name=%SERVICE_TENANT_NAME%
+
+# Env key for the swift cache (string value)
+#cache=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#certfile=<None>
+
+# Required if Keystone server requires client certificate
+# (string value)
+#keyfile=<None>
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=<None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens (string
+# value)
+signing_dir=$state_path/keystone-signing
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=<None>
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (optional) if defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT.  If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=<None>
+
+# (optional, mandatory if memcache_security_strategy is
+# defined) this string is used for key derivation. (string
+# value)
+#memcache_secret_key=<None>
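+# Example pairing (hypothetical key; set both options together):
+# memcache_security_strategy=ENCRYPT
+# memcache_secret_key=change-me-to-a-random-string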
+
+# (optional) number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (optional) max total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (optional) socket timeout in seconds for communicating with
+# a memcache server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (optional) number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (optional) number of seconds that an operation will wait to
+# get a memcache client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (optional) use the advanced (eventlet safe) memcache client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (optional) indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding; "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server, and to ignore it if not;
+# "strict", like "permissive" except that an unknown bind
+# type causes the token to be rejected; "required", meaning
+# some form of token binding is needed; or the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# Keystone server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
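+# Example while migrating away from md5 (hypothetical):
+# hash_algorithms=sha256,md5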
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[neutron]
+
+#
+# Options defined in ironic.dhcp.neutron
+#
+
+# URL for connecting to neutron. (string value)
+#url=http://$my_ip:9696
+
+# Timeout value for connecting to neutron in seconds. (integer
+# value)
+#url_timeout=30
+
+# Default authentication strategy to use when connecting to
+# neutron. Can be either "keystone" or "noauth". Running
+# neutron in noauth mode (related to but not affected by this
+# setting) is insecure and should only be used for testing.
+# (string value)
+#auth_strategy=keystone
+
+
+[pxe]
+
+#
+# Options defined in ironic.drivers.modules.iscsi_deploy
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Default file system format for ephemeral partition, if one
+# is created. (string value)
+#default_ephemeral_format=ext4
+
+# Directory where images are stored on disk. (string value)
+#images_path=/var/lib/ironic/images/
+
+# Directory where master instance images are stored on disk.
+# (string value)
+#instance_master_path=/var/lib/ironic/master_images
+
+# Maximum size (in MiB) of cache for master images, including
+# those in use. (integer value)
+#image_cache_size=20480
+
+# Maximum TTL (in minutes) for old master images in cache.
+# (integer value)
+#image_cache_ttl=10080
+
+# The disk devices to scan while doing the deploy. (string
+# value)
+#disk_devices=cciss/c0d0,sda,hda,vda
+
+
+#
+# Options defined in ironic.drivers.modules.pxe
+#
+
+# Template file for PXE configuration. (string value)
+#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template
+
+# Template file for PXE configuration for UEFI boot loader.
+# (string value)
+#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template
+
+# IP address of Ironic compute node's tftp server. (string
+# value)
+#tftp_server=$my_ip
+
+# Ironic compute node's tftp root path. (string value)
+#tftp_root=/tftpboot
+
+# Directory where master tftp images are stored on disk.
+# (string value)
+#tftp_master_path=/tftpboot/master_images
+
+# Bootfile DHCP parameter. (string value)
+#pxe_bootfile_name=pxelinux.0
+
+# Bootfile DHCP parameter for UEFI boot mode. (string value)
+#uefi_pxe_bootfile_name=elilo.efi
+
+# Ironic compute node's HTTP server URL. Example:
+# http://192.1.2.3:8080 (string value)
+#http_url=<None>
+
+# Ironic compute node's HTTP root path. (string value)
+#http_root=/httpboot
+
+# Enable iPXE boot. (boolean value)
+#ipxe_enabled=false
+
+# The path to the main iPXE script file. (string value)
+#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe
+
+
+[seamicro]
+
+#
+# Options defined in ironic.drivers.modules.seamicro
+#
+
+# Maximum retries for SeaMicro operations (integer value)
+#max_retry=3
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#action_timeout=10
+
+
+[snmp]
+
+#
+# Options defined in ironic.drivers.modules.snmp
+#
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#power_timeout=10
+
+
+[solaris_ipmi]
+
+#
+# Options defined in ironic.drivers.modules.solaris_ipmitool
+#
+
+# Default path to image cache. (string value)
+#imagecache_dirname=/var/lib/ironic/images
+
+# Timeout to wait when attempting to lock the refcount file.
+# (string value)
+#imagecache_lock_timeout=60
+
+
+[ssh]
+
+#
+# Options defined in ironic.drivers.modules.ssh
+#
+
+# libvirt uri (string value)
+#libvirt_uri=qemu:///system
+
+
+[swift]
+
+#
+# Options defined in ironic.common.swift
+#
+
+# Maximum number of times to retry a Swift request, before
+# failing. (integer value)
+#swift_max_retries=2
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic.exec_attr	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,6 @@
+OpenStack Bare Metal Provisioning Management:solaris:cmd:RO::\
+/usr/bin/ironic-dbsync:uid=ironic;gid=ironic
+
+ironic-conductor:solaris:cmd:RO::/usr/sbin/mount:privs=sys_mount
+
+ironic-conductor:solaris:cmd:RO::/usr/sbin/umount:privs=sys_mount
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic.prof_attr	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,14 @@
+OpenStack Bare Metal Provisioning Management:RO::\
+Manage OpenStack Ironic:\
+auths=solaris.admin.edit/etc/ironic/*,\
+solaris.smf.manage.ironic,\
+solaris.smf.value.ironic;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
+
+OpenStack Management:RO:::profiles=OpenStack Bare Metal Provisioning Management
+
+ironic-conductor:RO::\
+Do not assign to users. \
+Commands required for application/openstack/ironic/ironic-conductor:\
+auths=solaris.smf.manage.uvfs,\
+solaris.smf.modify
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/files/ironic.user_attr	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,4 @@
+ironic::RO::profiles=OpenStack Bare Metal Provisioning Management,\
+Install Service Management,\
+Service Configuration,\
+ironic-conductor
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/ironic.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,405 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/cloud/openstack/ironic@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="OpenStack Ironic"
+set name=pkg.description \
+    value="Provisioning of bare metal instances in OpenStack."
+set name=pkg.human-version value="Juno $(COMPONENT_VERSION)"
+set name=com.oracle.info.description \
+    value="Ironic, the OpenStack bare metal provisioning service"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value="org.opensolaris.category.2008:System/Administration and Configuration" \
+    value="org.opensolaris.category.2008:System/Enterprise Management" \
+    value=org.opensolaris.category.2008:System/Virtualization \
+    value="org.opensolaris.category.2008:Web Services/Application and Web Servers"
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="OpenStack <openstack-dev@lists.openstack.org>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=openstack.upgrade-id reboot-needed=true value=$(COMPONENT_BE_VERSION)
+set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2015/172
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+dir  path=etc/ironic owner=ironic group=ironic mode=0700
+file files/ironic.conf path=etc/ironic/ironic.conf owner=ironic group=ironic \
+    mode=0644 overlay=allow preserve=renamenew
+file path=etc/ironic/policy.json owner=ironic group=ironic mode=0644 \
+    overlay=allow preserve=renamenew
+file files/ironic.auth_attr \
+    path=etc/security/auth_attr.d/cloud:openstack:ironic group=sys
+file files/ironic.exec_attr \
+    path=etc/security/exec_attr.d/cloud:openstack:ironic group=sys
+file files/ironic.prof_attr \
+    path=etc/security/prof_attr.d/cloud:openstack:ironic group=sys
+file files/ironic.user_attr path=etc/user_attr.d/cloud:openstack:ironic \
+    group=sys
+file path=lib/svc/manifest/application/openstack/ironic-api.xml
+file path=lib/svc/manifest/application/openstack/ironic-conductor.xml
+file path=lib/svc/manifest/application/openstack/ironic-db.xml
+file files/ironic-api path=lib/svc/method/ironic-api
+file files/ironic-conductor path=lib/svc/method/ironic-conductor
+file files/ironic-db path=lib/svc/method/ironic-db
+file path=usr/bin/ironic-dbsync
+file files/ironic-keystone-setup.sh \
+    path=usr/demo/openstack/keystone/ironic-keystone-setup.sh mode=0555
+file usr/bin/ironic-api path=usr/lib/ironic/ironic-api mode=0555
+file usr/bin/ironic-conductor path=usr/lib/ironic/ironic-conductor mode=0555
+file files/ironic-manifest.ksh path=usr/lib/ironic/ironic-manifest.ksh mode=0555
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/pbr.json
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/acl.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/app.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/app.wsgi
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/link.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/root.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/chassis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/collection.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/node.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/port.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/state.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/types.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/controllers/v1/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/hooks.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/middleware/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/middleware/auth_token.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/api/middleware/parsable_error.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/cmd/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/cmd/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/cmd/conductor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/cmd/dbsync.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/boot_devices.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/dhcp_factory.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/disk_partitioner.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/driver_factory.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/base_image_service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/service_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/v1/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/v1/image_service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/v2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/glance_service/v2/image_service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/hash_ring.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/i18n.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/image_service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/images.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/isolinux_config.template
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/keystone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/paths.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/pxe_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/safe_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/states.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/swift.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/common/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/conductor/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/conductor/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/conductor/rpcapi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/conductor/task_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/conductor/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic.ini
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/README
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/env.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/script.py.mako
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/21b331f883ef_add_provision_updated_at.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/31baaf680d2b_add_node_instance_info.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/3bea56f25597_add_unique_constraint_to_instance_uuid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/3cb628139ea4_nodes_add_console_enabled.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/alembic/versions/487deb87cc9d_add_conductor_affinity_and_online.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/db/sqlalchemy/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/dhcp/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/dhcp/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/dhcp/neutron.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/dhcp/none.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/drac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/ilo.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/agent_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/agent_config.template
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/boot.ipxe
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/console_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/deploy_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/management.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/power.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/drac/resource_uris.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/elilo_efi_pxe_config.template
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/iboot.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ilo/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ilo/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ilo/deploy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ilo/power.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/image_cache.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ipminative.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ipmitool.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ipxe_config.template
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/iscsi_deploy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/pxe.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/pxe_config.template
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/seamicro.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/snmp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/solaris_ipmitool.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/modules/ssh.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/pxe.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/solaris.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/drivers/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/migrate_nova/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/migrate_nova/migrate_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/migrate_nova/nova_baremetal_states.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/migrate_nova/nova_models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/netconf.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/compute/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/compute/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/scheduler/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/scheduler/ironic_host_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/virt/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/virt/ironic/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/nova/virt/ironic/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/chassis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/conductor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/node.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/port.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/objects/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/auth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/apiclient/fake_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/cliutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/config/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/config/generator.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/eventlet_backdoor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/excutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/fileutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/gettextutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/imageutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/jsonutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/lockutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/loopingcall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/network_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/periodic_task.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/processutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/systemd.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/threadgroup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/timeutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/uuidutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/openstack/common/versionutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironic/version.py
+dir  path=var/lib/ironic owner=ironic group=ironic mode=0700
+#
+group groupname=ironic gid=91
+user username=ironic ftpuser=false gcos-field="OpenStack Ironic" group=ironic \
+    home-dir=/var/lib/ironic password=NP uid=91
+#
+license LICENSE license="Apache 2.0"
+
+# force a group dependency on the optional anyjson; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/anyjson-$(PYV)
+
+# force a group dependency on the optional simplejson; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
+# force a dependency on package delivering curl(1)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/bin/curl
+
+# force a dependency on package delivering the Unified Archive File System
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/lib/fs/uafs/uafs
+
+# force a dependency on package delivering uvfs(7FS)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/lib/fs/uvfs/mount
+
+# force a dependency on package delivering installadm(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/installadm
+
+# force a dependency on package delivering ipmitool(1)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipmitool
+
+# force a dependency on nova; pkgdepend work is needed to flush this out.
+depend type=require fmri=cloud/openstack/nova
+
+# force a dependency on alembic; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/alembic-$(PYV)
+
+# force a dependency on argparse; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/argparse-$(PYV)
+
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-$(PYV)
+
+# force a dependency on eventlet; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/eventlet-$(PYV)
+
+# force a dependency on glanceclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/glanceclient-$(PYV)
+
+# force a dependency on greenlet; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/greenlet-$(PYV)
+
+# force a dependency on iso8601; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/iso8601-$(PYV)
+
+# force a dependency on jinja2; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jinja2-$(PYV)
+
+# force a dependency on jsonpatch; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jsonpatch-$(PYV)
+
+# force a dependency on keystoneclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/keystoneclient-$(PYV)
+
+# force a dependency on keystonemiddleware; pkgdepend work is needed to flush
+# this out.
+depend type=require fmri=library/python/keystonemiddleware-$(PYV)
+
+# force a dependency on lockfile; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/lockfile-$(PYV)
+
+# force a dependency on netaddr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/netaddr-$(PYV)
+
+# force a dependency on neutronclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/neutronclient-$(PYV)
+
+# force a dependency on oslo.config; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.config-$(PYV)
+
+# force a dependency on oslo.db; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.db-$(PYV)
+
+# force a dependency on oslo.i18n; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.i18n-$(PYV)
+
+# force a dependency on oslo.messaging; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.messaging-$(PYV)
+
+# force a dependency on oslo.utils; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.utils-$(PYV)
+
+# force a dependency on paramiko; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/paramiko-$(PYV)
+
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-$(PYV)
+
+# force a dependency on pecan; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pecan-$(PYV)
+
+# force a dependency on posix_ipc; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/posix_ipc-$(PYV)
+
+# force a dependency on prettytable; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/prettytable-$(PYV)
+
+# force a dependency on pysendfile; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pysendfile-$(PYV)
+
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-$(PYV)
+
+# force a dependency on retrying; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/retrying-$(PYV)
+
+# force a dependency on scp; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/scp-$(PYV)
+
+# force a dependency on setuptools; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/setuptools-$(PYV)
+
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-$(PYV)
+
+# force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/sqlalchemy-$(PYV)
+
+# force a dependency on stevedore; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/stevedore-$(PYV)
+
+# force a dependency on swiftclient; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/swiftclient-$(PYV)
+
+# force a dependency on webob; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/webob-$(PYV)
+
+# force a dependency on wsme; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/wsme-$(PYV)
+
+# force a dependency on pkg; pkgdepend work is needed to flush this out.
+depend type=require fmri=package/pkg
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/patches/01-requirements.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,57 @@
+In-house patch to remove unnecessary dependencies from Ironic's
+requirements files. The specific reasons are as follows:
+
+kombu		Not applicable
+
+lxml		Not applicable
+
+websockify	Not applicable
+
+oslo.rootwrap	Not applicable to Solaris
+
+--- ironic-2014.2.1/ironic.egg-info/requires.txt.~1~	2015-02-19 10:50:54.000000000 -0800
++++ ironic-2014.2.1/ironic.egg-info/requires.txt	2015-05-12 13:36:29.313651947 -0700
+@@ -4,9 +4,7 @@ alembic>=0.6.4
+ anyjson>=0.3.3
+ argparse
+ eventlet>=0.15.1,<0.16.0
+-kombu>=2.5.0
+ lockfile>=0.8
+-lxml>=2.3
+ WebOb>=1.2.3
+ greenlet>=0.3.2
+ sqlalchemy-migrate==0.9.1
+@@ -19,10 +17,8 @@ python-keystoneclient>=0.10.0
+ python-swiftclient>=2.2.0
+ stevedore>=1.0.0  # Apache-2.0
+ pysendfile==2.0.0
+-websockify>=0.6.0,<0.7
+ oslo.config>=1.4.0  # Apache-2.0
+ oslo.db>=1.0.0,<1.1  # Apache-2.0
+-oslo.rootwrap>=1.3.0
+ oslo.i18n>=1.0.0  # Apache-2.0
+ oslo.utils>=1.0.0                       # Apache-2.0
+ pecan>=0.5.0
+--- ironic-2014.2.1/requirements.txt.~1~	2015-02-19 10:45:47.000000000 -0800
++++ ironic-2014.2.1/requirements.txt	2015-05-12 13:37:03.904654352 -0700
+@@ -7,9 +7,7 @@ alembic>=0.6.4
+ anyjson>=0.3.3
+ argparse
+ eventlet>=0.15.1,<0.16.0
+-kombu>=2.5.0
+ lockfile>=0.8
+-lxml>=2.3
+ WebOb>=1.2.3
+ greenlet>=0.3.2
+ sqlalchemy-migrate==0.9.1
+@@ -22,10 +20,8 @@ python-keystoneclient>=0.10.0
+ python-swiftclient>=2.2.0
+ stevedore>=1.0.0  # Apache-2.0
+ pysendfile==2.0.0
+-websockify>=0.6.0,<0.7
+ oslo.config>=1.4.0  # Apache-2.0
+ oslo.db>=1.0.0,<1.1  # Apache-2.0
+-oslo.rootwrap>=1.3.0
+ oslo.i18n>=1.0.0  # Apache-2.0
+ oslo.utils>=1.0.0                       # Apache-2.0
+ pecan>=0.5.0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/patches/02-driver-entry.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,12 @@
+Add the solaris driver to the list of available drivers.
+
+--- ORIGINAL/setup.cfg	2015-02-19 18:50:54.000000000 +0000
++++ ironic-2014.2.1/setup.cfg	2015-04-09 16:59:01.035921792 +0100
+@@ -57,6 +57,7 @@
+ 	pxe_ilo = ironic.drivers.pxe:PXEAndIloDriver
+ 	pxe_drac = ironic.drivers.drac:PXEDracDriver
+ 	pxe_snmp = ironic.drivers.pxe:PXEAndSNMPDriver
++	solaris = ironic.drivers.solaris:SolarisAndIPMIToolDriver
+ ironic.database.migration_backend = 
+ 	sqlalchemy = ironic.db.sqlalchemy.migration
+ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/ironic/patches/03-boot-device.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,34 @@
+Ironic is very Linux-centric: regardless of architecture, it reports the
+same list of supported boot devices for every node.
+
+This is not correct for SPARC, where, for example, PXE is not supported.
+
+This patch simply passes the task into the driver-implemented method for
+getting the boot device, so that architecture-specific information can be
+returned.
+
+Upstream bug logged against trunk:
+  https://bugs.launchpad.net/ironic/+bug/1391598
+
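+As an illustration only (not the shipped Solaris driver code), a
+management interface can then vary the returned list by architecture,
+for example via the node's standard "cpu_arch" property. A minimal
+sketch under those assumptions:
+
+    # Mirrors the ironic.common.boot_devices constants.
+    PXE, DISK, CDROM = 'pxe', 'disk', 'cdrom'
+
+    def get_supported_boot_devices(task=None):
+        """Return the boot devices appropriate for the node in task."""
+        if task is None:
+            # Preserve the old, architecture-blind behaviour.
+            return [PXE, DISK, CDROM]
+        if task.node.properties.get('cpu_arch', '').startswith('sparc'):
+            # SPARC firmware has no PXE; it network-boots via wanboot,
+            # so only disk boot is advertised by this sketch.
+            return [DISK]
+        return [PXE, DISK, CDROM]
+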
+--- ORIGINAL/ironic/conductor/manager.py	2015-02-20 18:03:18.051557776 +0000
++++ ironic-2014.2.1/ironic/conductor/manager.py	2015-02-20 18:08:33.001316709 +0000
+@@ -1380,4 +1380,4 @@
+             if not getattr(task.driver, 'management', None):
+                 raise exception.UnsupportedDriverExtension(
+                             driver=task.node.driver, extension='management')
+-            return task.driver.management.get_supported_boot_devices()
++            return task.driver.management.get_supported_boot_devices(task)
+--- ORIGINAL/ironic/drivers/base.py	2015-02-20 18:03:18.037072121 +0000
++++ ironic-2014.2.1/ironic/drivers/base.py	2015-02-20 18:09:58.769898691 +0000
+@@ -436,9 +436,10 @@
+         """
+ 
+     @abc.abstractmethod
+-    def get_supported_boot_devices(self):
++    def get_supported_boot_devices(self, task=None):
+         """Get a list of the supported boot devices.
+ 
++        :param task: a task from TaskManager.
+         :returns: A list with the supported boot devices defined
+                   in :mod:`ironic.common.boot_devices`.
+         """
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/cryptography/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,80 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		cryptography
+COMPONENT_VERSION=	0.8.2
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:1c9a022ab3decaf152093e2ef2d5ee4258c72c7d429446c86bd68ff8c0929db6
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://cryptography.io/
+COMPONENT_BUGDB=	python-mod/cryptography
+
+TPNO=			22524
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+#
+# Until enum is available in the build environment, it needs to be built
+# in order for setuptools to work here.
+#
+ENUM=	$(WS_COMPONENTS)/python/enum/build/prototype/$(MACH)/$(PYTHON_LIB)
+
+$(ENUM):
+	(cd ../enum ; $(MAKE) install)
+
+COMPONENT_BUILD_ENV +=		PYTHONPATH=$(ENUM)
+
+COMPONENT_INSTALL_ENV +=	PYTHONPATH=$(ENUM)
+
+$(BUILD_32_and_64):		$(ENUM)
+
+clean::
+	(cd ../enum ; $(MAKE) clean)
+
+clobber::
+	(cd ../enum ; $(MAKE) clobber)
+
+# common targets
+build:		$(BUILD_32_and_64)
+
+install:	$(INSTALL_32_and_64)
+
+#
+# Tests require cryptography_vectors, iso8601, pretend, pytest, and six,
+# some of which have not yet been integrated.
+#
+test:		$(NO_TESTS)
+
+
+REQUIRED_PACKAGES += library/security/openssl
+REQUIRED_PACKAGES += system/library
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/cryptography/cryptography-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,194 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/cryptography-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="Python cryptographic recipes and primitives"
+set name=pkg.description \
+    value="The cryptography package provides cryptographic recipes and primitives to Python developers. It includes both high level recipes, and low level interfaces to common cryptographic algorithms such as symmetric ciphers, message digests and key derivation functions."
+set name=com.oracle.info.description value="the Python cryptography module"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python \
+    value=org.opensolaris.category.2008:System/Security
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream \
+    value="The cryptography developers <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/071
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_26cb75b8x62b488b1.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_353274b2xffc7b1ce.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_383020b0x62b488b1.so
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_590da19fxffc7b1ce.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_93dac7cbx399b1113.so
+$(PYTHON_3.4_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_a8febd48xffc7b1ce.so
+$(PYTHON_3.4_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_b03f9c7x62b488b1.so
+$(PYTHON_3.4_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_e9178e86x399b1113.so
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/64/_Cryptography_cffi_f3e4673fx399b1113.so
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_26cb75b8x62b488b1.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_353274b2xffc7b1ce.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_383020b0x62b488b1.so
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_590da19fxffc7b1ce.so
+$(PYTHON_2.6_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_93dac7cbx399b1113.so
+$(PYTHON_2.7_ONLY)file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/_Cryptography_cffi_f3e4673fx399b1113.so
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/__about__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/fernet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/commoncrypto/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/commoncrypto/backend.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/commoncrypto/ciphers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/commoncrypto/hashes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/commoncrypto/hmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/interfaces.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/multibackend.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/backend.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/ciphers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/cmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/dsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/ec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/hashes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/hmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/rsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/backends/openssl/x509.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/binding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/cf.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/common_cryptor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/common_digest.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/common_hmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/common_key_derivation.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/secimport.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/secitem.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/seckey.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/seckeychain.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/commoncrypto/sectransform.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/aes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/asn1.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/bignum.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/binding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/bio.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/cmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/cms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/conf.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/crypto.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/dh.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/dsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/ec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/ecdh.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/ecdsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/engine.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/err.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/evp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/hmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/nid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/objects.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/opensslv.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/osrandom_engine.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/pem.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/pkcs12.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/pkcs7.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/rand.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/rsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/ssl.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/x509.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/x509_vfy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/x509name.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/openssl/x509v3.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/bindings/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/dh.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/dsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/ec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/padding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/rsa.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/asymmetric/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/ciphers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/ciphers/algorithms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/ciphers/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/ciphers/modes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/cmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/constant_time.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/hashes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/hmac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/interfaces/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/kdf/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/kdf/hkdf.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/kdf/pbkdf2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/padding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/serialization.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/src/constant_time.c
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/src/constant_time.h
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/src/padding.c
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/src/padding.h
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/twofactor/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/twofactor/hotp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/hazmat/primitives/twofactor/totp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cryptography/x509.py
+#
+license LICENSE license="Apache 2.0"
+
+# The cffi-generated extensions above are tied to the cffi version used at
+# build time, so that exact version must be incorporated.
+depend type=incorporate fmri=library/python/cffi-$(PYV)@0.8.2
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on cffi; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/cffi-$(PYV)
+
+# force a dependency on the cryptography package
+depend type=require \
+    fmri=library/python/cryptography@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+
+# force a dependency on enum; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/enum-$(PYV)
+
+# force a dependency on pyasn1; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pyasn1-$(PYV)
+
+# force a dependency on setuptools; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/setuptools-$(PYV)
+
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-$(PYV)
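+
+# A minimal usage sketch of the high-level recipes layer this package
+# delivers (illustrative only; real code must store the key securely):
+#
+#   from cryptography.fernet import Fernet
+#
+#   key = Fernet.generate_key()
+#   token = Fernet(key).encrypt(b'attack at dawn')
+#   assert Fernet(key).decrypt(token) == b'attack at dawn'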
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/enum/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,57 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		enum34
+COMPONENT_VERSION=	1.0.4
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:d3c19f26a6a34629c18c775f59dfc5dd595764c722b57a2da56ebfb69b94e447
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://pypi.python.org/pypi/enum34
+COMPONENT_BUGDB=	python-mod/enum34
+
+TPNO=			21360
+
+# enum34 is superfluous in Python 3.4
+PYTHON_VERSIONS =	2.7 2.6
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+COMPONENT_TEST_ARGS=	enum/test_enum.py
+COMPONENT_TEST_DIR=	$(COMPONENT_SRC)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+test:		$(TEST_NO_ARCH)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/enum/enum-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,53 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/enum-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="Support for enumerations"
+set name=pkg.description \
+    value="enum is the new Python stdlib enum module available in Python 3.4 backported for previous versions of Python. An enumeration is a set of symbolic names (members) bound to unique, constant values. Within an enumeration, the members can be compared by identity, and the enumeration itself can be iterated over."
+set name=com.oracle.info.description value="the Python enum module"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="Ethan Furman <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/198
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+file path=usr/lib/python$(PYVER)/vendor-packages/enum/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/enum/enum.py
+file path=usr/lib/python$(PYVER)/vendor-packages/enum34-$(COMPONENT_VERSION)-py$(PYVER).egg-info
+#
+license enum/LICENSE license=BSD
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on the enum package
+depend type=require \
+    fmri=library/python/enum@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/ironicclient/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,63 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		python-ironicclient
+COMPONENT_VERSION=	0.3.3
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:224b6c2ed2ee3b358684640f83ab1c7425a1d030e39a96a78d06c499adbe586e
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://launchpad.net/python-ironicclient
+COMPONENT_BUGDB=	service/ironic
+
+TPNO=			22204
+
+# Depends on keystoneclient which is not Python 3 ready.
+PYTHON_VERSIONS=	2.7 2.6
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+COMPONENT_POST_INSTALL_ACTION = \
+	(cd $(PROTO_DIR)/usr/bin ; $(MV) -f ironic ironic-$(PYTHON_VERSION))
+
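+# (The renamed binary is re-exposed as the unversioned /usr/bin/ironic via a
+# mediated link in ironicclient-PYVER.p5m.)
+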
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+#
+# Tests require:
+# coverage>=3.6, discover, fixtures, hacking, httpretty, oslo.sphinx,
+# python-subunit, sphinx, testrepository, testtools
+# which haven't been integrated yet.
+#
+test:		$(NO_TESTS)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/ironicclient/ironicclient-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,134 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/ironicclient-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary \
+    value="OpenStack Bare Metal Provisioning API client library"
+set name=pkg.description \
+    value=" This is a client for the OpenStack Bare Metal Provisioning API. It provides a Python API (the 'ironicclient' module) and a command-line interface ('ironic')."
+set name=com.oracle.info.description \
+    value="ironicclient, the Python bindings to the OpenStack Bare Metal Provisioning Storage API"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python \
+    value="org.opensolaris.category.2008:System/Administration and Configuration" \
+    value="org.opensolaris.category.2008:System/Enterprise Management"
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="OpenStack <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/170
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+link path=usr/bin/ironic target=ironic-$(PYVER) mediator=python \
+    mediator-version=$(PYVER)
+file path=usr/bin/ironic-$(PYVER)
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/common/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/common/http.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/common/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/exc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/_i18n.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/auth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/fake_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/apiclient/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/cliutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/gettextutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/openstack/common/uuidutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/chassis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/chassis_shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/driver_shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/node.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/node_shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/port.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/port_shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/resource_fields.py
+file path=usr/lib/python$(PYVER)/vendor-packages/ironicclient/v1/shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/pbr.json
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/python_ironicclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+#
+license LICENSE license="Apache 2.0"
+
+# force a group dependency on the optional simplejson; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
+# force a dependency on argparse; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/argparse-$(PYV)
+
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-$(PYV)
+
+# force a dependency on httplib2; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/httplib2-$(PYV)
+
+# force a dependency on the ironicclient package
+depend type=require \
+    fmri=library/python/ironicclient@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+
+# force a dependency on keystoneclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/keystoneclient-$(PYV)
+
+# force a dependency on oslo.i18n; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.i18n-$(PYV)
+
+# force a dependency on oslo.utils; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.utils-$(PYV)
+
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-$(PYV)
+
+# force a dependency on prettytable; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/prettytable-$(PYV)
+
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-$(PYV)
+
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-$(PYV)
+
+# force a dependency on stevedore; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/stevedore-$(PYV)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/ironicclient/patches/01-boot-device-wanboot.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,15 @@
+Add wanboot to list of potentially supported boot devices.
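+
+With this change the client accepts, for example (the node UUID is a
+placeholder):
+
+    ironic node-set-boot-device <node-uuid> wanboot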
+
+--- python-ironicclient-0.3.2/ironicclient/v1/node_shell.py.~1~	2014-12-10 11:10:37.000000000 -0800
++++ python-ironicclient-0.3.2/ironicclient/v1/node_shell.py	2015-01-09 00:10:16.660604875 -0800
+@@ -344,8 +344,8 @@ def do_node_set_console_mode(cc, args):
+ @cliutils.arg(
+     'device',
+     metavar='<boot device>',
+-    choices=['pxe', 'disk', 'cdrom', 'bios', 'safe'],
+-    help="Supported boot devices:  'pxe', 'disk', 'cdrom', 'bios', 'safe'")
++    choices=['pxe', 'disk', 'cdrom', 'bios', 'safe', 'wanboot'],
++    help="Supported boot devices:  'pxe', 'disk', 'cdrom', 'bios', 'safe', 'wanboot'")
+ @cliutils.arg(
+     '--persistent',
+     dest='persistent',
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/logutils/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,54 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		logutils
+COMPONENT_VERSION=	0.3.3
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:4042b8e57cbe3b01552b3c84191595ae6c36f1ab5aef7e3a6ce5c2f15c297c9c
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://bitbucket.org/vinay.sajip/logutils/
+COMPONENT_BUGDB=	python-mod/logutils
+
+TPNO=			22462
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+COMPONENT_TEST_ARGS=	setup.py test
+COMPONENT_TEST_DIR=	$(COMPONENT_SRC)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+test:		$(TEST_NO_ARCH)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/logutils/logutils-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,61 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/logutils-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary \
+    value="Set of handlers for the Python standard library's logging package"
+set name=pkg.description \
+    value="The logutils package consists of a set of handlers and utilities for the Python standard library's logging package including: helpers for working with queues; helpers for working with unit tests; a NullHandler for use when configuring logging for libraries; an updated LoggerAdapter for adding contextual information to logs; an updated HTTPHandler for sending information to Web sites; dictionary-based configuration as proposed in PEP 391; and a stream handler which colorizes terminal streams."
+set name=com.oracle.info.description \
+    value="the logutils enhancements to the Python standard library's logging package"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="Vinay Sajip https://bitbucket.org/vinay.sajip"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/197
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils-$(COMPONENT_VERSION)-py$(PYVER).egg-info
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/adapter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/colorize.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/dictconfig.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/http.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/queue.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/redis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/logutils/testing.py
+#
+license LICENSE.txt license=BSD
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on the logutils package
+depend type=require \
+    fmri=library/python/logutils@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/paramiko/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,55 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		paramiko
+COMPONENT_VERSION=	1.15.2
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:4f56a671a3eecbb76e6143e6e4ca007d503a39aa79aa9e14ade667fa53fd6e55
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	http://github.com/paramiko/paramiko/
+COMPONENT_BUGDB=	python-mod/paramiko
+
+TPNO=			21692
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+COMPONENT_TEST_ARGS=	./test.py
+COMPONENT_TEST_DIR=	$(SOURCE_DIR)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+# Disable tests until cryptography is integrated and 20917217 is resolved.
+test:		$(NO_TESTS)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/paramiko/paramiko-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,106 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/paramiko-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="SSH2 protocol library for Python"
+set name=pkg.description \
+    value="Paramiko is a library for making SSH2 connections (client or server.) Emphasis is on using SSH2 as an alternative to SSL for making secure connections between Python scripts. All major ciphers and hash methods are supported. SFTP client and server mode are both supported too."
+set name=com.oracle.info.description \
+    value="Paramiko, the SSH2 protocol library for Python"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python \
+    value=org.opensolaris.category.2008:System/Security
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="Jeff Forcier <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/250
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/_version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/_winapi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/auth_handler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/ber.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/buffered_pipe.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/channel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/compress.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/dsskey.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/ecdsakey.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/file.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/hostkeys.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/kex_gex.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/kex_group1.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/kex_group14.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/kex_gss.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/message.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/packet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/pipe.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/pkey.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/primes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/py3compat.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/resource.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/rsakey.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/server.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_attr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_file.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_handle.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_server.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/sftp_si.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/ssh_exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/ssh_gss.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/transport.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/util.py
+file path=usr/lib/python$(PYVER)/vendor-packages/paramiko/win_pageant.py
+#
+license LICENSE license=LGPL2.1
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on cryptography; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/cryptography-$(PYV)
+
+# force a dependency on the paramiko package
+depend type=require \
+    fmri=library/python/paramiko@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+
+# force a dependency on pyasn1; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pyasn1-$(PYV)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/paramiko/patches/01-nopycrypto.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,1613 @@
+External but not-yet-integrated patch that changes Paramiko to use
+"cryptography" rather than "PyCrypto". The changes have been modified
+from
+
+	https://github.com/paramiko/paramiko/pull/394/files
+
+to patch cleanly into Paramiko 1.15.2.
+This patch is a stop-gap and will be removed once upstream Paramiko
+completes its transition to "cryptography".
+
+--- paramiko-1.15.2/README.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/README	2015-04-12 17:36:15.204911382 -0700
+@@ -25,7 +25,7 @@ channels to remote services across the e
+ works, for example).
+ 
+ it is written entirely in python (no C or platform-dependent code) and is
+-released under the GNU LGPL (lesser GPL). 
++released under the GNU LGPL (lesser GPL).
+ 
+ the package and its API is fairly well documented in the "doc/" folder
+ that should have come with this archive.
+@@ -36,8 +36,8 @@ Requirements
+ 
+   - Python 2.6 or better <http://www.python.org/> - this includes Python
+     3.2 and higher as well.
+-  - pycrypto 2.1 or better <https://www.dlitz.net/software/pycrypto/>
+-  - ecdsa 0.9 or better <https://pypi.python.org/pypi/ecdsa>
++  - Cryptography 0.8 or better <https://cryptography.io>
++  - pyasn1 0.1.7 or better <https://pypi.python.org/pypi/pyasn1>
+ 
+ If you have setuptools, you can build and install paramiko and all its
+ dependencies with this command (as root)::
+--- paramiko-1.15.2/paramiko/_winapi.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/_winapi.py	2015-04-12 17:36:15.205197752 -0700
+@@ -106,7 +106,7 @@ MapViewOfFile.restype = ctypes.wintypes.
+ 
+ class MemoryMap(object):
+     """
+-    A memory map object which can have security attributes overrideden.
++    A memory map object which can have security attributes overridden.
+     """
+     def __init__(self, name, length, security_attributes=None):
+         self.name = name
+--- paramiko-1.15.2/paramiko/agent.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/agent.py	2015-04-12 17:36:15.205474363 -0700
+@@ -32,7 +32,7 @@ from select import select
+ from paramiko.common import asbytes, io_sleep
+ from paramiko.py3compat import byte_chr
+ 
+-from paramiko.ssh_exception import SSHException
++from paramiko.ssh_exception import SSHException, AuthenticationException
+ from paramiko.message import Message
+ from paramiko.pkey import PKey
+ from paramiko.util import retry_on_signal
+@@ -109,9 +109,12 @@ class AgentProxyThread(threading.Thread)
+     def run(self):
+         try:
+             (r, addr) = self.get_connection()
++            # Found that r should be either a socket from the socket library or None
+             self.__inr = r
+-            self.__addr = addr
++            self.__addr = addr # This should be an IP address as a string? or None
+             self._agent.connect()
++            if not isinstance(self._agent, int) and (self._agent._conn is None or not hasattr(self._agent._conn, 'fileno')):
++                raise AuthenticationException("Unable to connect to SSH agent")
+             self._communicate()
+         except:
+             #XXX Not sure what to do here ... raise or pass ?
+--- paramiko-1.15.2/paramiko/channel.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/channel.py	2015-04-12 17:36:15.205880064 -0700
+@@ -337,7 +337,7 @@ class Channel (ClosingContextManager):
+         further x11 requests can be made from the server to the client,
+         when an x11 application is run in a shell session.
+ 
+-        From RFC4254::
++        From :rfc:`4254`::
+ 
+             It is RECOMMENDED that the 'x11 authentication cookie' that is
+             sent be a fake, random cookie, and that the cookie be checked and
+--- paramiko-1.15.2/paramiko/client.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/client.py	2015-04-12 17:36:15.206296235 -0700
+@@ -25,6 +25,7 @@ import getpass
+ import os
+ import socket
+ import warnings
++from errno import ECONNREFUSED, EHOSTUNREACH
+ 
+ from paramiko.agent import Agent
+ from paramiko.common import DEBUG
+@@ -35,7 +36,9 @@ from paramiko.hostkeys import HostKeys
+ from paramiko.py3compat import string_types
+ from paramiko.resource import ResourceManager
+ from paramiko.rsakey import RSAKey
+-from paramiko.ssh_exception import SSHException, BadHostKeyException
++from paramiko.ssh_exception import (
++    SSHException, BadHostKeyException, NoValidConnectionsError
++)
+ from paramiko.transport import Transport
+ from paramiko.util import retry_on_signal, ClosingContextManager
+ 
+@@ -172,10 +175,46 @@ class SSHClient (ClosingContextManager):
+         """
+         self._policy = policy
+ 
+-    def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None,
+-                key_filename=None, timeout=None, allow_agent=True, look_for_keys=True,
+-                compress=False, sock=None, gss_auth=False, gss_kex=False,
+-                gss_deleg_creds=True, gss_host=None, banner_timeout=None):
++    def _families_and_addresses(self, hostname, port):
++        """
++        Yield pairs of address families and addresses to try for connecting.
++
++        :param str hostname: the server to connect to
++        :param int port: the server port to connect to
++        :returns: Yields an iterable of ``(family, address)`` tuples
++        """
++        guess = True
++        addrinfos = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
++        for (family, socktype, proto, canonname, sockaddr) in addrinfos:
++            if socktype == socket.SOCK_STREAM:
++                yield family, sockaddr
++                guess = False
++
++        # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
++        # We only do this if we did not get a single result marked as socktype == SOCK_STREAM.
++        if guess:
++            for family, _, _, _, sockaddr in addrinfos:
++                yield family, sockaddr
++
++    def connect(
++        self,
++        hostname,
++        port=SSH_PORT,
++        username=None,
++        password=None,
++        pkey=None,
++        key_filename=None,
++        timeout=None,
++        allow_agent=True,
++        look_for_keys=True,
++        compress=False,
++        sock=None,
++        gss_auth=False,
++        gss_kex=False,
++        gss_deleg_creds=True,
++        gss_host=None,
++        banner_timeout=None
++    ):
+         """
+         Connect to an SSH server and authenticate to it.  The server's host key
+         is checked against the system host keys (see `load_system_host_keys`)
+@@ -206,8 +245,10 @@ class SSHClient (ClosingContextManager):
+         :param str key_filename:
+             the filename, or list of filenames, of optional private key(s) to
+             try for authentication
+-        :param float timeout: an optional timeout (in seconds) for the TCP connect
+-        :param bool allow_agent: set to False to disable connecting to the SSH agent
++        :param float timeout:
++            an optional timeout (in seconds) for the TCP connect
++        :param bool allow_agent:
++            set to False to disable connecting to the SSH agent
+         :param bool look_for_keys:
+             set to False to disable searching for discoverable private key
+             files in ``~/.ssh/``
+@@ -216,9 +257,11 @@ class SSHClient (ClosingContextManager):
+             an open socket or socket-like object (such as a `.Channel`) to use
+             for communication to the target host
+         :param bool gss_auth: ``True`` if you want to use GSS-API authentication
+-        :param bool gss_kex: Perform GSS-API Key Exchange and user authentication
++        :param bool gss_kex:
++            Perform GSS-API Key Exchange and user authentication
+         :param bool gss_deleg_creds: Delegate GSS-API client credentials or not
+-        :param str gss_host: The targets name in the kerberos database. default: hostname
++        :param str gss_host:
++            The targets name in the kerberos database. default: hostname
+         :param float banner_timeout: an optional timeout (in seconds) to wait
+             for the SSH banner to be presented.
+ 
+@@ -234,21 +277,37 @@ class SSHClient (ClosingContextManager):
+             ``gss_deleg_creds`` and ``gss_host`` arguments.
+         """
+         if not sock:
+-            for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
+-                if socktype == socket.SOCK_STREAM:
+-                    af = family
+-                    addr = sockaddr
+-                    break
+-            else:
+-                # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :(
+-                af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
+-            sock = socket.socket(af, socket.SOCK_STREAM)
+-            if timeout is not None:
++            errors = {}
++            # Try multiple possible address families (e.g. IPv4 vs IPv6)
++            to_try = list(self._families_and_addresses(hostname, port))
++            for af, addr in to_try:
+                 try:
+-                    sock.settimeout(timeout)
+-                except:
+-                    pass
+-            retry_on_signal(lambda: sock.connect(addr))
++                    sock = socket.socket(af, socket.SOCK_STREAM)
++                    if timeout is not None:
++                        try:
++                            sock.settimeout(timeout)
++                        except:
++                            pass
++                    retry_on_signal(lambda: sock.connect(addr))
++                    # Break out of the loop on success
++                    break
++                except socket.error as e:
++                    # Raise anything that isn't a straight up connection error
++                    # (such as a resolution error)
++                    if e.errno not in (ECONNREFUSED, EHOSTUNREACH):
++                        raise
++                    # Capture anything else so we know how the run looks once
++                    # iteration is complete. Retain info about which attempt
++                    # this was.
++                    errors[addr] = e
++
++            # Make sure we explode usefully if no address family attempts
++            # succeeded. We've no way of knowing which error is the "right"
++            # one, so we construct a hybrid exception containing all the real
++            # ones, of a subclass that client code should still be watching for
++            # (socket.error)
++            if len(errors) == len(to_try):
++                raise NoValidConnectionsError(errors)
+ 
+         t = self._transport = Transport(sock, gss_kex=gss_kex, gss_deleg_creds=gss_deleg_creds)
+         t.use_compression(compress=compress)
+--- paramiko-1.15.2/paramiko/config.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/config.py	2015-04-12 17:36:15.206521239 -0700
+@@ -98,7 +98,7 @@ class SSHConfig (object):
+ 
+         The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
+         For each parameter, the first obtained value will be used.  The
+-        configuration files contain sections separated by ``Host''
++        configuration files contain sections separated by ``Host``
+         specifications, and that section is only applied for hosts that match
+         one of the patterns given in the specification.
+ 
+--- paramiko-1.15.2/paramiko/dsskey.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/dsskey.py	2015-04-12 17:36:15.206846024 -0700
+@@ -20,21 +20,23 @@
+ DSS keys.
+ """
+ 
+-import os
+-from hashlib import sha1
+-
+-from Crypto.PublicKey import DSA
++from cryptography.exceptions import InvalidSignature
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives import hashes, serialization
++from cryptography.hazmat.primitives.asymmetric import dsa
++from cryptography.hazmat.primitives.asymmetric.utils import (
++    decode_rfc6979_signature, encode_rfc6979_signature
++)
+ 
+ from paramiko import util
+ from paramiko.common import zero_byte
+-from paramiko.py3compat import long
+ from paramiko.ssh_exception import SSHException
+ from paramiko.message import Message
+ from paramiko.ber import BER, BERException
+ from paramiko.pkey import PKey
+ 
+ 
+-class DSSKey (PKey):
++class DSSKey(PKey):
+     """
+     Representation of a DSS key which can be used to sign an verify SSH2
+     data.
+@@ -98,15 +100,21 @@ class DSSKey (PKey):
+         return self.x is not None
+ 
+     def sign_ssh_data(self, data):
+-        digest = sha1(data).digest()
+-        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q), long(self.x)))
+-        # generate a suitable k
+-        qsize = len(util.deflate_long(self.q, 0))
+-        while True:
+-            k = util.inflate_long(os.urandom(qsize), 1)
+-            if (k > 2) and (k < self.q):
+-                break
+-        r, s = dss.sign(util.inflate_long(digest, 1), k)
++        key = dsa.DSAPrivateNumbers(
++            x=self.x,
++            public_numbers=dsa.DSAPublicNumbers(
++                y=self.y,
++                parameter_numbers=dsa.DSAParameterNumbers(
++                    p=self.p,
++                    q=self.q,
++                    g=self.g
++                )
++            )
++        ).private_key(backend=default_backend())
++        signer = key.signer(hashes.SHA1())
++        signer.update(data)
++        r, s = decode_rfc6979_signature(signer.finalize())
++
+         m = Message()
+         m.add_string('ssh-dss')
+         # apparently, in rare cases, r or s may be shorter than 20 bytes!
+@@ -132,27 +140,65 @@ class DSSKey (PKey):
+         # pull out (r, s) which are NOT encoded as mpints
+         sigR = util.inflate_long(sig[:20], 1)
+         sigS = util.inflate_long(sig[20:], 1)
+-        sigM = util.inflate_long(sha1(data).digest(), 1)
+ 
+-        dss = DSA.construct((long(self.y), long(self.g), long(self.p), long(self.q)))
+-        return dss.verify(sigM, (sigR, sigS))
++        signature = encode_rfc6979_signature(sigR, sigS)
+ 
+-    def _encode_key(self):
+-        if self.x is None:
+-            raise SSHException('Not enough key information')
+-        keylist = [0, self.p, self.q, self.g, self.y, self.x]
++        key = dsa.DSAPublicNumbers(
++            y=self.y,
++            parameter_numbers=dsa.DSAParameterNumbers(
++                p=self.p,
++                q=self.q,
++                g=self.g
++            )
++        ).public_key(backend=default_backend())
++        verifier = key.verifier(signature, hashes.SHA1())
++        verifier.update(data)
+         try:
+-            b = BER()
+-            b.encode(keylist)
+-        except BERException:
+-            raise SSHException('Unable to create ber encoding of key')
+-        return b.asbytes()
++            verifier.verify()
++        except InvalidSignature:
++            return False
++        else:
++            return True
+ 
+     def write_private_key_file(self, filename, password=None):
+-        self._write_private_key_file('DSA', filename, self._encode_key(), password)
++        key = dsa.DSAPrivateNumbers(
++            x=self.x,
++            public_numbers=dsa.DSAPublicNumbers(
++                y=self.y,
++                parameter_numbers=dsa.DSAParameterNumbers(
++                    p=self.p,
++                    q=self.q,
++                    g=self.g
++                )
++            )
++        ).private_key(backend=default_backend())
++
++        self._write_private_key_file(
++            filename,
++            key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     def write_private_key(self, file_obj, password=None):
+-        self._write_private_key('DSA', file_obj, self._encode_key(), password)
++        key = dsa.DSAPrivateNumbers(
++            x=self.x,
++            public_numbers=dsa.DSAPublicNumbers(
++                y=self.y,
++                parameter_numbers=dsa.DSAParameterNumbers(
++                    p=self.p,
++                    q=self.q,
++                    g=self.g
++                )
++            )
++        ).private_key(backend=default_backend())
++
++        self._write_private_key(
++            file_obj,
++            key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     @staticmethod
+     def generate(bits=1024, progress_func=None):
+@@ -161,14 +207,19 @@ class DSSKey (PKey):
+         generate a new host key or authentication key.
+ 
+         :param int bits: number of bits the generated key should be.
+-        :param function progress_func:
+-            an optional function to call at key points in key generation (used
+-            by ``pyCrypto.PublicKey``).
++        :param function progress_func: Unused
+         :return: new `.DSSKey` private key
+         """
+-        dsa = DSA.generate(bits, os.urandom, progress_func)
+-        key = DSSKey(vals=(dsa.p, dsa.q, dsa.g, dsa.y))
+-        key.x = dsa.x
++        numbers = dsa.generate_private_key(
++            bits, backend=default_backend()
++        ).private_numbers()
++        key = DSSKey(vals=(
++            numbers.public_numbers.parameter_numbers.p,
++            numbers.public_numbers.parameter_numbers.q,
++            numbers.public_numbers.parameter_numbers.g,
++            numbers.public_numbers.y
++        ))
++        key.x = numbers.x
+         return key
+ 
+     ###  internals...
+--- paramiko-1.15.2/paramiko/ecdsakey.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/ecdsakey.py	2015-04-12 17:36:15.207208398 -0700
+@@ -21,18 +21,24 @@ ECDSA keys
+ """
+ 
+ import binascii
+-from hashlib import sha256
+ 
+-from ecdsa import SigningKey, VerifyingKey, der, curves
++from cryptography.exceptions import InvalidSignature
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives import hashes, serialization
++from cryptography.hazmat.primitives.asymmetric import ec
++from cryptography.hazmat.primitives.asymmetric.utils import (
++    decode_rfc6979_signature, encode_rfc6979_signature
++)
+ 
+ from paramiko.common import four_byte, one_byte
+ from paramiko.message import Message
+ from paramiko.pkey import PKey
+-from paramiko.py3compat import byte_chr, u
++from paramiko.py3compat import byte_chr
+ from paramiko.ssh_exception import SSHException
++from paramiko.util import deflate_long, inflate_long
+ 
+ 
+-class ECDSAKey (PKey):
++class ECDSAKey(PKey):
+     """
+     Representation of an ECDSA key which can be used to sign and verify SSH2
+     data.
+@@ -65,9 +71,13 @@ class ECDSAKey (PKey):
+             if pointinfo[0:1] != four_byte:
+                 raise SSHException('Point compression is being used: %s' %
+                                    binascii.hexlify(pointinfo))
+-            self.verifying_key = VerifyingKey.from_string(pointinfo[1:],
+-                                                          curve=curves.NIST256p,
+-                                                          validate_point=validate_point)
++            curve = ec.SECP256R1()
++            numbers = ec.EllipticCurvePublicNumbers(
++                x=inflate_long(pointinfo[1:1 + curve.key_size // 8], always_positive=True),
++                y=inflate_long(pointinfo[1 + curve.key_size // 8:], always_positive=True),
++                curve=curve
++            )
++            self.verifying_key = numbers.public_key(backend=default_backend())
+         self.size = 256
+ 
+     def asbytes(self):
+@@ -76,8 +86,15 @@ class ECDSAKey (PKey):
+         m.add_string('ecdsa-sha2-nistp256')
+         m.add_string('nistp256')
+ 
+-        point_str = four_byte + key.to_string()
++        numbers = key.public_numbers()
+ 
++        x_bytes = deflate_long(numbers.x, add_sign_padding=False)
++        x_bytes = b'\x00' * (key.curve.key_size // 8 - len(x_bytes)) + x_bytes
++
++        y_bytes = deflate_long(numbers.y, add_sign_padding=False)
++        y_bytes = b'\x00' * (key.curve.key_size // 8 - len(y_bytes)) + y_bytes
++
++        point_str = four_byte + x_bytes + y_bytes
+         m.add_string(point_str)
+         return m.asbytes()
+ 
+@@ -86,8 +103,8 @@ class ECDSAKey (PKey):
+ 
+     def __hash__(self):
+         h = hash(self.get_name())
+-        h = h * 37 + hash(self.verifying_key.pubkey.point.x())
+-        h = h * 37 + hash(self.verifying_key.pubkey.point.y())
++        h = h * 37 + hash(self.verifying_key.public_numbers().x)
++        h = h * 37 + hash(self.verifying_key.public_numbers().y)
+         return hash(h)
+ 
+     def get_name(self):
+@@ -100,46 +117,59 @@ class ECDSAKey (PKey):
+         return self.signing_key is not None
+ 
+     def sign_ssh_data(self, data):
+-        sig = self.signing_key.sign_deterministic(
+-            data, sigencode=self._sigencode, hashfunc=sha256)
++        signer = self.signing_key.signer(ec.ECDSA(hashes.SHA256()))
++        signer.update(data)
++        sig = signer.finalize()
++        r, s = decode_rfc6979_signature(sig)
++
+         m = Message()
+         m.add_string('ecdsa-sha2-nistp256')
+-        m.add_string(sig)
++        m.add_string(self._sigencode(r, s))
+         return m
+ 
+     def verify_ssh_sig(self, data, msg):
+         if msg.get_text() != 'ecdsa-sha2-nistp256':
+             return False
+         sig = msg.get_binary()
++        sigR, sigS = self._sigdecode(sig)
++        signature = encode_rfc6979_signature(sigR, sigS)
+ 
+-        # verify the signature by SHA'ing the data and encrypting it
+-        # using the public key.
+-        hash_obj = sha256(data).digest()
+-        return self.verifying_key.verify_digest(sig, hash_obj,
+-                                                sigdecode=self._sigdecode)
++        verifier = self.verifying_key.verifier(signature, ec.ECDSA(hashes.SHA256()))
++        verifier.update(data)
++        try:
++            verifier.verify()
++        except InvalidSignature:
++            return False
++        else:
++            return True
+ 
+     def write_private_key_file(self, filename, password=None):
+-        key = self.signing_key or self.verifying_key
+-        self._write_private_key_file('EC', filename, key.to_der(), password)
++        self._write_private_key_file(
++            filename,
++            self.signing_key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     def write_private_key(self, file_obj, password=None):
+-        key = self.signing_key or self.verifying_key
+-        self._write_private_key('EC', file_obj, key.to_der(), password)
++        self._write_private_key(
++            file_obj,
++            self.signing_key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     @staticmethod
+-    def generate(curve=curves.NIST256p, progress_func=None):
++    def generate(curve=ec.SECP256R1(), progress_func=None):
+         """
+         Generate a new private RSA key.  This factory function can be used to
+         generate a new host key or authentication key.
+ 
+-        :param function progress_func:
+-            an optional function to call at key points in key generation (used
+-            by ``pyCrypto.PublicKey``).
++        :param function progress_func: Unused
+         :returns: A new private key (`.RSAKey`) object
+         """
+-        signing_key = SigningKey.generate(curve)
+-        key = ECDSAKey(vals=(signing_key, signing_key.get_verifying_key()))
+-        return key
++        private_key = ec.generate_private_key(curve, backend=default_backend())
++        return ECDSAKey(vals=(private_key, private_key.public_key()))
+ 
+     ###  internals...
+ 
+@@ -155,23 +185,18 @@ class ECDSAKey (PKey):
+                         byte_chr(5) * 5, byte_chr(6) * 6, byte_chr(7) * 7]
+ 
+     def _decode_key(self, data):
+-        s, padding = der.remove_sequence(data)
+-        if padding:
+-            if padding not in self.ALLOWED_PADDINGS:
+-                raise ValueError("weird padding: %s" % u(binascii.hexlify(data)))
+-            data = data[:-len(padding)]
+-        key = SigningKey.from_der(data)
++        key = serialization.load_der_private_key(data, password=None, backend=default_backend())
+         self.signing_key = key
+-        self.verifying_key = key.get_verifying_key()
+-        self.size = 256
++        self.verifying_key = key.public_key()
++        self.size = key.curve.key_size
+ 
+-    def _sigencode(self, r, s, order):
++    def _sigencode(self, r, s):
+         msg = Message()
+         msg.add_mpint(r)
+         msg.add_mpint(s)
+         return msg.asbytes()
+ 
+-    def _sigdecode(self, sig, order):
++    def _sigdecode(self, sig):
+         msg = Message(sig)
+         r = msg.get_mpint()
+         s = msg.get_mpint()
+--- paramiko-1.15.2/paramiko/kex_gss.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/kex_gss.py	2015-04-12 17:36:15.207554941 -0700
+@@ -21,14 +21,15 @@
+ 
+ 
+ """
+-This module provides GSS-API / SSPI Key Exchange as defined in RFC 4462.
++This module provides GSS-API / SSPI Key Exchange as defined in :rfc:`4462`.
+ 
+ .. note:: Credential delegation is not supported in server mode.
+ 
+ .. note::
+-    `RFC 4462 Section 2.2 <http://www.ietf.org/rfc/rfc4462.txt>`_ says we are
+-    not required to implement GSS-API error messages. Thus, in many methods
+-    within this module, if an error occurs an exception will be thrown and the
++    `RFC 4462 Section 2.2
++    <https://tools.ietf.org/html/rfc4462.html#section-2.2>`_ says we are not
++    required to implement GSS-API error messages. Thus, in many methods within
++    this module, if an error occurs an exception will be thrown and the
+     connection will be terminated.
+ 
+ .. seealso:: :doc:`/api/ssh_gss`
+@@ -55,8 +56,8 @@ c_MSG_KEXGSS_GROUPREQ, c_MSG_KEXGSS_GROU
+ 
+ class KexGSSGroup1(object):
+     """
+-    GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange
+-    as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_
++    GSS-API / SSPI Authenticated Diffie-Hellman Key Exchange as defined in `RFC
++    4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+     """
+     # draft-ietf-secsh-transport-09.txt, page 17
+     P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
+@@ -278,8 +279,9 @@ class KexGSSGroup1(object):
+ 
+ class KexGSSGroup14(KexGSSGroup1):
+     """
+-    GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange
+-    as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_
++    GSS-API / SSPI Authenticated Diffie-Hellman Group14 Key Exchange as defined
++    in `RFC 4462 Section 2
++    <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+     """
+     P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF
+     G = 2
+@@ -288,8 +290,8 @@ class KexGSSGroup14(KexGSSGroup1):
+ 
+ class KexGSSGex(object):
+     """
+-    GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange
+-    as defined in `RFC 4462 Section 2 <http://www.ietf.org/rfc/rfc4462.txt>`_
++    GSS-API / SSPI Authenticated Diffie-Hellman Group Exchange as defined in
++    `RFC 4462 Section 2 <https://tools.ietf.org/html/rfc4462.html#section-2>`_
+     """
+     NAME = "gss-gex-sha1-toWM5Slw5Ew8Mqkay+al2g=="
+     min_bits = 1024
+@@ -590,8 +592,9 @@ class KexGSSGex(object):
+ 
+ class NullHostKey(object):
+     """
+-    This class represents the Null Host Key for GSS-API Key Exchange
+-    as defined in `RFC 4462 Section 5 <http://www.ietf.org/rfc/rfc4462.txt>`_
++    This class represents the Null Host Key for GSS-API Key Exchange as defined
++    in `RFC 4462 Section 5
++    <https://tools.ietf.org/html/rfc4462.html#section-5>`_
+     """
+     def __init__(self):
+         self.key = ""
+--- paramiko-1.15.2/paramiko/packet.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/packet.py	2015-04-12 17:36:15.207839345 -0700
+@@ -307,7 +307,7 @@ class Packetizer (object):
+                 self._log(DEBUG, 'Write packet <%s>, length %d' % (cmd_name, orig_len))
+                 self._log(DEBUG, util.format_binary(packet, 'OUT: '))
+             if self.__block_engine_out is not None:
+-                out = self.__block_engine_out.encrypt(packet)
++                out = self.__block_engine_out.update(packet)
+             else:
+                 out = packet
+             # + mac
+@@ -340,7 +340,7 @@ class Packetizer (object):
+         """
+         header = self.read_all(self.__block_size_in, check_rekey=True)
+         if self.__block_engine_in is not None:
+-            header = self.__block_engine_in.decrypt(header)
++            header = self.__block_engine_in.update(header)
+         if self.__dump_packets:
+             self._log(DEBUG, util.format_binary(header, 'IN: '))
+         packet_size = struct.unpack('>I', header[:4])[0]
+@@ -352,7 +352,7 @@ class Packetizer (object):
+         packet = buf[:packet_size - len(leftover)]
+         post_packet = buf[packet_size - len(leftover):]
+         if self.__block_engine_in is not None:
+-            packet = self.__block_engine_in.decrypt(packet)
++            packet = self.__block_engine_in.update(packet)
+         if self.__dump_packets:
+             self._log(DEBUG, util.format_binary(packet, 'IN: '))
+         packet = leftover + packet
+--- paramiko-1.15.2/paramiko/pkey.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/pkey.py	2015-04-12 17:36:15.208139348 -0700
+@@ -21,27 +21,39 @@ Common API for all public keys.
+ """
+ 
+ import base64
+-from binascii import hexlify, unhexlify
++from binascii import unhexlify
+ import os
+ from hashlib import md5
+ 
+-from Crypto.Cipher import DES3, AES
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives import serialization
++from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher
+ 
+ from paramiko import util
+-from paramiko.common import o600, zero_byte
++from paramiko.common import o600
+ from paramiko.py3compat import u, encodebytes, decodebytes, b
+ from paramiko.ssh_exception import SSHException, PasswordRequiredException
+ 
+ 
+-class PKey (object):
++class PKey(object):
+     """
+     Base class for public keys.
+     """
+ 
+     # known encryption types for private key files:
+     _CIPHER_TABLE = {
+-        'AES-128-CBC': {'cipher': AES, 'keysize': 16, 'blocksize': 16, 'mode': AES.MODE_CBC},
+-        'DES-EDE3-CBC': {'cipher': DES3, 'keysize': 24, 'blocksize': 8, 'mode': DES3.MODE_CBC},
++        'AES-128-CBC': {
++            'cipher': algorithms.AES,
++            'keysize': 16,
++            'blocksize': 16,
++            'mode': modes.CBC
++        },
++        'DES-EDE3-CBC': {
++            'cipher': algorithms.TripleDES,
++            'keysize': 24,
++            'blocksize': 8,
++            'mode': modes.CBC
++        },
+     }
+ 
+     def __init__(self, msg=None, data=None):
+@@ -300,9 +312,12 @@ class PKey (object):
+         mode = self._CIPHER_TABLE[encryption_type]['mode']
+         salt = unhexlify(b(saltstr))
+         key = util.generate_key_bytes(md5, salt, password, keysize)
+-        return cipher.new(key, mode, salt).decrypt(data)
++        decryptor = Cipher(
++            cipher(key), mode(salt), backend=default_backend()
++        ).decryptor()
++        return decryptor.update(data) + decryptor.finalize()
+ 
+-    def _write_private_key_file(self, tag, filename, data, password=None):
++    def _write_private_key_file(self, filename, key, format, password=None):
+         """
+         Write an SSH2-format private key file in a form that can be read by
+         paramiko or openssh.  If no password is given, the key is written in
+@@ -319,31 +334,16 @@ class PKey (object):
+         with open(filename, 'w', o600) as f:
+             # grrr... the mode doesn't always take hold
+             os.chmod(filename, o600)
+-            self._write_private_key(tag, f, data, password)
++            self._write_private_key(f, key, format, password=password)
+ 
+-    def _write_private_key(self, tag, f, data, password=None):
+-        f.write('-----BEGIN %s PRIVATE KEY-----\n' % tag)
+-        if password is not None:
+-            cipher_name = list(self._CIPHER_TABLE.keys())[0]
+-            cipher = self._CIPHER_TABLE[cipher_name]['cipher']
+-            keysize = self._CIPHER_TABLE[cipher_name]['keysize']
+-            blocksize = self._CIPHER_TABLE[cipher_name]['blocksize']
+-            mode = self._CIPHER_TABLE[cipher_name]['mode']
+-            salt = os.urandom(blocksize)
+-            key = util.generate_key_bytes(md5, salt, password, keysize)
+-            if len(data) % blocksize != 0:
+-                n = blocksize - len(data) % blocksize
+-                #data += os.urandom(n)
+-                # that would make more sense ^, but it confuses openssh.
+-                data += zero_byte * n
+-            data = cipher.new(key, mode, salt).encrypt(data)
+-            f.write('Proc-Type: 4,ENCRYPTED\n')
+-            f.write('DEK-Info: %s,%s\n' % (cipher_name, u(hexlify(salt)).upper()))
+-            f.write('\n')
+-        s = u(encodebytes(data))
+-        # re-wrap to 64-char lines
+-        s = ''.join(s.split('\n'))
+-        s = '\n'.join([s[i: i + 64] for i in range(0, len(s), 64)])
+-        f.write(s)
+-        f.write('\n')
+-        f.write('-----END %s PRIVATE KEY-----\n' % tag)
++    def _write_private_key(self, f, key, format, password=None):
++        if password is None:
++            encryption = serialization.NoEncryption()
++        else:
++            encryption = serialization.BestAvailableEncryption(password)
++
++        f.write(key.private_bytes(
++            serialization.Encoding.PEM,
++            format,
++            encryption
++        ).decode())
+--- paramiko-1.15.2/paramiko/rsakey.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/rsakey.py	2015-04-12 17:36:15.208516662 -0700
+@@ -20,34 +20,26 @@
+ RSA keys.
+ """
+ 
+-import os
+-from hashlib import sha1
++from cryptography.exceptions import InvalidSignature
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives import hashes, serialization
++from cryptography.hazmat.primitives.asymmetric import rsa, padding
+ 
+-from Crypto.PublicKey import RSA
+-
+-from paramiko import util
+-from paramiko.common import max_byte, zero_byte, one_byte
+ from paramiko.message import Message
+-from paramiko.ber import BER, BERException
+ from paramiko.pkey import PKey
+-from paramiko.py3compat import long
+ from paramiko.ssh_exception import SSHException
+ 
+ SHA1_DIGESTINFO = b'\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'
+ 
+ 
+-class RSAKey (PKey):
++class RSAKey(PKey):
+     """
+     Representation of an RSA key which can be used to sign and verify SSH2
+     data.
+     """
+ 
+-    def __init__(self, msg=None, data=None, filename=None, password=None, vals=None, file_obj=None):
+-        self.n = None
+-        self.e = None
+-        self.d = None
+-        self.p = None
+-        self.q = None
++    def __init__(self, msg=None, data=None, filename=None, password=None, key=None, file_obj=None):
++        self.key = None
+         if file_obj is not None:
+             self._from_private_key(file_obj, password)
+             return
+@@ -56,22 +48,33 @@ class RSAKey (PKey):
+             return
+         if (msg is None) and (data is not None):
+             msg = Message(data)
+-        if vals is not None:
+-            self.e, self.n = vals
++        if key is not None:
++            self.key = key
+         else:
+             if msg is None:
+                 raise SSHException('Key object may not be empty')
+             if msg.get_text() != 'ssh-rsa':
+                 raise SSHException('Invalid key')
+-            self.e = msg.get_mpint()
+-            self.n = msg.get_mpint()
+-        self.size = util.bit_length(self.n)
++            self.key = rsa.RSAPublicNumbers(
++                e=msg.get_mpint(), n=msg.get_mpint()
++            ).public_key(default_backend())
++
++    @property
++    def size(self):
++        return self.key.key_size
++
++    @property
++    def public_numbers(self):
++        if isinstance(self.key, rsa.RSAPrivateKey):
++            return self.key.private_numbers().public_numbers
++        else:
++            return self.key.public_numbers()
+ 
+     def asbytes(self):
+         m = Message()
+         m.add_string('ssh-rsa')
+-        m.add_mpint(self.e)
+-        m.add_mpint(self.n)
++        m.add_mpint(self.public_numbers.e)
++        m.add_mpint(self.public_numbers.n)
+         return m.asbytes()
+ 
+     def __str__(self):
+@@ -79,8 +82,8 @@ class RSAKey (PKey):
+ 
+     def __hash__(self):
+         h = hash(self.get_name())
+-        h = h * 37 + hash(self.e)
+-        h = h * 37 + hash(self.n)
++        h = h * 37 + hash(self.public_numbers.e)
++        h = h * 37 + hash(self.public_numbers.n)
+         return hash(h)
+ 
+     def get_name(self):
+@@ -90,12 +93,16 @@ class RSAKey (PKey):
+         return self.size
+ 
+     def can_sign(self):
+-        return self.d is not None
++        return isinstance(self.key, rsa.RSAPrivateKey)
+ 
+     def sign_ssh_data(self, data):
+-        digest = sha1(data).digest()
+-        rsa = RSA.construct((long(self.n), long(self.e), long(self.d)))
+-        sig = util.deflate_long(rsa.sign(self._pkcs1imify(digest), bytes())[0], 0)
++        signer = self.key.signer(
++            padding=padding.PKCS1v15(),
++            algorithm=hashes.SHA1(),
++        )
++        signer.update(data)
++        sig = signer.finalize()
++
+         m = Message()
+         m.add_string('ssh-rsa')
+         m.add_string(sig)
+@@ -104,32 +111,38 @@ class RSAKey (PKey):
+     def verify_ssh_sig(self, data, msg):
+         if msg.get_text() != 'ssh-rsa':
+             return False
+-        sig = util.inflate_long(msg.get_binary(), True)
+-        # verify the signature by SHA'ing the data and encrypting it using the
+-        # public key.  some wackiness ensues where we "pkcs1imify" the 20-byte
+-        # hash into a string as long as the RSA key.
+-        hash_obj = util.inflate_long(self._pkcs1imify(sha1(data).digest()), True)
+-        rsa = RSA.construct((long(self.n), long(self.e)))
+-        return rsa.verify(hash_obj, (sig,))
+-
+-    def _encode_key(self):
+-        if (self.p is None) or (self.q is None):
+-            raise SSHException('Not enough key info to write private key file')
+-        keylist = [0, self.n, self.e, self.d, self.p, self.q,
+-                   self.d % (self.p - 1), self.d % (self.q - 1),
+-                   util.mod_inverse(self.q, self.p)]
++        key = self.key
++        if isinstance(key, rsa.RSAPrivateKey):
++            key = key.public_key()
++
++        verifier = key.verifier(
++            signature=msg.get_binary(),
++            padding=padding.PKCS1v15(),
++            algorithm=hashes.SHA1(),
++        )
++        verifier.update(data)
+         try:
+-            b = BER()
+-            b.encode(keylist)
+-        except BERException:
+-            raise SSHException('Unable to create ber encoding of key')
+-        return b.asbytes()
++            verifier.verify()
++        except InvalidSignature:
++            return False
++        else:
++            return True
+ 
+     def write_private_key_file(self, filename, password=None):
+-        self._write_private_key_file('RSA', filename, self._encode_key(), password)
++        self._write_private_key_file(
++            filename,
++            self.key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     def write_private_key(self, file_obj, password=None):
+-        self._write_private_key('RSA', file_obj, self._encode_key(), password)
++        self._write_private_key(
++            file_obj,
++            self.key,
++            serialization.PrivateFormat.TraditionalOpenSSL,
++            password=password
++        )
+ 
+     @staticmethod
+     def generate(bits, progress_func=None):
+@@ -138,29 +151,16 @@ class RSAKey (PKey):
+         generate a new host key or authentication key.
+ 
+         :param int bits: number of bits the generated key should be.
+-        :param function progress_func:
+-            an optional function to call at key points in key generation (used
+-            by ``pyCrypto.PublicKey``).
++        :param function progress_func: Unused
+         :return: new `.RSAKey` private key
+         """
+-        rsa = RSA.generate(bits, os.urandom, progress_func)
+-        key = RSAKey(vals=(rsa.e, rsa.n))
+-        key.d = rsa.d
+-        key.p = rsa.p
+-        key.q = rsa.q
+-        return key
++        key = rsa.generate_private_key(
++            public_exponent=65537, key_size=bits, backend=default_backend()
++        )
++        return RSAKey(key=key)
+ 
+     ###  internals...
+ 
+-    def _pkcs1imify(self, data):
+-        """
+-        turn a 20-byte SHA1 hash into a blob of data as large as the key's N,
+-        using PKCS1's \"emsa-pkcs1-v1_5\" encoding.  totally bizarre.
+-        """
+-        size = len(util.deflate_long(self.n, 0))
+-        filler = max_byte * (size - len(SHA1_DIGESTINFO) - len(data) - 3)
+-        return zero_byte + one_byte + filler + zero_byte + SHA1_DIGESTINFO + data
+-
+     def _from_private_key_file(self, filename, password):
+         data = self._read_private_key_file('RSA', filename, password)
+         self._decode_key(data)
+@@ -170,18 +170,8 @@ class RSAKey (PKey):
+         self._decode_key(data)
+ 
+     def _decode_key(self, data):
+-        # private key file contains:
+-        # RSAPrivateKey = { version = 0, n, e, d, p, q, d mod p-1, d mod q-1, q**-1 mod p }
+-        try:
+-            keylist = BER(data).decode()
+-        except BERException:
+-            raise SSHException('Unable to parse key file')
+-        if (type(keylist) is not list) or (len(keylist) < 4) or (keylist[0] != 0):
+-            raise SSHException('Not a valid RSA private key file (bad ber encoding)')
+-        self.n = keylist[1]
+-        self.e = keylist[2]
+-        self.d = keylist[3]
+-        # not really needed
+-        self.p = keylist[4]
+-        self.q = keylist[5]
+-        self.size = util.bit_length(self.n)
++        key = serialization.load_der_private_key(
++            data, password=None, backend=default_backend()
++        )
++        assert isinstance(key, rsa.RSAPrivateKey)
++        self.key = key
+--- paramiko-1.15.2/paramiko/ssh_exception.py.~1~	2014-09-08 10:42:16.000000000 -0700
++++ paramiko-1.15.2/paramiko/ssh_exception.py	2015-04-12 17:36:15.208756832 -0700
+@@ -16,6 +16,8 @@
+ # along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+ # 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ 
++import socket
++
+ 
+ class SSHException (Exception):
+     """
+@@ -129,3 +131,39 @@ class ProxyCommandFailure (SSHException)
+         self.error = error
+         # for unpickling
+         self.args = (command, error, )
++
++
++class NoValidConnectionsError(socket.error):
++    """
++    Multiple connection attempts were made and no families succeeded.
++
++    This exception class wraps multiple "real" underlying connection errors,
++    all of which represent failed connection attempts. Because these errors are
++    not guaranteed to all be of the same error type (i.e. different errno,
++    class, message, etc) we expose a single unified error message and a
++    ``None`` errno so that instances of this class match most normal handling
++    of `socket.error` objects.
++    
++    To see the wrapped exception objects, access the ``errors`` attribute.
++    ``errors`` is a dict whose keys are address tuples (e.g. ``('127.0.0.1',
++    22)``) and whose values are the exception encountered trying to connect to
++    that address.
++
++    It is implied/assumed that all the errors given to a single instance of
++    this class are from connecting to the same hostname + port (and thus that
++    the differences are in the resolution of the hostname - e.g. IPv4 vs v6).
++    """
++    def __init__(self, errors):
++        """
++        :param dict errors:
++            The errors dict to store, as described by class docstring.
++        """
++        addrs = list(errors.keys())
++        body = ', '.join([x[0] for x in addrs[:-1]])
++        tail = addrs[-1][0]
++        msg = "Unable to connect to port {0} on {1} or {2}"
++        super(NoValidConnectionsError, self).__init__(
++            None, # stand-in for errno
++            msg.format(addrs[0][1], body, tail)
++        )
++        self.errors = errors
+--- paramiko-1.15.2/paramiko/ssh_gss.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/ssh_gss.py	2015-04-12 17:36:15.209036497 -0700
+@@ -20,7 +20,7 @@
+ 
+ 
+ """
+-This module provides GSS-API / SSPI  authentication as defined in RFC 4462.
++This module provides GSS-API / SSPI  authentication as defined in :rfc:`4462`.
+ 
+ .. note:: Credential delegation is not supported in server mode.
+ 
+@@ -39,22 +39,8 @@ import sys
+ """
+ GSS_AUTH_AVAILABLE = True
+ 
+-try:
+-    from pyasn1.type.univ import ObjectIdentifier
+-    from pyasn1.codec.der import encoder, decoder
+-except ImportError:
+-    GSS_AUTH_AVAILABLE = False
+-    class ObjectIdentifier(object):
+-        def __init__(self, *args):
+-            raise NotImplementedError("Module pyasn1 not importable")
+-
+-    class decoder(object):
+-        def decode(self):
+-            raise NotImplementedError("Module pyasn1 not importable")
+-
+-    class encoder(object):
+-        def encode(self):
+-            raise NotImplementedError("Module pyasn1 not importable")
++from pyasn1.type.univ import ObjectIdentifier
++from pyasn1.codec.der import encoder, decoder
+ 
+ from paramiko.common import MSG_USERAUTH_REQUEST
+ from paramiko.ssh_exception import SSHException
+--- paramiko-1.15.2/paramiko/transport.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/transport.py	2015-04-12 17:36:15.209751892 -0700
+@@ -28,6 +28,9 @@ import time
+ import weakref
+ from hashlib import md5, sha1
+ 
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
++
+ import paramiko
+ from paramiko import util
+ from paramiko.auth_handler import AuthHandler
+@@ -63,11 +66,6 @@ from paramiko.ssh_exception import (SSHE
+                                     ChannelException, ProxyCommandFailure)
+ from paramiko.util import retry_on_signal, ClosingContextManager, clamp_value
+ 
+-from Crypto.Cipher import Blowfish, AES, DES3, ARC4
+-try:
+-    from Crypto.Util import Counter
+-except ImportError:
+-    from paramiko.util import Counter
+ 
+ 
+ # for thread cleanup
+@@ -91,6 +89,9 @@ class Transport (threading.Thread, Closi
+ 
+     Instances of this class may be used as context managers.
+     """
++    _ENCRYPT = object()
++    _DECRYPT = object()
++
+     _PROTO_ID = '2.0'
+     _CLIENT_ID = 'paramiko_%s' % paramiko.__version__
+ 
+@@ -102,16 +103,57 @@ class Transport (threading.Thread, Closi
+     _preferred_compression = ('none',)
+ 
+     _cipher_info = {
+-        'aes128-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16},
+-        'aes256-ctr': {'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32},
+-        'blowfish-cbc': {'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16},
+-        'aes128-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16},
+-        'aes256-cbc': {'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32},
+-        '3des-cbc': {'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24},
+-        'arcfour128': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16},
+-        'arcfour256': {'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32},
++        'aes128-ctr': {
++            'class': algorithms.AES,
++            'mode': modes.CTR,
++            'block-size': 16,
++            'key-size': 16
++        },
++        'aes256-ctr': {
++            'class': algorithms.AES,
++            'mode': modes.CTR,
++            'block-size': 16,
++            'key-size': 32
++        },
++        'blowfish-cbc': {
++            'class': algorithms.Blowfish,
++            'mode': modes.CBC,
++            'block-size': 8,
++            'key-size': 16
++        },
++        'aes128-cbc': {
++            'class': algorithms.AES,
++            'mode': modes.CBC,
++            'block-size': 16,
++            'key-size': 16
++        },
++        'aes256-cbc': {
++            'class': algorithms.AES,
++            'mode': modes.CBC,
++            'block-size': 16,
++            'key-size': 32
++        },
++        '3des-cbc': {
++            'class': algorithms.TripleDES,
++            'mode': modes.CBC,
++            'block-size': 8,
++            'key-size': 24
++        },
++        'arcfour128': {
++            'class': algorithms.ARC4,
++            'mode': None,
++            'block-size': 8,
++            'key-size': 16
++        },
++        'arcfour256': {
++            'class': algorithms.ARC4,
++            'mode': None,
++            'block-size': 8,
++            'key-size': 32
++        },
+     }
+ 
++
+     _mac_info = {
+         'hmac-sha1': {'class': sha1, 'size': 20},
+         'hmac-sha1-96': {'class': sha1, 'size': 12},
+@@ -1508,22 +1550,34 @@ class Transport (threading.Thread, Closi
+             sofar += digest
+         return out[:nbytes]
+ 
+-    def _get_cipher(self, name, key, iv):
++    def _get_cipher(self, name, key, iv, operation):
+         if name not in self._cipher_info:
+             raise SSHException('Unknown client cipher ' + name)
+         if name in ('arcfour128', 'arcfour256'):
+             # arcfour cipher
+-            cipher = self._cipher_info[name]['class'].new(key)
++            cipher = Cipher(
++                self._cipher_info[name]['class'](key),
++                None,
++                backend=default_backend()
++            )
++            if operation is self._ENCRYPT:
++                engine = cipher.encryptor()
++            else:
++                engine = cipher.decryptor()
+             # as per RFC 4345, the first 1536 bytes of keystream
+             # generated by the cipher MUST be discarded
+-            cipher.encrypt(" " * 1536)
+-            return cipher
+-        elif name.endswith("-ctr"):
+-            # CTR modes, we need a counter
+-            counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
+-            return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
++            engine.update(" " * 1536)
++            return engine
+         else:
+-            return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
++            cipher = Cipher(
++                self._cipher_info[name]['class'](key),
++                self._cipher_info[name]['mode'](iv),
++                backend=default_backend(),
++            )
++            if operation is self._ENCRYPT:
++                return cipher.encryptor()
++            else:
++                return cipher.decryptor()
+ 
+     def _set_forward_agent_handler(self, handler):
+         if handler is None:
+@@ -1879,7 +1933,7 @@ class Transport (threading.Thread, Closi
+         else:
+             IV_in = self._compute_key('B', block_size)
+             key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
+-        engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
++        engine = self._get_cipher(self.remote_cipher, key_in, IV_in, self._DECRYPT)
+         mac_size = self._mac_info[self.remote_mac]['size']
+         mac_engine = self._mac_info[self.remote_mac]['class']
+         # initial mac keys are done in the hash's natural size (not the potentially truncated
+@@ -1906,7 +1960,7 @@ class Transport (threading.Thread, Closi
+         else:
+             IV_out = self._compute_key('A', block_size)
+             key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
+-        engine = self._get_cipher(self.local_cipher, key_out, IV_out)
++        engine = self._get_cipher(self.local_cipher, key_out, IV_out, self._ENCRYPT)
+         mac_size = self._mac_info[self.local_mac]['size']
+         mac_engine = self._mac_info[self.local_mac]['class']
+         # initial mac keys are done in the hash's natural size (not the potentially truncated
+--- paramiko-1.15.2/paramiko/util.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/paramiko/util.py	2015-04-12 17:36:15.210034924 -0700
+@@ -22,7 +22,6 @@ Useful functions used by the rest of par
+ 
+ from __future__ import generators
+ 
+-import array
+ import errno
+ import sys
+ import struct
+@@ -31,7 +30,7 @@ import threading
+ import logging
+ 
+ from paramiko.common import DEBUG, zero_byte, xffffffff, max_byte
+-from paramiko.py3compat import PY2, long, byte_ord, b, byte_chr
++from paramiko.py3compat import PY2, long, byte_chr, byte_ord, b
+ from paramiko.config import SSHConfig
+ 
+ 
+@@ -273,37 +272,6 @@ def retry_on_signal(function):
+                 raise
+ 
+ 
+-class Counter (object):
+-    """Stateful counter for CTR mode crypto"""
+-    def __init__(self, nbits, initial_value=long(1), overflow=long(0)):
+-        self.blocksize = nbits / 8
+-        self.overflow = overflow
+-        # start with value - 1 so we don't have to store intermediate values when counting
+-        # could the iv be 0?
+-        if initial_value == 0:
+-            self.value = array.array('c', max_byte * self.blocksize)
+-        else:
+-            x = deflate_long(initial_value - 1, add_sign_padding=False)
+-            self.value = array.array('c', zero_byte * (self.blocksize - len(x)) + x)
+-
+-    def __call__(self):
+-        """Increament the counter and return the new value"""
+-        i = self.blocksize - 1
+-        while i > -1:
+-            c = self.value[i] = byte_chr((byte_ord(self.value[i]) + 1) % 256)
+-            if c != zero_byte:
+-                return self.value.tostring()
+-            i -= 1
+-        # counter reset
+-        x = deflate_long(self.overflow, add_sign_padding=False)
+-        self.value = array.array('c', zero_byte * (self.blocksize - len(x)) + x)
+-        return self.value.tostring()
+-
+-    @classmethod
+-    def new(cls, nbits, initial_value=long(1), overflow=long(0)):
+-        return cls(nbits, initial_value=initial_value, overflow=overflow)
+-
+-
+ def constant_time_bytes_eq(a, b):
+     if len(a) != len(b):
+         return False
+--- paramiko-1.15.2/setup.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/setup.py	2015-04-12 17:36:15.210254883 -0700
+@@ -24,7 +24,7 @@ connections between python scripts.  All
+ are supported.  SFTP client and server mode are both supported too.
+ 
+ Required packages:
+-    pyCrypto
++    Cryptography
+ 
+ To install the `in-development version
+ <https://github.com/paramiko/paramiko/tarball/master#egg=paramiko-dev>`_, use
+@@ -41,8 +41,8 @@ try:
+     from setuptools import setup
+     kw = {
+         'install_requires': [
+-            'pycrypto >= 2.1, != 2.4',
+-            'ecdsa >= 0.11',
++            'cryptography >= 0.8',
++            'pyasn1 >= 0.1.7',
+         ],
+     }
+ except ImportError:
+--- paramiko-1.15.2/tests/test_auth.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/tests/test_auth.py	2015-04-12 17:36:15.210519848 -0700
+@@ -83,13 +83,13 @@ class NullServer (ServerInterface):
+                 return AUTH_SUCCESSFUL
+             return AUTH_PARTIALLY_SUCCESSFUL
+         return AUTH_FAILED
+-    
++
+     def check_auth_interactive(self, username, submethods):
+         if username == 'commie':
+             self.username = username
+             return InteractiveQuery('password', 'Please enter a password.', ('Password', False))
+         return AUTH_FAILED
+-    
++
+     def check_auth_interactive_response(self, responses):
+         if self.username == 'commie':
+             if (len(responses) == 1) and (responses[0] == 'cat'):
+@@ -111,7 +111,7 @@ class AuthTest (unittest.TestCase):
+         self.ts.close()
+         self.socks.close()
+         self.sockc.close()
+-    
++
+     def start_server(self):
+         host_key = RSAKey.from_private_key_file(test_path('test_rsa.key'))
+         self.public_host_key = RSAKey(data=host_key.asbytes())
+@@ -120,7 +120,7 @@ class AuthTest (unittest.TestCase):
+         self.server = NullServer()
+         self.assertTrue(not self.event.is_set())
+         self.ts.start_server(self.event, self.server)
+-    
++
+     def verify_finished(self):
+         self.event.wait(1.0)
+         self.assertTrue(self.event.is_set())
+@@ -156,7 +156,7 @@ class AuthTest (unittest.TestCase):
+             self.assertTrue(issubclass(etype, AuthenticationException))
+         self.tc.auth_password(username='slowdive', password='pygmalion')
+         self.verify_finished()
+-    
++
+     def test_3_multipart_auth(self):
+         """
+         verify that multipart auth works.
+@@ -187,7 +187,7 @@ class AuthTest (unittest.TestCase):
+         self.assertEqual(self.got_prompts, [('Password', False)])
+         self.assertEqual([], remain)
+         self.verify_finished()
+-        
++
+     def test_5_interactive_auth_fallback(self):
+         """
+         verify that a password auth attempt will fallback to "interactive"
+--- paramiko-1.15.2/tests/test_client.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/tests/test_client.py	2015-04-12 17:36:15.210808627 -0700
+@@ -22,6 +22,8 @@ Some unit tests for SSHClient.
+ 
+ from __future__ import with_statement
+ 
++import gc
++import platform
+ import socket
+ from tempfile import mkstemp
+ import threading
+@@ -31,8 +33,9 @@ import warnings
+ import os
+ import time
+ from tests.util import test_path
++
+ import paramiko
+-from paramiko.common import PY2, b
++from paramiko.common import PY2
+ from paramiko.ssh_exception import SSHException
+ 
+ 
+@@ -266,14 +269,13 @@ class SSHClientTest (unittest.TestCase):
+         transport's packetizer) is closed.
+         """
+         # Unclear why this is borked on Py3, but it is, and does not seem worth
+-        # pursuing at the moment.
++        # pursuing at the moment. Skipped on PyPy because it fails on travis
++        # for unknown reasons, works fine locally.
+         # XXX: It's the release of the references to e.g packetizer that fails
+         # in py3...
+-        if not PY2:
++        if not PY2 or platform.python_implementation() == "PyPy":
+             return
+         threading.Thread(target=self._run).start()
+-        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
+-        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+ 
+         self.tc = paramiko.SSHClient()
+         self.tc.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+@@ -289,14 +291,10 @@ class SSHClientTest (unittest.TestCase):
+         self.tc.close()
+         del self.tc
+ 
+-        # hrm, sometimes p isn't cleared right away.  why is that?
+-        #st = time.time()
+-        #while (time.time() - st < 5.0) and (p() is not None):
+-        #    time.sleep(0.1)
+-
+-        # instead of dumbly waiting for the GC to collect, force a collection
+-        # to see whether the SSHClient object is deallocated correctly
+-        import gc
++        # force a collection to see whether the SSHClient object is deallocated
++        # correctly. 2 GCs are needed to make sure it's really collected on
++        # PyPy
++        gc.collect()
+         gc.collect()
+ 
+         self.assertTrue(p() is None)
+@@ -306,8 +304,6 @@ class SSHClientTest (unittest.TestCase):
+         verify that an SSHClient can be used a context manager
+         """
+         threading.Thread(target=self._run).start()
+-        host_key = paramiko.RSAKey.from_private_key_file(test_path('test_rsa.key'))
+-        public_host_key = paramiko.RSAKey(data=host_key.asbytes())
+ 
+         with paramiko.SSHClient() as tc:
+             self.tc = tc
+--- paramiko-1.15.2/tests/test_packetizer.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/tests/test_packetizer.py	2015-04-12 17:36:15.211084420 -0700
+@@ -23,9 +23,10 @@ Some unit tests for the ssh2 protocol in
+ import unittest
+ from hashlib import sha1
+ 
+-from tests.loop import LoopSocket
++from cryptography.hazmat.backends import default_backend
++from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
+ 
+-from Crypto.Cipher import AES
++from tests.loop import LoopSocket
+ 
+ from paramiko import Message, Packetizer, util
+ from paramiko.common import byte_chr, zero_byte
+@@ -43,8 +44,12 @@ class PacketizerTest (unittest.TestCase)
+         p = Packetizer(wsock)
+         p.set_log(util.get_logger('paramiko.transport'))
+         p.set_hexdump(True)
+-        cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16)
+-        p.set_outbound_cipher(cipher, 16, sha1, 12, x1f * 20)
++        encryptor = Cipher(
++            algorithms.AES(zero_byte * 16),
++            modes.CBC(x55 * 16),
++            backend=default_backend()
++        ).encryptor()
++        p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20)
+ 
+         # message has to be at least 16 bytes long, so we'll have at least one
+         # block of data encrypted that contains zero random padding bytes
+@@ -66,8 +71,12 @@ class PacketizerTest (unittest.TestCase)
+         p = Packetizer(rsock)
+         p.set_log(util.get_logger('paramiko.transport'))
+         p.set_hexdump(True)
+-        cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16)
+-        p.set_inbound_cipher(cipher, 16, sha1, 12, x1f * 20)
++        decryptor = Cipher(
++            algorithms.AES(zero_byte * 16),
++            modes.CBC(x55 * 16),
++            backend=default_backend()
++        ).decryptor()
++        p.set_inbound_cipher(decryptor, 16, sha1, 12, x1f * 20)
+         wsock.send(b'\x43\x91\x97\xbd\x5b\x50\xac\x25\x87\xc2\xc4\x6b\xc7\xe9\x38\xc0\x90\xd2\x16\x56\x0d\x71\x73\x61\x38\x7c\x4c\x3d\xfb\x97\x7d\xe2\x6e\x03\xb1\xa0\xc2\x1c\xd6\x41\x41\x4c\xb4\x59')
+         cmd, m = p.read_message()
+         self.assertEqual(100, cmd)
+@@ -82,8 +91,12 @@ class PacketizerTest (unittest.TestCase)
+         p = Packetizer(wsock)
+         p.set_log(util.get_logger('paramiko.transport'))
+         p.set_hexdump(True)
+-        cipher = AES.new(zero_byte * 16, AES.MODE_CBC, x55 * 16)
+-        p.set_outbound_cipher(cipher, 16, sha1, 12, x1f * 20)
++        encryptor = Cipher(
++            algorithms.AES(zero_byte * 16),
++            modes.CBC(x55 * 16),
++            backend=default_backend()
++        ).encryptor()
++        p.set_outbound_cipher(encryptor, 16, sha1, 12, x1f * 20)
+ 
+         # message has to be at least 16 bytes long, so we'll have at least one
+         # block of data encrypted that contains zero random padding bytes
+--- paramiko-1.15.2/tests/test_pkey.py.~1~	2014-12-19 15:01:22.000000000 -0800
++++ paramiko-1.15.2/tests/test_pkey.py	2015-04-12 17:36:15.211328345 -0700
+@@ -42,34 +42,34 @@ SIGNED_RSA = '20:d7:8a:31:21:cb:f7:92:12
+ 
+ RSA_PRIVATE_OUT = """\
+ -----BEGIN RSA PRIVATE KEY-----
+-MIICXAIBAAKCAIEA049W6geFpmsljTwfvI1UmKWWJPNFI74+vNKTk4dmzkQY2yAM
+-s6FhlvhlI8ysU4oj71ZsRYMecHbBbxdN79+JRFVYTKaLqjwGENeTd+yv4q+V2PvZ
+-v3fLnzApI3l7EJCqhWwJUHJ1jAkZzqDx0tyOL4uoZpww3nmE0kb3y21tH4cCASMC
+-ggCAEiI6plhqipt4P05L3PYr0pHZq2VPEbE4k9eI/gRKo/c1VJxY3DJnc1cenKsk
+-trQRtW3OxCEufqsX5PNec6VyKkW+Ox6beJjMKm4KF8ZDpKi9Nw6MdX3P6Gele9D9
+-+ieyhVFljrnAqcXsgChTBOYlL2imqCs3qRGAJ3cMBIAx3VsCQQD3pIFVYW398kE0
+-n0e1icEpkbDRV4c5iZVhu8xKy2yyfy6f6lClSb2+Ub9uns7F3+b5v0pYSHbE9+/r
+-OpRq83AfAkEA2rMZlr8SnMXgnyka2LuggA9QgMYy18hyao1dUxySubNDa9N+q2QR
+-mwDisTUgRFHKIlDHoQmzPbXAmYZX1YlDmQJBAPCRLS5epV0XOAc7pL762OaNhzHC
+-veAfQKgVhKBt105PqaKpGyQ5AXcNlWQlPeTK4GBTbMrKDPna6RBkyrEJvV8CQBK+
+-5O+p+kfztCrmRCE0p1tvBuZ3Y3GU1ptrM+KNa6mEZN1bRV8l1Z+SXJLYqv6Kquz/
+-nBUeFq2Em3rfoSDugiMCQDyG3cxD5dKX3IgkhLyBWls/FLDk4x/DQ+NUTu0F1Cu6
+-JJye+5ARLkL0EweMXf0tmIYfWItDLsWB0fKg/56h0js=
++MIICWgIBAAKBgQDTj1bqB4WmayWNPB+8jVSYpZYk80Ujvj680pOTh2bORBjbIAyz
++oWGW+GUjzKxTiiPvVmxFgx5wdsFvF03v34lEVVhMpouqPAYQ15N37K/ir5XY+9m/
++d8ufMCkjeXsQkKqFbAlQcnWMCRnOoPHS3I4vi6hmnDDeeYTSRvfLbW0fhwIBIwKB
++gBIiOqZYaoqbeD9OS9z2K9KR2atlTxGxOJPXiP4ESqP3NVScWNwyZ3NXHpyrJLa0
++EbVtzsQhLn6rF+TzXnOlcipFvjsem3iYzCpuChfGQ6SovTcOjHV9z+hnpXvQ/fon
++soVRZY65wKnF7IAoUwTmJS9opqgrN6kRgCd3DASAMd1bAkEA96SBVWFt/fJBNJ9H
++tYnBKZGw0VeHOYmVYbvMSstssn8un+pQpUm9vlG/bp7Oxd/m+b9KWEh2xPfv6zqU
++avNwHwJBANqzGZa/EpzF4J8pGti7oIAPUIDGMtfIcmqNXVMckrmzQ2vTfqtkEZsA
++4rE1IERRyiJQx6EJsz21wJmGV9WJQ5kCQQDwkS0uXqVdFzgHO6S++tjmjYcxwr3g
++H0CoFYSgbddOT6miqRskOQF3DZVkJT3kyuBgU2zKygz52ukQZMqxCb1fAkASvuTv
++qfpH87Qq5kQhNKdbbwbmd2NxlNabazPijWuphGTdW0VfJdWfklyS2Kr+iqrs/5wV
++HhathJt636Eg7oIjAkA8ht3MQ+XSl9yIJIS8gVpbPxSw5OMfw0PjVE7tBdQruiSc
++nvuQES5C9BMHjF39LZiGH1iLQy7FgdHyoP+eodI7
+ -----END RSA PRIVATE KEY-----
+ """
+ 
+ DSS_PRIVATE_OUT = """\
+ -----BEGIN DSA PRIVATE KEY-----
+-MIIBvgIBAAKCAIEA54GmA2d9HOv+3CYBBG7ZfBYCncIW2tWe6Dqzp+DCP+guNhtW
+-2MDLqmX+HQQoJbHat/Uh63I2xPFaueID0jod4OPrlfUXIOSDqDy28Kdo0Hxen9RS
+-G7Me4awwiKlHEHHD0sXrTwSplyPUTfK2S2hbkHk5yOuQSjPfEbsL6ukiNi8CFQDw
+-z4UnmsGiSNu5iqjn3uTzwUpshwKCAIEAkxfFeY8P2wZpDjX0MimZl5wkoFQDL25c
+-PzGBuB4OnB8NoUk/yjAHIIpEShw8V+LzouMK5CTJQo5+Ngw3qIch/WgRmMHy4kBq
+-1SsXMjQCte1So6HBMvBPIW5SiMTmjCfZZiw4AYHK+B/JaOwaG9yRg2Ejg4Ok10+X
+-FDxlqZo8Y+wCggCARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lY
+-ukmnjO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+N
+-wacIBlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9
+-QPSch9pT9XHqn+1rZ4bK+QGA
++MIIBuwIBAAKBgQDngaYDZ30c6/7cJgEEbtl8FgKdwhba1Z7oOrOn4MI/6C42G1bY
++wMuqZf4dBCglsdq39SHrcjbE8Vq54gPSOh3g4+uV9Rcg5IOoPLbwp2jQfF6f1FIb
++sx7hrDCIqUcQccPSxetPBKmXI9RN8rZLaFuQeTnI65BKM98Ruwvq6SI2LwIVAPDP
++hSeawaJI27mKqOfe5PPBSmyHAoGBAJMXxXmPD9sGaQ419DIpmZecJKBUAy9uXD8x
++gbgeDpwfDaFJP8owByCKREocPFfi86LjCuQkyUKOfjYMN6iHIf1oEZjB8uJAatUr
++FzI0ArXtUqOhwTLwTyFuUojE5own2WYsOAGByvgfyWjsGhvckYNhI4ODpNdPlxQ8
++ZamaPGPsAoGARmR7CCPjodxASvRbIyzaVpZoJ/Z6x7dAumV+ysrV1BVYd0lYukmn
++jO1kKBWApqpH1ve9XDQYN8zgxM4b16L21kpoWQnZtXrY3GZ4/it9kUgyB7+NwacI
++BlXa8cMDL7Q/69o0d54U0X/NeX5QxuYR6OMJlrkQB7oiW/P/1mwjQgECFGI9QPSc
++h9pT9XHqn+1rZ4bK+QGA
+ -----END DSA PRIVATE KEY-----
+ """
+ 
+@@ -121,7 +121,7 @@ class KeyTest (unittest.TestCase):
+         self.assertEqual(exp_rsa, my_rsa)
+         self.assertEqual(PUB_RSA.split()[1], key.get_base64())
+         self.assertEqual(1024, key.get_bits())
+-        
++
+     def test_4_load_dss(self):
+         key = DSSKey.from_private_key_file(test_path('test_dss.key'))
+         self.assertEqual('ssh-dss', key.get_name())
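
The hunks above replace PyCrypto's sign/verify entry points with the
signer/verifier contexts from the cryptography package. Below is a minimal
standalone sketch of that pattern, assuming the cryptography 0.8-era API
this patch targets (the signer()/verifier() methods were deprecated in
later cryptography releases); the payload bytes are hypothetical:

    from cryptography.exceptions import InvalidSignature
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding, rsa

    # Generate a throwaway 2048-bit key for the demonstration.
    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend()
    )

    # Sign: the same signer()/update()/finalize() sequence the patched
    # RSAKey.sign_ssh_data() uses.
    signer = key.signer(padding.PKCS1v15(), hashes.SHA1())
    signer.update(b'ssh payload')
    sig = signer.finalize()

    # Verify: verifier()/update()/verify(), mapping InvalidSignature to
    # False exactly as the patched verify_ssh_sig() does.
    verifier = key.public_key().verifier(sig, padding.PKCS1v15(), hashes.SHA1())
    verifier.update(b'ssh payload')
    try:
        verifier.verify()
        print(True)
    except InvalidSignature:
        print(False)
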
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/paramiko/patches/02-socket-timeout.patch	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,43 @@
+#
+# This patch addresses an issue that arises when a paramiko client
+# connection is created with a timeout specified: while a Transport
+# object is being instantiated, it unconditionally overrides that
+# timeout value, and it should not.
+#
+# This patch is suitable for upstream, and a bug has been filed:
+#
+#	https://github.com/paramiko/paramiko/issues/476
+#
+
+--- paramiko-1.15.2/paramiko/transport.py.~2~	2015-04-12 18:39:08.295798093 -0700
++++ paramiko-1.15.2/paramiko/transport.py	2015-04-12 18:39:49.250225381 -0700
+@@ -270,10 +270,13 @@ class Transport (threading.Thread, Closi
+         self.sock = sock
+         # Python < 2.3 doesn't have the settimeout method - RogerB
+         try:
+-            # we set the timeout so we can check self.active periodically to
+-            # see if we should bail.  socket.timeout exception is never
+-            # propagated.
+-            self.sock.settimeout(0.1)
++            # Only settimeout if not already set:
++            timeout = self.sock.gettimeout()
++            if timeout is None or timeout <= 0:
++                # we set the timeout so we can check self.active periodically
++                # to see if we should bail.  socket.timeout exception is never
++                # propagated.
++                self.sock.settimeout(0.1)
+         except AttributeError:
+             pass
+ 
+--- paramiko-1.15.2/tests/loop.py.~1~	2014-09-06 16:07:24.000000000 -0700
++++ paramiko-1.15.2/tests/loop.py	2015-04-12 17:36:15.218573915 -0700
+@@ -73,6 +73,9 @@ class LoopSocket (object):
+     def settimeout(self, n):
+         self.__timeout = n
+ 
++    def gettimeout(self):
++        return self.__timeout
++
+     def link(self, other):
+         self.__mate = other
+         self.__mate.__mate = self
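
For reference, a minimal sketch of the behaviour the patch above
preserves; no network access is needed, and the 30-second timeout is an
arbitrary example value:

    import socket

    import paramiko

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(30.0)          # caller-chosen timeout

    t = paramiko.Transport(sock)   # wraps, but does not yet use, the socket

    # Unpatched paramiko 1.15.2 unconditionally resets the timeout to 0.1
    # here; with the patch, a positive caller-supplied value survives.
    print(sock.gettimeout())
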
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/pecan/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,64 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		pecan
+COMPONENT_VERSION=	0.8.3
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:ca6c31e1fb98e4ae780a44a40a88eacb7a19172d3f64d4e31f32049a784a1281
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	http://pecanpy.org/
+COMPONENT_BUGDB=	python-mod/pecan
+
+TPNO=			22205
+
+# Depends on logutils and webtest, which are not Python 3 ready.
+PYTHON_VERSIONS=	2.7 2.6
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+COMPONENT_POST_INSTALL_ACTION = \
+	(cd $(PROTO_DIR)/usr/bin ; $(MV) -f pecan pecan-$(PYTHON_VERSION))
+
+COMPONENT_TEST_ARGS=	setup.py test
+COMPONENT_TEST_DIR=	$(COMPONENT_SRC)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+test:		$(TEST_NO_ARCH)
+
+
+REQUIRED_PACKAGES += library/python/setuptools-26
+REQUIRED_PACKAGES += library/python/setuptools-27
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/pecan/pecan-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,158 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/pecan-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="A WSGI object-dispatching web framework"
+set name=pkg.description \
+    value="Pecan is a lean Python web framework inspired by CherryPy, TurboGears, and Pylons. It was created to fill a void in the Python web-framework world - a very lightweight framework that provides object-dispatch style routing. Pecan does not aim to be a 'full stack' framework, and therefore includes no out of the box support for things like sessions or databases (although tutorials are included for integrating these yourself in just a few lines of code). Pecan instead focuses on HTTP itself. Although it is lightweight, Pecan does offer an extensive feature set for building HTTP-based applications, including: object-dispatch for easy routing; full support for REST-style controllers; extensible security framework; extensible template language support; extensible JSON support; easy Python-based configuration."
+set name=com.oracle.info.description \
+    value="the Pecan WSGI object-dispatching web framework"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python \
+    value="org.opensolaris.category.2008:Web Services/Application and Web Servers"
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="Pecan <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/070
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
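+# For illustration only (not delivered by this package): a minimal
+# Pecan root controller, where URL segments are routed by object
+# dispatch to controller attributes and @expose'd methods.
+#
+#     from pecan import expose
+#
+#     class RootController(object):
+#         @expose()
+#         def index(self):
+#             # GET / is routed here by object dispatch
+#             return 'Hello, World!'
+#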
+link path=usr/bin/pecan target=pecan-$(PYVER) mediator=python \
+    mediator-version=$(PYVER)
+file path=usr/bin/pecan-$(PYVER)
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/commands/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/commands/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/commands/create.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/commands/serve.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/commands/shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/compat/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/configuration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/decorators.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/deploy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/ext/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/extensions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/hooks.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/jsonify.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/debug.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/errordocument.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/recursive.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/XRegExp.js
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/brush-python.js
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/pecan.png
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/shCore.js
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/syntax.css
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/resources/theme.css
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/middleware/static.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/rest.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/routing.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/app.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/controllers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/controllers/root.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/model/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/templates/error.html
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/templates/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/templates/layout.html
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/tests/__init__.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/tests/config.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/tests/test_functional.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/+package+/tests/test_units.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/MANIFEST.in
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/config.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/public/css/style.css
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/public/images/logo.png
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/setup.cfg_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/base/setup.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/app.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/controllers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/controllers/root.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/errors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/model/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/tests/__init__.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/tests/config.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/tests/test_functional.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/+package+/tests/test_units.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/config.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/setup.cfg_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/scaffolds/rest-api/setup.py_tmpl
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/secure.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/templating.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/testing.py
+file path=usr/lib/python$(PYVER)/vendor-packages/pecan/util.py
+#
+license LICENSE license=BSD
+
+# force a group dependency on the optional jinja2; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/jinja2-$(PYV)
+
+# force a group dependency on the optional simplejson; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
+# force a group dependency on the optional sqlalchemy; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/sqlalchemy-$(PYV)
+
+# force a dependency on argparse; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/argparse-$(PYV)
+
+# force a dependency on logutils; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/logutils-$(PYV)
+
+# force a dependency on mako; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/mako-$(PYV)
+
+# force a dependency on the pecan package
+depend type=require \
+    fmri=library/python/pecan@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+
+# force a dependency on singledispatch; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/singledispatch-$(PYV)
+
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-$(PYV)
+
+# force a dependency on webob; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/webob-$(PYV)
+
+# force a dependency on webtest; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/webtest-$(PYV)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/scp/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,51 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		scp
+COMPONENT_VERSION=	0.8.0
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:e51374fcc0a81903dbf2e3aeecb23fa64637cb7e1a7b21c815f3fb6358e95de2
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://github.com/jbardin/scp.py
+COMPONENT_BUGDB=	python-mod/scp
+
+TPNO=			20675
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+test:		$(NO_TESTS)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/scp/scp-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,55 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/scp-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="Paramiko SCP Module"
+set name=pkg.description \
+    value="A pure Python implementation of scp using the paramiko transport to send and revieve files via the SCPv1 protocol."
+set name=com.oracle.info.description value="the Python SCP library"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python \
+    value=org.opensolaris.category.2008:System/Security
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="James Bardin"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/171
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
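+#
+# For illustration only (not delivered by this package): typical use of
+# the scp module over an established paramiko SSH connection
+# ('example.com' below is a hypothetical host).
+#
+#     import paramiko
+#     from scp import SCPClient
+#
+#     ssh = paramiko.SSHClient()
+#     ssh.load_system_host_keys()
+#     ssh.connect('example.com')
+#     scp = SCPClient(ssh.get_transport())
+#     scp.put('local.txt', 'remote.txt')  # upload via SCPv1
+#     scp.get('remote.txt')               # download
+#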
+file path=usr/lib/python$(PYVER)/vendor-packages/scp-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/scp-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/scp-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/scp-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/scp-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/scp.py
+license LICENSE.txt license=LGPLv2.1
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on the scp package
+depend type=require \
+    fmri=library/python/scp@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/singledispatch/Makefile	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,54 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+include ../../../make-rules/shared-macros.mk
+
+COMPONENT_NAME=		singledispatch
+COMPONENT_VERSION=	3.4.0.3
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
+COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
+COMPONENT_ARCHIVE_HASH=	\
+    sha256:5b06af87df13818d14f08a028e42f566640aef80805c3b50c5056b086e3c2b9c
+COMPONENT_ARCHIVE_URL=	$(call pypi_url)
+COMPONENT_PROJECT_URL=	https://bitbucket.org/ambv/singledispatch
+COMPONENT_BUGDB=	python-mod/singledispatch
+
+TPNO=			22463
+
+# singledispatch is superfluous in Python 3.4, where it is part of functools
+PYTHON_VERSIONS=	2.7 2.6
+
+include $(WS_MAKE_RULES)/prep.mk
+include $(WS_MAKE_RULES)/setup.py.mk
+include $(WS_MAKE_RULES)/ips.mk
+
+ASLR_MODE = $(ASLR_NOT_APPLICABLE)
+
+# common targets
+build:		$(BUILD_NO_ARCH)
+
+install:	$(INSTALL_NO_ARCH)
+
+test:		$(NO_TESTS)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/singledispatch/singledispatch-PYVER.p5m	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,64 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+
+set name=pkg.fmri \
+    value=pkg:/library/python/singledispatch-$(PYV)@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+set name=pkg.summary value="Single-dispatch generic functions for Python"
+set name=pkg.description \
+    value="The singledispatch Python module provides support for PEP 443, which defines a simple form of generic programming known as single-dispatch generic functions. A generic function is composed of multiple functions implementing the same operation for different types. Which implementation should be used during a call is determined by the dispatch algorithm. When the implementation is chosen based on the type of a single argument, this is known as single dispatch."
+set name=com.oracle.info.description value="the Python singledispatch module"
+set name=com.oracle.info.tpno value=$(TPNO)
+set name=info.classification \
+    value=org.opensolaris.category.2008:Development/Python
+set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.upstream value="Łukasz Langa <[email protected]>"
+set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
+set name=org.opensolaris.arc-caseid value=PSARC/2015/196
+set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
+#
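+# For illustration only (not delivered by this package): a minimal
+# single-dispatch generic function, where the implementation is chosen
+# by the type of the first argument.
+#
+#     from singledispatch import singledispatch
+#
+#     @singledispatch
+#     def fun(arg):
+#         return 'default: %r' % (arg,)
+#
+#     @fun.register(int)
+#     def _(arg):
+#         return 'int: %d' % arg
+#
+#     fun(3)    # -> 'int: 3'
+#     fun('a')  # -> "default: 'a'"
+#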
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch-$(COMPONENT_VERSION)-py$(PYVER).egg-info/zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch.py
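+# singledispatch_helpers conditionally imports Python 3 module names
+# (_thread, _dummy_thread) and the ordereddict backport; bypass
+# pkgdepend generation for those optional imports.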
+file path=usr/lib/python$(PYVER)/vendor-packages/singledispatch_helpers.py \
+    pkg.depend.bypass-generate=.*/_dummy_thread.* \
+    pkg.depend.bypass-generate=.*/_thread.* \
+    pkg.depend.bypass-generate=.*/ordereddict.*
+#
+license singledispatch.license license=MIT
+
+# force a dependency on the Python runtime
+depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
+    pkg.debug.depend.path=usr/bin
+
+# force a dependency on ordereddict; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/ordereddict-$(PYV)
+
+# force a dependency on the singledispatch package
+depend type=require \
+    fmri=library/python/singledispatch@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/python/singledispatch/singledispatch.license	Fri Jun 19 09:35:02 2015 +0100
@@ -0,0 +1,19 @@
+Copyright (C) 2013 by Łukasz Langa
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.