7038120 Update Auto Installer to use CUD
authorDarren Kenny <Darren.Kenny@Oracle.COM>
Wed, 25 May 2011 21:26:43 +0100
changeset 1151 95413393ef67
parent 1150 c21368c1904f
child 1152 67a7b566bd10
7038120 Update Auto Installer to use CUD 7013976 Solaris 11 Express 2010.11 autoinstaller fails to install on 32GB SSDs with default VTOC 6978133 The AutoInstall manifest should allow setting the initial Boot Environment name 6989348 want to specify post installation IPS repository in AI manifest 6993349 AI could use some progress/notification information 7011599 AI gets wrong publisher name for secondary publisher when origin is the same. 7040644 AI Install Phase progress should be captured in the install_log
usr/src/cmd/Makefile
usr/src/cmd/Makefile.targ
usr/src/cmd/ai-webserver/publish_manifest.py
usr/src/cmd/auto-install/Makefile
usr/src/cmd/auto-install/__init__.py
usr/src/cmd/auto-install/ai.dtd
usr/src/cmd/auto-install/ai_get_manifest.py
usr/src/cmd/auto-install/ai_instance.py
usr/src/cmd/auto-install/ai_manifest.xml
usr/src/cmd/auto-install/ai_sd.py
usr/src/cmd/auto-install/auto-install.py
usr/src/cmd/auto-install/auto_ddu_lib.c
usr/src/cmd/auto-install/auto_install.c
usr/src/cmd/auto-install/auto_install.h
usr/src/cmd/auto-install/auto_install.py
usr/src/cmd/auto-install/auto_parse.c
usr/src/cmd/auto-install/auto_parse_manifest.c
usr/src/cmd/auto-install/auto_td.c
usr/src/cmd/auto-install/checkpoints/Makefile
usr/src/cmd/auto-install/checkpoints/__init__.py
usr/src/cmd/auto-install/checkpoints/dmm.py
usr/src/cmd/auto-install/checkpoints/target_selection.py
usr/src/cmd/auto-install/checkpoints/test/dmm_build_test.py
usr/src/cmd/auto-install/checkpoints/test/dmm_env_test.py
usr/src/cmd/auto-install/checkpoints/test/dmm_log_test.py
usr/src/cmd/auto-install/configuration.dtd
usr/src/cmd/auto-install/default.xml
usr/src/cmd/auto-install/software.dtd
usr/src/cmd/auto-install/svc/auto-installer
usr/src/cmd/auto-install/svc/manifest-locator
usr/src/cmd/auto-install/svc/manifest-locator.xml
usr/src/cmd/auto-install/target.dtd
usr/src/cmd/auto-install/test/manifest_auto_reboot_false.xml
usr/src/cmd/auto-install/test/manifest_auto_reboot_invalid.xml
usr/src/cmd/auto-install/test/manifest_auto_reboot_not_set.xml
usr/src/cmd/auto-install/test/manifest_auto_reboot_true.xml
usr/src/cmd/auto-install/test/test_auto_install_manifest.py
usr/src/cmd/auto-install/test/test_auto_install_parse_args.py
usr/src/cmd/auto-install/test/test_auto_install_script.py
usr/src/cmd/auto-install/test/test_python_script
usr/src/cmd/auto-install/test/test_shell_script.sh
usr/src/cmd/auto-install/test/test_target_selection_sparc.py
usr/src/cmd/auto-install/test/test_target_selection_x86.py
usr/src/cmd/auto-install/utmpx.py
usr/src/cmd/auto-install/xslt/README
usr/src/cmd/auto-install/xslt/new-to-newer.py
usr/src/cmd/auto-install/xslt/new-to-newer.xslt
usr/src/cmd/auto-install/xslt/old-to-new.py
usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py
usr/src/cmd/installadm/Makefile
usr/src/cmd/installadm/__init__.py
usr/src/cmd/system-config/svc/svc-system-config
usr/src/lib/install_common/__init__.py
usr/src/lib/install_engine/__init__.py
usr/src/lib/install_ict/Makefile
usr/src/lib/install_ict/__init__.py
usr/src/lib/install_ict/setup_swap.py
usr/src/lib/install_ict/test/test_setup_swap.py
usr/src/lib/install_ict/update_dumpadm.py
usr/src/lib/install_manifest/dtd/ai.dtd
usr/src/lib/install_target/__init__.py
usr/src/lib/install_target/controller.py
usr/src/lib/install_target/discovery.py
usr/src/lib/install_target/instantiation.py
usr/src/lib/install_target/libbe/be.py
usr/src/lib/install_target/libbe/const.py
usr/src/lib/install_target/logical.py
usr/src/lib/install_target/physical.py
usr/src/lib/install_target/shadow/physical.py
usr/src/lib/install_transfer/cpio.py
usr/src/lib/install_transfer/ips.py
usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf
usr/src/pkg/manifests/system-install-auto-install.mf
usr/src/pkg/manifests/system-library-install.mf
usr/src/tools/tests/tests.nose
--- a/usr/src/cmd/Makefile	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/Makefile	Wed May 25 21:26:43 2011 +0100
@@ -29,8 +29,7 @@
 
 include $(SRC)/Makefile.master
 
-SUBDIRS=	auto-install \
-		gui-aux \
+SUBDIRS=	gui-aux \
 		gui-install \
 		rbac \
 		slim-install \
@@ -38,6 +37,7 @@
 
 PYTHONSUBDIRS=	aimanifest \
 		ai-webserver \
+		auto-install \
 		distro_const \
 		installadm \
 		js2ai \
--- a/usr/src/cmd/Makefile.targ	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/Makefile.targ	Wed May 25 21:26:43 2011 +0100
@@ -89,6 +89,12 @@
 $(ROOTPYTHONVENDORINSTALLAI):
 	$(INS.dir)
 
+$(ROOTPYTHONVENDORSOLINSTALLAI):
+	$(INS.dir)
+
+$(ROOTPYTHONVENDORSOLINSTALLAICHKPT):
+	$(INS.dir)
+
 $(ROOTPYTHONVENDORINSTALLPROF):
 	$(INS.dir)
 
@@ -129,6 +135,12 @@
 $(ROOTPYTHONVENDORSOLINSTALLTI)/%: %
 	$(CP_P.file)
 
+$(ROOTPYTHONVENDORSOLINSTALLAI)/%: %
+	$(CP_P.file)
+
+$(ROOTPYTHONVENDORSOLINSTALLAICHKPT)/%: %
+	$(CP_P.file)
+
 $(ROOTPYTHONVENDORSCI)/%: %
 	$(CP_P.file)
 
--- a/usr/src/cmd/ai-webserver/publish_manifest.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/ai-webserver/publish_manifest.py	Wed May 25 21:26:43 2011 +0100
@@ -49,7 +49,7 @@
 
 INFINITY = str(0xFFFFFFFFFFFFFFFF)
 IMG_AI_MANIFEST_DTD = "auto_install/ai.dtd"
-SYS_AI_MANIFEST_DTD = "/usr/share/auto_install/ai.dtd"
+SYS_AI_MANIFEST_DTD = "/usr/share/install/ai.dtd"
 
 IMG_AI_MANIFEST_SCHEMA = "auto_install/ai_manifest.rng"
 
--- a/usr/src/cmd/auto-install/Makefile	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/Makefile	Wed May 25 21:26:43 2011 +0100
@@ -28,31 +28,24 @@
 clobber:=	TARGET=	clobber
 install:=	TARGET=	install
 
-PROG	   = auto-install
-SUBDIRS=	svc config
+SUBDIRS=	svc config checkpoints
+
+PROGS=		ai_get_manifest ai_sd auto-install
 
-SRCS =	auto_install.c \
-	auto_td.c \
-	auto_parse.c \
-	auto_parse_manifest.c \
-	auto_ddu_lib.c
-
-PROGS=		ai_get_manifest ai_sd
-
-PYMODULES=	__init__.py ai_parse_manifest.py
-
-HDRS =	auto_install.h
+PYMODULES=	\
+	__init__.py \
+	ai_get_manifest.py \
+	ai_instance.py \
+	ai_parse_manifest.py \
+	ai_sd.py \
+	auto-install.py \
+	auto_install.py \
+	utmpx.py
 
 PYCMODULES =	$(PYMODULES:%.py=%.pyc)
 
-OBJS =	${SRCS:%.c=${ARCH}/%.o}
-
 MANIFEST_FILES = ai_manifest.xml \
-		 default.xml \
-		 ai.dtd \
-		 target.dtd \
-		 configuration.dtd \
-		 software.dtd
+		 default.xml
 
 VERSION_FILE = version
 
@@ -60,9 +53,9 @@
 
 ROOTPROGS=	$(PROGS:%=$(ROOTUSRBIN)/%)
 
-ROOTPYMODULES=	$(PYMODULES:%=$(ROOTPYTHONVENDORINSTALLAI)/%)
+ROOTPYMODULES=	$(PYMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAI)/%)
 
-ROOTPYCMODULES= $(PYCMODULES:%=$(ROOTPYTHONVENDORINSTALLAI)/%)
+ROOTPYCMODULES= $(PYCMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAI)/%)
 
 ROOTMANIFESTS= $(MANIFEST_FILES:%=$(ROOTAUTOINST)/%)
 
@@ -70,66 +63,26 @@
 
 ROOTSCPROFILES= $(SC_PROFILE_FILES:%=$(ROOTAUTOINSTSCPROFILES)/%)
 
-LIBDIR  = $(ROOTADMINLIB)
-LIBDIRS = -L${LIBDIR} -L$(SFWLIBDIR) -R$(SFWLIBRDIR) -L$(ROOTUSRLIB)
-
-INCLUDEDIR = -I. -I${SRC}/lib/liborchestrator -I${SRC}/lib/libtd -I${SRC}/lib/libti -I${SRC}/lib/liblogsvc -I${SRC}/lib/libtransfer -I$(ROOTINCADMIN) -I/usr/include/python2.6
-
-CPPFLAGS  += $(INCLUDEDIR)
-CFLAGS	  += $(DEBUG_CFLAGS) -Xa
-LINTFLAGS  = -umx ${CPPFLAGS}
-
-LDFLAGS  +=	$(DEBUG_CFLAGS) \
-		-R$(ROOTADMINLIB:$(ROOT)%=%) $(LIBDIRS)
-LDLIBS  +=	-Bdynamic -ltd -ltransfer -lti -lorchestrator \
-		 -lbe -lspmicommon -lnvpair -llogsvc -lelf -lpython2.6
-
-MSG_DOMAIN = SUNW_INSTALL_AUTOINSTALL
-
-${ARCH}/%.o: %.c
-	${COMPILE.c} -o $@ $<
-
-FILEMODE= 555
-OWNER= root
-GROUP= sys
-
-.KEEP_STATE: 
-
-all: ${ARCH} .WAIT $(PROG) python $(PROGS)
-	@true
-
-${ARCH}:
-	@[ -d ${@} ] || (${RM} ${@} ;mkdir -p ${@})
-
-$(PROG):  $(OBJS) $(HDRS) .WAIT $(LIBDEP)
-	$(LINK.c) -o $@ $(OBJS) $(LDLIBS)
-	cp $@ ${ARCH}
-	$(POST_PROCESS)
-
-${OBJS}: $(HDRS)
+all: python $(PROGS)
 
 install: all .WAIT $(ROOTPROGS) \
 	$(ROOTUSRBIN) \
 	$(ROOTUSRBINPROG) \
 	$(ROOTPYTHONVENDOR) \
-	$(ROOTPYTHONVENDORINSTALL) \
-	$(ROOTPYTHONVENDORINSTALLAI) \
+	$(ROOTPYTHONVENDORSOLINSTALL) \
+	$(ROOTPYTHONVENDORSOLINSTALLAI) \
 	$(ROOTMANIFESTS) \
 	$(ROOTVERSION) \
 	$(ROOTPYMODULES) $(ROOTPYCMODULES) \
 	$(ROOTSCPROFILES) \
-	$(SUBDIRS) \
-	.WAIT msgs
+	$(SUBDIRS)
 
 python:
 	$(PYTHON) -m compileall -l $(@D)
 
-headers:
-
-install_h:
-
-lint: ${SRCS} ${HDRS}
-	${LINT.c} ${SRCS}
+auto-install: auto-install.py
+	$(CP) auto-install.py auto-install
+	$(CHMOD) 755 auto-install
 
 ai_get_manifest: ai_get_manifest.py
 	$(CP) ai_get_manifest.py ai_get_manifest
@@ -137,16 +90,8 @@
 ai_sd: ai_sd.py
 	$(CP) ai_sd.py ai_sd
 
-msgs: ${MSG_DOMAIN}.po
-
-${MSG_DOMAIN}.po: ${SRCS} ${HDRS}
-	@echo "Making messages file ${MSG_DOMAIN}.po"
-	@${COMPILE.c} -C -E ${SRCS} 2>/dev/null | \
-	    xgettext -d ${MSG_DOMAIN} -s \
-	    	-c "i18n:" - > /dev/null 2>&1
-
 clean: $(SUBDIRS)
-	rm -f $(PROGS) $(PYCMODULES) ${PROG} ${PYCMODS} ${ARCH}/* ${MSG_DOMAIN}.po
+	$(RM) $(PROGS) $(PYCMODULES)
 
 clobber: clean 
 
--- a/usr/src/cmd/auto-install/__init__.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -20,12 +20,22 @@
 # CDDL HEADER END
 #
 
-# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 
 #
 # This file is installed into
-# usr/lib/python2.6/vendor-packages/osol_install/auto_install/ directory
+# usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ directory
 # and lets the Python interpreter know that this directory contains valid
 # Python modules which can be imported using following command:
-# from osol_install.auto_install.<module_name> import <object>
+# from solaris_install.auto_install.<module_name> import <object>
 #
+
+"""Init module for the Automated Installer package"""
+
+from solaris_install.data_object.cache import DataObjectCache
+import ai_instance
+
+# Register local Data Objects, use relative module reference.
+DataObjectCache.register_class(ai_instance)
+
+__all__ = []
--- a/usr/src/cmd/auto-install/ai.dtd	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-<!--
- CDDL HEADER START
-
- The contents of this file are subject to the terms of the
- Common Development and Distribution License (the "License").
- You may not use this file except in compliance with the License.
-
- You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- or http://www.opensolaris.org/os/licensing.
- See the License for the specific language governing permissions
- and limitations under the License.
-
- When distributing Covered Code, include this CDDL HEADER in each
- file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- If applicable, add the following below this CDDL HEADER, with the
- fields enclosed by brackets "[]" replaced with your own identifying
- information: Portions Copyright [yyyy] [name of copyright owner]
-
- CDDL HEADER END
-
- Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
-
--->
-
-<!ELEMENT auto_install (ai_instance)>
-
-<!ENTITY % target SYSTEM "target.dtd">
-%target;
-
-<!ENTITY % configuration SYSTEM "configuration.dtd">
-%configuration;
-
-<!ENTITY % software SYSTEM "software.dtd">
-%software;
-
-<!--
-	The source element, if specified, is for post installation changes
-	to the publisher for IPS. The first source element specified
-	will be considered the primary source, all additional sources
-	will be added as secondary sources.
--->
-
-<!ELEMENT ai_instance (target*, software+, add_drivers?, configuration*, source*)>
-<!ATTLIST ai_instance name CDATA #IMPLIED>
-<!ATTLIST ai_instance http_proxy CDATA #IMPLIED>
-<!ATTLIST ai_instance auto_reboot (true|false) "false">
-
-<!ELEMENT add_drivers (software*, search_all?)>
-
-<!ELEMENT search_all (source?)>
-<!ATTLIST search_all addall (true|false) "false">
--- a/usr/src/cmd/auto-install/ai_get_manifest.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/ai_get_manifest.py	Wed May 25 21:26:43 2011 +0100
@@ -42,16 +42,17 @@
 import traceback
 import urllib
 
-from solaris_install import _
+from solaris_install import _, system_temp_path
 
 VERSION_FILE = '/usr/share/auto_install/version'
 
 # constants for generating temporary files with unique names for SMF profiles
-SC_OUTPUT_DIRECTORY = '/system/volatile/profile' # work directory for profiles
+SC_OUTPUT_DIRECTORY = system_temp_path('profile')  # work dir for profiles
 SC_PREFIX = 'profile_'
 SC_EXTENSION = '.xml'
 
-AI_MANIFEST_ATTACHMENT_NAME = 'manifest.xml' # named as MIME attachment
+AI_MANIFEST_ATTACHMENT_NAME = 'manifest.xml'  # named as MIME attachment
+
 
 class AILog:
     """
@@ -67,7 +68,7 @@
     AI_DBGLVL_WARN = 3
     AI_DBGLVL_INFO = 4
 
-    def __init__(self, logid="AI", logfile="/tmp/ai_sd_log",
+    def __init__(self, logid="AI", logfile=system_temp_path("ai_sd_log"),
                  debuglevel=AI_DBGLVL_WARN):
         self.log_file = logfile
         self.logid = logid
@@ -684,18 +685,19 @@
                     return None, -1, None
 
                 params = urllib.urlencode({
-                                          'version': version,
-                                          'service': service_name,
-                                          'logging': AIGM_LOG.get_debug_level(),
-                                          'postData': post_data})
+                                      'version': version,
+                                      'service': service_name,
+                                      'logging': AIGM_LOG.get_debug_level(),
+                                      'postData': post_data})
             else:
                 # compatibility mode only needs to send the data
                 params = urllib.urlencode({'postData': post_data})
 
             AIGM_LOG.post(AILog.AI_DBGLVL_INFO, "%s", params)
 
-            http_headers = {"Content-Type": "application/x-www-form-urlencoded",
-                            "Accept": "text/plain,multipart/alternative"}
+            http_headers = {
+                "Content-Type": "application/x-www-form-urlencoded",
+                "Accept": "text/plain,multipart/alternative"}
             http_conn.request("POST", file_path, params, http_headers)
         else:
             http_conn.request("GET", file_path)
@@ -821,8 +823,8 @@
                       "Invalid options or arguments provided")
         usage()
 
-    service_list = "/tmp/service_list"
-    manifest_file = "/tmp/manifest.xml"
+    service_list = system_temp_path("service_list")
+    manifest_file = system_temp_path("manifest.xml")
     list_criteria_only = False
 
     for option, argument in opts:
@@ -911,7 +913,7 @@
         # to connect next AI service,
         #
         if ret == httplib.OK:
-            if content_type == 'text/xml': # old format
+            if content_type == 'text/xml':  # old format
                 ai_manifest = http_resp
                 ai_manifest_obtained = True
                 AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
@@ -926,7 +928,7 @@
             mime_response = "Content-Type: %s\n%s" % (content_type, http_resp)
             # by design, response is MIME-encoded, multipart
             if mime_response is not None:
-                cleanup_earlier_run() # delete any profiles from previous runs
+                cleanup_earlier_run()  # delete any profiles from previous runs
                 # parse the MIME response
                 parse = Parser()
                 msg = parse.parsestr(mime_response)
@@ -935,7 +937,7 @@
                     # write out manifest, any profiles, console messages
                     if handle_mime_payload(imsg, manifest_file):
                         ai_manifest_obtained = True
-            if ai_manifest_obtained: # manifest written by MIME handler
+            if ai_manifest_obtained:  # manifest written by MIME handler
                 service_list_fh.close()
                 return 0
         else:
@@ -993,7 +995,7 @@
     Effects: write a manifest file, write any profile files, log and display
         manifest locator CGI script text messages
     """
-    wrote_manifest = False # when manifest is found, set to True
+    wrote_manifest = False  # when manifest is found, set to True
     payload = msg.get_payload()
 
     # XML received - either manifest or profile
@@ -1022,8 +1024,8 @@
         # log and display text messages from the locator CGI
         # assuming any text not within an attachment goes to the console
         AIGM_LOG.post(AILog.AI_DBGLVL_WARN,
-              _("Messages from AI server while locating manifest and profiles:")
-              + "\n" + payload)
+             _("Messages from AI server while locating manifest and profiles:")
+             + "\n" + payload)
     return wrote_manifest
 
 
@@ -1040,7 +1042,7 @@
             return
         raise
     for fclean in cleanlist:
-        if fclean.startswith(SC_PREFIX): # uniquely identify profiles
+        if fclean.startswith(SC_PREFIX):  # uniquely identify profiles
             os.unlink(os.path.join(SC_OUTPUT_DIRECTORY, fclean))
 
 
@@ -1067,10 +1069,10 @@
             return False
     try:
         if name[-len(extension):] == extension:
-            name = name[:-len(extension)] # strip ending .xml
-        (fd, name) = tempfile.mkstemp(suffix = extension,
-                prefix = prefix + name + '.',
-                dir = outdir)
+            name = name[:-len(extension)]  # strip ending .xml
+        (fd, name) = tempfile.mkstemp(suffix=extension,
+                prefix=prefix + name + '.',
+                dir=outdir)
         os.write(fd, profile)
         os.close(fd)
     except (OSError, IOError), err:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/ai_instance.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+
+from lxml import etree
+
+from solaris_install.data_object import DataObject
+
+
+class AIInstance(DataObject):
+    """
+    ai_instance xml tag handler class
+    """
+    def __init__(self, name):
+        """
+        Class constructor
+        """
+        super(AIInstance, self).__init__(name)
+        self.auto_reboot = False
+        self.http_proxy = None
+
+    def to_xml(self):
+        """
+        Convert DataObject DOM to XML
+        """
+        ai_instance = etree.Element("ai_instance")
+        if self.auto_reboot:
+            ai_instance.set("auto_reboot", "true")
+        else:
+            ai_instance.set("auto_reboot", "false")
+
+        if self.name is not None:
+            ai_instance.set("name", self.name)
+
+        if self.http_proxy:
+            ai_instance.set("http_proxy", self.http_proxy)
+
+        return ai_instance
+
+    @classmethod
+    def can_handle(cls, element):
+        """
+        can_handle notification method for ai_instance tags
+        """
+        if element.tag == "ai_instance":
+            return True
+        return False
+
+    @classmethod
+    def from_xml(cls, element):
+        """
+        Convert from xml for DOM for DataObject storage
+        """
+        # Parse name, no validation required
+        ai_name = element.get("name")
+
+        ai_instance = AIInstance(ai_name)
+
+        # Parse auto_reboot, validate set to true or false
+        auto_reboot = element.get("auto_reboot")
+        if auto_reboot is not None:
+            # Convert to lowercase, to simplify tests
+            auto_reboot = auto_reboot.lower()
+            if auto_reboot == "true":
+                ai_instance.auto_reboot = True
+            elif auto_reboot == "false":
+                ai_instance.auto_reboot = False
+        else:
+            ai_instance.auto_reboot = False
+
+        # Parse http_proxy
+        ai_instance.http_proxy = element.get("http_proxy")
+
+        return ai_instance
+
+    def __repr__(self):
+        return "ai_instance: name='%s' auto_reboot=%s; http_proxy='%s'" % \
+            (self.name, str(self.auto_reboot), self.http_proxy)
--- a/usr/src/cmd/auto-install/ai_manifest.xml	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/ai_manifest.xml	Wed May 25 21:26:43 2011 +0100
@@ -26,12 +26,12 @@
 DTD sample manifest for Automatic Installer input manifest specification.
 ===============================================================================
 -->
-<!DOCTYPE auto_install SYSTEM "file:///usr/share/auto_install/ai.dtd">
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
 <auto_install>
   <!--
-	"auto_reboot" set to "true" may be an issue for x86 machines.
-	The boot order is not guaranteed and may cause unexpected
-	behavior. If auto_reboot is not desired, set auto_reboot="false".
+        "auto_reboot" set to "true" may be an issue for x86 machines.
+        The boot order is not guaranteed and may cause unexpected
+        behavior. If auto_reboot is not desired, set auto_reboot="false".
 
         The name of the manifest is obtained from (in this order):
         1) the name from the installadm add-manifest command line "-m" option.
@@ -42,24 +42,24 @@
   <ai_instance auto_reboot="true">
     <!--
       =======================================================================
-      <target/target_device> - selections for AI target Device specification
+      <target> - selections for AI target Device specification
 
       Disk criteria are divided into three mutually exclusive groups:
 
       G1 - deterministic disk criteria
       ................................
-        * target_device/disk/iscsi parameters
-        * target_device/disk/disk_name, with name_type attribute:
+        * disk/iscsi parameters
+        * disk/disk_name, with name_type attribute:
           one of ctd, volid, devpath or devid
 
       G2 - non-deterministic disk criteria
       ..........................
-        * target_device/disk/disk_prop: Any of dev_type, dev_vendor or
+        * disk/disk_prop: Any of dev_type, dev_vendor or
           dev_size
 
       G3 - keyword disk criteria
       ...........................
-        * target_device/disk/disk_keyword: "boot_disk"
+        * disk/disk_keyword: "boot_disk"
 
       Schema ai.dtd enforces following policy:
 
@@ -74,70 +74,103 @@
       =======================================================================
     -->
     <target>
-      <target_device>
-        <disk>
-          <!-- G1 -->
-          <!--
-            c#t#d# device name like c0t0d0 or 
-            MPXIO name like c0t2000002037CD9F72d0
-          -->
-          <disk_name name="c1t0d0" name_type="ctd"/>
-          <!-- volume name set for instance by means
-            of format(1M) command
-          -->
-          <!--
-          <disk_name name="ai-disk" name_type="volid"/>
-          -->
-          <!-- device id - e.g. can be obtained by means of
-            iostat(1M) -iEn
-          -->
-          <!--
-          <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
-          -->
-          <!-- device path under /devices directory, e.g.
-            /pci@1e,600000/pci@0/pci@9/pci@0/scsi@1/sd@0,0
-          -->
-          <!--
-          <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
-          -->
-          <!--
-            ISCSI target device
+      <disk>
+        <!-- G1 -->
+        <!--
+          c#t#d# device name like c0t0d0 or 
+          MPXIO name like c0t2000002037CD9F72d0
+        -->
+        <disk_name name="c1t0d0" name_type="ctd"/>
+        <!-- volume name set for instance by means
+          of format(1M) command
+        -->
+        <!--
+        <disk_name name="ai-disk" name_type="volid"/>
+        -->
+        <!-- device id - e.g. can be obtained by means of
+          iostat(1M) -iEn
+        -->
+        <!--
+        <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+        -->
+        <!-- device path under /devices directory, e.g.
+          /pci@1e,600000/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+        -->
+        <!--
+        <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+        -->
+        <!--
+          ISCSI target device
 
-          <iscsi name="c0d2E0001010F68">
-            <ip>192.168.1.34</ip>
-          </iscsi> 
-          -->
-          <!-- G2 -->
-          <!--
-          <disk_prop dev_vendor="hitachi" dev_size="20480mb"/>
-          -->
-          <!-- G3 -->
-          <!--
-          <disk_keyword key="boot_disk"/>
-          -->
-          <!--
-            Uncomment this to force AI to find an existing Solaris
-            partition instead of creating a new one.
-          -->
-          <!--
-          <partition action="use_existing"/>
-          -->
-          <partition name="1" part_type="99">
-            <size start_sector="200" val="20480mb"/>
-          </partition>
-          <partition name="4" part_type="99">
-            <size start_sector="2000" val="20480mb"/>
-          </partition>
-          <slice name="0" is_root="true">
+        <iscsi name="c0d2E0001010F68">
+          <ip>192.168.1.34</ip>
+        </iscsi> 
+        -->
+        <!-- G2 -->
+        <!--
+        <disk_prop dev_vendor="hitachi" dev_size="20480mb"/>
+
+        or 
+
+        <disk_prop dev_vendor="hitachi"/>
+
+        or
+
+        <disk_prop dev_size="20480mb"/>
+        -->
+        <!-- G3 -->
+        <!--
+        <disk_keyword key="boot_disk"/>
+        -->
+        <!--
+          On X86 machines, Slices exist within partitions only
+        -->
+        <!--
+          Uncomment this to force AI to find an existing Solaris
+          partition.
+        -->
+        <!--
+        <partition action="use_existing_solaris2">
+          <slice name="0">
             <size val="20480mb"/>
           </slice>
           <slice name="4">
             <size val="20480mb"/>
           </slice>
-        </disk>
-      </target_device>
+        </partition>
+
+        or, use the following to create a Solaris partition
+        -->
+        <partition name="1" part_type="191">
+          <size start_sector="200" val="40960mb"/>
+          <slice name="0">
+            <size val="20480mb"/>
+          </slice>
+          <slice name="4">
+            <size val="20480mb"/>
+          </slice>
+        </partition>
+        <!-- Define some other partitions to create too -->
+        <partition name="2" part_type="99">
+          <size start_sector="200" val="20480mb"/>
+        </partition>
+        <partition name="4" part_type="99">
+          <size start_sector="2000" val="20480mb"/>
+        </partition>
+        <!--
+        On SPARC systems, only specify the Slice layout.
+        -->
+        <!--
+            <slice name="0">
+              <size val="20480mb"/>
+            </slice>
+            <slice name="4">
+              <size val="20480mb"/>
+            </slice>
+        -->
+      </disk>
     </target>
-    <software name="ips">
+    <software name="ips" type="IPS">
       <source>
         <publisher name="solaris">
           <origin name="http://pkg.oracle.com/solaris/release"/>
@@ -151,14 +184,14 @@
         form:
       <name="[email protected]#"/>
       -->
-      <software_data type="IPS">
+      <software_data>
         <name>pkg:/entire</name>
         <name>pkg:/server_install</name>
       </software_data>
     </software>
     <add_drivers>
       <!--
-	    Driver Updates: This section is for adding driver packages to the
+            Driver Updates: This section is for adding driver packages to the
             boot environment before the installation takes place.  The
             installer can then access all devices on the system.  The
             packages installed in the boot environment will also be installed
@@ -192,17 +225,17 @@
                     <source>
                         <publisher>
                             <origin
-				name=
-	"http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                                name=
+        "http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
                         </publisher>
                     </source>
-		    <software_data type="P5I"/>
+                    <software_data type="P5I"/>
                 </software>
 
             SVR4: An SVR4 package spec. The source/publisher/origin corresponds
             to the directory containing the packages.  The 
-	    software/software_data/name refers tp the package's top level
-	    directory or the package's datastream file.
+            software/software_data/name refers to the package's top level
+            directory or the package's datastream file.
 
                 <software>
                     <source>
@@ -217,8 +250,8 @@
 
             DU: An ITU (Install Time Update) or Driver Update image.
             The source/publisher/origin refers to the path just above the 
-	    image's DU directory (if expanded) or the name of the .iso image.  
-	    All packages in the image will be added.
+            image's DU directory (if expanded) or the name of the .iso image.  
+            All packages in the image will be added.
 
                 <software>
                     <source>
@@ -227,7 +260,7 @@
                         </publisher>
                     </source>
                     <software_data type="DU"/>
-                </software>	
+                </software>     
       -->
       <search_all/>
     </add_drivers>
--- a/usr/src/cmd/auto-install/ai_sd.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/ai_sd.py	Wed May 25 21:26:43 2011 +0100
@@ -31,7 +31,8 @@
 import getopt
 import gettext
 import traceback
-from osol_install.auto_install.ai_get_manifest import AILog
+from solaris_install import system_temp_path
+from solaris_install.auto_install.ai_get_manifest import AILog
 import osol_install.auto_install.aimdns_mod as aimdns
 from osol_install.auto_install.installadm_common import REGTYPE
 
@@ -158,7 +159,7 @@
     service_name = ""
     service_lookup_timeout = 5
 
-    service_file = "/tmp/service_list"
+    service_file = system_temp_path("service_list")
 
     for option, argument in opts:
         if option == "-s":
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/auto-install.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,42 @@
+#!/usr/bin/python2.6
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+'''Main launcher for Automated Installer'''
+
+import sys
+from solaris_install.auto_install import auto_install
+
+if __name__ == '__main__':
+    try:
+        ai = auto_install.AutoInstall(sys.argv[1:])
+        ai.perform_autoinstall()
+        sys.exit(ai.exitval)
+
+    except Exception, e:
+        print "ERROR: an exception occurred.\n"
+        print "\t%s" % str(e)
+        print "\nPlease check logs for futher information."
+        sys.exit(ai.AI_EXIT_FAILURE)
+    except KeyboardInterrupt:
+        pass
--- a/usr/src/cmd/auto-install/auto_ddu_lib.c	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2221 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- */
-
-#include "Python.h"	/* Must be the first header file. */
-#include <alloca.h>
-#include <libintl.h>
-#include <strings.h>
-#include <auto_install.h>
-
-/* Python DDU function module related definitions. */
-#define	DDU_FUNCTION_MODULE	"DDU.ddu_function"
-#define	DDU_PACKAGE_LOOKUP	"ddu_package_lookup"
-#define	DDU_INSTALL_PACKAGE	"ddu_install_package"
-#define	DDU_DEVSCAN		"ddu_devscan"
-#define	DDU_BUILD_REPO_LIST	"ddu_build_repo_list"
-
-/* Python DDU package module related definitions. */
-#define	DDU_PACKAGE_MODULE	"DDU.ddu_package"
-#define	DDU_PACKAGE_OBJECT	"ddu_package_object"
-
-/* Pkg commands. */
-#define	PKG_PUBLISHER		"/usr/bin/pkg publisher"
-
-/* DDU error log */
-#define	DDU_ERRLOG		"/tmp/ddu_err.log"
-
-/*  DDU error module related. */
-#define	DDU_ERROR_MODULE		"DDU.ddu_errors"
-#define	DDU_PACKAGE_NOT_FOUND_EXC	"PackageNoFound"
-
-/* ICT module related definitions. */
-#define	ICT_MODULE		"osol_install.ict"
-#define	ICT_CLASS		"ICT"
-#define	ICT_UPDATE_ARCHIVE	"update_boot_archive"
-
-/* AI Manifest (AIM) related path definitions. */
-#define	AIM_PREFACE		"auto_install/ai_instance/add_drivers/"
-#define	PKGSPEC_NODEPATH	"software"
-#define	ORIGIN_NODEPATH		"software/source/publisher/origin/name"
-#define	TYPE_NODEPATH		"software[source/publisher/origin/" \
-				    "name=\"%s\"]/software_data/type"
-#define	NAME_NODEPATH		"software[source/publisher/origin/" \
-				    "name=\"%s\":software_data/" \
-				    "type=\"%s\"]/software_data/name"
-#define	ACTION_NONAME_NODEPATH \
-				"software[source/publisher/origin/" \
-				    "name=\"%s\":software_data/" \
-				    "type=\"%s\"]/software_data/action"
-#define	ACTION_YESNAME_NODEPATH \
-				"software[source/publisher/origin/" \
-				    "name=\"%s\":software_data/type=\"%s\":" \
-				    "software_data/name=\"%s\"]/" \
-				    "software_data/action"
-
-#define	SEARCH_NODEPATH		"search_all"
-#define	SEARCH_ORIGIN_NODEPATH	"search_all/source/publisher/origin/name"
-#define	SEARCH_PUBNAME_NODEPATH	"search_all/source/publisher/name"
-#define	SEARCH_ADDALL_NODEPATH	"search_all/addall"
-
-#define	MAX_NODEPATH_SIZE	256
-
-typedef struct {
-	PyThreadState *myThreadState;
-	PyThreadState *mainThreadState;
-	PyObject *pFunctionModule;
-	PyObject *pPackageModule;
-	PyObject *pErrorModule;
-	PyObject *pICTModule;
-} py_state_t;
-
-typedef struct {
-	char path_str[MAX_NODEPATH_SIZE];
-	char *post_prefix_start;
-	int post_prefix_len;
-} path_t;
-
-static py_state_t *auto_ddu_lib_init();
-static void auto_ddu_lib_fini(py_state_t *py_state_p);
-static void ai_dump_python_exception();
-static PyObject *ai_call_ddu_devscan(py_state_t *py_state_p,
-    boolean_t get_only_missing_drivers, char *dev_type);
-static int ai_call_ddu_package_lookup(py_state_t *py_state_p,
-    PyObject *pDevObj, PyObject *pRepoList, PyObject **pPackageObj_p);
-static int ai_call_ddu_install_package(py_state_t *py_state_p,
-    PyObject *ddu_package_obj, char *install_root, boolean_t third_party_ok);
-static PyObject *ai_new_ddu_package_object(py_state_t *py_state_p,
-    char *type, char *name, char *origin);
-static int ai_get_ddu_package_object_values(PyObject *pDDUPackageObject,
-    char **type, char **location, char **name, char **descr, char **inf_link,
-    boolean_t *third_party);
-static int ai_get_ddu_dev_data_values(PyObject *pDDUDevData,
-    char **dev_type_p, char **descr_p, char **vendor_ID_p, char **device_ID_p,
-    char **class_p);
-static int ai_du_process_manual_pkg(py_state_t *py_state_p,
-    PyObject *pPackageList, char *origin, char *type, char *name,
-    char *noinstall);
-static int ai_du_process_manual_pkg_names(py_state_t *py_state_p,
-    path_t *path_p, PyObject *pPackageList, char *origin, char *type,
-    char *name);
-static int ai_du_process_manual_pkg_types(py_state_t *py_state_p,
-    PyObject *pPackageList, path_t *path_p, char *origin, char *type);
-static int ai_du_get_manual_pkg_list(py_state_t *py_state_p, path_t *path_p,
-    PyObject **pPackageList_p);
-static int ai_du_get_searched_pkg_list(py_state_t *py_state_p, path_t *path_p,
-    char *install_root, PyObject **pPackageList_p);
-static int ai_du_install_packages(py_state_t *py_state_p,
-    PyObject *pPkgTupleList, char *install_root, boolean_t honor_noinstall,
-    int *num_installed_pkgs_p);
-static char **ai_uniq_manifest_values(char **in, int *len_p);
-static int ai_du_call_update_archive_ict(py_state_t *py_state_p,
-    char *install_root);
-
-/*
- * Stores the list of packages set up by ai_du_get_and_install() for use by
- * ai_du_install().
- */
-static PyObject *py_pkg_list;
-
-static char *empty_string = "";
-
-/* Private functions. */
-
-/*
- * auto_ddu_lib_init:
- * Initialize they python interpreter state so that python functions can be
- * called from this module.  Initialize a few common things always used.
- *
- * Arguments: None
- *
- * Returns:
- *   Success: A pointer to an initialized py_state_t object
- *   Failure: NULL
- *
- * Note: Call auto_ddu_lib_fini(), passing it the item returned from this
- * function, to undo the effects of this function.
- */
-static py_state_t *
-auto_ddu_lib_init()
-{
-	py_state_t *py_state_p = malloc(sizeof (py_state_t));
-
-	static PyObject *pFunctionModule = NULL;
-	static PyObject *pPackageModule = NULL;
-	static PyObject *pErrorModule = NULL;
-	static PyObject *pICTModule = NULL;
-
-	if (py_state_p == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR, "auto_ddu_lib_init: "
-		    "No memory.\n");
-		return (NULL);
-	}
-
-	/* If one of the above is NULL, all will be NULL. */
-	if (pFunctionModule == NULL) {
-		PyObject *pName;
-
-		/* Get names of modules for use by python/C interfaces. */
-		if ((pName = PyString_FromString(DDU_FUNCTION_MODULE)) !=
-		    NULL) {
-			pFunctionModule = PyImport_Import(pName);
-			Py_DECREF(pName);
-		}
-		if ((pName = PyString_FromString(DDU_PACKAGE_MODULE)) != NULL) {
-			pPackageModule = PyImport_Import(pName);
-			Py_DECREF(pName);
-		}
-		if ((pName = PyString_FromString(DDU_ERROR_MODULE)) != NULL) {
-			pErrorModule = PyImport_Import(pName);
-			Py_DECREF(pName);
-		}
-		if ((pName = PyString_FromString(ICT_MODULE)) != NULL) {
-			pICTModule = PyImport_Import(pName);
-			Py_DECREF(pName);
-		}
-
-		/* Cleanup and return NULL on error. */
-		if ((pFunctionModule == NULL) || (pPackageModule == NULL) ||
-		    (pErrorModule == NULL) || (pICTModule == NULL)) {
-			auto_debug_print(AUTO_DBGLVL_ERR, "auto_ddu_lib_init: "
-			    "error accessing DDU library or ICT modules.\n");
-			PyErr_Print();
-			Py_XDECREF(pFunctionModule);
-			Py_XDECREF(pPackageModule);
-			Py_XDECREF(pErrorModule);
-			Py_XDECREF(pICTModule);
-			pFunctionModule = pPackageModule = NULL;
-			pErrorModule = pICTModule = NULL;
-			free(py_state_p);
-			return (NULL);
-		}
-	}
-
-	/* Set up python interpreter state. */
-	PyEval_InitThreads();
-	py_state_p->mainThreadState = PyThreadState_Get();
-	py_state_p->myThreadState =
-	    PyThreadState_New(py_state_p->mainThreadState->interp);
-	(void) PyThreadState_Swap(py_state_p->myThreadState);
-
-	py_state_p->pFunctionModule = pFunctionModule;
-	py_state_p->pPackageModule = pPackageModule;
-	py_state_p->pErrorModule = pErrorModule;
-	py_state_p->pICTModule = pICTModule;
-
-	return (py_state_p);
-}
-
-/*
- * auto_ddu_lib_fini:
- * Undo initialization of python interpreter state set up by
- * auto_ddu_lib_init().
- *
- * Arguments: A pointer to an initialized py_state_t object
- *
- * Returns: N/A
- */
-static void
-auto_ddu_lib_fini(py_state_t *py_state_p)
-{
-	if (py_state_p == NULL) {
-		return;
-	}
-	(void) PyThreadState_Swap(py_state_p->mainThreadState);
-	PyThreadState_Clear(py_state_p->myThreadState);
-	PyThreadState_Delete(py_state_p->myThreadState);
-	free(py_state_p);
-}
-
-/*
- * ai_dump_python_exception:
- * Dump the class and message of a python exception.  Traceback not dumped.
- *
- * Caveat: An exception must be ready to be dumped, as indicated by
- * PyErr_Occurred() * returning Non-NULL.
- *
- * Arguments: None
- *
- * Returns: N/A
- */
-static void
-ai_dump_python_exception()
-{
-	PyObject *pType, *pValue, *pTraceback;
-	PyObject *pTypeString, *pValueString;
-
-	if (PyErr_Occurred() == NULL) {
-		return;
-	}
-
-	PyErr_Fetch(&pType, &pValue, &pTraceback);
-	pTypeString = PyObject_Str(pType);
-	pValueString = PyObject_Str(pValue);
-	auto_debug_print(AUTO_DBGLVL_ERR,
-	    "%s\n", PyString_AsString(pTypeString));
-	auto_debug_print(AUTO_DBGLVL_ERR,
-	    "%s\n", PyString_AsString(pValueString));
-	Py_DECREF(pType);
-	Py_DECREF(pValue);
-	Py_DECREF(pTraceback);
-	Py_DECREF(pTypeString);
-	Py_DECREF(pValueString);
-	PyErr_Clear();
-}
-
-/*
- * ai_call_ddu_build_repo_list:
- * Call the DDU library ddu_build_repo_list function.  This sets up the
- * list of repositories specified (as name/URL tuples) in the second argument,
- * and returns a python list of ddu_repo_objects for use by ddu_package_lookup.
- *
- * Arguments:
- *   py_state: Initialized py_state_t object.
- *   pRepoTypleList: List of (pubname, URL) tuples, each tuple representing
- *	a repository.
- *
- * Returns:
- *   Success: A python object representing a list of ddu_repo_objects.
- *   Failure: NULL
- */
-static PyObject *
-ai_call_ddu_build_repo_list(py_state_t *py_state_p, PyObject *pRepoTupleList)
-{
-	PyObject *pRet = NULL;
-
-	/* Find the function */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pFunctionModule,
-	    DDU_BUILD_REPO_LIST);
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Function not callable: %s\n", DDU_BUILD_REPO_LIST);
-	} else {
-		PyObject *pArgs = PyTuple_New(1);
-
-		/*
-		 * INCREF here since PyTuple_SetItem steals the reference, and
-		 * can decrement the refcount when pArgs is DECREFed.
-		 */
-		Py_INCREF(pRepoTupleList);
-
-		/* Set up args to python function and call it. */
-		(void) PyTuple_SetItem(pArgs, 0, pRepoTupleList);
-		pRet = PyObject_CallObject(pFunc, pArgs);
-		Py_DECREF(pArgs);
-
-		if ((PyErr_Occurred() != NULL) || (pRet == NULL) ||
-		    (pRet == Py_None)) {
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "%s returned an error.\n", DDU_BUILD_REPO_LIST);
-			ai_dump_python_exception();
-			Py_XDECREF(pRet);
-			pRet = NULL;
-		}
-	}
-
-	Py_XDECREF(pFunc);
-	return (pRet);
-}
-
-/*
- * ai_call_ddu_devscan:
- * Call the DDU library ddu_devscan function.  This function performs a device
- * scan on the system, to find out which devices are missing drivers.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   get_only_missing_drivers: Boolean: when true, return only the list of
- *	devices which are missing drivers.  When false, return all devices.
- *   dev_type: Type of devices to scan for.  See the DDU library ddu_devscan()
- *	function for the list of device types.  "all" is an acceptable device
- *	type.
- *
- * Returns:
- *   Success:
- *	A python object representing a list of unique ddu_dev_data objects is
- *	returned.
- *	- NOTE: if no devices are missing drivers and get_only_missing_drivers
- *	  is true, then an empty list is returned.
- *	- A ddu_dev_data object represents a found device.
- *   Failure:
- *	NULL
- */
-static PyObject *
-ai_call_ddu_devscan(py_state_t *py_state_p,
-    boolean_t get_only_missing_drivers, char *dev_type)
-{
-	PyObject *pRet = NULL;
-	PyObject *pList = NULL;
-	Py_ssize_t orig_listlen;
-	char **vids, **dids, **classes;
-	Py_ssize_t new_listused = 0;
-	Py_ssize_t i, j;
-
-	/* Find the function */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pFunctionModule,
-	    DDU_DEVSCAN);
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Function not callable: %s\n", DDU_DEVSCAN);
-		Py_XDECREF(pFunc);
-		return (NULL);
-	} else {
-		/* Set up args to python function and call it. */
-		PyObject *pArgs = PyTuple_New(2);
-		(void) PyTuple_SetItem(pArgs, 0,
-		    PyBool_FromLong((long)get_only_missing_drivers));
-		(void) PyTuple_SetItem(pArgs, 1, PyString_FromString(dev_type));
-		pList = PyObject_CallObject(pFunc, pArgs);
-
-		Py_DECREF(pArgs);
-		if ((PyErr_Occurred() != NULL) || (pList == NULL) ||
-		    (pList == Py_None)) {
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "%s returned an error.\n", DDU_DEVSCAN);
-			ai_dump_python_exception();
-			Py_XDECREF(pList);
-			Py_XDECREF(pFunc);
-			return (NULL);
-		}
-	}
-
-	Py_XDECREF(pFunc);
-
-	orig_listlen = PyList_Size(pList);
-	if (orig_listlen < 2) {
-		return (pList);
-	}
-
-	/* Check for duplicates. */
-	vids = (char **)malloc(sizeof (char *) * orig_listlen);
-	dids = (char **)malloc(sizeof (char *) * orig_listlen);
-	classes = (char **)malloc(sizeof (char *) * orig_listlen);
-	if ((vids == NULL) || (dids == NULL) || (classes == NULL)) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_call_ddu_devscan: No memory.\n");
-		return (NULL);
-	}
-
-	/* Build a list of unique values to be returned. */
-	pRet = PyList_New(0);
-
-	/* Loop through the list. */
-	for (i = 0; i < orig_listlen; i++) {
-		PyObject *pDDUDevData = PyList_GetItem(pList, i);
-		char *vendor_ID, *device_ID, *class;
-		boolean_t dup;
-
-		if (ai_get_ddu_dev_data_values(pDDUDevData, NULL, NULL,
-		    &vendor_ID, &device_ID, &class) != AUTO_INSTALL_SUCCESS) {
-			/* If can't compare, just allow it. */
-			continue;
-		}
-
-		/* Check for matching vendor, device, class. */
-		for (j = 0, dup = B_FALSE; j < new_listused; j++) {
-			if ((strcmp(class, classes[j]) == 0) &&
-			    (strcmp(device_ID, dids[j]) == 0) &&
-			    (strcmp(vendor_ID, vids[j]) == 0)) {
-				dup = B_TRUE;
-				break;
-			}
-		}
-
-		if (!dup) {
-			(void) PyList_Append(pRet, pDDUDevData);
-			vids[new_listused] = vendor_ID;
-			dids[new_listused] = device_ID;
-			classes[new_listused++] = class;
-		}
-	}
-
-	free(vids);
-	free(dids);
-	free(classes);
-
-	Py_XDECREF(pList);
-	return (pRet);
-}
-
-/*
- * ai_call_ddu_package_lookup:
- * Call the DDU library ddu_package_lookup function.  Given a list of
- * repositories, this function attempts to find a pkg(5) package in one of the
- * repositories.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   pDevObj: A python ddu_dev_data object representing a device.
- *   pRepoList: A python list of ddu_repo_object objects.  This represents the
- *	list of repositories to search through for a driver package.
- *   pPackageObj_p: Pointer to the returned ddu_package_object.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: pPackageObj_p points to an object representing a
- *	package to install for the given device.
- *   AUTO_INSTALL_PKG_NOT_FOUND: No package was found to install for the given
- *	device.  pPackageObj_p set to NULL.
- *   AUTO_INSTALL_FAILURE: Corresponds to an error other than not finding the
- *	package to install for the given device.  pPackageObj_p set to NULL.
- */
-static int
-ai_call_ddu_package_lookup(py_state_t *py_state_p,
-    PyObject *pDevObj, PyObject *pRepoList, PyObject **pPackageObj_p)
-{
-	int err = AUTO_INSTALL_SUCCESS;
-
-	/* Find the function */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pFunctionModule,
-	    DDU_PACKAGE_LOOKUP);
-
-	*pPackageObj_p = NULL;
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Function not callable: %s\n", DDU_PACKAGE_LOOKUP);
-	} else {
-		/* Set up args to python function. */
-		PyObject *pArgs = PyTuple_New(2);
-		Py_INCREF(pDevObj);	/* PyTuple_SetItem steals reference. */
-		Py_INCREF(pRepoList);	/* PyTuple_SetItem steals reference. */
-		(void) PyTuple_SetItem(pArgs, 0, pDevObj);
-		(void) PyTuple_SetItem(pArgs, 1, pRepoList);
-
-		/* Call ddu_package_lookup() */
-		*pPackageObj_p = PyObject_CallObject(pFunc, pArgs);
-		Py_DECREF(pArgs);
-		if ((PyErr_Occurred() != NULL) || (*pPackageObj_p == NULL) ||
-		    (*pPackageObj_p == Py_None)) {
-			err = AUTO_INSTALL_FAILURE;
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "%s returned an error.\n", DDU_PACKAGE_LOOKUP);
-			if (PyErr_Occurred() != NULL) {
-				PyObject *pType, *pValue, *pTraceback;
-				PyObject *pPkgNotFndExcObj =
-				    PyObject_GetAttrString(
-				    py_state_p->pErrorModule,
-				    DDU_PACKAGE_NOT_FOUND_EXC);
-				PyErr_Fetch(&pType, &pValue, &pTraceback);
-
-				if (PyObject_IsSubclass(pType,
-				    pPkgNotFndExcObj) == 1) {	/* 1 = match */
-					err = AUTO_INSTALL_PKG_NOT_FND;
-				}
-				Py_DECREF(pType);
-				Py_DECREF(pValue);
-				Py_DECREF(pTraceback);
-				Py_DECREF(pPkgNotFndExcObj);
-				PyErr_Clear();
-			}
-			Py_XDECREF(*pPackageObj_p);
-			*pPackageObj_p = NULL;
-
-		} else {
-			/*
-			 * DDU can return a pPackageObj_p that has type unknown,
-			 * no location and no inf_link.  Treat these as
-			 * "package not found" as well.
-			 */
-			char *ttype = empty_string;
-			char *tlocn = empty_string;
-			char *tinf_link = empty_string;
-			(void) ai_get_ddu_package_object_values(*pPackageObj_p,
-			    &ttype, &tlocn, NULL, NULL, &tinf_link, NULL);
-			if ((tlocn[0] == '\0') && (tinf_link[0] == '\0') &&
-			    (strcmp(ttype, "UNK") == 0)) {
-				err = AUTO_INSTALL_PKG_NOT_FND;
-				Py_XDECREF(*pPackageObj_p);
-				*pPackageObj_p = NULL;
-			}
-		}
-	}
-
-	Py_XDECREF(pFunc);
-	return (err);
-}
-
-/*
- * ai_call_ddu_install_package:
- * Call the DDU library ddu_install_package function.  Install the package
- * represented by pDDUPackageObj under the tree or file system given by
- * install_root.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   pDDUPackageObj: A python ddu_package_object representing the package to
- *	install.
- *   install_root: The root of the directory tree or file system in which to
- *	install the package.
- *   third_party_ok: Boolean: When true, it is OK to download and install
- *	packages found at third-party websites (as opposed to pkg(5)
- *	repositories).
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: Package was successfully installed.
- *   AUTO_INSTALL_FAILURE: Package was not successfully installed.
- *
- * NOTE: check installer logfile for details of the failure.
- */
-static int
-ai_call_ddu_install_package(py_state_t *py_state_p,
-    PyObject *pDDUPackageObj, char *install_root, boolean_t third_party_ok)
-{
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	/* Find the function */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pFunctionModule,
-	    DDU_INSTALL_PACKAGE);
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Function not callable: %s\n", DDU_INSTALL_PACKAGE);
-		rval = AUTO_INSTALL_FAILURE;
-
-	} else {
-
-		/* Set up args to python function. */
-		PyObject *pArgs = PyTuple_New(3);
-		Py_INCREF(pDDUPackageObj);	/* PyTuple_SetItem steals ref */
-		(void) PyTuple_SetItem(pArgs, 0, pDDUPackageObj);
-		(void) PyTuple_SetItem(pArgs, 1,
-		    PyString_FromString(install_root));
-		(void) PyTuple_SetItem(pArgs, 2,
-		    PyBool_FromLong((long)third_party_ok));
-
-		/* Call ddu_install_packages() */
-		(void) PyObject_CallObject(pFunc, pArgs);
-		Py_DECREF(pArgs);
-
-		if (PyErr_Occurred() != NULL) {
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "%s returned an error\n", DDU_INSTALL_PACKAGE);
-			ai_dump_python_exception();
-			rval = AUTO_INSTALL_FAILURE;
-		}
-	}
-
-	Py_XDECREF(pFunc);
-	return (rval);
-}
-
-/*
- * ai_new_ddu_package_object:
- * Create a new ddu_package_object of given type, name and location.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   type: type of package.
- *   name: name of package. (Not used by all types of packages.)
- *   origin: directory of where package is located.
- *
- * Returns:
- *   Success: A new python ddu_package_object object of the given
- *	type/name/location.
- *   Failure: NULL
- */
-static PyObject *
-ai_new_ddu_package_object(py_state_t *py_state_p,
-    char *type, char *name, char *origin)
-/*
- * Construct and return a new python ddu_package_object based on arguments.
- * Assumes auto_ddu_lib_init() has been called.
- *
- * Success: Returns the new object.
- * Failure: NULL
- */
-{
-	PyObject *pRet = NULL;
-
-	/* Find the function */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pPackageModule,
-	    DDU_PACKAGE_OBJECT);
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ddu_package_object constructor not callable\n");
-	} else {
-		/* Set up args to python function. */
-		PyObject *pArgs = PyTuple_New(3);
-		(void) PyTuple_SetItem(pArgs, 0, PyString_FromString(type));
-		(void) PyTuple_SetItem(pArgs, 1, PyString_FromString(name));
-		(void) PyTuple_SetItem(pArgs, 2, PyString_FromString(origin));
-
-		/* Call ddu_package_object constructor. */
-		pRet = PyObject_CallObject(pFunc, pArgs);
-		Py_DECREF(pArgs);
-		if ((PyErr_Occurred() != NULL) || (pRet == NULL) ||
-		    (pRet == Py_None)) {
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ddu_package_object constructor failed\n");
-			ai_dump_python_exception();
-			Py_XDECREF(pRet);
-			pRet = NULL;
-		}
-	}
-
-	Py_XDECREF(pFunc);
-	return (pRet);
-}
-
-/*
- * ai_get_ddu_package_object_values:
- * Return selected values from a given ddu_package_object.
- *
- * Values returned live on the python interpreter's heap, and should not be
- * modified or freed.  They live only as long as the python obj they come from.
- *
- * Note: implementation is tied to the fields of the python object.
- *
- * Arguments:
- *   pDDUPackageObject: Object to extract values from.  Assumed to be a
- *	ddu_package_object;  not verified.
- *   type: char string pointer returned filled in with "pkg_type" field.
- *	Not processed if NULL.
- *   location: char string pointer returned filled in with "pkg_location" field.
- *	Not processed if NULL.
- *   name: char string pointer returned filled in with "pkg_name" field.
- *	Not processed if NULL.
- *   descr: char string pointer returned filled in with "device_descriptor"
- *	field.  Not processed if NULL.
- *   inf_link: char string pointer returned filled in with "inf_link" field.
- *	Not processed if NULL.
- *   third_party: boolean pointer returned filled in with
- *	"third_party_from_search" field.  Not processed if NULL.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: when all requested fields are found and extracted.
- *   AUTO_INSTALL_FAILURE: when one or more fields could not be found or
- *	extracted.
- */
-static int
-ai_get_ddu_package_object_values(PyObject *pDDUPackageObject,
-    char **type, char **location, char **name, char **descr, char **inf_link,
-    boolean_t *third_party)
-{
-	PyObject *pValue;
-
-	if (type != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject, "pkg_type");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object pkg_type field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*type = PyString_AsString(pValue);
-	}
-
-	if (location != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject,
-		    "pkg_location");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object pkg_location field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*location = PyString_AsString(pValue);
-	}
-
-	if (name != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject, "pkg_name");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object pkg_name field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*name = PyString_AsString(pValue);
-	}
-
-	if (descr != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject,
-		    "device_descriptor");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object device_descriptor field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*descr = PyString_AsString(pValue);
-	}
-
-	if (inf_link != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject, "inf_link");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object inf_link field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*inf_link = PyString_AsString(pValue);
-	}
-
-	if (third_party != NULL) {
-		pValue = PyObject_GetAttrString(pDDUPackageObject,
-		    "third_party_from_search");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_package_object_values: "
-			    "no ddu_package_object "
-			    "third_party_from_search field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*third_party = (PyObject_IsTrue(pValue));
-	}
-
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * ai_get_ddu_dev_data_values:
- * Return selected values from a given ddu_dev_data object.
- *
- * Values returned live on the python interpreter's heap, and should not be
- * modified or freed.  They live only as long as the python obj they come from.
- *
- * Note: implementation is tied to the fields of the python object.
- *
- * Arguments:
- *   pDDUDevData: Object to extract values from.  Assumed to be a
- *	ddu_dev_data object;  not verified.
- *   dev_type_p: char string pointer returned filled in with "device_type"
- *	field.  Not processed if NULL.
- *   descr_p: char string pointer returned filled in with "description" field.
- *	Not processed if NULL.
- *   vendor_ID_p: char string pointer returned filled in with "vendor ID" field.
- *	Not processed if NULL.
- *   device_ID_p: char string pointer returned filled in with "device ID" field.
- *	Not processed if NULL.
- *   class_p: char string pointer returned filled in with PCI "class" field.
- *	Not processed if NULL.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: when all requested fields are found and extracted.
- *   AUTO_INSTALL_FAILURE: when one or more fields could not be found or
- *	extracted.
- */
-static int
-ai_get_ddu_dev_data_values(PyObject *pDDUDevData, char **dev_type_p,
-    char **descr_p, char **vendor_ID_p, char **device_ID_p, char **class_p)
-{
-	PyObject *pValue;
-
-	if (dev_type_p != NULL) {
-		pValue = PyObject_GetAttrString(pDDUDevData, "device_type");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_dev_data_values: "
-			    "no ddu_dev_data device_type field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*dev_type_p = PyString_AsString(pValue);
-	}
-
-	if (descr_p != NULL) {
-		pValue = PyObject_GetAttrString(pDDUDevData, "description");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_dev_data_values: "
-			    "no ddu_dev_data description field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*descr_p = PyString_AsString(pValue);
-	}
-
-	if (vendor_ID_p != NULL) {
-		pValue = PyObject_GetAttrString(pDDUDevData, "vendor_id");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_dev_data_values: "
-			    "no ddu_dev_data vendor_id field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*vendor_ID_p = PyString_AsString(pValue);
-	}
-
-	if (device_ID_p != NULL) {
-		pValue = PyObject_GetAttrString(pDDUDevData, "device_id");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_dev_data_values: "
-			    "no ddu_dev_data device_id field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*device_ID_p = PyString_AsString(pValue);
-	}
-
-	if (class_p != NULL) {
-		pValue = PyObject_GetAttrString(pDDUDevData, "class_code");
-		if (pValue == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_get_ddu_dev_data_values: "
-			    "no ddu_dev_data class_code field.\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-		*class_p = PyString_AsString(pValue);
-	}
-
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * ai_du_process_manual_pkg:
- * Create a ddu_package_object from parameters, and add it to the pPackageList.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   pPackageList: List of packages to append the new ddu_package_object to.
- *   origin: directory of where package or package directive is located.
- *   type: type of package.
- *   name: name of package.
- *   noinstall: boolean whether package is to be installed only to booted
- *	environment, not to target.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: A new package object was successfully appended to
- *	pPackageList.
- *   AUTO_INSTALL_FAILURE: An error occurred when trying to create a new
- *	package object or append it to the pPackageList.
- *
- *   Note: The list object referenced by pPackageList will be modified.
- */
-static int
-ai_du_process_manual_pkg(py_state_t *py_state_p, PyObject *pPackageList,
-    char *origin, char *type, char *name, char *noinstall)
-{
-	PyObject *pDDUPackageObject;
-	PyObject *pTuple;
-
-	auto_log_print(gettext(
-	    "Add Drivers: Found manifest entry for package:\n"));
-	if (name != empty_string) {
-		auto_log_print(gettext("  type:%s, origin:%s, name:%s\n"),
-		    type, origin, name);
-	} else {
-		auto_log_print(gettext("  type:%s, origin:%s\n"),
-		    type, origin);
-	}
-	if (strcmp(noinstall, "true") == 0) {
-		auto_log_print(gettext("    Package to be "
-		    "installed only in current booted environment.\n"));
-	} else {
-		auto_log_print(gettext("    Package to be "
-		    "installed in current booted environment and target.\n"));
-	}
-
-	/* Initialize a new ddu_package_object object */
-	pDDUPackageObject = ai_new_ddu_package_object(py_state_p,
-	    type, name, origin);
-
-	if (pDDUPackageObject == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_process_manual_pkg: <add_drivers> error:\n"
-		    "Error creating new package object for "
-		    "origin %s %s\n", origin, name);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	pTuple = PyTuple_New(3);
-	(void) PyTuple_SetItem(pTuple, 0, pDDUPackageObject);
-	(void) PyTuple_SetItem(pTuple, 1, Py_True);	/* third party OK */
-	(void) PyTuple_SetItem(pTuple, 2,
-	    (strcmp(noinstall, "true") == 0) ? Py_True : Py_False);
-
-	/*
-	 * NOTE: Don't decref pTuple here as PyList_Append doesn't
-	 * steal a reference to it.
-	 */
-	(void) PyList_Append(pPackageList, pTuple);
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * ai_du_process_manual_pkg_names:
- * Do any processing of packages for which unique origin (location), type and
- *	name are known.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   path_p: Used to build nodepath strings for manifest checking.
- *   pPackageList: List of packages to append the new ddu_package_object to.
- *   origin: directory of where package is located.
- *   type: type of package.
- *   name: name of package.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: Package processed successfully and appended to
- *	pPackageList.
- *   AUTO_INSTALL_FAILURE: An error occurred.  No package appended to
- *	pPackageList.
- *
- *   Note 1: the object pointed to by pPackageList will be modified.
- */
-static int
-ai_du_process_manual_pkg_names(py_state_t *py_state_p, path_t *path_p,
-    PyObject *pPackageList, char *origin, char *type, char *name)
-{
-	char **actions;
-	int actions_len;
-	char *nodespec;
-	int rval;
-
-	/* Get the action attribute. */
-
-	/* Search is different depending on whether a name is specified. */
-	if (name == empty_string) {
-		nodespec = ACTION_NONAME_NODEPATH;
-	} else {
-		nodespec = ACTION_YESNAME_NODEPATH;
-	}
-
-	if (snprintf(path_p->post_prefix_start, path_p->post_prefix_len,
-	    nodespec, origin, type, name) >= path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_process_manual_pkg_names: "
-		    "<add_drivers> manifest error:\n"
-		    "action path buffer overflow for origin \"%s\", "
-		    "type \"%s\", name \"%s\"\n", origin, type, name);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	actions = ai_get_manifest_values(path_p->path_str, &actions_len);
-
-	/*
-	 * Note: action must be present and must be
-	 * either "install" or "noinstall".
-	 */
-	if (actions_len <= 0) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "no action value for origin \"%s\", "
-		    "type \"%s\", name \"%s\"\n"), origin, type, name);
-		rval = AUTO_INSTALL_FAILURE;
-
-	} else if (actions_len > 1) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "multiple action values for origin \"%s\", "
-		    "type \"%s\", name \"%s\"\n"), origin, type, name);
-		rval = AUTO_INSTALL_FAILURE;
-
-	} else if (strcmp(actions[0], "install") == 0) {
-		/*
-		 * If action="install" then call ai_du_process_manual_pkg with
-		 * noinstall param set to empty_string, which means pkg will be
-		 * installed in both boot env and target.
-		 */
-
-		/* Obj pointed to by pPackageList will be modified. */
-		rval = ai_du_process_manual_pkg(py_state_p, pPackageList,
-		    origin, type, name, empty_string);
-	} else if (strcmp(actions[0], "noinstall") == 0) {
-		/*
-		 * If action="noinstall" then call ai_du_process_manual_pkg with
-		 * noinstall param set to "true", which means pkg will only be
-		 * installed in both boot env and not in target.
-		 */
-		/* Obj pointed to by pPackageList will be modified. */
-		rval = ai_du_process_manual_pkg(py_state_p, pPackageList,
-		    origin, type, name, "true");
-	} else {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "action must be install or noinstall for origin \"%s\", "
-		    "type \"%s\", name \"%s\"\n"), origin, type, name);
-		rval = AUTO_INSTALL_FAILURE;
-	}
-	ai_free_manifest_values(actions);
-	return (rval);
-}
-
-/*
- * ai_du_process_manual_pkg_types:
- * Do any processing of packages for which unique location and type are known.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   pPackageList: List of packages to append the new ddu_package_object to.
- *   origin: directory of where package is located.
- *   type: type of package.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: Package processed successfully and appended to
- *	pPackageList.
- *   AUTO_INSTALL_FAILURE: An error occurred.  No package appended to
- *	pPackageList.
- *
- *   Note 1: the pPackageList will be modified.
- *   Note 2: Appropriate error messages are logged/displayed.
- */
-static int
-ai_du_process_manual_pkg_types(py_state_t *py_state_p, PyObject *pPackageList,
-    path_t *path_p, char *origin, char *type)
-{
-	char **names;
-	char **uniq_names;
-	int namelen;
-	int k;
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	if ((strcmp(type, "P5I") != 0) &&
-	    (strcmp(type, "SVR4") != 0) &&
-	    (strcmp(type, "DU") != 0)) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "invalid type %s given for origin %s\n"), type, origin);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/* Get all names assocated with type and origin. */
-
-	if (snprintf(path_p->post_prefix_start, path_p->post_prefix_len,
-	    NAME_NODEPATH, origin, type) >= path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_process_manual_pkg_types: "
-		    "<add_drivers> manifest error:\n"
-		    "name path buffer overflow for origin "
-		    "%s, type %s\n", origin, type);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	names = ai_get_manifest_values(path_p->path_str, &namelen);
-	uniq_names = ai_uniq_manifest_values(names, &namelen);
-	ai_free_manifest_values(names);
-	names = uniq_names;
-
-	/* P5I and DU type entries don't have a "name" entry. */
-	if (strcmp(type, "SVR4") != 0) {
-		if (namelen > 0) {
-			auto_log_print(gettext(
-			    "Add Drivers: <add_drivers> "
-			    "manifest error:\n"
-			    "name given to P5I or DU package specification at "
-			    "origin %s\n"), origin);
-			rval = AUTO_INSTALL_FAILURE;
-		} else {
-			/* Obj pointed to by pPackageList will be modified. */
-			rval = ai_du_process_manual_pkg_names(py_state_p,
-			    path_p, pPackageList, origin, type, empty_string);
-		}
-
-	/* There must be at least one "name" entry per pkgspec for SVR4 type. */
-	} else if (namelen <= 0) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "  no name given for SVR4 package specification\n"
-		    "  at origin %s, type %s\n"), origin, type);
-		rval = AUTO_INSTALL_FAILURE;
-
-	} else {
-		/* Process each origin/type/name entry. */
-		for (k = 0; k < namelen; k++) {
-
-			/* Obj pointed to by pPackageList will be modified. */
-			int status = ai_du_process_manual_pkg_names(py_state_p,
-			    path_p, pPackageList, origin, type, names[k]);
-			if (status == AUTO_INSTALL_FAILURE) {
-				rval = AUTO_INSTALL_FAILURE;
-			}
-		}
-	}
-	ai_free_manifest_values(names);
-	return (rval);
-}
-
-/*
- * ai_du_get_manual_pkg_list:
- * Read the AI ai.xml Manifest file and process the <software> tags under the
- * <add_drivers> section.  <software> represents a manual specification of a
- * package to install.  Do error checking of the manifest as necessary, as this
- * function reads the manifest before it is validated against a schema.
- *
- * Validates syntax and processes the following from the manifest:
- *	<add_drivers>
- *		<software>
- *			<source>
- *				<publisher>
- *					<origin name="location"/>
- *				</publisher>
- *			</source>
- *			<software_data type="type" action="noinstall">
- *				<name>"name"</name>
- *			</software_data>
- *		</software>
- *	</add_drivers>
- *
- *	type can be "SVR4", "P5I" or "DU".
- *	name not allowed if type is "P5I" or "DU"
- *
- * Always return a list.  An empty list can be returned if the manifest shows
- * there are no packages to install for Driver Update, or on some errors.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   path_p: Used to build nodepath strings for manifest checking.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: A complete python list of (ddu_package_object,
- *	third_party_ok, noinstall) tuples suitable for input to
- *	ai_du_install_packages() has been created.
- *   AUTO_INSTALL_FAILURE: A python list of tuples suitable for input to
- *	ai_du_install_packages() has been created, but is missing one or more
- *	requested packages due to errors.  These errors could be manifest
- *	parsing errors or errors in setting up the packages.
- *
- * NOTE: check installer logfile for details of the failure.
- */
-static int
-ai_du_get_manual_pkg_list(py_state_t *py_state_p, path_t *path_p,
-    PyObject **pPackageList_p)
-{
-	char **uniq_origins = NULL;
-	char **types = NULL;
-	int origin_len, typelen;
-	char **origins;
-	char **uniq_types;
-	int num_pkgspecs;
-	int i, j;
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	/*
-	 * Initialize a zero-length list.
-	 * This will be returned empty if nothing to install has been found
-	 * or if an error is found early on.
-	 */
-	*pPackageList_p = PyList_New(0);
-
-	/* Read manifest for specific package requests. */
-
-	/* Get the number of <software> package spec entries. */
-	if (strlcpy(path_p->post_prefix_start, PKGSPEC_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR, "ai_du_get_manual_pkg_list: "
-		    "<software> path buffer overflow\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/* Use "origins" like a dummy here.  Interest only in num_pkgspecs. */
-	origins = ai_get_manifest_values(path_p->path_str, &num_pkgspecs);
-	ai_free_manifest_values(origins);
-
-	/* No package specs.  Return an empty list. */
-	if (num_pkgspecs <= 0) {
-		return (AUTO_INSTALL_SUCCESS);
-	}
-
-	/* Retrieve a list of all specific package request origins. */
-	if (strlcpy(path_p->post_prefix_start, ORIGIN_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_manual_pkg_list: origin path buffer overflow\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/* Get real origins list here for use below. */
-	origins = ai_get_manifest_values(path_p->path_str, &origin_len);
-
-	/*
-	 * Not a perfect test to validate package specs vs origins in
-	 * manifest, but it will do...
-	 */
-	if (origin_len != num_pkgspecs) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_manual_pkg_list: <add_drivers> manifest error:\n"
-		    "There is not a 1-1 <origin> - <software> mapping.\n");
-		ai_free_manifest_values(origins);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	uniq_origins = ai_uniq_manifest_values(origins, &origin_len);
-	ai_free_manifest_values(origins);
-	origins = uniq_origins;
-
-	/*
-	 * For each origin (location), get types.  Note it is possible for
-	 * there to be more than one type at an origin.  There can also be more
-	 * than one item of a given type at an origin.
-	 */
-	for (i = 0; i < origin_len; i++) {
-
-		/* Process "type" entries. */
-
-		if (snprintf(path_p->post_prefix_start, path_p->post_prefix_len,
-		    TYPE_NODEPATH, origins[i]) >= path_p->post_prefix_len) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_manual_pkg_list: "
-			    "<add_drivers> manifest error:\n"
-			    "type path buffer overflow for origin %s\n",
-			    origins[i]);
-			rval = AUTO_INSTALL_FAILURE;
-			continue;
-		}
-
-		types = ai_get_manifest_values(path_p->path_str, &typelen);
-		if (typelen <= 0) {
-			auto_log_print(gettext("Add Drivers: "
-			    "<add_drivers> manifest error:\n"
-			    "no type given for origin %s\n"), origins[i]);
-			rval = AUTO_INSTALL_FAILURE;
-			continue;
-		}
-
-		uniq_types = ai_uniq_manifest_values(types, &typelen);
-		ai_free_manifest_values(types);
-		types = uniq_types;
-
-		/* Loop for all types found at this origin... */
-		for (j = 0; j < typelen; j++) {
-
-			/* Obj *pPackageList_p points to will be modified.  */
-			int status = ai_du_process_manual_pkg_types(py_state_p,
-			    *pPackageList_p, path_p, origins[i], types[j]);
-			if (status == AUTO_INSTALL_FAILURE) {
-				rval = AUTO_INSTALL_FAILURE;
-			}
-		}
-	}
-
-	ai_free_manifest_values(origins);
-	ai_free_manifest_values(types);
-	return (rval);
-}
-
-/*
- * ai_du_get_searched_pkg_list:
- * Read the AI ai.xml Manifest file and process the <search_all> tag under the
- * <add_drivers> section.  Do the needful to scan for missing devices and to
- * perform package searches and installs for missing drivers.  Do error
- * checking of the manifest as necessary, as this function reads the manifest
- * before it is validated against a schema.
- *
- * Always return a list.  An empty list can be returned if a search determines
- * there are no packages to install for Driver Update (i.e. the system is
- * missing no drivers), or on some errors.
- *
- * Validates syntax and processes the following from the manifest:
- *	<add_drivers>
- *		<search_all addall="false">
- *			<source>
- *				<publisher name="publisher">
- *					<origin name="location"/>
- *				</publisher>
- *			</source>
- *		</search_all>
- *	</add_drivers>
- *
- *	publisher and origin are both optional, but if one is specified then
- *		the other must also be specified.
- *
- *	addall is optional.  When true, it allows search_all to install third
- *	party drivers (found via the database in the given pkg(5) repository,
- *	but installed from somewhere else).  Defaults to "false" if not
- *	specified.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   path_p: Used to build nodepath strings for manifest checking.
- *   install_root: Root used for determining pkg publisher.
- *   pPackageList_p: A python list of (ddu_package_object, third_party_ok,
- *	noinstall) tuples suitable for input to ai_du_install_packages().
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: No errors were encountered in retrieving package
- *	information.  It is also possible that the system is missing no drivers,
- *	and an empty list is returned.
- *   AUTO_INSTALL_PKG_NOT_FND: Packages for one or more missing drivers are not
- *	available.
- *   AUTO_INSTALL_FAILURE: One or more errors (other than packages which were
- *	not available) were encountered in retrieving package information.
- *
- * NOTE: check installer logfile for details of the failure.
- */
-static int
-ai_du_get_searched_pkg_list(py_state_t *py_state_p, path_t *path_p,
-    char *install_root, PyObject **pPackageList_p)
-{
-	PyObject *pDeviceList = NULL;
-	PyObject *pTuple;
-	PyObject *pRepoTupleList;
-	int len, sublen;
-	PyObject *pSearchRepoList = NULL;
-	char *search_origin, *search_pub;
-	PyObject *py_search_addall = NULL;
-	char **searches = NULL;
-	char **search_origins = NULL;
-	char **search_pubs = NULL;
-	char **search_addalls = NULL;
-	Py_ssize_t i, listlen;
-	int rval = AUTO_INSTALL_FAILURE;
-
-	*pPackageList_p = PyList_New(0); /* Initialize a zero-length list. */
-
-	/* Read manifest for search requests. */
-
-	if (strlcpy(path_p->post_prefix_start, SEARCH_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: "
-		    "search pathname buffer overflow.\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	searches = ai_get_manifest_values(path_p->path_str, &len);
-	ai_free_manifest_values(searches);
-	if (len > 1) {
-		auto_log_print(gettext("Add Drivers: "
-		    "Only one <search_all> entry allowed in manifest\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (len <= 0) {
-		return (AUTO_INSTALL_SUCCESS);
-	}
-
-	auto_log_print(gettext("Add Drivers: Doing a device "
-	    "scan for devices which are missing drivers...\n"));
-
-	/*
-	 * Call ddu_devscan() to do search if requested.
-	 * The boolean value is to scan only for missing drivers.
-	 */
-	pDeviceList = ai_call_ddu_devscan(py_state_p, B_TRUE, "all");
-	if (pDeviceList == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: "
-		    "Error scanning for missing drivers.\n");
-		return (AUTO_INSTALL_FAILURE);
-
-	/* An empty list is perfectly acceptable here.  No missing drivers. */
-	} else if (PyList_Size(pDeviceList) == 0) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: No missing drivers found.\n");
-		return (AUTO_INSTALL_SUCCESS);
-	}
-
-	/* Get repo location, if specified. */
-
-	if (strlcpy(path_p->post_prefix_start, SEARCH_ORIGIN_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: search repo origin path "
-		    "buffer overflow.\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	auto_log_print(gettext("Add Drivers: Querying manifest "
-	    "for explicit repo for getting missing driver packages...\n"));
-
-	search_origins = ai_get_manifest_values(path_p->path_str, &sublen);
-	if (sublen == 1) {
-		search_origin = search_origins[0];
-	} else if (sublen <= 0) {
-		search_origin = empty_string;
-	} else {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "Only one origin allowed per <search_all> entry.\n"));
-		goto done;
-	}
-
-	/* Get repo publisher, if specified. */
-
-	if (strlcpy(path_p->post_prefix_start, SEARCH_PUBNAME_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: search repo publisher path "
-		    "buffer overflow.\n");
-		goto done;
-	}
-
-	search_pubs = ai_get_manifest_values(path_p->path_str, &sublen);
-	if (sublen == 1) {
-		search_pub = search_pubs[0];
-	} else if (sublen <= 0) {
-		search_pub = empty_string;
-	} else {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "Only one publisher allowed for a <search_all> entry\n"));
-		goto done;
-	}
-
-	/* Can't have one without the other. */
-	if ((search_pub == empty_string) ^
-	    (search_origin == empty_string)) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "search repo origin and "
-		    "publisher must be specified together.\n"));
-		goto done;
-	}
-
-	/*
-	 * if publisher and origin provided, create tuple from them and
-	 * build a repo list from it.
-	 */
-	if (search_pub != empty_string) {
-
-		auto_log_print(gettext("Add Drivers: "
-		    "Found repo in manifest: publisher:%s, origin:%s\n"),
-		    search_pub, search_origin);
-
-		pTuple = PyTuple_New(2);
-		(void) PyTuple_SetItem(pTuple, 0,
-		    PyString_FromString(search_pub));
-		(void) PyTuple_SetItem(pTuple, 1,
-		    PyString_FromString(search_origin));
-		pRepoTupleList = PyList_New(0);
-		(void) PyList_Append(pRepoTupleList, pTuple);
-		pSearchRepoList = ai_call_ddu_build_repo_list(py_state_p,
-		    pRepoTupleList);
-		Py_DECREF(pTuple);
-		Py_DECREF(pRepoTupleList);
-
-		if (pSearchRepoList == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_searched_pkg_list:"
-			    "Error building search repo list.\n");
-			goto done;
-		}
-
-		auto_log_print(gettext("Add Drivers: "
-		    "Searching for packages in %s repository at %s\n"),
-		    search_pub, search_origin);
-
-	} else {
-		FILE *pub_info;
-		char pub_buf[MAXPATHLEN];
-		char cmd_buf[MAXPATHLEN];
-
-		/* No publisher/URL provided.  Return an empty repo list. */
-
-		auto_log_print(gettext("Add Drivers: "
-		    "No explicit <search_all> repo specified in manifest\n"));
-		auto_log_print(gettext("... Searching for packages in "
-		    "repositories already configured on the system\n"));
-
-		(void) snprintf(cmd_buf, MAXPATHLEN,
-		    "/usr/bin/pkg -R %s publisher", install_root);
-		if ((pub_info = popen(cmd_buf, "r")) != NULL) {
-			while (fgets(pub_buf, MAXPATHLEN, pub_info) != NULL) {
-				auto_log_print("%s\n", pub_buf);
-			}
-			(void) pclose(pub_info);
-		}
-
-		pSearchRepoList = PyList_New(0);
-	}
-
-	/* Find out if <addall> was specified. */
-
-	if (strlcpy(path_p->post_prefix_start, SEARCH_ADDALL_NODEPATH,
-	    path_p->post_prefix_len) > path_p->post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_searched_pkg_list: search addall path "
-		    "buffer overflow.\n");
-		goto done;
-	}
-
-	/* No more than a single true/false value is allowed. */
-	search_addalls = ai_get_manifest_values(path_p->path_str, &sublen);
-	if ((sublen > 1) ||
-	    ((sublen == 1) &&
-	    ((strcmp(search_addalls[0], "true") != 0) &&
-	    (strcmp(search_addalls[0], "false") != 0)))) {
-		auto_log_print(gettext("Add Drivers: "
-		    "<add_drivers> manifest error:\n"
-		    "invalid addall value for <search_all> entry\n"));
-		goto done;
-
-	/* Default to false if not provided. */
-	} else if ((sublen <= 0) || (strcmp(search_addalls[0], "false") == 0)) {
-		Py_INCREF(Py_False);
-		py_search_addall = Py_False;
-
-	} else {
-		auto_log_print(gettext("Add Drivers: Manifest "
-		    "allows adding of third-party drivers\n"));
-		Py_INCREF(Py_True);
-		py_search_addall = Py_True;
-	}
-
-	/*
-	 * Append packages found for missing devices, to the list of packages
-	 * to install.
-	 */
-	rval = AUTO_INSTALL_SUCCESS;
-	listlen = PyList_Size(pDeviceList);
-	for (i = 0; i < listlen; i++) {
-
-		PyObject *pDDUPackageObject;
-		PyObject *pDDUDevData;
-		char *dev_type;
-		char *descr;
-		int lookup_err;
-		boolean_t third_party = B_FALSE;
-
-		pDDUDevData = PyList_GetItem(pDeviceList, i);
-
-		/* Find the package containing the driver for this device. */
-		lookup_err = ai_call_ddu_package_lookup(py_state_p,
-		    pDDUDevData, pSearchRepoList, &pDDUPackageObject);
-
-		/* Get info for display / logging purposes, and log it. */
-		if (ai_get_ddu_dev_data_values(pDDUDevData, &dev_type, &descr,
-		    NULL, NULL, NULL) != AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_searched_pkg_list: Error retrieving "
-			    "device information for display\n");
-			dev_type = descr = empty_string;
-		}
-
-		/* Package not found is not considered an error. */
-		if (lookup_err == AUTO_INSTALL_PKG_NOT_FND) {
-			auto_log_print(gettext("Add Drivers: "
-			    "Warning: Search found no package for "
-			    "\"%s\" type device \"%s\".\n"), dev_type, descr);
-			/*
-			 * Set marginal success status.
-			 * Don't override failure status.
-			 */
-			if (rval == AUTO_INSTALL_SUCCESS) {
-				rval = AUTO_INSTALL_PKG_NOT_FND;
-			}
-			continue;
-		} else if (lookup_err != AUTO_INSTALL_SUCCESS) {
-			auto_log_print(gettext("Add Drivers: "
-			    "Error retrieving package for "
-			    "\"%s\" type device \"%s\".\n"), dev_type, descr);
-			rval = AUTO_INSTALL_FAILURE;
-			continue;
-		} else {
-			auto_log_print(gettext("Add Drivers: "
-			    "DDU returned package info for "
-			    "\"%s\" type device \"%s\".\n"), dev_type, descr);
-		}
-
-		(void) ai_get_ddu_package_object_values(pDDUPackageObject,
-		    NULL, NULL, NULL, NULL, NULL, &third_party);
-		if (third_party) {
-			auto_log_print(gettext("  This is a third-party "
-			    "package.\n"));
-		}
-
-		/* Append the package info to the returned list. */
-
-		/*
-		 * NOTE: Don't decref pTuple here as PyList_Append doesn't
-		 * steal a reference to it.
-		 */
-		pTuple = PyTuple_New(3);
-		(void) PyTuple_SetItem(pTuple, 0, pDDUPackageObject);
-		Py_INCREF(py_search_addall);	/* 3rd party OK */
-		(void) PyTuple_SetItem(pTuple, 1, py_search_addall);
-		Py_INCREF(Py_False);		/* always install */
-		(void) PyTuple_SetItem(pTuple, 2, Py_False);
-		(void) PyList_Append(*pPackageList_p, pTuple);
-	}
-done:
-	/* Cleanup time, whether an error occured or not. */
-	ai_free_manifest_values(search_origins);
-	ai_free_manifest_values(search_pubs);
-	Py_XDECREF(py_search_addall);
-	Py_XDECREF(pSearchRepoList);
-	Py_XDECREF(pDeviceList);
-	return (rval);
-}
-
-/*
- * ai_du_install_packages:
- * Install packages provided by the pPkgTupleList.  Install in the filesystem /
- * tree under install_root, skipping packages with noinstall flag set if
- * honor_noinstall is set.
- *
- * NOTE: it is assumed that the DDU library returns successful status when
- * attempting to install a package which is already installed.
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   pPkgTupleList: Python list of (ddu_package_object, third_party_ok,
- *	noinstall) tuples which define packages to add and their parameters for
- *	adding them.  Guaranteed not to be NULL.  When third_party_ok is true,
- *	add the corresponding package even if it is to be downloaded from a
- *	third party website.  When noinstall is true in the tuple and the
- *	honor_noinstall argument is also true, skip adding the corresponding
- *	package.  (This may be used to skip installation onto the target disk,
- *	after having installed into the booted install environment.)
- *   install_root: Top of the filesystem or tree where the packages are to be
- *	installed.
- *   honor_noinstall: When true and the noinstall flag is set in a package
- *	tuple, skip installing that package.
- *   num_installed_pkgs_p: Returns the value passed in, plus the number of
- *	packages actually installed.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: All packages were able to be installed.
- *   AUTO_INSTALL_FAILURE: At least one package was not able to be installed.
- *
- * NOTE: check installer logfile for details of the failure.
- */
-static int
-ai_du_install_packages(py_state_t *py_state_p, PyObject *pPkgTupleList,
-    char *install_root, boolean_t honor_noinstall, int *num_installed_pkgs_p)
-{
-	Py_ssize_t len;
-	Py_ssize_t i;
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	auto_log_print(gettext("Add Drivers: "
-	    "Installing packages to %s\n"), install_root);
-
-	len = PyList_Size(pPkgTupleList);
-	for (i = 0; i < len; i++) {
-
-		/* Break out the tuple. */
-
-		PyObject *pTuple = PyList_GetItem(pPkgTupleList, i);
-		PyObject *pDDUPackageObject = PyTuple_GetItem(pTuple, 0);
-		PyObject *pThirdPartyOK = PyTuple_GetItem(pTuple, 1);
-		PyObject *pNoInstall = PyTuple_GetItem(pTuple, 2);
-		char *type, *location, *name, *descr, *inf_link;
-		boolean_t third_party;
-
-		if (ai_get_ddu_package_object_values(pDDUPackageObject,
-		    &type, &location, &name, &descr, &inf_link, &third_party) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_install_packages: Error extracting package "
-			    "information for ddu_package_object.\n");
-			type = location = name = descr = inf_link =
-			    empty_string;
-			third_party = B_FALSE;
-		} else {
-			if (strcmp(name, empty_string) == 0) {
-				auto_log_print(gettext(
-				    "  %s package at origin:%s\n"),
-				    type, location);
-			} else {
-				auto_log_print(gettext(
-				    "  %s package at origin:%s, name:%s\n"),
-				    type, location, name);
-			}
-		}
-
-		if (PyObject_IsTrue(pNoInstall) && honor_noinstall) {
-			auto_log_print(gettext("Add Drivers: "
-			    "    honoring noinstall: skipping package.\n"));
-			continue;
-		}
-
-		/* Display any third-party package errors to console. */
-		if ((! PyObject_IsTrue(pThirdPartyOK)) && third_party) {
-			char *msg1_3p = gettext("  Manifest is not allowing "
-			    "third party packages found through search for "
-			    "installation to %s\n");
-			char *msg2_3p = gettext("  Info on the package to "
-			    "install to make device \"%s\"\n"
-			    "    operational is available:\n"
-			    "    %s\n");
-			(void) fprintf(stderr, msg1_3p, install_root);
-			auto_log_print(msg1_3p, install_root);
-			(void) fprintf(stderr, msg2_3p, descr, inf_link);
-			auto_log_print(msg2_3p, descr, inf_link);
-			rval = AUTO_INSTALL_FAILURE;
-			continue;
-		}
-
-		/* Handle uninstallable package objects. */
-		if (strcmp(location, empty_string) == 0) {
-			if (strcmp(inf_link, empty_string) == 0) {
-				auto_log_print(gettext(
-				    "Add Drivers: Package not "
-				    "found for device: \"%s\"\n"), descr);
-			} else {
-				auto_log_print(gettext(
-				    "Add Drivers: Package for device: \"%s\" "
-				    "must be installed manually.\n"
-				    "For more information go to:\n %s\n"),
-				    descr, inf_link);
-			}
-			rval = AUTO_INSTALL_FAILURE;
-			continue;
-		}
-
-		/* All is well.  Install the package. */
-		if (ai_call_ddu_install_package(py_state_p, pDDUPackageObject,
-		    install_root, PyObject_IsTrue(pThirdPartyOK)) ==
-		    AUTO_INSTALL_FAILURE) {
-			auto_log_print(gettext("Add Drivers: "
-			    "Error installing package to %s\n"), install_root);
-			rval = AUTO_INSTALL_FAILURE;
-		} else {
-			(*num_installed_pkgs_p)++;
-		}
-	}
-	return (rval);
-}
-
-/*
- * ai_uniq_manifest_values:
- * Remove duplicate values in lists returned by ai_lookup_manifest_values().
- *
- * Arguments:
- *   in: the input list of values.
- *   len_p: The length of the input list, on input.  Returns the length of the
- *	list returned.
- *
- * Returns:
- *   Success: The resulting list of unique values.
- *   Failure: NULL
- */
-static char **
-ai_uniq_manifest_values(char **in, int *len_p)
-{
-	int in_len = *len_p;
-	int dup_count = 0;
-	char **out;
-	char *comp;
-	boolean_t *is_dup = (boolean_t *)alloca(in_len * sizeof (boolean_t));
-	int i, j;
-
-	if ((in_len == 0) || (in == NULL)) {
-		return (NULL);
-	}
-
-	bzero(is_dup, (in_len * sizeof (boolean_t)));
-
-	for (i = 0; i < in_len - 1; i++) {
-		comp = in[i];
-		for (j = i + 1; j < in_len; j++) {
-			if ((!is_dup[j]) && (strcmp(comp, in[j]) == 0)) {
-				is_dup[j] = B_TRUE;
-				dup_count++;
-			}
-		}
-	}
-
-	out = (char **)malloc((in_len - dup_count + 1) * sizeof (char *));
-	if (out == NULL) {
-		return (NULL);
-	}
-	for (i = 0, j = 0; i < in_len; i++) {
-		if (!is_dup[i]) {
-			out[j++] = strdup(in[i]);
-		}
-	}
-	out[j] = NULL;
-
-	*len_p = j;
-	return (out);
-}
-
-/*
- * ai_du_call_update_archive_ict:
- * Call the bootadm update_archive ICT
- *
- * Arguments:
- *   py_state_p: Initialized py_state_t object.
- *   install_root: root of the tree where to call the ICT.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: ICT was successfully executed.
- *   AUTO_INSTALL_FAILURE: ICT was not successfully executed.
- */
-static int
-ai_du_call_update_archive_ict(py_state_t *py_state_p, char *install_root)
-{
-	int rval = AUTO_INSTALL_FAILURE;
-	PyObject *pICT_instance = NULL;
-	PyObject *pICT_rval = NULL;
-
-	/* Find the constructor. */
-	PyObject *pFunc = PyObject_GetAttrString(py_state_p->pICTModule,
-	    ICT_CLASS);
-
-	if ((pFunc == NULL) || (!PyCallable_Check(pFunc))) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ICT constructor not callable\n");
-	} else {
-		/* Set up args to python function. */
-		PyObject *pArgs = PyTuple_New(1);
-		(void) PyTuple_SetItem(pArgs, 0,
-		    PyString_FromString(install_root));
-
-		/* Call constructor. */
-		pICT_instance = PyObject_CallObject(pFunc, pArgs);
-		Py_XDECREF(pFunc);
-		Py_DECREF(pArgs);
-		if ((PyErr_Occurred() != NULL) || (pICT_instance == NULL) ||
-		    (pICT_instance == Py_None)) {
-			auto_debug_dump_file(AUTO_DBGLVL_ERR, DDU_ERRLOG);
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ICT constructor failed\n");
-			ai_dump_python_exception();
-			Py_CLEAR(pICT_instance);
-		}
-	}
-
-	if (pICT_instance != NULL) {
-		pICT_rval = PyObject_CallMethod(
-		    pICT_instance, ICT_UPDATE_ARCHIVE, NULL);
-		if (pICT_rval == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Error running update_boot_archive ICT.\n");
-		} else if (PyInt_AsLong(pICT_rval) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "update_boot_archive ICT returned an error.\n");
-		} else {
-			rval = AUTO_INSTALL_SUCCESS;
-		}
-		Py_XDECREF(pICT_rval);
-		Py_DECREF(pICT_instance);
-	}
-
-	return (rval);
-}
-
-/* Exported functions. */
-
-/*
- * ai_du_get_and_install:
- * Query the manifest for the entire <add_drivers> section, and add packages
- * accordingly.  Add packages to install_root.  If a package has its noinstall
- * flag set and the honor_noinstall argument is set, skip adding that package.
- * Save the list of packages to install to the module global py_pkg_list, so
- * that the same list of packages can be installed to a different target with
- * ai_du_install().
- *
- * Install all explicitly-stated packages first.  Then do <search_all> last.
- * This is to handle any explicit requests for matching a special driver to a
- * device, before <search_all> finds the first available one.
- *
- * If search determines that a driver is missing and cannot find a package for
- * it, this is not reported as an error.  A ddu_package_object is not created in
- * this case, so no installation or package fetch is done for this driver.  Any
- * other kind of problem which occurs around searched packages, during the
- * search itself or during an installation of a package found during search, is
- * reported as an error.
- *
- * Any issue found around explicitly specified packages (<software>s), whether
- * it be that the package is not found or there was an issue during
- * installation, is reported as an error.  ddu_package_objects are always
- * created for these packages.
- *
- * Assumes ai_create_manifest_image() has set up the manifest data.
- * Does not assume any data has been verified though.
- *
- * Arguments:
- *   install_root: Top of the filesystem or tree where the packages are to be
- *	installed.
- *   honor_noinstall: When true and the noinstall flag is set in a package
- *	tuple, skip installing that package.
- *   update_boot_archive: When true, run the ICT to update the boot archive.
- *   num_pkgs_installed_p: Returns the number of packages installed.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: No errors found.
- *   AUTO_INSTALL_PKG_NOT_FND: At least one needed package found during search
- *	could not be found.  No other errors encountered.
- *   AUTO_INSTALL_FAILURE: An error was encountered and was different than
- *	not being able to find a package for a missing driver.
- *
- *   Boot archive update status is not reflected in this return status.
- *   NOTE: this routine will continue on most errors, in order to install as
- *	many packages as possible.
- *
- * NOTE: return status and num_pkgs_installed together tell the caller the full
- * story.  It is possible, for example, that no packages were installed because
- * one package flagged during search could not be found.
- *
- * NOTE: check installer logfile for details of the failure.
- *
- * Side effects:
- *   module global py_pkg_list is set to point to list of packages to install.
- */
-int
-ai_du_get_and_install(char *install_root, boolean_t honor_noinstall,
-    boolean_t update_boot_archive, int *num_installed_pkgs_p)
-{
-	PyObject *manual_pkg_list;
-	PyObject *searched_pkg_list;
-	py_state_t *py_state_p;
-	path_t path;
-	char **dummy_list;
-	int num_entries;
-	int len;
-	Py_ssize_t manual_size = 0;
-	Py_ssize_t searched_size = 0;
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	*num_installed_pkgs_p = 0;
-
-	/* Initialize path, post_prefix_start and post_prefix_len for later. */
-	(void) strncpy(path.path_str, AIM_PREFACE, MAX_NODEPATH_SIZE);
-	len = strlen(path.path_str);
-	path.post_prefix_start = &path.path_str[len];
-	path.post_prefix_len = MAX_NODEPATH_SIZE - len;
-
-	/*
-	 * Set up an empty py_pkg_list so ai_du_install() knows this function
-	 * was called first.
-	 */
-	if (py_pkg_list != NULL) {
-		Py_CLEAR(py_pkg_list);
-	}
-
-	py_pkg_list = PyList_New(0);
-
-	/*
-	 * See if the manifest has at least one <software> or search_all entry.
-	 * If not, just return success (e.g. no-op).
-	 */
-
-	/* Get the number of <software> entries. */
-	if (strlcpy(path.post_prefix_start, PKGSPEC_NODEPATH,
-	    path.post_prefix_len) > path.post_prefix_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_and_install: <software> path buffer overflow\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/* Get number of <software> entries in the manifest. */
-	dummy_list = ai_get_manifest_values(path.path_str, &num_entries);
-	ai_free_manifest_values(dummy_list);
-
-	if (num_entries <= 0) {
-		/* See if there is a search_all entry in the manifest. */
-		if (strlcpy(path.post_prefix_start, SEARCH_NODEPATH,
-		    path.post_prefix_len) > path.post_prefix_len) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_and_install: "
-			    "search path buffer overflow\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-
-		dummy_list = ai_get_manifest_values(path.path_str,
-		    &num_entries);
-		ai_free_manifest_values(dummy_list);
-		if (num_entries <= 0) {
-			return (AUTO_INSTALL_SUCCESS);
-		}
-	}
-
-	/*
-	 * Install all explicitly specified packages first.
-	 *
-	 * Do the search for missing devices afterward, as an independent step,
-	 * to account for newly-operational devices as a result of
-	 * explicitly-specified package installation.
-	 */
-
-	if ((py_state_p = auto_ddu_lib_init()) == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_and_install: "
-		    "Error initializing auto_ddu_lib.\n");
-		rval = AUTO_INSTALL_FAILURE;
-		goto done;
-	}
-
-	if (ai_du_get_manual_pkg_list(py_state_p, &path,
-	    &manual_pkg_list) != AUTO_INSTALL_SUCCESS) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_and_install: "
-		    "Error getting <software> package specification.\n");
-		rval = AUTO_INSTALL_FAILURE;
-		/* Keep going.  Don't abort. */
-	}
-
-	manual_size = PyList_Size(manual_pkg_list);
-	if (manual_size > 0) {
-		if (ai_du_install_packages(py_state_p, manual_pkg_list,
-		    install_root, honor_noinstall, num_installed_pkgs_p) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_and_install: Error installing at least "
-			    "one <software> package specification.\n");
-			rval = AUTO_INSTALL_FAILURE;
-			/* Keep going.  Don't abort. */
-		}
-	}
-
-	switch (ai_du_get_searched_pkg_list(py_state_p, &path, install_root,
-	    &searched_pkg_list)) {
-	case AUTO_INSTALL_FAILURE:
-		rval = AUTO_INSTALL_FAILURE;
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_get_and_install: "
-		    "Error searching for inoperable devices and "
-		    "missing driver packages.\n");
-		/* Keep going.  Don't abort. */
-		break;
-	case AUTO_INSTALL_PKG_NOT_FND:
-		if (rval != AUTO_INSTALL_FAILURE) {
-			rval = AUTO_INSTALL_PKG_NOT_FND;
-		}
-		break;
-	default:
-		break;
-	}
-
-	searched_size = PyList_Size(searched_pkg_list);
-	if (searched_size > 0) {
-		if (ai_du_install_packages(py_state_p, searched_pkg_list,
-		    install_root, honor_noinstall, num_installed_pkgs_p) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_and_install: Error installing at least "
-			    "one searched package for <search_all>.\n");
-			rval = AUTO_INSTALL_FAILURE;
-			/* Keep going.  Don't abort. */
-		}
-	}
-
-	if (update_boot_archive && (*num_installed_pkgs_p > 0)) {
-		if (ai_du_call_update_archive_ict(py_state_p, install_root) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_get_and_install: Warning: could not update "
-			    "boot archive for %s.\n", install_root);
-		}
-	}
-
-	/*
-	 * Save the manual and searched package lists in py_pkg_list.
-	 * The new list can be used in a later call to ai_du_install().
-	 */
-
-	if (manual_size > 0) {
-		(void) PyList_SetSlice(py_pkg_list, 0, manual_size - 1,
-		    manual_pkg_list);
-	}
-	Py_DECREF(manual_pkg_list);
-
-	if (searched_size > 0) {
-		(void) PyList_SetSlice(py_pkg_list, manual_size,
-		    manual_size + searched_size - 1, searched_pkg_list);
-	}
-	Py_DECREF(searched_pkg_list);
-
-done:
-	auto_ddu_lib_fini(py_state_p);
-
-	return (rval);
-}
-
-/*
- * ai_du_install:
- * Install additional packages based on driver update parameters fetched from a
- * previous ai_du_get_and_install() call.  The module global py_pkg_list
- * supplies the list (and order) of packages to install.  Add packages to
- * install_root.  If a package has its noinstall flag set and the
- * honor_noinstall argument is set, skip adding that package.
- *
- * This routine assumes the py_pkg_list was set up via a prior call to
- * ai_du_get_and_install_packages().
- *
- * The availability and origin (location) of all packages to be installed is
- * assumed the same as when the py_pkg_list was built (i.e. the most recent
- * call to ai_du_get_and_install()).
- *
- * Arguments:
- *   install_root: Top of the filesystem or tree where the packages are to be
- *	installed.
- *   honor_noinstall: When true and the noinstall flag is set in a package
- *	tuple, skip installing that package.
- *   update_boot_archive: When true, run the ICT to update the boot archive.
- *   num_installed_pkgs_p: Returns the number of packages successfully
- *	installed.
- *   NOTE: the modular global py_pkg_list specifies the packages to install.
- *
- * Returns:
- *   AUTO_INSTALL_SUCCESS: No errors found and at least one package was
- *	installed.
- *   AUTO_INSTALL_FAILURE: An error was encountered.  Some packages may have
- *	been installed.
- *
- *   Boot archive update status is not reflected in this return status.
- *   NOTE: this routine will continue on most errors, in order to install as
- *	many packages as possible.
- *
- * NOTE: return status and num_pkgs_installed together tell the caller the full
- * story.  It is possible, for example, that no packages were installed because
- * one package flagged during search could not be found.
- *
- * NOTE: check installer logfile for details of the failure.
- */
-int
-ai_du_install(char *install_root, boolean_t honor_noinstall,
-    boolean_t update_boot_archive, int *num_installed_pkgs_p)
-{
-	int rval = AUTO_INSTALL_SUCCESS;
-
-	py_state_t *py_state_p;
-
-	*num_installed_pkgs_p = 0;
-
-	if (py_pkg_list == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_install: ai_du_get_and_install needs to be "
-		    "called first.\n");
-		return (AUTO_INSTALL_FAILURE);
-
-	} else if (PyList_Size(py_pkg_list) == 0) {
-		return (AUTO_INSTALL_SUCCESS);
-	}
-
-	if ((py_state_p = auto_ddu_lib_init()) == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_install: "
-		    "Error initializing auto_ddu_lib.\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if ((rval = ai_du_install_packages(py_state_p, py_pkg_list,
-	    install_root, honor_noinstall, num_installed_pkgs_p)) !=
-	    AUTO_INSTALL_SUCCESS) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ai_du_install: Error installing packages.\n");
-		rval = AUTO_INSTALL_FAILURE;
-	}
-
-	if (update_boot_archive && (*num_installed_pkgs_p > 0)) {
-		if (ai_du_call_update_archive_ict(py_state_p,
-		    install_root) != AUTO_INSTALL_SUCCESS) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "ai_du_install: Warning: could not update boot "
-			    "archive for %s.\n", install_root);
-		}
-	}
-
-	auto_ddu_lib_fini(py_state_p);
-
-	return (rval);
-}
--- a/usr/src/cmd/auto-install/auto_install.c	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1848 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
- */
-
-#include <alloca.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <unistd.h>
-#include <libnvpair.h>
-#include <locale.h>
-#include <sys/param.h>
-#include <sys/types.h>
-
-#include "auto_install.h"
-#include <ls_api.h>
-#include <orchestrator_api.h>
-
-/*
- * use presence of hidden file to indicate iSCSI boot installation
- * pending code refactoring to make less kludgy
- * see also ict.py
- */
-#define	ISCSI_BOOT_INDICATOR_FILE	"/.iscsi_boot"
-#define	DEFAULT_HOSTNAME	"solaris"
-#define	DEFAULT_ROOT_PASSWORD	"solaris"
-
-static  boolean_t install_done = B_FALSE;
-static	boolean_t install_failed = B_FALSE;
-
-/* debug mode - disabled by default */
-static	boolean_t debug_mode_enabled = B_FALSE;
-
-int	install_error = 0;
-install_params	params;
-
-static boolean_t convert_to_sectors(auto_size_units_t,
-    uint64_t, uint64_t *);
-
-void auto_update_progress(om_callback_info_t *, uintptr_t);
-
-static void
-usage()
-{
-	(void) fprintf(stderr,
-	    "usage: auto-install -d <diskname> | -p <profile>\n"
-	    "\t-i - end installation before Target Instantiation\n"
-	    "\t-I - end installation after Target Instantiation\n"
-	    "\t-v - run the installer in verbose mode\n");
-}
-
-/*
- * enable_debug_mode()
- *
- * Description: Enable/disable debug mode
- *
- * Scope: private
- *
- * Parameters:
- *   enable: B_TRUE - enable debug mode
- *           B_FALSE - disable debug mode
- *
- * Returns: none
- */
-static void
-enable_debug_mode(boolean_t enable)
-{
-	debug_mode_enabled = enable;
-}
-
-/*
- * is_debug_mode_enabled()
- *
- * Description: Checks, if we run in debug mode
- *
- * Scope: private
- *
- * Parameters: none
- *
- * Returns:
- *   B_TRUE - debug mode enabled
- *   B_FALSE - debug mode disabled
- */
-static boolean_t
-is_debug_mode_enabled(void)
-{
-	return (debug_mode_enabled);
-}
-
-/*
- * auto_debug_print()
- * Description:	Posts debug message
- */
-void
-auto_debug_print(ls_dbglvl_t dbg_lvl, char *fmt, ...)
-{
-	va_list	ap;
-	char	buf[MAXPATHLEN + 1] = "";
-
-	va_start(ap, fmt);
-	(void) vsnprintf(buf, MAXPATHLEN+1, fmt, ap);
-	(void) ls_write_dbg_message("AI", dbg_lvl, buf);
-	va_end(ap);
-}
-
-/*
- * auto_log_print()
- * Description:	Posts log message
- */
-void
-auto_log_print(char *fmt, ...)
-{
-	va_list	ap;
-	char	buf[MAXPATHLEN + 1] = "";
-
-	va_start(ap, fmt);
-	(void) vsnprintf(buf, MAXPATHLEN+1, fmt, ap);
-	(void) ls_write_log_message("AI", buf);
-	va_end(ap);
-}
-
-/*
- * Callback that gets passed to om_perform_install.
- *
- * Sets the install_done variable when an install is
- * finished. If an install fails, it sets the install_failed
- * variable and also sets the install_error variable to
- * indicate the specific reason for the failure.
- */
-void
-auto_update_progress(om_callback_info_t *cb_data, uintptr_t app_data)
-{
-	if (cb_data->curr_milestone == -1) {
-		install_error = cb_data->percentage_done;
-		install_failed = B_TRUE;
-	}
-
-	if (cb_data->curr_milestone == OM_SOFTWARE_UPDATE &&
-	    cb_data->percentage_done == 100)
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Transfer completed\n");
-
-	if (cb_data->curr_milestone == OM_POSTINSTAL_TASKS &&
-	    cb_data->percentage_done == 100)
-		install_done = B_TRUE;
-}
-
-/*
- * auto_debug_dump_file()
- * Description: dumps a file using auto_debug_print()
- */
-void
-auto_debug_dump_file(ls_dbglvl_t level, char *filename)
-{
-	FILE *file_ptr;
-	char buffer[MAXPATHLEN];
-
-	/* Logfile does not exist.  Nothing to print. */
-	if (access(filename, F_OK) < 0) {
-		return;
-	}
-
-	if (access(filename, R_OK) < 0) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "ddu errlog %s does not have read permissions.\n");
-		return;
-	}
-
-	/* Use buffer to set up the command. */
-	(void) snprintf(buffer, MAXPATHLEN, "/usr/bin/cat %s", filename);
-	if ((file_ptr = popen(buffer, "r")) == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Error opening ddu errlog %s to dump errors: %s\n",
-		    filename, strerror(errno));
-		return;
-	}
-
-	/* Reuse buffer to get the file data. */
-	while (fgets(buffer, MAXPATHLEN, file_ptr) != NULL) {
-		auto_debug_print(level, "%s", buffer);
-	}
-	(void) pclose(file_ptr);
-}
-
-/*
- * Create a file that contains the list
- * of packages to be installed or removed.
- *
- * Parameters:
- *   hardcode - if set to B_TRUE, hardcode the list of packages. This is for
- *              testing purposes only, when AI engine is not provided with
- *              AI manifest.
- *
- *   pkg_list_type - specify list of packages to be obtained -
- *                   install or remove.
- *
- *   pkg_list_file - output file where the package list will be saved
- *
- * Returns:
- *	AUTO_INSTALL_SUCCESS for success
- *	AUTO_INSTALL_FAILURE for failure
- *	AUTO_INSTALL_EMPTY_LIST - 'remove' list is empty
- */
-static int
-create_package_list_file(boolean_t hardcode,
-    auto_package_list_type_t pkg_list_type, char *pkg_list_file)
-{
-	FILE *fp;
-	char **package_list;
-	int i, num_packages = 0;
-	int ret = AUTO_INSTALL_SUCCESS;
-
-	if ((fp = fopen(pkg_list_file, "wb")) == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Couldn't open file %s for storing list of packages\n",
-		    pkg_list_file);
-
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "File %s successfully opened - list of packages to be %s "
-	    "will be saved there\n", pkg_list_file,
-	    pkg_list_type == AI_PACKAGE_LIST_INSTALL ? "installed" : "removed");
-
-	/*
-	 * When invoked in test mode (without AI manifest), lists of packages
-	 * are hardcoded
-	 */
-
-	if (hardcode) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Hardcoded list of packages will be generated\n");
-
-		if (pkg_list_type == AI_PACKAGE_LIST_INSTALL) {
-			if (fputs(AI_TEST_PACKAGE_LIST_INSTALL, fp) == EOF)
-				ret = AUTO_INSTALL_FAILURE;
-		} else {
-			if (fputs(AI_TEST_PACKAGE_LIST_REMOVE, fp) == EOF)
-				ret = AUTO_INSTALL_FAILURE;
-		}
-
-		(void) fclose(fp);
-		return (ret);
-	}
-
-	/*
-	 * Obtain list of packages to be installed or removed from AI manifest.
-	 *
-	 * With respect to install list, there are two tags supported for
-	 * specifying list of packages in order to keep backward compatibility.
-	 * Try new tag first. If it is not specified, then try the old one.
-	 */
-	if (pkg_list_type == AI_PACKAGE_LIST_INSTALL) {
-		package_list = ai_get_manifest_packages(&num_packages,
-		    AIM_PACKAGE_INSTALL_NAME);
-
-		if (package_list == NULL) {
-			/* If no package list given, use default */
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "No install package list given, using default\n");
-
-			num_packages = 4;
-			package_list =
-			    malloc((num_packages + 1) * sizeof (char *));
-			if (package_list == NULL) {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "No memory.\n");
-				(void) fclose(fp);
-				return (AUTO_INSTALL_FAILURE);
-			}
-			package_list[0] = strdup("pkg:/SUNWcsd");
-			package_list[1] = strdup("pkg:/SUNWcs");
-			package_list[2] = strdup("pkg:/babel_install");
-			package_list[3] = strdup("pkg:/entire");
-			package_list[4] = NULL;
-		}
-
-		auto_log_print(gettext(
-		    "list of packages to be installed is:\n"));
-	} else {
-		package_list = ai_get_manifest_packages(&num_packages,
-		    AIM_PACKAGE_REMOVE_NAME);
-		if (package_list == NULL) {
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "List of packages to be removed is empty\n");
-
-			(void) fclose(fp);
-			return (AUTO_INSTALL_EMPTY_LIST);
-		}
-
-		auto_log_print(gettext(
-		    "list of packages to be removed is:\n"));
-	}
-
-	/*
-	 * Save list of packages to the file
-	 */
-	for (i = 0; i < num_packages; i++) {
-		auto_log_print("%s\n", package_list[i]);
-
-		if (fputs(package_list[i], fp) == EOF) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Write to %s file failed\n", pkg_list_file);
-
-			ret = AUTO_INSTALL_FAILURE;
-			break;
-		}
-
-		if (fputs("\n", fp) == EOF) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Write to %s file failed\n", pkg_list_file);
-
-			ret = AUTO_INSTALL_FAILURE;
-			break;
-		}
-	}
-
-	(void) fclose(fp);
-	return (ret);
-}
-
-/*
- * Create/delete/preserve vtoc slices as specified
- * in the manifest
- */
-static int
-auto_modify_target_slices(auto_slice_info *asi, uint8_t install_slice_id)
-{
-	for (; asi->slice_action[0] != '\0'; asi++) {
-		uint64_t slice_size_sec;
-
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "slice action %s, size=%lld units=%s\n",
-		    asi->slice_action, asi->slice_size,
-		    CONVERT_UNITS_TO_TEXT(asi->slice_size_units));
-
-		if (!convert_to_sectors(asi->slice_size_units,
-		    asi->slice_size, &slice_size_sec)) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "conversion failure from %lld %s to sectors\n",
-			    asi->slice_size,
-			    CONVERT_UNITS_TO_TEXT(asi->slice_size_units));
-			return (AUTO_INSTALL_FAILURE);
-		}
-		if (strcmp(asi->slice_action, "create") == 0) {
-			om_slice_tag_type_t slice_tag;
-
-			if (asi->slice_number == install_slice_id)
-				slice_tag = OM_ROOT;
-			else
-				slice_tag = OM_UNASSIGNED;
-			if (!om_create_slice(asi->slice_number, slice_size_sec,
-			    slice_tag, asi->on_existing))
-				return (AUTO_INSTALL_FAILURE);
-		} else if (strcmp(asi->slice_action, "delete") == 0) {
-			if (!om_delete_slice(asi->slice_number))
-				return (AUTO_INSTALL_FAILURE);
-		} else if (strcmp(asi->slice_action, "preserve") == 0) {
-			if (!om_preserve_slice(asi->slice_number))
-				return (AUTO_INSTALL_FAILURE);
-		}
-	}
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * convert value to sectors given basic unit size
- * TODO uint64_t overflow check
- */
-
-static boolean_t
-convert_to_sectors(auto_size_units_t units, uint64_t src,
-    uint64_t *psecs)
-{
-	if (psecs == NULL)
-		return (B_FALSE);
-	switch (units) {
-		case AI_SIZE_UNITS_SECTORS:
-			*psecs = src;
-			break;
-		case AI_SIZE_UNITS_MEGABYTES:
-			*psecs = src*2048;
-			break;
-		case AI_SIZE_UNITS_GIGABYTES:
-			*psecs = src*2048*1024; /* sec=>MB=>GB */
-			break;
-		case AI_SIZE_UNITS_TERABYTES:
-			*psecs = src*2048*1024*1024; /* sec=>MB=>GB=>TB */
-			break;
-		default:
-			return (B_FALSE);
-	}
-	if (units != AI_SIZE_UNITS_SECTORS)
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "converting from %lld %s to %lld sectors\n",
-		    src, CONVERT_UNITS_TO_TEXT(units), *psecs);
-	return (B_TRUE);
-}
-
-#ifndef	__sparc
-/*
- * Create/delete/preserve fdisk partitions as specifed
- * in the manifest
- * Note that the partition size is converted using the units specified
- *	for both create and delete actions
- */
-static int
-auto_modify_target_partitions(auto_partition_info *api)
-{
-	for (; api->partition_action[0] != '\0'; api++) {
-		uint64_t partition_size_sec;
-
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "partition action %s, size=%lld units=%s logical? %s\n",
-		    api->partition_action, api->partition_size,
-		    CONVERT_UNITS_TO_TEXT(api->partition_size_units),
-		    api->partition_is_logical ? "yes" : "no");
-
-		if (!convert_to_sectors(api->partition_size_units,
-		    api->partition_size, &partition_size_sec)) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "conversion failure from %lld %s to sectors\n",
-			    api->partition_size,
-			    CONVERT_UNITS_TO_TEXT(api->partition_size_units));
-			return (AUTO_INSTALL_FAILURE);
-		}
-		if (strcmp(api->partition_action, "create") == 0) {
-			if (!om_create_partition(api->partition_type,
-			    api->partition_start_sector,
-			    partition_size_sec, B_FALSE,
-			    api->partition_is_logical))
-				return (AUTO_INSTALL_FAILURE);
-		} else if (strcmp(api->partition_action, "delete") == 0) {
-			if (!om_delete_partition(api->partition_number,
-			    api->partition_start_sector, partition_size_sec))
-				return (AUTO_INSTALL_FAILURE);
-		}
-	}
-	return (AUTO_INSTALL_SUCCESS);
-}
-#endif
-
-/*
- * Initialize the image area with default publisher
- * Set the nv-list for configuring default publisher to be used
- * with the installation. This passes the publisher name and url along
- * mount point (/a) and action (initilaize pkg image area). The transfer module
- * will use these parameters and calls the appropriate pkg commands to
- * initialize the pkg imag area and setup the default publisher
- */
-static int
-configure_ips_init_nv_list(nvlist_t **attr, auto_repo_info_t *repo)
-{
-	if (nvlist_add_uint32(*attr, TM_ATTR_MECHANISM,
-	    TM_PERFORM_IPS) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		return (-1);
-	}
-	if (nvlist_add_uint32(*attr, TM_IPS_ACTION,
-	    TM_IPS_INIT) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr, TM_IPS_INIT_MNTPT,
-	    INSTALLED_ROOT_DIR) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr, TM_IPS_PKG_URL, repo->url) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_URL failed\n");
-		return (-1);
-	}
-
-	auto_log_print(gettext("installation will be performed "
-	    "from %s (%s)\n"), repo->url, repo->publisher);
-
-	if (nvlist_add_string(*attr, TM_IPS_PKG_AUTH, repo->publisher)
-	    != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_AUTH failed\n");
-		return (-1);
-	}
-
-	if (nvlist_add_string(*attr, TM_IPS_INIT_RETRY_TIMEOUT,
-	    TM_IPS_INIT_TIMEOUT_DEFAULT) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_RETRY_TIMEOUT failed\n");
-		return (-1);
-	}
-
-	/*
-	 * We need to ask IPS to force creating IPS image, since when
-	 * default path is chosen, IPS refuses to create the image.
-	 * The reason is that even if we created empty BE to be
-	 * populated by IPS, it contains ZFS shared and non-shared
-	 * datasets mounted on appropriate mount points. And
-	 * IPS complains in the case the target mount point contains
-	 * subdirectories.
-	 */
-
-	if (nvlist_add_boolean_value(*attr,
-	    TM_IPS_IMAGE_CREATE_FORCE, B_TRUE) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_IMAGE_CREATE_FORCE failed\n");
-		return (-1);
-	}
-	return (0);
-}
-
-/*
- * configure_ips_addl_publisher_nv_list
- * Set the nv-list for configuring additional publisher(s) to be used
- * with the installation. The nv_list contains the publisher name and url along
- * with mount point (/a) and action (set-publisher). The transfer module
- * will use these parameters and calls the appropriate pkg commands to
- * setup additional publisher.
- */
-static int
-configure_ips_addl_publisher_nv_list(
-    nvlist_t **attr, auto_repo_info_t *repo)
-{
-	if (nvlist_add_uint32(*attr, TM_ATTR_MECHANISM,
-	    TM_PERFORM_IPS) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		return (-1);
-	}
-	if (nvlist_add_uint32(*attr, TM_IPS_ACTION,
-	    TM_IPS_SET_AUTH) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr, TM_IPS_INIT_MNTPT,
-	    INSTALLED_ROOT_DIR) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr, TM_IPS_ALT_URL, repo->url) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_URL failed\n");
-		return (-1);
-	}
-
-	auto_log_print(gettext("Using addditional repository "
-	    "from %s (%s)\n"), repo->url, repo->publisher);
-
-	if (nvlist_add_string(*attr, TM_IPS_ALT_AUTH, repo->publisher)
-	    != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_AUTH failed\n");
-		return (-1);
-	}
-
-	return (0);
-}
-
-/*
- * configure_ips_mirror_nv_list
- * Set the nv-list for configuring a mirror to either the default repository
- * or any additional repository to be used with the installation. The nv_list
- * contains the publisher name and url along with mount point (/a) and action
- * (set-publisher). The transfer module will use these parameters and calls
- * appropriate pkg commands to setup the mirror.
- */
-static int
-configure_ips_mirror_nv_list(nvlist_t **attr, char *publisher, char *mirror_url)
-{
-	if (publisher == NULL || mirror_url == NULL) {
-		return (-1);
-	}
-	auto_log_print(gettext("using mirror at %s for publisher %s\n"),
-	    mirror_url, publisher);
-
-	if (nvlist_add_uint32(*attr,
-	    TM_ATTR_MECHANISM, TM_PERFORM_IPS) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr,
-	    TM_IPS_INIT_MNTPT, INSTALLED_ROOT_DIR) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		return (-1);
-	}
-	if (nvlist_add_uint32(*attr,
-	    TM_IPS_ACTION, TM_IPS_SET_AUTH) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr,
-	    TM_IPS_ALT_URL, mirror_url) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_ALT_URL failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr,
-	    TM_IPS_ALT_AUTH, publisher) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_ALT_AUTH failed\n");
-		return (-1);
-	}
-	if (nvlist_add_string(*attr,
-	    TM_IPS_MIRROR_FLAG, TM_IPS_SET_MIRROR) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_MIRROR_FLAG failed\n");
-		return (-1);
-	}
-	auto_log_print(gettext("Using the mirror %s for the publisher %s\n"),
-	    mirror_url, publisher);
-
-	return (0);
-}
-
-/*
- * Install the target based on the criteria specified in
- * ai.xml.
- *
- * NOTE: ai_validate_manifest() MUST have been called prior
- * to calling this function.
- *
- * RETURNS:
- *	AUTO_INSTALL_SUCCESS on success
- *	AUTO_INSTALL_FAILURE on failure
- */
-static int
-install_from_manifest()
-{
-	char *p = NULL;
-	auto_disk_info adi;
-	auto_swap_device_info adsi;
-	auto_dump_device_info addi;
-	int status;
-	int return_status = AUTO_INSTALL_FAILURE;
-	uint8_t install_slice_id;
-	int ita = 0;
-	int number = 0;
-	/*
-	 * pointers to heap - free later if not NULL
-	 */
-	auto_slice_info *asi = NULL;
-#ifndef	__sparc
-	auto_partition_info *api = NULL;
-#endif
-	char *diskname = NULL;
-	nvlist_t *install_attr = NULL, **transfer_attr = NULL;
-	char *proxy = NULL;
-	auto_repo_info_t	*default_ips_repo = NULL;
-	auto_repo_info_t	*addl_ips_repo = NULL;
-	auto_repo_info_t	*rptr;
-	auto_mirror_repo_t  *mptr;
-	int ret = AUTO_INSTALL_SUCCESS;
-	char iscsi_devnam[MAXNAMELEN] = "";
-
-	/*
-	 * Start out by getting the install target and
-	 * validating that target
-	 */
-	bzero(&adi, sizeof (auto_disk_info));
-	ret = ai_get_manifest_disk_info(&adi);
-	if (ret == AUTO_INSTALL_FAILURE) {
-		auto_log_print(gettext("disk info manifest error\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/*
-	 * Retrieve device swap information if specified
-	 */
-	bzero(&adsi, sizeof (auto_swap_device_info));
-	ret = ai_get_manifest_swap_device_info(&adsi);
-	if (ret == AUTO_INSTALL_FAILURE) {
-		auto_log_print(gettext("device swap manifest error\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/*
-	 * Retrieve device dump information if specified
-	 */
-	bzero(&addi, sizeof (auto_dump_device_info));
-	ret = ai_get_manifest_dump_device_info(&addi);
-	if (ret == AUTO_INSTALL_FAILURE) {
-		auto_log_print(gettext("device dump manifest error\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/*
-	 * grab target slice number
-	 */
-	install_slice_id = adi.install_slice_number;
-
-	/*
-	 * if iSCSI target requested, mount it through iSCSI initiator
-	 */
-	ret = mount_iscsi_target_if_requested(&adi,
-	    iscsi_devnam, sizeof (iscsi_devnam));
-	if (ret == -1) {
-		auto_log_print(gettext("iSCSI boot target device error\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-	/*
-	 * if iSCSI device was discovered and mounted,
-	 *	write iSCSI boot marker file for ICT reference
-	 */
-	if (iscsi_devnam[0] == '\0') { /* no iSCSI target mounted */
-		/*
-		 * make sure indicator file not there from previous run
-		 */
-		errno = 0;
-		if (unlink(ISCSI_BOOT_INDICATOR_FILE) != 0 &&
-		    errno != ENOENT) {
-			auto_log_print(gettext(
-			    "Could not delete " ISCSI_BOOT_INDICATOR_FILE
-			    " to indicate no iSCSI boot target\n"));
-			return (AUTO_INSTALL_FAILURE);
-		}
-	} else { /* iSCSI target mounted - indicate for ICT */
-		FILE *fd;
-		/*
-		 * take device name from iSCSI target as selected
-		 * install device
-		 */
-		(void) strncpy(adi.diskname, iscsi_devnam,
-		    sizeof (adi.diskname));
-		/*
-		 * create marker to signal ICT to enable nwam
-		 * in service repository
-		 */
-		fd = fopen(ISCSI_BOOT_INDICATOR_FILE, "w");
-		if (fd == NULL) {
-			auto_log_print(gettext(
-			    "Could not create " ISCSI_BOOT_INDICATOR_FILE
-			    " to indicate iSCSI boot target\n"));
-			return (AUTO_INSTALL_FAILURE);
-		}
-		/*
-		 * write device name - used for debugging only
-		 */
-		(void) fputs(iscsi_devnam, fd);
-		(void) fclose(fd);
-	}
-
-	/*
-	 * Initiate target discovery and wait until it is finished
-	 */
-
-	if (auto_target_discovery() != AUTO_TD_SUCCESS) {
-		auto_log_print(gettext("Automated installation failed in "
-		    "Target Discovery module\n"));
-
-		auto_log_print(gettext("Please see previous messages for more "
-		    "details\n"));
-
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/*
-	 * given manifest input and discovery information,
-	 *	select a target disk for the installation
-	 */
-	if (auto_select_install_target(&diskname, &adi) != AUTO_TD_SUCCESS) {
-		auto_log_print(gettext("ai target device not found\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	auto_log_print(gettext("Disk name selected for installation is %s\n"),
-	    diskname);
-#ifndef	__sparc
-	/*
-	 * Configure the partitions as specified in the
-	 * manifest
-	 */
-	api = ai_get_manifest_partition_info(&status);
-	if (status != 0) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "failed to process manifest due to illegal value\n");
-		goto error_ret;
-	}
-	if (api == NULL)
-		auto_log_print(gettext("no manifest partition "
-		    "information found\n"));
-	else {
-		if (auto_modify_target_partitions(api) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_log_print(gettext("failed to modify partition(s) "
-			    "specified in the manifest\n"));
-			goto error_ret;
-		}
-
-		/* we're done with futzing with partitions, free the memory */
-		free(api);
-		api = NULL; /* don't release later */
-	}
-
-	/*
-	 * if no partition exists and no partitions were specified in manifest,
-	 *	there is no info about partitions for TI,
-	 *	so create info table from scratch
-	 */
-	om_create_target_partition_info_if_absent();
-
-	/* finalize modified partition table for TI to apply to target disk */
-	if (!om_finalize_fdisk_info_for_TI()) {
-		auto_log_print(gettext("failed to finalize fdisk info\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-#endif
-	/*
-	 * Configure the vtoc slices as specified in the
-	 * manifest
-	 */
-	asi = ai_get_manifest_slice_info(&status);
-	if (status != 0) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "failed to process manifest due to illegal value\n");
-		goto error_ret;
-	}
-	if (asi == NULL)
-		auto_log_print(gettext(
-		    "no manifest slice information found\n"));
-	else {
-		if (auto_modify_target_slices(asi, install_slice_id) !=
-		    AUTO_INSTALL_SUCCESS) {
-			auto_log_print(gettext(
-			    "failed to modify slice(s) specified "
-			    "in the manifest\n"));
-			goto error_ret;
-		}
-
-		/* we're done with futzing with slices, free the memory */
-		free(asi);
-		asi = NULL;	/* already freed */
-	}
-
-	/* finalize modified vtoc for TI to apply to target disk partition */
-	if (!om_finalize_vtoc_for_TI(install_slice_id)) {
-		auto_log_print(gettext("failed to finalize vtoc info\n"));
-		goto error_ret;
-	}
-
-	if (nvlist_alloc(&install_attr, NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		goto error_ret;
-	}
-
-	if (nvlist_add_uint8(install_attr, OM_ATTR_INSTALL_TYPE,
-	    OM_INITIAL_INSTALL) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_INSTALL_TYPE failed\n");
-		goto error_ret;
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_DISK_NAME,
-	    diskname) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_DISK_NAME failed\n");
-		goto error_ret;
-	}
-	free(diskname);
-	diskname = NULL;	/* already freed */
-
-	if (nvlist_add_string(install_attr, OM_ATTR_DEFAULT_LOCALE,
-	    "C") != 0) {
-		auto_log_print(gettext("Setting of OM_ATTR_DEFAULT_LOCALE"
-		    " failed\n"));
-		goto error_ret;
-	}
-
-	/*
-	 * If proxy is specified, set the http_proxy environemnet variable for
-	 * IPS to use
-	 */
-	p = ai_get_manifest_http_proxy();
-	if (p != NULL) {
-		int proxy_len;
-
-		proxy_len = strlen("http_proxy=") + strlen(p) + 1;
-		proxy = malloc(proxy_len);
-		if (proxy == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR, "No memory.\n");
-			goto error_ret;
-		}
-		(void) snprintf(proxy, proxy_len, "%s%s", "http_proxy=", p);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting http_proxy environment variable to %s\n", p);
-		if (putenv(proxy)) {
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Setting of http_proxy environment variable failed:"
-			    " %s\n", strerror(errno));
-			goto error_ret;
-		}
-	}
-	/*
-	 * Get the IPS default publisher, mirrors for the default publisher,
-	 * additional publishers and mirrors for each additinal publishers.
-	 * Based on the data, the space for nv list allocated to perform
-	 * Transfer initialization
-	 */
-	default_ips_repo = ai_get_default_repo_info();
-	if (default_ips_repo == NULL) {
-		auto_log_print(gettext("IPS default publisher is not "
-		    "specified\n"));
-		goto error_ret;
-	}
-
-	number = 1; /* For the default publisher */
-	/*
-	 * Count the mirrors
-	 */
-	for (mptr = default_ips_repo ->mirror_repo; mptr != NULL;
-	    mptr = mptr->next_mirror) {
-		number++;
-	}
-	addl_ips_repo = ai_get_additional_repo_info();
-	if (addl_ips_repo == NULL) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "No additional IPS publishers specified\n");
-	}
-
-	/*
-	 * Count the number of additional repos and its mirrors
-	 */
-	for (rptr =  addl_ips_repo; rptr != NULL; rptr = rptr->next_repo) {
-		number++;
-		for (mptr = rptr->mirror_repo; mptr != NULL;
-		    mptr = mptr->next_mirror) {
-			number++;
-		}
-	}
-
-	/*
-	 * Allocate enough pointer space for any possible TM initialization
-	 * 	number of publishers and their mirrors
-	 *	+ Packages to be installed
-	 *	+ Packages to be removed
-	 */
-	transfer_attr = calloc(number+2, sizeof (nvlist_t *));
-	if (transfer_attr == NULL) {
-		goto error_ret;
-	}
-
-	ita = 0;
-	if (nvlist_alloc(&transfer_attr[ita], NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		goto error_ret;
-	}
-	/*
-	 * Initialize the image pkg area and setup default publisher
-	 */
-	status = configure_ips_init_nv_list(
-	    &transfer_attr[ita], default_ips_repo);
-	if (status != SUCCESS) {
-		goto error_ret;
-	}
-
-	/*
-	 * Setup the mirrors for the default publisher one at a time
-	 */
-	for (mptr = default_ips_repo->mirror_repo;
-	    mptr != NULL; mptr = mptr->next_mirror) {
-		char    *publisher;
-		char    *mirror_url;
-
-		ita++;
-		publisher = default_ips_repo->publisher;
-		mirror_url = mptr->mirror_url;
-		if (nvlist_alloc(&transfer_attr[ita], NV_UNIQUE_NAME, 0) != 0) {
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "nvlist allocation failed\n");
-			return (-1);
-		}
-		status = configure_ips_mirror_nv_list(&transfer_attr[ita],
-		    publisher, mirror_url);
-		if (status != SUCCESS) {
-			goto error_ret;
-		}
-	}
-
-	/*
-	 * Configure the additional publisher(s)
-	 */
-	for (rptr = addl_ips_repo; rptr != NULL; rptr = rptr->next_repo) {
-		ita++;
-		if (nvlist_alloc(&transfer_attr[ita],
-		    NV_UNIQUE_NAME, 0) != 0) {
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "nvlist allocation failed\n");
-			goto error_ret;
-		}
-		status = configure_ips_addl_publisher_nv_list
-		    (&transfer_attr[ita], rptr);
-		if (status != SUCCESS) {
-			goto error_ret;
-		}
-
-		/*
-		 * Setup mirrors (if any) for each additional publisher
-		 */
-		for (mptr = rptr->mirror_repo;
-		    mptr != NULL; mptr = mptr->next_mirror) {
-			char    *publisher;
-			char    *mirror_url;
-
-			ita++;
-			publisher = rptr->publisher;
-			mirror_url = mptr->mirror_url;
-			if (nvlist_alloc(&transfer_attr[ita],
-			    NV_UNIQUE_NAME, 0) != 0) {
-				auto_debug_print(AUTO_DBGLVL_INFO,
-				    "nvlist allocation failed\n");
-				return (-1);
-			}
-			status = configure_ips_mirror_nv_list(
-			    &transfer_attr[ita], publisher, mirror_url);
-			if (status != SUCCESS) {
-				goto error_ret;
-			}
-		}
-	}
-
-	/*
-	 * Get the list of packages and add it to the nv_list
-	 */
-	ita++;
-	if (nvlist_alloc(&transfer_attr[ita], NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		goto error_ret;
-	}
-	if (nvlist_add_uint32(transfer_attr[ita], TM_ATTR_MECHANISM,
-	    TM_PERFORM_IPS) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		goto error_ret;
-	}
-	if (nvlist_add_uint32(transfer_attr[ita], TM_IPS_ACTION,
-	    TM_IPS_RETRIEVE) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		goto error_ret;
-	}
-	if (nvlist_add_string(transfer_attr[ita], TM_IPS_INIT_MNTPT,
-	    INSTALLED_ROOT_DIR) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		goto error_ret;
-	}
-
-	/*
-	 * list out the list of packages to be installed
-	 * from the manifest and add it into a file
-	 */
-	if (create_package_list_file(B_FALSE, AI_PACKAGE_LIST_INSTALL,
-	    AUTO_INSTALL_PKG_LIST_FILE) != AUTO_INSTALL_SUCCESS) {
-		auto_log_print(gettext("Failed to create a file with list "
-		    "of packages to be installed\n"));
-		goto error_ret;
-	}
-	if (nvlist_add_string(transfer_attr[ita], TM_IPS_PKGS,
-	    AUTO_INSTALL_PKG_LIST_FILE) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKGS failed\n");
-		goto error_ret;
-	}
-
-	/*
-	 * if debug mode enabled, run 'pkg install' in verbose mode
-	 */
-	if (is_debug_mode_enabled()) {
-		if (nvlist_add_boolean_value(transfer_attr[ita],
-		    TM_IPS_VERBOSE_MODE, B_TRUE) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Setting of TM_IPS_VERBOSE_MODE failed\n");
-			goto error_ret;
-		}
-	}
-
-	/*
-	 * Since this operation is optional (list of packages
-	 * to be removed might be empty), before we start to
-	 * populate nv list with attributes, determine if there
-	 * is anything to do.
-	 */
-	ret = create_package_list_file(B_FALSE, AI_PACKAGE_LIST_REMOVE,
-	    AUTO_REMOVE_PKG_LIST_FILE);
-
-	if (ret == AUTO_INSTALL_FAILURE) {
-		auto_log_print(gettext("Failed to create a file with list "
-		    "of packages to be removed\n"));
-		goto error_ret;
-	} else if (ret == AUTO_INSTALL_EMPTY_LIST) {
-		auto_log_print(gettext("No packages specified to be removed "
-		    "from installed system\n"));
-	} else {
-		/*
-		 * allocate nv list
-		 */
-		ita++;
-
-		if (nvlist_alloc(&transfer_attr[ita], NV_UNIQUE_NAME, 0) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "nvlist allocation failed\n");
-			goto error_ret;
-		}
-
-		/* select IPS transfer mechanism */
-		if (nvlist_add_uint32(transfer_attr[ita], TM_ATTR_MECHANISM,
-		    TM_PERFORM_IPS) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Setting of TM_ATTR_MECHANISM failed\n");
-			goto error_ret;
-		}
-
-		/* specify 'uninstall' action */
-		if (nvlist_add_uint32(transfer_attr[ita], TM_IPS_ACTION,
-		    TM_IPS_UNINSTALL) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Setting of TMP_IPS_ACTION failed\n");
-			goto error_ret;
-		}
-
-		/*  set target mountpoint */
-		if (nvlist_add_string(transfer_attr[ita], TM_IPS_INIT_MNTPT,
-		    INSTALLED_ROOT_DIR) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Setting of TM_IPS_INIT_MNTPT failed\n");
-			goto error_ret;
-		}
-
-		/*  provide list of packages to be removed */
-		if (nvlist_add_string(transfer_attr[ita], TM_IPS_PKGS,
-		    AUTO_REMOVE_PKG_LIST_FILE) != 0) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Setting of TM_IPS_PKGS failed\n");
-			goto error_ret;
-		}
-
-		/*
-		 * if debug mode enabled, run 'pkg uninstall' in verbose mode
-		 */
-		if (is_debug_mode_enabled()) {
-			if (nvlist_add_boolean_value(transfer_attr[ita],
-			    TM_IPS_VERBOSE_MODE, B_TRUE) != 0) {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Setting of TM_IPS_VERBOSE_MODE failed\n");
-				goto error_ret;
-			}
-		}
-	}
-
-	if (nvlist_add_nvlist_array(install_attr, OM_ATTR_TRANSFER,
-	    transfer_attr, ita + 1) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_TRANSFER failed\n");
-		goto error_ret;
-	}
-
-	/* Add requested swap size */
-	if (adsi.swap_size >= 0) {
-		if (nvlist_add_int32(install_attr, OM_ATTR_SWAP_SIZE,
-		    adsi.swap_size) != 0) {
-			nvlist_free(install_attr);
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Setting of OM_ATTR_SWAP_SIZE failed\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	/* Add requested dump device size */
-	if (addi.dump_size >= 0) {
-		if (nvlist_add_int32(install_attr, OM_ATTR_DUMP_SIZE,
-		    addi.dump_size) != 0) {
-			nvlist_free(install_attr);
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Setting of OM_ATTR_DUMP_SIZE failed\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	status = om_perform_install(install_attr, auto_update_progress);
-	if (status == OM_FAILURE) { /* synchronous failure before threading */
-		install_error = om_errno;
-		install_failed = B_TRUE;
-	}
-	/* wait for thread to report final status */
-	while (!install_done && !install_failed)
-		sleep(10);
-
-	/*
-	 * If the installation failed, report where or/and why the failure
-	 * happened
-	 */
-
-	if (install_failed) {
-		/*
-		 * Check if valid failure code was returned - if not, log only
-		 * error code itself instead of descriptive strings
-		 */
-
-		if (!om_is_valid_failure_code(install_error)) {
-			auto_log_print(gettext("Automated Installation failed"
-			    " with unknown error code %d\n"), install_error);
-		} else {
-			char	*err_str;
-
-			/* Where the failure happened */
-			if ((err_str =
-			    om_get_failure_source(install_error)) != NULL)
-				auto_log_print(gettext("Automated Installation"
-				    " failed in %s module\n"), err_str);
-
-			/* Why the failure happened */
-			if ((err_str =
-			    om_get_failure_reason(install_error)) != NULL)
-				auto_log_print(gettext("%s\n"), err_str);
-		}
-	} else {
-		return_status = AUTO_INSTALL_SUCCESS;
-	}
-
-error_ret:	/* free all memory - may have jumped here upon error */
-	if (proxy != NULL)
-		free(proxy);
-#ifndef	__sparc
-	if (api != NULL)
-		free(api);
-#endif
-	if (asi != NULL)
-		free(asi);
-	if (diskname != NULL)
-		free(diskname);
-	free_repo_info_list(default_ips_repo);
-	free_repo_info_list(addl_ips_repo);
-	if (install_attr != NULL)
-		nvlist_free(install_attr);
-	if (transfer_attr != NULL) {
-		int i;
-
-		for (i = 0; i <= ita; i++)
-			if (transfer_attr[i] != NULL)
-				nvlist_free(transfer_attr[i]);
-		free(transfer_attr);
-	}
-	return (return_status);
-}
-
-/*
- * Install the target based on the specified diskname
- * or if no diskname is specified, install it based on
- * the criteria specified in ai.xml.
- *
- * Returns
- *	AUTO_INSTALL_SUCCESS on a successful install
- *	AUTO_INSTALL_FAILURE on a failed install
- */
-static int
-auto_perform_install(char *diskname)
-{
-	nvlist_t	*install_attr, *transfer_attr[2];
-	int 		status;
-
-	/*
-	 * No disk specified on command line
-	 *  - perform installation based on manifest information instead
-	 */
-
-	if (*diskname == '\0')
-		return (install_from_manifest());
-
-	/*
-	 * Install to disk specified on command line
-	 *
-	 * Initiate target discovery and wait until it is finished
-	 */
-
-	if (auto_target_discovery() != AUTO_TD_SUCCESS) {
-		auto_log_print(gettext("Automated installation failed in "
-		    "Target Discovery module\n"));
-
-		auto_log_print(gettext("Please see previous messages for more "
-		    "details\n"));
-
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	/*
-	 * We're installing on the specified diskname
-	 * Since this is usually called from a test
-	 * program, we hardcode the various system
-	 * configuration parameters
-	 */
-
-	if (auto_select_install_target(&diskname, NULL) != 0) {
-		auto_log_print(gettext("Error: Target disk name %s is "
-		    "not valid\n"), diskname);
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_alloc(&install_attr, NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_uint8(install_attr, OM_ATTR_INSTALL_TYPE,
-	    OM_INITIAL_INSTALL) != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_INSTALL_TYPE failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_DISK_NAME,
-	    diskname) != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_DISK_NAME failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_ROOT_PASSWORD,
-	    om_encrypt_passwd(DEFAULT_ROOT_PASSWORD, "root")) != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_ROOT_PASSWORD failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_USER_NAME,
-	    "fool") != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_USER_NAME failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_USER_PASSWORD,
-	    om_encrypt_passwd("ass", "fool")) != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_USER_PASSWORD failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_LOGIN_NAME,
-	    "fool") != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_LOGIN_NAME failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_HOST_NAME,
-	    DEFAULT_HOSTNAME) != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_HOST_NAME failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(install_attr, OM_ATTR_DEFAULT_LOCALE,
-	    "C") != 0) {
-		nvlist_free(install_attr);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_DEFAULT_LOCALE failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_alloc(&transfer_attr[0], NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_uint32(transfer_attr[0], TM_ATTR_MECHANISM,
-	    TM_PERFORM_IPS) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_uint32(transfer_attr[0], TM_IPS_ACTION,
-	    TM_IPS_INIT) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(transfer_attr[0], TM_IPS_INIT_MNTPT,
-	    INSTALLED_ROOT_DIR) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(transfer_attr[0], TM_IPS_PKG_URL,
-	    "http://ipkg.sfbay:10004") != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_URL failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(transfer_attr[0], TM_IPS_PKG_AUTH,
-	    "ipkg.sfbay") != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_AUTH failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_alloc(&transfer_attr[1], NV_UNIQUE_NAME, 0) != 0) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "nvlist allocation failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_uint32(transfer_attr[1], TM_ATTR_MECHANISM,
-	    TM_PERFORM_IPS) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		nvlist_free(transfer_attr[1]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_ATTR_MECHANISM failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_uint32(transfer_attr[1], TM_IPS_ACTION,
-	    TM_IPS_RETRIEVE) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		nvlist_free(transfer_attr[1]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TMP_IPS_ACTION failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(transfer_attr[1], TM_IPS_INIT_MNTPT,
-	    INSTALLED_ROOT_DIR) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		nvlist_free(transfer_attr[1]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_INIT_MNTPT failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (create_package_list_file(B_TRUE, AI_PACKAGE_LIST_INSTALL,
-	    AUTO_INSTALL_PKG_LIST_FILE) != AUTO_INSTALL_SUCCESS) {
-		auto_log_print(gettext("Failed to create a file with list "
-		    "of packages to be installed\n"));
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_string(transfer_attr[1], TM_IPS_PKGS,
-	    AUTO_INSTALL_PKG_LIST_FILE) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		nvlist_free(transfer_attr[1]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of TM_IPS_PKG_URL failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-
-	if (nvlist_add_nvlist_array(install_attr, OM_ATTR_TRANSFER,
-	    transfer_attr, 2) != 0) {
-		nvlist_free(install_attr);
-		nvlist_free(transfer_attr[0]);
-		nvlist_free(transfer_attr[1]);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Setting of OM_ATTR_TRANSFER failed\n");
-		return (AUTO_INSTALL_FAILURE);
-	}
-	status = om_perform_install(install_attr, auto_update_progress);
-
-	while (!install_done && !install_failed)
-		sleep(10);
-
-	nvlist_free(install_attr);
-	nvlist_free(transfer_attr[0]);
-	nvlist_free(transfer_attr[1]);
-
-	if (install_failed || status != OM_SUCCESS)
-		return (AUTO_INSTALL_FAILURE);
-	else
-		return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * Function:	auto_get_disk_name_from_slice
- * Description: Convert a conventional disk name into the internal canonical
- *		form. Remove the trailing index reference. The return status
- *		reflects whether or not the 'src' name is valid.
- *
- *				src			 dst
- *			---------------------------------------
- *			[/dev/rdsk/]c0t0d0s0	->	c0t0d0
- *			[/dev/rdsk/]c0t0d0p0	->	c0t0d0
- *			[/dev/rdsk/]c0d0s0	->	c0d0
- *			[/dev/rdsk/]c0d0p0	->	c0d0
- *
- * Scope:	public
- * Parameters:	dst	- used to retrieve cannonical form of drive name
- *			  ("" if not valid)
- *		src	- name of drive to be processed (see table above)
- * Return:	 0	- valid disk name
- *		-1	- invalid disk name
- */
-static void
-auto_get_disk_name_from_slice(char *dst, char *src)
-{
-	char		name[MAXPATHLEN];
-	char		*cp;
-
-	*dst = '\0';
-
-	(void) strcpy(name, src);
-	/*
-	 * The slice could be like s2 or s10
-	 */
-	cp = name + strlen(name) - 3;
-	if (*cp) {
-		if (*cp == 'p' || *cp == 's') {
-			*cp = '\0';
-		} else {
-			cp++;
-			if (*cp == 'p' || *cp == 's') {
-				*cp = '\0';
-			}
-		}
-	}
-
-	/* It could be full pathname like /dev/dsk/disk_name */
-	if ((cp = strrchr(name, '/')) != NULL) {
-		cp++;
-		(void) strcpy(dst, cp);
-	} else {
-		/* Just the disk name is provided, so return the name */
-		(void) strcpy(dst, name);
-	}
-}
-
-int
-main(int argc, char **argv)
-{
-	int		opt;
-	extern char 	*optarg;
-	char		manifestf[MAXNAMELEN];
-	char		diskname[MAXNAMELEN];
-	char		slicename[MAXNAMELEN];
-	int		num_du_pkgs_installed;
-	boolean_t	auto_reboot_enabled = B_FALSE;
-	nvlist_t	*ls_init_attr = NULL;
-	boolean_t	auto_install_failed = B_FALSE;
-
-	(void) setlocale(LC_ALL, "");
-	(void) textdomain(TEXT_DOMAIN);
-
-	manifestf[0] = '\0';
-	slicename[0] = '\0';
-	while ((opt = getopt(argc, argv, "vd:Iip:")) != -1) {
-		switch (opt) {
-		case 'd': /* target disk name for testing only */
-			(void) strlcpy(slicename, optarg, sizeof (slicename));
-			break;
-		case 'I': /* break after Target Instantiation for testing */
-			om_set_breakpoint(OM_breakpoint_after_TI);
-			break;
-		case 'i': /* break before Target Instantiation for testing */
-			om_set_breakpoint(OM_breakpoint_before_TI);
-			break;
-		case 'p': /* manifest is provided */
-			(void) strlcpy(manifestf, optarg, sizeof (manifestf));
-			break;
-		case 'v': /* debug verbose mode enabled */
-			enable_debug_mode(B_TRUE);
-			break;
-		default:
-			usage();
-			exit(AI_EXIT_FAILURE);
-		}
-	}
-
-	if (manifestf[0] == '\0' && slicename[0] == '\0') {
-		usage();
-		exit(AI_EXIT_FAILURE);
-	}
-
-	/*
-	 * initialize logging service - increase verbosity level
-	 * if installer was invoked in debug mode
-	 * print error messages to stderr, since we don't have
-	 * logging service available at this point
-	 */
-	if (is_debug_mode_enabled()) {
-		if (nvlist_alloc(&ls_init_attr, NV_UNIQUE_NAME, 0) != 0) {
-			(void) fprintf(stderr,
-			    "nvlist allocation failed for ls_init_attrs\n");
-
-			exit(AI_EXIT_FAILURE);
-		}
-
-		if (nvlist_add_int16(ls_init_attr, LS_ATTR_DBG_LVL,
-		    LS_DBGLVL_INFO) != 0) {
-			(void) fprintf(stderr,
-			    "Setting LS_ATTR_DBG_LVL failed\n");
-
-			nvlist_free(ls_init_attr);
-			exit(AI_EXIT_FAILURE);
-		}
-	}
-
-	if (ls_init(ls_init_attr) != LS_E_SUCCESS) {
-		(void) fprintf(stderr,
-		    "Couldn't initialize logging service\n");
-
-		nvlist_free(ls_init_attr);
-		exit(AI_EXIT_FAILURE);
-	}
-
-	/* release nvlist, since it is no longer needed */
-	nvlist_free(ls_init_attr);
-
-	if (manifestf[0] != '\0') {
-		char	*ai_auto_reboot;
-
-		/*
-		 * Validate the AI manifest. If it validates, set
-		 * it up in an in-memory tree so searches can be
-		 * done on it in the future to retrieve the values
-		 */
-		if (ai_create_manifest_image(manifestf) ==
-		    AUTO_VALID_MANIFEST) {
-			auto_log_print(gettext("%s manifest created\n"),
-			    manifestf);
-		} else {
-			auto_log_print(gettext("Auto install failed. Error "
-			    "creating manifest %s\n"), manifestf);
-			exit(AI_EXIT_FAILURE_AIM);
-		}
-
-		if (ai_setup_manifest_image() == AUTO_VALID_MANIFEST) {
-			auto_log_print(gettext(
-			    "%s manifest setup and validated\n"), manifestf);
-		} else {
-			char *setup_err = gettext("Auto install failed. Error "
-			    "setting up and validating manifest %s\n");
-			auto_log_print(setup_err, manifestf);
-			(void) fprintf(stderr, setup_err, manifestf);
-			exit(AI_EXIT_FAILURE_AIM);
-		}
-
-		/*
-		 * Install any drivers required for installation, in the
-		 * booted environment.
-		 *
-		 * Don't fail the whole installation if ai_du_get_and_install()
-		 * fails here.  This operation affects only the booted
-		 * environment.  It is possible that a package missing here will
-		 * already be included in the target install, so let the
-		 * installation proceed.  If something critical is still
-		 * missing, the target install will fail anyway.
-		 *
-		 * First boolean: do not honor noinstall flag.
-		 * Second boolean: do not update the boot archive.
-		 */
-		if (ai_du_get_and_install("/", B_FALSE, B_FALSE,
-		    &num_du_pkgs_installed) != AUTO_INSTALL_SUCCESS) {
-			/* Handle failure or "package not found" statuses. */
-			char *du_warning = gettext("Warning: some additional "
-			    "driver packages could not be installed\n"
-			    "  to booted installation environment.\n"
-			    "  These drivers may or may not be required for "
-			    "the installation to proceed.\n"
-			    "  Will continue anyway...\n");
-			auto_log_print(du_warning);
-			(void) fprintf(stderr, du_warning);
-
-		} else if (num_du_pkgs_installed > 0) {
-			/*
-			 * Note: Print no messages if num_du_pkgs_installed = 0
-			 * This means no packages and no errors, or no-op.
-			 */
-			auto_log_print(gettext("Add Drivers: All required "
-			    "additional driver packages successfully installed "
-			    "to booted installation environment.\n"));
-		}
-
-		diskname[0] = '\0';
-
-		/*
-		 * Since valid manifest was provided, check if automated reboot
-		 * feature is enabled.
-		 */
-
-		ai_auto_reboot = ai_get_manifest_element_value(AIM_AUTO_REBOOT);
-
-		if (ai_auto_reboot != NULL) {
-			if (strcasecmp(ai_auto_reboot, "true") == 0) {
-				auto_log_print(
-				    gettext("Auto reboot enabled\n"));
-
-				auto_reboot_enabled = B_TRUE;
-			} else {
-				auto_log_print(
-				    gettext("Auto reboot disabled\n"));
-			}
-		}
-	}
-
-	if (slicename[0] != '\0') {
-		auto_get_disk_name_from_slice(diskname, slicename);
-	}
-
-	if (auto_perform_install(diskname) != AUTO_INSTALL_SUCCESS) {
-		(void) fprintf(stderr, "Automated Installation failed\n");
-
-		auto_install_failed = B_TRUE;
-	} else {
-		/*
-		 * Install additional drivers on target.
-		 * First boolean: honor noinstall flag.
-		 * Second boolean: update boot archive.
-		 */
-		if (ai_du_install(INSTALLED_ROOT_DIR, B_TRUE, B_TRUE,
-		    &num_du_pkgs_installed) == AUTO_INSTALL_FAILURE) {
-			char *tgt_inst_err = gettext("Basic installation was "
-			    "successful.  However, there was an error\n");
-			auto_log_print(tgt_inst_err, manifestf);
-			(void) fprintf(stderr, tgt_inst_err);
-			tgt_inst_err = gettext("installing at least one "
-			    "additional driver package on target.\n");
-			auto_log_print(tgt_inst_err, manifestf);
-			(void) fprintf(stderr, tgt_inst_err);
-			tgt_inst_err = gettext("Please verify that all driver "
-			    "packages required for reboot are installed "
-			    "before rebooting.\n");
-			auto_log_print(tgt_inst_err, manifestf);
-			(void) fprintf(stderr, tgt_inst_err);
-			auto_install_failed = B_TRUE;
-		}
-	}
-
-	if (! auto_install_failed) {
-
-		if (auto_reboot_enabled) {
-			printf(gettext("Automated Installation succeeded."
-			    " System will be rebooted now\n"));
-
-			auto_log_print(gettext("Automated Installation"
-			    " succeeded. System will be rebooted now\n"));
-		} else {
-			printf(gettext("Automated Installation succeeded. You"
-			    " may wish to reboot the system at this time\n"));
-
-			auto_log_print(gettext("Automated Installation"
-			    " succeeded. You may wish to reboot the system"
-			    " at this time\n"));
-		}
-	}
-
-	(void) ai_teardown_manifest_state();
-
-	/*
-	 * Transfer /tmp/install_log file now that it is complete.
-	 * Subsequent messages are not captured in copy of log file
-	 * tranfered to destination.
-	 */
-
-	if (access(INSTALLED_ROOT_DIR, F_OK) == 0) {
-		if (ls_transfer("/", INSTALLED_ROOT_DIR) != LS_E_SUCCESS) {
-			auto_log_print(gettext(
-			    "Could not transfer log file to the target\n"));
-		}
-	}
-
-	/*
-	 * If the installation failed, abort now and let the user inspect
-	 * the system
-	 */
-
-	if (auto_install_failed)
-		exit(AI_EXIT_FAILURE);
-
-	/*
-	 * Unmount installed boot environment
-	 */
-	if (om_unmount_target_be() != OM_SUCCESS) {
-		auto_log_print(gettext(
-		    "Could not unmount target boot environment.\n"));
-
-		auto_install_failed = B_TRUE;
-	}
-
-	/*
-	 * Exit with return codes reflecting the result of the installation:
-	 *  AI_EXIT_SUCCESS - installation succeeded, don't reboot automatically
-	 *  AI_EXIT_AUTO_REBOOT - installation succeeded, reboot automatically
-	 */
-
-	if (auto_install_failed)
-		exit(AI_EXIT_FAILURE);
-
-	if (auto_reboot_enabled)
-		exit(AI_EXIT_AUTO_REBOOT);
-
-	exit(AI_EXIT_SUCCESS);
-}
--- a/usr/src/cmd/auto-install/auto_install.h	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,368 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
- */
-
-#ifndef _AUTO_INSTALL_H
-#define	_AUTO_INSTALL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <Python.h>
-#include <sys/param.h>
-#include "td_lib.h"
-#include "td_api.h"
-#include "orchestrator_api.h"
-#include "ti_api.h"
-#include "transfermod.h"
-#include "ls_api.h"
-
-/* AI engine exit codes */
-#define	AI_EXIT_SUCCESS		0	/* success - control passed to user */
-#define	AI_EXIT_AUTO_REBOOT	64	/* success - auto reboot enabled */
-#define	AI_EXIT_FAILURE		1	/* general failure */
-#define	AI_EXIT_FAILURE_AIM	2	/* failure-invalid manifest provided */
-
-#define	AUTO_INSTALL_SUCCESS		0
-#define	AUTO_INSTALL_EMPTY_LIST		1	/* list of packages is empty */
-#define	AUTO_INSTALL_PKG_NOT_FND	2
-#define	AUTO_INSTALL_FAILURE		-1
-#define	AUTO_TD_SUCCESS			0
-#define	AUTO_TD_FAILURE			-1
-
-#define	INSTALLED_ROOT_DIR	"/a"
-#define	AUTO_UNKNOWN_STRING	"unknown"
-#define	AUTO_DBGLVL_INFO	LS_DBGLVL_INFO
-#define	AUTO_DBGLVL_WARN	LS_DBGLVL_WARN
-#define	AUTO_DBGLVL_ERR		LS_DBGLVL_ERR
-
-#define	AUTO_VALID_MANIFEST	0
-#define	AUTO_INVALID_MANIFEST	-1
-#define	AUTO_PROPERTY_ROOTPASS		"rootpass"
-#define	AUTO_PROPERTY_TIMEZONE		"timezone"
-#define	AUTO_PROPERTY_HOSTNAME		"hostname"
-#define	KEYWORD_VALUE			"value"
-#define	KEYWORD_SIZE		256
-#define	VALUE_SIZE		256
-#define	AUTO_MAX_ACTION_LEN	32	/* delete, create, preserve... */
-#define	MAX_SHELLCMD_LEN	2048
-
-/*
- * File that lists which packages need to be installed
- */
-#define	AUTO_INSTALL_PKG_LIST_FILE	"/tmp/install.pkg.list"
-/*
- * File that lists which packages will be removed
- * from installed system
- */
-#define	AUTO_REMOVE_PKG_LIST_FILE	"/tmp/remove.pkg.list"
-
-#define	AI_MANIFEST_SCHEMA	"/tmp/ai.dtd"
-
-/* Script for converting legacy System Configuration manifest to new format */
-#define	SC_CONVERSION_SCRIPT	"/usr/lib/install/sc_conv.ksh"
-
-#define	TEXT_DOMAIN		"SUNW_INSTALL_AUTOINSTALL"
-
-#define	CONVERT_UNITS_TO_TEXT(units) \
-	((units) == AI_SIZE_UNITS_MEGABYTES ? "megabytes": \
-	((units) == AI_SIZE_UNITS_GIGABYTES ? "gigabytes": \
-	((units) == AI_SIZE_UNITS_TERABYTES ? "terabytes": \
-	((units) == AI_SIZE_UNITS_SECTORS ? "sectors": \
-	"(unknown)"))))
-
-#define	MB_TO_SECTORS	((uint64_t)2048)
-#define	GB_TO_MB	((uint64_t)1024)
-#define	TB_TO_GB	((uint64_t)1024)
-
-
-/*
- * DTD schema nodepaths - see ai.dtd
- */
-#define	AIM_TARGET_DISK_KEYWORD "auto_install/ai_instance/" \
-	"target/target_device/disk/disk_keyword/key"
-#define	AIM_TARGET_DEVICE_NAME "auto_install/ai_instance/" \
-	"target/target_device/disk/disk_name[name_type='ctd']/name"
-#define	AIM_TARGET_DEVICE_BOOT_DISK "boot_disk"
-#define	AIM_TARGET_DEVICE_SELECT_VOLUME_NAME \
-	"auto_install/ai_instance/target/target_device/" \
-	"disk/disk_name[name_type='volid']/name"
-#define	AIM_TARGET_DEVICE_SELECT_DEVICE_ID \
-	"auto_install/ai_instance/target/target_device/" \
-	"disk/disk_name[name_type='devid']/name"
-#define	AIM_TARGET_DEVICE_SELECT_DEVICE_PATH \
-	"auto_install/ai_instance/target/target_device/" \
-	"disk/disk_name[name_type='devpath']/name"
-#define	AIM_TARGET_DEVICE_TYPE "auto_install/ai_instance/" \
-	"target/target_device/disk/disk_prop/dev_type"
-#define	AIM_TARGET_DEVICE_SIZE	\
-	"auto_install/ai_instance/target/target_device/" \
-	"disk/disk_prop/dev_size"
-#define	AIM_TARGET_DEVICE_VENDOR "auto_install/ai_instance/target/" \
-	"target_device/disk/disk_prop/dev_vendor"
-#define	AIM_TARGET_DEVICE_USE_SOLARIS_PARTITION	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/action"
-#define	AIM_TARGET_DEVICE_INSTALL_SLICE_NUMBER \
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"slice[is_root='true']/name"
-#define	AIM_TARGET_DEVICE_ISCSI_TARGET_NAME \
-	"auto_install/ai_instance/target/target_device/disk/iscsi/name"
-#define	AIM_TARGET_DEVICE_ISCSI_TARGET_IP \
-	"auto_install/ai_instance/target/target_device/disk/iscsi/ip"
-#define	AIM_TARGET_DEVICE_ISCSI_TARGET_LUN \
-	"auto_install/ai_instance/target/target_device/disk/iscsi/target_lun"
-#define	AIM_TARGET_DEVICE_ISCSI_TARGET_PORT \
-	"auto_install/ai_instance/target/target_device/disk/iscsi/target_port"
-#define	AIM_TARGET_DEVICE_ISCSI_PARAMETER_SOURCE \
-	"auto_install/ai_instance/target/target_device/disk/iscsi/source"
-#define	AIM_SWAP_SIZE	\
-	"auto_install/ai_instance/target/target_device/swap/zvol/size/val"
-#define	AIM_DUMP_SIZE	\
-	"auto_install/ai_instance/target/target_device/dump/zvol/size/val"
-
-#define	AIM_PARTITION_ACTIONS	\
-	"auto_install/ai_instance/target/target_device/disk/partition/action"
-#define	AIM_NUMBERED_PARTITIONS	\
-	"auto_install/ai_instance/target/target_device/disk/partition/name"
-#define	AIM_NUMBERED_PARTITION_NUMBER	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[name=\"%s\":action=\"%s\"]/name"
-#define	AIM_NUMBERED_PARTITION_ACTION	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[name=\"%s\":action=\"%s\"]/action"
-#define	AIM_NUMBERED_PARTITION_START_SECTOR	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[name=\"%s\":action=\"%s\"]/size/start_sector"
-#define	AIM_NUMBERED_PARTITION_SIZE	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[name=\"%s\":action=\"%s\"]/size/val"
-#define	AIM_NUMBERED_PARTITION_TYPE	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[name=\"%s\":action=\"%s\"]/part_type"
-
-#define	AIM_USE_EXISTING_PARTITIONS	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/action"
-#define	AIM_UNNUMBERED_PARTITION_NUMBER	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/name"
-#define	AIM_UNNUMBERED_PARTITION_ACTION	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/action"
-#define	AIM_UNNUMBERED_PARTITION_START_SECTOR	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/size/start_sector"
-#define	AIM_UNNUMBERED_PARTITION_SIZE	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/size/val"
-#define	AIM_UNNUMBERED_PARTITION_TYPE	\
-	"auto_install/ai_instance/target/target_device/disk/" \
-	"partition[action='use_existing']/part_type"
-
-#define	AIM_SLICE_NUMBER "auto_install/ai_instance/target/" \
-	"target_device/disk/slice/name"
-#define	AIM_SLICE_ACTION "auto_install/ai_instance/target/" \
-	"target_device/disk/slice/action"
-#define	AIM_SLICE_SIZE "auto_install/ai_instance/target/" \
-	"target_device/disk/slice[name=\"%s\":action=\"%s\"]/size/val"
-#define	AIM_SLICE_ON_EXISTING "auto_install/ai_instance/target/" \
-	"target_device/disk/slice[name=\"%s\":action=\"%s\"]/force"
-#define	AIM_AUTO_REBOOT	"auto_install/ai_instance/auto_reboot"
-
-#define	AIM_PROXY_URL "auto_install/ai_instance/http_proxy"
-
-#define	AIM_PACKAGE_INSTALL_NAME "auto_install/ai_instance/software/" \
-	"software_data[action='install']/name"
-
-#define	AIM_PACKAGE_REMOVE_NAME "auto_install/ai_instance/software/" \
-	"software_data[action='uninstall']/name"
-
-/*
- * Primary and secondary publishers
- */
-#define	AIM_IPS_PUBLISHER_URL	\
-	"auto_install/ai_instance/software/source/publisher/origin/name"
-#define	AIM_FALLBACK_PUBLISHER_URL	"http://pkg.oracle.com/solaris/release"
-#define	AIM_FALLBACK_PUBLISHER_NAME	"solaris"
-
-/*
- * Find publisher name and mirror based on url
- */
-#define	AIM_ADD_URL_PUBLISHER_NAME "auto_install/ai_instance/software/" \
-	"source/publisher[origin/name=\"%s\"]/name"
-#define	AIM_ADD_URL_PUBLISHER_MIRROR "auto_install/ai_instance/software/" \
-	"source/publisher[origin/name=\"%s\"]/mirror/name"
-
-/* type of package list to be obtained from manifest */
-typedef enum {
-	AI_PACKAGE_LIST_INSTALL,
-	AI_PACKAGE_LIST_REMOVE
-} auto_package_list_type_t;
-
-/* hardcoded lists of packages for testing purposes */
-#define	AI_TEST_PACKAGE_LIST_INSTALL	\
-	"SUNWcsd\nSUNWcs\nbabel_install\nentire\n"
-#define	AI_TEST_PACKAGE_LIST_REMOVE	"babel_install\n"
-
-/* size units can be user-defined */
-typedef enum {
-	AI_SIZE_UNITS_MEGABYTES = 0,
-	AI_SIZE_UNITS_SECTORS,
-	AI_SIZE_UNITS_GIGABYTES,
-	AI_SIZE_UNITS_TERABYTES
-} auto_size_units_t;
-
-/* define source of iSCSI parameters */
-typedef enum {
-	AI_ISCSI_PARM_SRC_MANIFEST = 0,
-	AI_ISCSI_PARM_SRC_DHCP
-} iscsi_parm_src_t;
-
-/*
- * information needed to mount iSCSI boot target during installation
- */
-typedef struct {
-	char		name[INSTISCSI_MAX_ISCSI_NAME_LEN + 1];
-	char		ip[INSTISCSI_IP_ADDRESS_LEN + 1];
-	uint32_t	port;
-	char		lun[INSTISCSI_MAX_LUN_LEN + 1];
-	iscsi_parm_src_t parm_src;
-} iscsi_info_t;
-
-typedef struct {
-	/*
-	 * disk criteria for selection of target disk
-	 */
-	char		diskkeyword[10];		/* 'boot_disk' */
-	char		diskname[MAXNAMELEN];
-	char		disktype[MAXNAMELEN];
-	char		diskvendor[MAXNAMELEN];
-	char		diskvolname[MAXNAMELEN];
-	char		diskdevid[MAXNAMELEN];
-	char		diskdevicepath[MAXPATHLEN];
-	uint64_t	disksize;
-#ifndef	__sparc
-	char		diskusepart[6];		/* 'true' or 'false' */
-#endif
-	iscsi_info_t	diskiscsi;		/* iSCSI target parameters */
-	/*
-	 * other data related to disk target
-	 */
-	uint8_t		install_slice_number;	/* install Solaris here */
-} auto_disk_info;
-
-typedef struct {
-	char		partition_action[AUTO_MAX_ACTION_LEN];
-	int		partition_number;
-	uint64_t	partition_start_sector;
-	uint64_t	partition_size;
-	int		partition_type;
-	auto_size_units_t	partition_size_units;
-	boolean_t	partition_is_logical;
-} auto_partition_info;
-
-typedef struct {
-	char		slice_action[AUTO_MAX_ACTION_LEN];
-	int		slice_number;
-	uint64_t	slice_size;
-	auto_size_units_t	slice_size_units;
-	om_on_existing_t	on_existing; /* action to take if it exists */
-} auto_slice_info;
-
-typedef struct auto_mirror_repo {
-	char			*mirror_url;
-	struct auto_mirror_repo	*next_mirror;
-} auto_mirror_repo_t;
-
-typedef struct auto_repo_info {
-	char			*publisher;
-	char			*url;
-	auto_mirror_repo_t	*mirror_repo; /* point to the list of mirrors */
-	struct auto_repo_info	*next_repo; /* Point to the next repo */
-} auto_repo_info_t;
-
-typedef struct {
-	int32_t		swap_size;	/* Swap Size in MB */
-} auto_swap_device_info;
-
-typedef struct {
-	int32_t		dump_size;	/* Dump Size in MB */
-} auto_dump_device_info;
-
-typedef struct {
-	uint32_t	size;
-	char		diskname[MAXNAMELEN];
-	boolean_t	whole_disk;
-} install_params;
-
-void	auto_log_print(char *fmt, ...);
-void	auto_debug_print(ls_dbglvl_t dbg_lvl, char *fmt, ...);
-void	auto_debug_dump_file(ls_dbglvl_t dbg_lvl, char *filename);
-
-int	auto_target_discovery(void);
-int	auto_select_install_target(char **diskname, auto_disk_info *adi);
-
-int	ai_create_manifest_image(char *filename);
-int	ai_setup_manifest_image();
-void	ai_teardown_manifest_state();
-char	**ai_get_manifest_values(char *path, int *len);
-void	ai_free_manifest_values(char **value_list);
-int 	ai_get_manifest_disk_info(auto_disk_info *);
-int 	ai_get_manifest_swap_device_info(auto_swap_device_info *adsi);
-int 	ai_get_manifest_dump_device_info(auto_dump_device_info *addi);
-auto_partition_info *ai_get_manifest_partition_info(int *);
-auto_slice_info *ai_get_manifest_slice_info(int *);
-char	*ai_get_manifest_ipsrepo_url(void);
-char	*ai_get_manifest_ipsrepo_authname(void);
-char	*ai_get_manifest_ipsrepo_mirror(void);
-char	*ai_get_manifest_ipsrepo_addl_url(void);
-char	*ai_get_manifest_ipsrepo_addl_authname(void);
-char	*ai_get_manifest_ipsrepo_addl_mirror(void);
-auto_repo_info_t *ai_get_default_repo_info(void);
-auto_repo_info_t *ai_get_additional_repo_info(void);
-char	*ai_get_manifest_http_proxy(void);
-char	**ai_get_manifest_packages(int *num_packages_p, char *pkg_list_tag_p);
-char	*ai_get_manifest_element_value(char *element);
-void	free_repo_info_list(auto_repo_info_t *repo);
-void	free_repo_mirror_list(auto_mirror_repo_t *mirror);
-
-PyObject *ai_create_manifestserv(char *filename);
-int	ai_setup_manifestserv(PyObject *server_obj);
-void	ai_destroy_manifestserv(PyObject *server_obj);
-char	**ai_lookup_manifest_values(PyObject *server_obj, char *path, int *len);
-void	ai_free_manifest_value_list(char **value_list);
-
-int	ai_du_get_and_install(char *install_root, boolean_t honor_noinstall,
-	    boolean_t update_boot_archive, int *num_installed_pkgs_p);
-int	ai_du_install(char *install_root, boolean_t honor_noinstall,
-	    boolean_t update_boot_archive, int *num_installed_pkgs_p);
-
-int	mount_iscsi_target_if_requested(auto_disk_info *, char *, int);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _AUTO_INSTALL_H */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/auto_install.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,1137 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+"""AutoInstall main class and progress logging support."""
+
+import linecache
+import logging
+import optparse
+import os
+import os.path
+import random
+import socket
+import struct
+import sys
+import thread
+import time
+import traceback
+
+import osol_install.errsvc as errsvc
+
+from osol_install.liberrsvc import ES_DATA_EXCEPTION
+
+from solaris_install import \
+    ApplicationData, system_temp_path, post_install_logs_path, Popen
+from solaris_install.auto_install.ai_instance import AIInstance
+from solaris_install.auto_install.checkpoints.dmm import \
+    DERIVED_MANIFEST_DATA, DerivedManifestData
+from solaris_install.auto_install.checkpoints.target_selection import \
+    SelectionError, TargetSelection
+from solaris_install.auto_install.utmpx import users_on_console
+from solaris_install.boot import boot
+from solaris_install.data_object import ParsingError, \
+    DataObject, ObjectNotFoundError
+from solaris_install.data_object.data_dict import DataObjectDict
+from solaris_install.engine import InstallEngine
+from solaris_install.engine import UnknownChkptError, UsageError, \
+    RollbackError
+from solaris_install.ict import initialize_smf, update_dumpadm, ips, \
+    device_config, apply_sysconfig, boot_archive, transfer_files, \
+    create_snapshot, setup_swap
+from solaris_install.logger import FileHandler, ProgressHandler, MAX_INT
+from solaris_install.logger import INSTALL_LOGGER_NAME
+from solaris_install.manifest.parser import ManifestError, \
+    MANIFEST_PARSER_DATA
+from solaris_install.target import Target, discovery, instantiation
+from solaris_install.target.logical import BE, Logical
+from solaris_install.transfer import create_checkpoint
+from solaris_install.transfer.info import Software, Destination, Image, \
+    ImType, Dir, INSTALL, IPSSpec, CPIOSpec, SVR4Spec
+from solaris_install.transfer.ips import AbstractIPS
+
+ZPOOL = "/usr/sbin/zpool"
+
+class AutoInstall(object):
+    """
+    AutoInstall master class
+    """
+
+    BE_LOG_DIR = post_install_logs_path("")
+    INSTALL_LOG = "install_log"
+    AI_EXIT_SUCCESS = 0
+    AI_EXIT_FAILURE = 1
+    AI_EXIT_AUTO_REBOOT = 64
+    TARGET_INSTANTIATION_CHECKPOINT = 'target-instantiation'
+    FIRST_TRANSFER_CHECKPOINT = 'first-transfer'
+    MANIFEST_CHECKPOINTS = ["derived-manifest", "manifest-parser"]
+    CHECKPOINTS_BEFORE_IPS = ["target-discovery", TARGET_INSTANTIATION_CHECKPOINT]
+    CHECKPOINTS_BEFORE_IPS.extend(MANIFEST_CHECKPOINTS)
+    CHECKPOINTS_BEFORE_TI = ["target-discovery", TARGET_INSTANTIATION_CHECKPOINT]
+    CHECKPOINTS_BEFORE_TI.extend(MANIFEST_CHECKPOINTS)
+    TRANSFER_FILES_CHECKPOINT = 'transfer-ai-files'
+    INSTALLED_ROOT_DIR = "/a"
+
+    def __init__(self, args=None):
+        """
+        Class constructor
+        """
+        self.auto_reboot = False
+        self.doc = None
+        self.exitval = self.AI_EXIT_SUCCESS
+        self.derived_script = None
+        self.manifest = None
+
+        # To remember the BE when we find it.
+        self._be = None
+
+        # Parse command line arguments
+        self.options, self.args = self.parse_args(args)
+
+        # Initialize Install Engine
+        self.engine = InstallEngine(debug=True, stop_on_error=True)
+        self.doc = self.engine.data_object_cache
+
+        # Add ApplicationData to the DOC
+        self._app_data = ApplicationData("auto-install")
+        self.doc.persistent.insert_children(self._app_data)
+
+        # Clear error service
+        errsvc.clear_error_list()
+
+        # Create Logger and setup logfiles
+        self.install_log_fh = None
+        self.logger = None
+        self.progress_ph = None
+        self.setup_logs()
+
+        if not self.options.list_checkpoints:
+            self.logger.info("Starting Automated Installation Service")
+
+        if self.options.stop_checkpoint:
+            self.logger.debug("Pausing AI install before checkpoint: %s" % \
+                (self.options.stop_checkpoint))
+
+        if not self.options.list_checkpoints:
+
+            if self.options.manifest:
+                self.logger.info("Using Profile: %s" % (self.options.manifest))
+
+            if self.derived_script:
+                self.logger.info("Using Derived Script: %s" % \
+                    (self.derived_script))
+
+            if self.manifest:
+                self.logger.info("Using Manifest: %s" % (self.manifest))
+
+            if self.options.dry_run:
+                self.logger.info("Dry Run mode enabled")
+
+    def parse_args(self, args):
+        """
+        Method to parse command line arguments
+        """
+
+        usage = "%prog -m <manifest>\n" + \
+            "\t[-i - Stop installation before Target Instantiation |\n" + \
+            "\t -I - Stop installation after Target Instantiation]\n" + \
+            "\t[-n - Enable dry run mode]"
+
+        parser = optparse.OptionParser(usage=usage)
+
+        parser.add_option("-m", "--manifest", dest="manifest",
+            help="Specify script or XML manifest to use")
+
+        parser.add_option("-i", "--break-before-ti", dest="break_before_ti",
+            action="store_true", default=False,
+            help="Break execution before Target Instantiation, testing only")
+
+        parser.add_option("-I", "--break-after-ti", dest="break_after_ti",
+            action="store_true", default=False,
+            help="Break execution after Target Instantiation, testing only")
+
+        parser.add_option("-n", "--dry-run", dest="dry_run",
+            action="store_true", default=False,
+            help="Enable dry-run mode for testing")
+
+        parser.add_option("-l", "--list-checkpoints",  dest="list_checkpoints",
+            action="store_true", default=False,
+            help=optparse.SUPPRESS_HELP)
+
+        parser.add_option("-s", "--stop-checkpoint", dest="stop_checkpoint",
+            help=optparse.SUPPRESS_HELP)
+
+        (options, args) = parser.parse_args(args)
+
+        # If manifest argument provided, determine if script or manifest
+        if options.manifest:
+            (self.derived_script, self.manifest) =  \
+                self.determine_manifest_type(options.manifest)
+            if not self.derived_script and not self.manifest:
+                parser.error("Must specify manifest with -m option")
+
+        # If specifying to list checkpoints, we can ignore all other semantics
+        if not options.list_checkpoints:
+            # Perform some parsing semantic validation
+            # Must specify one of disk or manifest
+            if options.manifest is None:
+                parser.error("Must specify a manifest to use for installation")
+
+        if options.break_before_ti and options.break_after_ti:
+            parser.error("Cannot specify to stop installation before " + \
+                "and after Target Installation")
+
+        if (options.break_before_ti or options.break_after_ti) and \
+            options.stop_checkpoint:
+            parser.error("Cannot specify a stop checkpoint and to stop " + \
+                "before/after target instantiation at same time")
+
+        # Set stop_breakpoint to be before or after TI if requested
+        if options.break_before_ti:
+            options.stop_checkpoint = self.TARGET_INSTANTIATION_CHECKPOINT
+        elif options.break_after_ti:
+            options.stop_checkpoint = self.FIRST_TRANSFER_CHECKPOINT
+
+        return (options, args)
+
+    @staticmethod
+    def determine_manifest_type(manifest):
+        """
+        Determine if the manifest file argument is a script or an XML manifest.
+        Simply check whether the first two characters of the file are "#!".
+        """
+        derived_script = None
+        xml_manifest = None
+
+        if manifest is None:
+            return None, None
+        else:
+            if linecache.getline(manifest, 1)[:2] == "#!":
+                derived_script = manifest
+            else:
+                xml_manifest = manifest
+
+        return derived_script, xml_manifest
+
+    def validate_stop_checkpoint(self):
+        """
+        Validate stop checkpoint argument is valid by comparing
+        against list of registered checkpoints.
+        """
+        cp_data_list = self.engine.get_exec_list()
+
+        if len(cp_data_list) == 0:
+            self.logger.debug("No Checkpoints have been registered to run")
+        else:
+            for cp in cp_data_list:
+                if str(cp) == self.options.stop_checkpoint:
+                    return True
+        return False
+
+    def list_checkpoints(self):
+        """
+        print to stdout the current list of checkpoints registered to run.
+        """
+        cp_data_list = self.engine.get_exec_list()
+
+        if len(cp_data_list) == 0:
+            print "No Checkpoints have been registered to run."
+        else:
+            print "Checkpoints will be run in the following order:"
+            if self.derived_script:
+                print "	%s" % (str(self.MANIFEST_CHECKPOINTS[0]))
+                print "	%s" % (str(self.MANIFEST_CHECKPOINTS[1]))
+            elif self.manifest:
+                print "	%s" % (str(self.MANIFEST_CHECKPOINTS[1]))
+            for cp in cp_data_list:
+                if self.options.stop_checkpoint and \
+                    str(cp) == self.options.stop_checkpoint:
+                    break
+                print "	%s" % (str(cp))
+
+    def setup_logs(self):
+        """
+        Create the logger instance for AI and create simple and
+        detailed log files to use.
+        """
+
+        # Create logger for AI
+        self.logger = logging.getLogger(INSTALL_LOGGER_NAME)
+
+        # Log progress and info messages to the console.
+        self.progress_ph = AIProgressHandler(self.logger,
+            skip_console_msg=self.options.list_checkpoints)
+        self.progress_ph.start_progress_server()
+        self.logger.addHandler(self.progress_ph)
+
+        # Only ever send debug info to the logs, use INFO for console
+        self.progress_ph.removeFilter(self.logger._prog_filter)
+        self.progress_ph.setLevel(logging.INFO)
+        datefmt = "%H:%M:%S"
+        formatter = AIScreenFormatter(datefmt=datefmt,
+                hide_progress=self.options.list_checkpoints)
+        self.progress_ph.setFormatter(formatter)
+
+        # create a install_log file handler and add it to the ai_logger
+
+        # set the logfile names
+        install_log = self._app_data.work_dir + self.INSTALL_LOG
+        self.install_log_fh = FileHandler(install_log)
+
+        self.install_log_fh.setLevel(logging.DEBUG)
+        if not self.options.list_checkpoints:
+            self.logger.info("Install Log: %s" % install_log)
+        self.logger.addHandler(self.install_log_fh)
+
+    @property
+    def be(self):
+        if self._be is not None:
+            return self._be
+
+        new_be = None
+        desired = self.doc.persistent.get_first_child(Target.DESIRED,
+                                                      class_type=Target)
+
+        if desired:
+            try:
+                new_be = desired.get_descendants(class_type=BE, max_count=1,
+                                                 not_found_is_err=True)[0]
+                self._be = new_be
+            except ObjectNotFoundError:
+                self.logger.error("Unable to locate new BE definition")
+
+        return new_be
+
+    def __transfer_install_log(self):
+        """If BE exists, then transfer Log Files to New BE"""
+        new_be = self.be
+
+        if new_be is not None:
+            if new_be.exists:
+                # Assumes BE is still mounted, should be, if it exists.
+                self.logger.info("Transferring log to %s" %
+                    new_be.mountpoint + self.BE_LOG_DIR)
+                self.install_log_fh.transfer_log(
+                    new_be.mountpoint + self.BE_LOG_DIR, isdir=True)
+            else:
+                self.logger.error(
+                    "Unable to determine BE mountpoint")
+                return False
+        else:
+            self.logger.error(
+                "Unable to determine location to transfer logs to")
+            return False
+
+        return True
+
+    def __cleanup_before_exit(self, error_val):
+        """Do some clean up and set exit code.
+        """
+
+        self.exitval = error_val
+        if not self.options.list_checkpoints:
+            if error_val in [self.AI_EXIT_SUCCESS, self.AI_EXIT_AUTO_REBOOT]:
+                if error_val == self.AI_EXIT_AUTO_REBOOT:
+                    self.logger.info("Automated Installation succeeded.")
+                    self.logger.info("System will be rebooted now")
+                else:
+                    self.logger.info("Automated Installation succeeded.")
+                    self.logger.info("You may wish to reboot the system at "
+                                     "this time.")
+            else:
+                # error_val == self.AI_EXIT_FAILURE:
+                self.logger.info("Automated Installation Failed")
+                self.logger.info("Please see logs for more information")
+
+        # Close logger now since it holds a handle to the log on the BE, which
+        # makes it impossible to unmount the BE
+        self.progress_ph.stop_progress_server()
+        self.logger.close()
+
+        # Only attempt to unmount BE if Target Instantiation has completed
+        if self.options.stop_checkpoint not in self.CHECKPOINTS_BEFORE_TI:
+            # Unmount the BE now.
+            if self.be is not None:
+                try:
+                    self.be.unmount(self.options.dry_run)
+                except (RuntimeError) as ex:
+                    print ex  # Print since logger is closed now.
+                    self.exitval = self.AI_EXIT_FAILURE
+
+    def import_preserved_zpools(self):
+        '''
+        Check if we are preserving Zpools in manifest.
+        If we are ensure any referenced zpools are imported.
+        If importing fails, exit
+        '''
+        from_manifest = self.doc.find_path(
+            "//[@solaris_install.auto_install.ai_instance.AIInstance?2]"
+            "//[@solaris_install.target.Target?2]")
+
+        cmd = [ZPOOL, "list", "-H", "-o", "name"]
+        p = Popen.check_call(cmd, stdout=Popen.STORE, stderr=Popen.STORE,
+                             logger=INSTALL_LOGGER_NAME)
+
+        zpool_list = p.stdout.splitlines()
+
+        if from_manifest:
+            # Check if all Targets have children
+            targets_have_children = False
+            if from_manifest:
+                for target in from_manifest:
+                    if target.has_children:
+                        targets_have_children = True
+                        break
+
+            if targets_have_children:
+                target = from_manifest[0]
+                logical = target.get_first_child(class_type=Logical)
+
+                # Should only ever be one logical
+                if logical:
+                    for zpool in logical.children:
+                        if zpool.action in TargetSelection.PRESERVED and \
+                            zpool.name not in zpool_list:
+                            # Zpool being preserved but not imported
+                            # Attempt to import.
+                            cmd = [ZPOOL, "import", "-f", zpool.name]
+                            p = Popen.check_call(cmd, stdout=Popen.STORE,
+                                                 stderr=Popen.STORE,
+                                                 logger=INSTALL_LOGGER_NAME,
+                                                 check_result=Popen.ANY,
+                                                 stderr_loglevel=logging.DEBUG)
+                            if p.returncode != 0:
+                                # Import failed cannot preserve, so fail AI
+                                self.logger.error("Zpool '%s' with action '%s' "
+                                    "failed to import. AI is unable to "
+                                    "preserve unavailable zpools." % \
+                                    (zpool.name, zpool.action))
+                                return False
+        return True
+
+    def perform_autoinstall(self):
+        """
+        Main control method for performing an Automated Installation
+        """
+
+        # Check if we need to register/run derived manifest/parser checkpoints
+        # If manifest argument set or derived script argument set then
+        # need to parse the manifest.
+        if self.manifest or self.derived_script:
+            if not self.register_parse_manifest():
+                self.logger.error("Derived/Parse Manifest " + \
+                                  "registration failed")
+                self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+                return
+
+            if not self.execute_parse_manifest():
+                self.logger.error("Derived/Parse Manifest checkpoint failed")
+                self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+                return
+
+            if self.options.stop_checkpoint is not None:
+                if self.options.stop_checkpoint in self.MANIFEST_CHECKPOINTS:
+                    self.logger.debug("DOC: %s" % (str(self.doc)))
+                    self.logger.debug("DOC XML: %s" % \
+                        (str(self.doc.get_xml_tree_str())))
+                    self.logger.info("Automated Installation paused at " + \
+                        "checkpoint: %s" % (self.options.stop_checkpoint))
+                    self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+
+                    return
+
+        errors = errsvc.get_all_errors()
+        if errors:
+            errstr = "Following errors occurred parsing manifest :\n %s" % \
+                (str(errors[0]))
+            self.logger.error(errstr)
+            self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+            return
+
+        # If we are to stop before target-discovery, then stop here
+        # As target-discovery is the first checkpoint in the next
+        # engine run. No need to call the engine.
+        if self.options.stop_checkpoint is not None:
+            if self.options.stop_checkpoint == "target-discovery":
+                self.logger.debug("DOC: %s" % (str(self.doc)))
+                self.logger.debug("DOC XML: %s" % \
+                    (str(self.doc.get_xml_tree_str())))
+                self.logger.info("Automated Installation paused at " + \
+                    "checkpoint: %s" % (self.options.stop_checkpoint))
+                self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+                return
+
+        # Need to register all checkpoints
+        if not self.configure_checkpoints():
+            self.logger.error("Registering of checkpoints failed")
+            self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+            return
+
+        # Validate stop checkpoint if specified
+        if self.options.stop_checkpoint:
+            if not self.validate_stop_checkpoint():
+                self.logger.error("Invalid stop checkpoint specified: %s" % \
+                    self.options.stop_checkpoint)
+                self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+                return
+
+        # specifying to list checkpoints, do so then exit
+        # List of checkpoints available depend on what has just been
+        # registered.
+        if self.options.list_checkpoints:
+            self.list_checkpoints()
+            self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+            return
+
+        # Check auto_reboot and proxy in DOC and set local definition
+        ai_instance = self.doc.volatile.get_first_child(class_type=AIInstance)
+
+        if ai_instance:
+            self.auto_reboot = ai_instance.auto_reboot
+
+            if ai_instance.http_proxy is not None and \
+               len(ai_instance.http_proxy) > 0:
+                # Set the HTTP Proxy environment variable
+                os.environ["http_proxy"] = ai_instance.http_proxy
+
+        self.logger.info("Auto Reboot set to : %s" % (self.auto_reboot))
+
+        # Ensure preserved zpools are online (imported)
+        if not self.import_preserved_zpools():
+            self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+            return
+
+        # Set resume_checkpoint to None, Engine will simply resume from
+        # the next checkpoint that has not been run yet.
+        # Specifying a resume_checkpoint you need to have ZFS dataset
+        # containing a snapshot of where resumable checkpoint was paused.
+        if self.execute_checkpoints(resume_checkpoint=None,
+            pause_checkpoint=self.options.stop_checkpoint,
+            dry_run=self.options.dry_run):
+
+            if self.options.stop_checkpoint not in self.CHECKPOINTS_BEFORE_IPS:
+                if not self.__transfer_install_log():
+                    self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+                else:
+                    if self.auto_reboot:
+                        self.__cleanup_before_exit(self.AI_EXIT_AUTO_REBOOT)
+                    else:
+                        self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+
+            # Successful Execution
+            elif self.options.stop_checkpoint:
+                self.logger.debug("DOC: %s" % (str(self.doc)))
+                self.logger.debug("DOC XML: %s" % \
+                    (str(self.doc.get_xml_tree_str())))
+                self.logger.info("Automated Installation paused at " + \
+                    "checkpoint: %s" % (self.options.stop_checkpoint))
+                self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+            else:
+                self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
+        else:
+            self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
+
+    def register_parse_manifest(self):
+        """
+        Method to parse the manifest
+
+        If derived_script argument is provided, then Use Derived Manifest
+        checkpoint. Derive the manifest to use for this automated install
+        and parse it.  Otherwise just parse the manifest.
+
+        Path of execution for Derived Manifest
+        - Store Derived Script in DOC for DM checkpoint to read.
+        - Register Derived Manifest Checkpoint (DM)
+        - Register Manifest Parser Checkpoint (MP)
+        """
+
+        try:
+            if self.derived_script:
+                if not self.options.list_checkpoints:
+                    self.logger.info("Deriving manifest from: %s" % \
+                        (self.derived_script))
+
+                # Store Derived Script name into DOC for DM checkpoint to read.
+                # Check if derived script path is on volatile doc
+                dm = self.doc.volatile.get_first_child(
+                    name=DERIVED_MANIFEST_DATA)
+
+                if dm is not None:
+                    # Just change its value
+                    dm.script = self.derived_script
+                else:
+                    # Insert a new child
+                    dm = DerivedManifestData(DERIVED_MANIFEST_DATA, \
+                        script=self.derived_script)
+                    self.doc.volatile.insert_children(dm)
+
+                if not self.options.list_checkpoints:
+                    self.logger.info("Derived %s stored" % (dm.script))
+
+                    # Register Derived Manifest checkpoint
+                    self.logger.info("Registering Derived Manifest " \
+                        "Module Checkpoint")
+
+                self.engine.register_checkpoint("derived-manifest",
+                    "solaris_install.auto_install.checkpoints.dmm",
+                    "DerivedManifestModule", args=None, kwargs=None)
+
+                # Set arguments for Manifest Parser Checkpoint
+                kwargs = dict()
+                kwargs["call_xinclude"] = True
+                args = None
+
+            elif self.manifest:
+                # Set arguments for Manifest Parser Checkpoint
+                kwargs = dict()
+                kwargs["call_xinclude"] = True
+                args = [self.manifest]
+            else:
+                # No manifest specified to parse
+                return True
+
+            if not self.options.list_checkpoints:
+                self.logger.info("Registering Manifest Parser Checkpoint")
+
+            self.engine.register_checkpoint("manifest-parser",
+                                    "solaris_install.manifest.parser",
+                                    "ManifestParser", args=args, kwargs=kwargs)
+            return True
+        except Exception as ex:
+            self.logger.debug("Uncaught exception parsing manifest: %s" % \
+                str(ex))
+            return False
+
+    def execute_parse_manifest(self):
+        """
+        Execute derived and/or manifest parser checkpoints
+          - DM checkpoint will read script from DOC
+          - DM will derive manifest and store final location in DOC
+          - MP will read DOC for manifest to parse, if not passed any
+            manifest as an explicit argument. (This is not implemented yet)
+        """
+        # Execute Checkpoints
+        if not self.options.list_checkpoints:
+            if self.derived_script:
+                self.logger.info("Executing Derived Manifest and Manifest " \
+                        "Parser Checkpoints")
+            else:
+                self.logger.info("Executing Manifest Parser Checkpoint")
+
+        if self.options.stop_checkpoint in self.MANIFEST_CHECKPOINTS:
+            pause_cp = self.options.stop_checkpoint
+        else:
+            pause_cp = None
+
+        if not self.execute_checkpoints(pause_checkpoint=pause_cp, \
+            dry_run=False):
+            return False
+
+        # If derived manifest run, read the stored manifest location
+        # from pm.manifest.
+        if self.derived_script:
+            pm = self.doc.volatile.get_first_child(name=MANIFEST_PARSER_DATA)
+            if pm is None or pm.manifest is None:
+                self.logger.error("Derived Manifest Failed, manifest not set")
+                return False
+
+            # Ideal Path - We have a parsed manifest at this point
+            self.logger.info("DM set manifest to: %s" % (pm.manifest))
+            self.manifest = pm.manifest
+
+        self.logger.info("Manifest %s successfully parsed" % (self.manifest))
+        self.logger.debug("DOC (tree format):\n%s\n\n\n" %
+            str(self.engine.data_object_cache))
+        self.logger.debug("DOC (xml_format):\n%s\n\n\n" %
+            str(self.engine.data_object_cache.get_xml_tree_str()))
+
+        return True
+
+    def execute_checkpoints(self, resume_checkpoint=None,
+        pause_checkpoint=None, dry_run=False):
+        """
+        Wrapper to the execute_checkpoint method
+        """
+
+        # Get execution list from engine to determine if any checkpoints
+        # remain to run; if none, return False
+        if len(self.engine.get_exec_list(None, None)) == 0:
+            self.logger.warning("No checkpoints to execute")
+            return False
+
+        self.logger.debug("Executing Engine Checkpoints...")
+        if resume_checkpoint is not None:
+            self.logger.debug("Resuming at checkpoint: %s" % \
+                (resume_checkpoint))
+
+        if pause_checkpoint is not None:
+            self.logger.debug("Pausing before checkpoint: %s" %
+                (pause_checkpoint))
+
+        try:
+            if resume_checkpoint is not None:
+                (status, failed_cps) = self.engine.resume_execute_checkpoints(
+                    resume_checkpoint, pause_before=pause_checkpoint,
+                    dry_run=dry_run, callback=None)
+            else:
+                (status, failed_cps) = self.engine.execute_checkpoints(
+                    start_from=None, pause_before=pause_checkpoint,
+                    dry_run=dry_run, callback=None)
+        except (ManifestError, ParsingError) as ex:
+            self.logger.error("Manifest parser checkpoint error :")
+            print "\t\t%s" % str(ex)
+            return False
+        except (SelectionError) as ex:
+            self.logger.error("Target selection checkpoint error :")
+            print "\t\t%s" % str(ex)
+            return False
+        except (ValueError) as ex:
+            self.logger.error("Value errors occurred :")
+            print "\t\t%s" % str(ex)
+            return False
+        except (RollbackError, UnknownChkptError, UsageError) as ex:
+            self.logger.error("RollbackError, UnknownChkptError, UsageError :")
+            print "\t\t%s" % str(ex)
+            raise RuntimeError(str(ex))
+        except Exception, ex:
+            self.logger.debug("%s" % (traceback.format_exc()))
+            raise RuntimeError(str(ex))
+
+        self.logger.debug("Checkpoints Completed : DOC : \n%s\n\n", self.doc)
+        self.logger.debug("Checkpoints Completed : "
+                          "DOC (xml_format):\n%s\n\n\n" %
+            str(self.engine.data_object_cache.get_xml_tree_str()))
+
+        if status != InstallEngine.EXEC_SUCCESS:
+            self.logger.critical("Failed Checkpoints:")
+            for failed_cp in failed_cps:
+                err_data = errsvc.get_errors_by_mod_id(failed_cp)[0]
+                self.logger.critical("\t%s" % failed_cp)
+                self.logger.exception(err_data.error_data[ES_DATA_EXCEPTION])
+            return False
+        else:
+            return True
+
+    def configure_checkpoints(self):
+        """
+        Wrapper to configure required checkpoints for performing an
+        automated installation
+        """
+        # Need to set following Checkpoints for installation
+        #   Derived Manifest (If script passed as argument)
+        #   Manifest Parser (If manifest passed or derived)
+        #   Target Discovery
+        #   Target Selection
+        #   Device Driver Update - Install Root
+        #   Target Instantiation
+        #   Transfer
+        #   Target Configuration
+        #   Device Driver Update - New BE
+
+        try:
+            if not self.options.list_checkpoints:
+                self.logger.info("Configuring Checkpoints")
+
+            # Register TargetDiscovery
+            self.engine.register_checkpoint("target-discovery",
+                                "solaris_install.target.discovery",
+                                "TargetDiscovery", args=None, kwargs=None)
+
+            # Register TargetSelection
+            self.logger.debug("Adding Target Selection Checkpoint")
+            self.engine.register_checkpoint("target-selection",
+                "solaris_install.auto_install.checkpoints.target_selection",
+                "TargetSelection", args=None, kwargs=None)
+
+            # Register TargetInstantiation
+            self.logger.debug("Adding Target Instantiation Checkpoint")
+            self.engine.register_checkpoint(self.TARGET_INSTANTIATION_CHECKPOINT,
+                                "solaris_install.target.instantiation",
+                                "TargetInstantiation", args=None, kwargs=None)
+
+            # Add destination for transfer nodes, and register checkpoints.
+            sw_nodes = self.doc.volatile.get_descendants(class_type=Software)
+            image_action = AbstractIPS.CREATE  # For first IPS only
+            transfer_count = 0  # For generating names if none provided
+            for sw in sw_nodes:
+                transfer_count += 1
+                if sw.name is None or len(sw.name) == 0:
+                    # Generate a name, setting internal attribute
+                    sw._name = "generated-transfer-%d-%d" % \
+                        (os.getpid(), transfer_count)
+
+                # Add first transfer checkpoint name to list of checkpoints
+                # to ensure -I option succeeds.
+                if transfer_count == 1:
+                    self.CHECKPOINTS_BEFORE_IPS.append(sw.name)
+                    if self.options.stop_checkpoint is not None:
+                        if self.options.stop_checkpoint == \
+                            self.FIRST_TRANSFER_CHECKPOINT:
+                            self.options.stop_checkpoint = sw.name
+
+
+                # Ensure there is at least one software_data element with
+                # Install action exists, and that all software_data elements
+                # contain at least one 'name' sub element.
+                found_install_sw_data = False
+                tran_type = sw.tran_type.upper()
+                for sw_child in sw.children:
+                    found_sw_data = False
+                    if tran_type == "IPS" and isinstance(sw_child, IPSSpec):
+                        found_sw_data = True
+                        if sw_child.action == IPSSpec.INSTALL:
+                            found_install_sw_data = True
+                    elif tran_type == "CPIO" and \
+                         isinstance(sw_child, CPIOSpec):
+                        found_sw_data = True
+                        if sw_child.action == CPIOSpec.INSTALL:
+                            found_install_sw_data = True
+                    elif tran_type == "SVR4" and \
+                         isinstance(sw_child, SVR4Spec):
+                        found_sw_data = True
+                        if sw_child.action == SVR4Spec.INSTALL:
+                            found_install_sw_data = True
+
+                    if found_sw_data and len(sw_child.contents) == 0:
+                        self.logger.error("Invalid manifest specification "
+                            "for <software_data> element. Must specify at "
+                            "least one package to install/uninstall.")
+                        return False
+
+                if not found_install_sw_data:
+                    self.logger.error("No packages specified to install. "
+                        "Manifest must contain at least one <software_data> "
+                        "element with 'install' action.")
+                    return False
+
+                self.logger.debug("Setting destination for transfer: %s to %s"
+                    % (sw.name, self.INSTALLED_ROOT_DIR))
+                dst = sw.get_first_child(class_type=Destination)
+                if dst is None:
+                    dst = Destination()
+                    if sw.tran_type.upper() == "IPS":
+                        image = Image(self.INSTALLED_ROOT_DIR, image_action)
+                        img_type = ImType("full")
+                        image.insert_children(img_type)
+                        dst.insert_children(image)
+                        image_action = AbstractIPS.EXISTING
+                    else:
+                        directory = Dir(self.INSTALLED_ROOT_DIR)
+                        dst.insert_children(directory)
+                    sw.insert_children(dst)
+                    # Next images are use_existing, not create.
+                else:
+                    raise RuntimeError(
+                        "Unexpected destination in software node: %s" % \
+                        (sw.name))
+
+                # Register a Transfer checkpoint suitable for the selected
+                # Software node
+                ckpt_info = create_checkpoint(sw)
+                if ckpt_info is not None:
+                    self.logger.debug("Adding Transfer Checkpoint: "
+                        "%s, %s, %s" % ckpt_info)
+                    self.engine.register_checkpoint(*ckpt_info)
+
+            # Register ICT Checkpoints
+            #=========================
+            # 1. Initialize SMF Repository
+            self.engine.register_checkpoint("initialize-smf",
+                "solaris_install.ict.initialize_smf",
+                "InitializeSMF", args=None, kwargs=None)
+
+            # 2. Boot Configuration
+            self.engine.register_checkpoint("boot-configuration",
+                "solaris_install.boot.boot",
+                "SystemBootMenu", args=None, kwargs=None)
+
+            # 3. Update dumpadm / Dump Configuration
+            self.engine.register_checkpoint("update-dump-adm",
+                "solaris_install.ict.update_dumpadm",
+                "UpdateDumpAdm", args=None, kwargs=None)
+
+            # 4. Setup Swap in Vfstab
+            self.engine.register_checkpoint("setup-swap",
+                "solaris_install.ict.setup_swap",
+                "SetupSwap", args=None, kwargs=None)
+
+            # 5. Set Flush IPS Content Flag
+            self.engine.register_checkpoint("set-flush-ips-content-cache",
+                "solaris_install.ict.ips",
+                "SetFlushContentCache", args=None, kwargs=None)
+
+            # 6. Device Configuration / Create Device Namespace
+            self.engine.register_checkpoint("device-config",
+                "solaris_install.ict.device_config",
+                "DeviceConfig", args=None, kwargs=None)
+
+            # 7. Transfer System Configuration To BE / ApplyStsConfig
+            self.engine.register_checkpoint("apply-sysconfig",
+                "solaris_install.ict.apply_sysconfig",
+                "ApplySysConfig", args=None, kwargs=None)
+
+            # 8. Boot Archive
+            self.engine.register_checkpoint("boot-archive",
+                "solaris_install.ict.boot_archive",
+                "BootArchive", args=None, kwargs=None)
+
+            # 9. Transfer Files to New BE
+            self.add_transfer_files()
+            self.engine.register_checkpoint(self.TRANSFER_FILES_CHECKPOINT,
+                "solaris_install.ict.transfer_files",
+                "TransferFiles", args=None, kwargs=None)
+
+            # 10. CreateSnapshot before reboot
+            self.engine.register_checkpoint("create-snapshot",
+                "solaris_install.ict.create_snapshot",
+                "CreateSnapshot", args=None, kwargs=None)
+
+        except Exception as ex:
+            self.logger.debug(
+                "An exception occurred registering checkpoints: %s\n%s" %
+                (str(ex), traceback.format_exc()))
+            return False
+
+        return True
+
+    def add_transfer_files(self):
+        """
+            Create DataObjectDict dictionary containing src/dest
+            pairs for files that are to be transferred to the new
+            boot environment.
+        """
+        # Check for existence of transfer-ai-files data object dictionary,
+        # insert if not found
+        tf_doc_dict = None
+        tf_doc_dict = self.doc.volatile.get_first_child( \
+            name=self.TRANSFER_FILES_CHECKPOINT)
+
+        if tf_doc_dict is None:
+            # Initialize dictionary in DOC
+            tf_dict = dict()
+            tf_doc_dict = DataObjectDict(self.TRANSFER_FILES_CHECKPOINT,
+                tf_dict)
+            self.doc.volatile.insert_children(tf_doc_dict)
+        else:
+            tf_dict = tf_doc_dict.data_dict
+
+        # If using dmm, transfer script and derived manifest
+        if self.derived_script:
+            dm = self.doc.volatile.get_first_child(
+                    name=DERIVED_MANIFEST_DATA)
+            if dm is not None and dm.script is not None:
+                tf_dict[dm.script] = \
+                    post_install_logs_path('derived/manifest_script')
+
+            mp = self.doc.volatile.get_first_child(name=MANIFEST_PARSER_DATA)
+            if mp is not None and mp.manifest is not None:
+                tf_dict[mp.manifest] = \
+                    post_install_logs_path('derived/manifest.xml')
+
+        # Transfer smf logs
+        tf_dict['/var/svc/log/application-auto-installer:default.log'] = \
+            post_install_logs_path('application-auto-installer:default.log')
+        tf_dict['/var/svc/log/application-manifest-locator:default.log'] = \
+            post_install_logs_path('application-manifest-locator:default.log')
+
+        # Transfer default manifest
+        tf_dict[system_temp_path('ai.xml')] = post_install_logs_path('ai.xml')
+
+        # Transfer AI Service Discovery Log
+        tf_dict[system_temp_path('ai_sd_log')] = \
+            post_install_logs_path('ai_sd_log')
+
+        # Transfer /var/adm/messages
+        tf_dict['/var/adm/messages'] = post_install_logs_path('messages')
+
+        # Possibly copy contents of ApplicationData.work_dir, however
+        # for standard AI install, this is /system/volatile, so not feasible
+        # However for zones install, it would make sense as work_dir in
+        # that scenario would be unique to each AI instance and would have
+        # limited number of files.
+        # e.g.
+        #  dest = post.install_logs_path(\
+        #      os.path.basename(self._app_data.work_dir))
+        #  tf_dict[self._app_data.work_dir] = dest
+
+
+class AIScreenFormatter(logging.Formatter):
+    """ AI-Specific Formatter class. Suppresses traceback printing to
+    the screen by overloading the format() method.
+
+    Checks if log message is MAX_INT (Progress Log) or Normal log and
+    formats message appropriately.
+    """
+
+    def __init__(self, fmt=None, datefmt=None, hide_progress=True):
+        """Initialize formatter class.
+
+           Consume hide_progress boolean for local processing.
+        """
+        self.hide_progress = hide_progress
+
+        logging.Formatter.__init__(self, fmt, datefmt)
+
+    def format(self, record):
+        """ Overload method to prevent the traceback from being printed.
+        """
+        record.message = record.getMessage()
+        record.asctime = self.formatTime(record, self.datefmt)
+
+        formatted_str = ""
+        fmt = None
+
+        if self.hide_progress:
+            # Don't output progress information for -l option
+            if record.levelno != MAX_INT:
+                fmt = "%(message)s"
+        else:
+            if record.levelno == MAX_INT:
+                fmt = "%(asctime)-11s %(progress)s%% %(message)s"
+            else:
+                fmt = "%(asctime)-11s %(message)s"
+
+        if fmt is not None:
+            formatted_str = fmt % record.__dict__
+
+        return formatted_str
+
+
+class AIProgressHandler(ProgressHandler):
+    """ AI-Specific ProgressHandler. """
+    def __init__(self, logger, hostname=None, portno=None,
+                 skip_console_msg=False):
+        if hostname is not None:
+            self.hostname = hostname
+        else:
+            self.hostname = 'localhost'
+
+        self.engine_skt = None
+        self.server_up = False
+        self.logger = logger
+        self.skip_console_msg = skip_console_msg
+
+        # Get a port number
+        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+        if portno is not None:
+            self.portno = portno
+            try:
+                self.skt.bind((self.hostname, self.portno))
+                self.skt.listen(5)
+            except socket.error:
+                self.logger.error("AIProgressHandler init failed")
+                self.logger.debug("%s" % (traceback.format_exc()))
+                return None
+        else:
+            random.seed()
+            # Continue looping until skt.listen(5) does not cause socket.error
+            while True:
+                try:
+                    # Get a random port between 10000 and 30000
+                    self.portno = random.randint(10000, 30000)
+                    self.skt.bind((self.hostname, self.portno))
+                    self.skt.listen(5)
+                    break
+                except socket.error, msg:
+                    self.skt.close()
+                    self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                    continue
+
+        ProgressHandler.__init__(self, self.hostname, self.portno)
+
+    def start_progress_server(self):
+        """ Starts the socket server stream to receive progress messages. """
+        if not self.server_up:
+            self.logger.debug("Starting up Progress Handler")
+            self.server_up = True
+            self.engine_skt, address = self.skt.accept()
+            thread.start_new_thread(self.progress_server,
+                (self.progress_receiver, ))
+            time.sleep(1)
+
+    def stop_progress_server(self):
+        """ Stop the socket server stream. """
+        if self.server_up:
+            self.server_up = False
+            self.logger.debug("Shutting down Progress Handler")
+
+    def progress_server(self, cb):
+        """ Actual spawned progress_server process. """
+        try:
+            while self.server_up:
+                percentage, msg = self.parse_progress_msg(self.engine_skt, cb)
+            self.engine_skt.close()
+        except Exception, ex:
+            self.logger.error("Progress Server Error")
+            self.logger.debug("%s" % (str(ex)))
+
+    @staticmethod
+    def parse_progress_msg(skt, cb):
+        """Parse the messages sent by the client."""
+        total_len = 0
+        total_data = list()
+        size = sys.maxint
+        size_data = sock_data = ''
+        recv_size = 8192
+        percent = None
+        msg = None
+
+        while total_len < size:
+            sock_data = skt.recv(recv_size)
+            if not total_data:
+                if len(sock_data) > 4:
+                    size_data += sock_data
+                    size = struct.unpack('@i', size_data[:4])[0]
+                    recv_size = size
+                    if recv_size > 524288:
+                        recv_size = 524288
+                    total_data.append(size_data[4:])
+                else:
+                    size_data += sock_data
+            else:
+                total_data.append(sock_data)
+            total_len = sum([len(i) for i in total_data])
+            message = ''.join(total_data)
+            if message:
+                # This is a callback function that sends the message to
+                # the receiver
+                cb(message)
+                percent, msg = message.split(' ', 1)
+            break
+        return percent, msg
+
+    def progress_receiver(self, msg):
+        """Receive a message, show on screen and/or console"""
+
+        # Default to showing on stdout
+        print "%s" % (msg)
+
+        if not self.skip_console_msg and not users_on_console():
+            # Also log to console if no-one is logged in there.
+            try:
+                with open("/dev/sysmsg", "w+") as fh:
+                    fh.write("%s\n" % (msg))
+            except IOError:
+                # Quietly fail - can't log or we cause a repeating loop
+                pass
--- a/usr/src/cmd/auto-install/auto_parse.c	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1670 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
- */
-
-#include <fcntl.h>
-#include <libintl.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <unistd.h>
-#include <locale.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#include "auto_install.h"
-
-static PyObject *manifest_serv_obj;
-static char *manifest_filename;
-
-/*
- * Function to execute shell commands in a thread-safe manner. Output from
- * stdout is captured in install log file.
- *
- * Parameters:
- *	cmd - the command to execute
- *
- * Return:
- *	-1 if popen() failed, otherwise exit code returned by command
- *
- * Status:
- *	private
- */
-static int
-ai_exec_cmd(char *cmd)
-{
-	FILE	*p;
-	char	buf[MAX_SHELLCMD_LEN];
-
-	auto_debug_print(AUTO_DBGLVL_INFO, "exec cmd: %s\n", cmd);
-
-	if ((p = popen(cmd, "r")) == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Could not execute following command: %s.\n", cmd);
-
-		return (-1);
-	}
-
-	/*
-	 * capture stdout for debugging purposes
-	 */
-
-	while (fgets(buf, sizeof (buf), p) != NULL)
-		auto_debug_print(AUTO_DBGLVL_ERR, " %s", buf);
-
-	return (WEXITSTATUS(pclose(p)));
-}
-
-/*
- * Dump errors found during syntactic validation of AI manifest -
- * repeat the xmllint(1) call made on the Python side to capture the
- * stdout and stderr.  xmllint will be called with following parameters:
- *
- * /usr/bin/xmllint --noout --dtdvalid <schema> --dtdattr <manifest> 2>&1
- *
- * Returns
- * 	-1  - failed to dump syntactic errors
- *	>=0 - exit code from xmllint(1M)
- */
-static int
-dump_ai_manifest_errors(char *manifest, char *schema)
-{
-	char	*cmd;
-	size_t	cmd_ln;
-	int	ret;
-	char *dtd_xmllint =
-	    "/usr/bin/xmllint --noout --dtdvalid %s --dtdattr %s 2>&1";
-
-	/* calculate size of command string */
-	cmd_ln = snprintf(NULL, 0, dtd_xmllint, schema, manifest);
-	cmd = (char *)malloc(cmd_ln + 1);
-
-	if (cmd == NULL) {
-		auto_debug_print(AUTO_DBGLVL_ERR, "malloc() failed\n");
-
-		return (-1);
-	}
-
-	(void) snprintf(cmd, cmd_ln + 1, dtd_xmllint, schema, manifest);
-
-	ret = ai_exec_cmd(cmd);
-
-	/*
-	 * The validation is expected to fail - command returns
-	 * with non-zero exit code - log the exit code.
-	 *
-	 */
-
-	auto_debug_print(AUTO_DBGLVL_ERR,
-	    "xmllint(1M) returned with exit code %d\n", ret);
-
-	free(cmd);
-	return (ret);
-}
-
-/*
- * Translate size units from manifest into auto_size_units_t values.
- *
- * Defaults to AI_SIZE_UNITS_MEGABYTES if no value given or value
- * not recognized.
- *
- * Returns
- *  auto_size_units_t
- */
-static auto_size_units_t
-get_size_units(char *p_str)
-{
-	if ((p_str == NULL) || (! strlen(p_str))) {
-		return (AI_SIZE_UNITS_MEGABYTES);
-	}
-
-	switch (p_str[0]) {
-		case 's':
-		case 'S':
-			return (AI_SIZE_UNITS_SECTORS);
-		case 'g':
-		case 'G':
-			return (AI_SIZE_UNITS_GIGABYTES);
-		case 't':
-		case 'T':
-			return (AI_SIZE_UNITS_TERABYTES);
-		case 'm':
-		case 'M':
-		default:
-			return (AI_SIZE_UNITS_MEGABYTES);
-	}
-}
-
-/*
- * Convert size from one unit of measurement to another.
- *
- * Supported units are the auto_size_units_t enumeration:
- *  AI_SIZE_UNITS_SECTORS
- *  AI_SIZE_UNITS_MEGABYTES
- *  AI_SIZE_UNITS_GIGABYTES
- *  AI_SIZE_UNITS_TERABYTES
- * If either from_units or to_units params are not recognized, disk_size is
- * returned unaltered.
- *
- * Returns
- *  uint64_t
- */
-static uint64_t
-convert_disk_size(uint64_t disk_size, auto_size_units_t from_units,
-    auto_size_units_t to_units)
-{
-	uint64_t retval = disk_size;
-
-	switch (to_units) {
-		case AI_SIZE_UNITS_SECTORS:
-			switch (from_units) {
-				case AI_SIZE_UNITS_SECTORS:
-					retval = disk_size;
-				case AI_SIZE_UNITS_MEGABYTES:
-					retval = disk_size * MB_TO_SECTORS;
-				case AI_SIZE_UNITS_GIGABYTES:
-					retval = disk_size * GB_TO_MB *
-					    MB_TO_SECTORS;
-				case AI_SIZE_UNITS_TERABYTES:
-					retval = disk_size * TB_TO_GB *
-					    GB_TO_MB * MB_TO_SECTORS;
-			}
-		case AI_SIZE_UNITS_MEGABYTES:
-			switch (from_units) {
-				case AI_SIZE_UNITS_SECTORS:
-					retval = disk_size / MB_TO_SECTORS;
-				case AI_SIZE_UNITS_MEGABYTES:
-					retval = disk_size;
-				case AI_SIZE_UNITS_GIGABYTES:
-					retval = disk_size * GB_TO_MB;
-				case AI_SIZE_UNITS_TERABYTES:
-					retval = disk_size * TB_TO_GB *
-					    GB_TO_MB;
-			}
-		case AI_SIZE_UNITS_GIGABYTES:
-			switch (from_units) {
-				case AI_SIZE_UNITS_SECTORS:
-					retval = disk_size / GB_TO_MB /
-					    MB_TO_SECTORS;
-				case AI_SIZE_UNITS_MEGABYTES:
-					retval = disk_size / GB_TO_MB;
-				case AI_SIZE_UNITS_GIGABYTES:
-					retval = disk_size;
-				case AI_SIZE_UNITS_TERABYTES:
-					retval = disk_size * TB_TO_GB;
-			}
-		case AI_SIZE_UNITS_TERABYTES:
-			switch (from_units) {
-				case AI_SIZE_UNITS_SECTORS:
-					retval = disk_size / MB_TO_SECTORS /
-					    GB_TO_MB / TB_TO_GB;
-				case AI_SIZE_UNITS_MEGABYTES:
-					retval = disk_size / GB_TO_MB /
-					    TB_TO_GB;
-				case AI_SIZE_UNITS_GIGABYTES:
-					retval = disk_size / TB_TO_GB;
-				case AI_SIZE_UNITS_TERABYTES:
-					retval = disk_size;
-			}
-	}
-
-	return (retval);
-}
-
-/*
- * Create the manifest data image in memory.  (Does not validate it.)
- *
- * Import the manifest into an in-memory tree
- * that can subsequently be queried for the various
- * attributes. A handle to the in-memory tree is stored
- * as a ManifestServ object pointed to by manifest_serv_obj.
- *
- * The manifest filename is saved for later use, in manifest_filename.
- *
- * Note that this function must be called before anything else which
- * references manifest_serv_obj or manifest_filename in this module.
- *
- * Returns
- * 	AUTO_VALID_MANIFEST if it's a valid manifest
- * 	AUTO_INVALID_MANIFEST if it's an invalid manifest
- */
-int
-ai_create_manifest_image(char *filename)
-{
-	/*
-	 * If the manifest_serv_obj is set it means that
-	 * the manifest has already been validated and
-	 * a server object created for it
-	 */
-	if (manifest_serv_obj != NULL)
-		return (AUTO_VALID_MANIFEST);
-
-	manifest_filename = NULL;
-	manifest_serv_obj = ai_create_manifestserv(filename);
-	if (manifest_serv_obj != NULL) {
-		manifest_filename = strdup(filename);
-		return (AUTO_VALID_MANIFEST);
-	}
-
-	auto_log_print(gettext("Failure to create manifest data in memory.\n"));
-	return (AUTO_INVALID_MANIFEST);
-}
-
-/*
- * Validate the manifest syntactically as well as
- * semantically.
- *
- * As part of the validation process, fill in the
- * defaults for the attributes that aren't specified.
- *
- * Returns
- * 	AUTO_VALID_MANIFEST if it's a valid manifest
- * 	AUTO_INVALID_MANIFEST if it's an invalid manifest
- */
-int
-ai_setup_manifest_image()
-{
-	if (ai_setup_manifestserv(manifest_serv_obj) == AUTO_INSTALL_SUCCESS) {
-		return (AUTO_VALID_MANIFEST);
-	}
-
-	/*
-	 * if the validation process failed, capture output of syntactic
-	 * validation in log file
-	 */
-	auto_log_print(gettext("Syntactic validation of the manifest failed "
-	    "with following errors\n"));
-
-	if (dump_ai_manifest_errors(
-	    manifest_filename, AI_MANIFEST_SCHEMA) == -1) {
-		auto_log_print(gettext("Failed to obtain result of syntactic "
-		    "validation\n"));
-	}
-
-	return (AUTO_INVALID_MANIFEST);
-}
-
-void
-ai_teardown_manifest_state()
-{
-	if (manifest_serv_obj != NULL)
-		(void) ai_destroy_manifestserv(manifest_serv_obj);
-}
-
-char **
-ai_get_manifest_values(char *path, int *len)
-{
-	if (manifest_serv_obj == NULL) {
-		auto_debug_print(AUTO_DBGLVL_INFO, "manifestserv must be "
-		    "initialized before values can be retrieved\n");
-		return (NULL);
-	}
-
-	return (ai_lookup_manifest_values(manifest_serv_obj, path, len));
-}
-
-/*
- * Free memory allocated by ai_get_manifest_values().
- */
-void
-ai_free_manifest_values(char **value_list)
-{
-	ai_free_manifest_value_list(value_list);
-}
-
-/*
- * ai_get_manifest_element_value() - return value given xml element
- */
-char *
-ai_get_manifest_element_value(char *element)
-{
-	int len = 0;
-	char **value;
-	char *evalue;
-
-	value = ai_get_manifest_values(element, &len);
-
-	/*
-	 * Return the value and free the pointer
-	 */
-	if (len > 0) {
-		evalue = *value;
-		free(value);
-		return (evalue);
-	}
-	return (NULL);
-}
-
-/*
- * get_manifest_element_array() - return list of values given xml element
- */
-static char **
-get_manifest_element_array(char *element)
-{
-	int len = 0;
-	char **value;
-
-	value = ai_get_manifest_values(element, &len);
-
-	if (len > 0)
-		return (value);
-	return (NULL);
-}
-
-/*
- * Retrieve the target disk information
- *
- * If illegal values, return AUTO_INSTALL_FAILURE
- * else return AUTO_INSTALL_SUCCESS
- */
-int
-ai_get_manifest_disk_info(auto_disk_info *adi)
-{
-	char *p;
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DISK_KEYWORD);
-	if (p != NULL)
-		(void) strncpy(adi->diskkeyword, p, sizeof (adi->diskkeyword));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_NAME);
-	if (p != NULL)
-		(void) strncpy(adi->diskname, p, sizeof (adi->diskname));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_TYPE);
-	if (p != NULL)
-		(void) strncpy(adi->disktype, p, sizeof (adi->disktype));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_VENDOR);
-	if (p != NULL)
-		(void) strncpy(adi->diskvendor, p, sizeof (adi->diskvendor));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_SELECT_VOLUME_NAME);
-	if (p != NULL)
-		(void) strlcpy(adi->diskvolname, p, sizeof (adi->diskvolname));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_SELECT_DEVICE_ID);
-	if (p != NULL)
-		(void) strlcpy(adi->diskdevid, p, sizeof (adi->diskdevid));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_SELECT_DEVICE_PATH);
-	if (p != NULL)
-		(void) strlcpy(adi->diskdevicepath, p,
-		    sizeof (adi->diskdevicepath));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_SIZE);
-	if (p != NULL) {
-		char *endptr;
-		uint64_t disk_size;
-		auto_size_units_t size_units;
-
-		errno = 0;
-
-		/* Get the numerical portion of the size value */
-		disk_size = (uint64_t)strtoull(p, &endptr, 0);
-
-		if (errno == 0 && endptr != p) {
-			/*
-			 * Get the units portion of the size val and then
-			 * convert the size from given units into number
-			 * of disk sectors.
-			 */
-			size_units = get_size_units(endptr);
-			adi->disksize = convert_disk_size(disk_size, size_units,
-			    AI_SIZE_UNITS_SECTORS);
-
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Requested target size [%s] converted "
-			    "to [%lld] sectors\n",
-			    p, adi->disksize);
-		} else {
-			auto_log_print(
-			    "Invalid target device size specified: [%s]",
-			    p);
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	p = ai_get_manifest_element_value(
-	    AIM_TARGET_DEVICE_USE_SOLARIS_PARTITION);
-	if (p != NULL) {
-#ifdef	__sparc
-		auto_log_print("Warning: ignoring manifest element "
-		    "partition action='use_existing' on SPARC\n");
-#else
-		/*
-		 * In this Schema, a partition with attribute
-		 * action="use_existing" corresponds to
-		 * target_device_use_solaris_partition="true"
-		 * in the previous schema.
-		 */
-		(void) strncpy(adi->diskusepart, "true",
-		    sizeof (adi->diskusepart));
-#endif
-	}
-
-	p = ai_get_manifest_element_value(
-	    AIM_TARGET_DEVICE_INSTALL_SLICE_NUMBER);
-	if (p != NULL) {
-		int install_slice_number;
-
-		if (sscanf(p, "%d", &install_slice_number) > 0)
-			adi->install_slice_number =
-			    (uint8_t)install_slice_number;
-	}
-
-	/*
-	 * iSCSI target information
-	 */
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_ISCSI_TARGET_NAME);
-	if (p != NULL)
-		(void) strncpy(adi->diskiscsi.name, p,
-		    sizeof (adi->diskiscsi.name));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_ISCSI_TARGET_IP);
-	if (p != NULL)
-		(void) strncpy(adi->diskiscsi.ip, p,
-		    sizeof (adi->diskiscsi.ip));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_ISCSI_TARGET_LUN);
-	if (p != NULL)
-		(void) strncpy(adi->diskiscsi.lun, p,
-		    sizeof (adi->diskiscsi.lun));
-
-	p = ai_get_manifest_element_value(AIM_TARGET_DEVICE_ISCSI_TARGET_PORT);
-	if (p != NULL)
-		adi->diskiscsi.port = strtoll(p, NULL, 0);
-
-	p = ai_get_manifest_element_value(
-	    AIM_TARGET_DEVICE_ISCSI_PARAMETER_SOURCE);
-	if (p == NULL)
-		adi->diskiscsi.parm_src = AI_ISCSI_PARM_SRC_MANIFEST;
-	else {
-		if (strcasecmp(p, "manifest") == 0)
-			adi->diskiscsi.parm_src = AI_ISCSI_PARM_SRC_MANIFEST;
-		else if (strcasecmp(p, "dhcp") == 0)
-			adi->diskiscsi.parm_src = AI_ISCSI_PARM_SRC_DHCP;
-		else {
-			auto_log_print("Invalid iSCSI parameter source "
-			    "specified. Tag="
-			    AIM_TARGET_DEVICE_ISCSI_PARAMETER_SOURCE "\n");
-			auto_log_print("Value=%s\n", p);
-			auto_log_print("Possible values: DHCP, MANIFEST "
-			    "(default)\n");
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	/* Debug - print disk info out to log */
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "Disk info from Manifest:\n");
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskkeyword\t\t\t: [%s]\n", adi->diskkeyword);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskname\t\t\t: [%s]\n", adi->diskname);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdisktype\t\t\t: [%s]\n", adi->disktype);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskvendor\t\t\t: [%s]\n", adi->diskvendor);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskvolname\t\t\t: [%s]\n", adi->diskvolname);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskdevid\t\t\t: [%s]\n", adi->diskdevid);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskdevicepath\t\t: [%s]\n", adi->diskdevicepath);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdisksize\t\t\t: [%d]\n", adi->disksize);
-#ifndef	__sparc
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskusepart\t\t\t: [%s]\n", adi->diskusepart);
-#endif
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskiscsi.name\t\t: [%s]\n", adi->diskiscsi.name);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskiscsi.ip\t\t: [%s]\n", adi->diskiscsi.ip);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskiscsi.port\t\t: [%d]\n", adi->diskiscsi.port);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskiscsi.lun\t\t: [%s]\n", adi->diskiscsi.lun);
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tdiskiscsi.parm_src\t: [%d] (= %s)\n", adi->diskiscsi.parm_src,
-	    adi->diskiscsi.parm_src ? "DHCP" : "MANIFEST");
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tinstall_slice_num.\t: [%d]\n", adi->install_slice_number);
-
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * Retrieve the device swap request information
- *
- * If illegal values, return AUTO_INSTALL_FAILURE
- * else return AUTO_INSTALL_SUCCESS.
- * Existence of these manifest items is optional.
- */
-int
-ai_get_manifest_swap_device_info(auto_swap_device_info *adsi)
-{
-	char *p;
-
-	adsi->swap_size = -1;
-	p = ai_get_manifest_element_value(AIM_SWAP_SIZE);
-	if (p != NULL) {
-		char *endptr;
-		int32_t swap_size;
-		auto_size_units_t size_units;
-
-		errno = 0;
-
-		/* Get the numerical portion of the size value */
-		swap_size = (int32_t)strtol(p, &endptr, 0);
-
-		if (errno == 0 && endptr != p) {
-			/*
-			 * Get the units portion of the size val and
-			 * then convert the size from given units into MB.
-			 */
-			size_units = get_size_units(endptr);
-			adsi->swap_size = (int32_t)convert_disk_size(swap_size,
-			    size_units,
-			    AI_SIZE_UNITS_MEGABYTES);
-
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Requested swap size [%s] converted to [%d] MB\n",
-			    p, adsi->swap_size);
-		} else {
-			adsi->swap_size = 0;
-			auto_log_print("Invalid swap size "
-			    "specified. Tag="
-			    AIM_SWAP_SIZE "\n");
-			auto_log_print("Value=%s\n", p);
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * Retrieve the device dump request information
- *
- * If illegal values, return AUTO_INSTALL_FAILURE
- * else return AUTO_INSTALL_SUCCESS.
- * Existence of these manifest items is optional.
- */
-int
-ai_get_manifest_dump_device_info(auto_dump_device_info *addi)
-{
-	char *p;
-
-	addi->dump_size = -1;
-	p = ai_get_manifest_element_value(AIM_DUMP_SIZE);
-	if (p != NULL) {
-		char *endptr;
-		int32_t dump_size;
-		auto_size_units_t size_units;
-
-		errno = 0;
-
-		/* Get the numerical portion of the size value */
-		dump_size = (int32_t)strtol(p, &endptr, 0);
-
-		if (errno == 0 && endptr != p) {
-			/*
-			 * Get the units portion of the size val and
-			 * then convert the size from given units into MB.
-			 */
-			size_units = get_size_units(endptr);
-			addi->dump_size = (int32_t)convert_disk_size(dump_size,
-			    size_units,
-			    AI_SIZE_UNITS_MEGABYTES);
-
-			auto_debug_print(AUTO_DBGLVL_INFO,
-			    "Requested dump size [%s] converted to [%d] MB\n",
-			    p, addi->dump_size);
-		} else {
-			addi->dump_size = 0;
-			auto_log_print("Invalid dump device size "
-			    "specified. Tag="
-			    AIM_DUMP_SIZE "\n");
-			auto_log_print("Value=%s\n", p);
-			return (AUTO_INSTALL_FAILURE);
-		}
-	}
-
-	return (AUTO_INSTALL_SUCCESS);
-}
-
-/*
- * Create a partition info struct and populate it with details
- * from the manifest matching the specified tags (enhanced
- * nodepaths).
- *
- * pstatus - return status pointer, must point to valid storage
- *		If no problems in validating partition info,
- *		set to zero, otherwise set to non-zero value
- *
- * This function allocates memory for an auto_partition_info
- * struct. The caller MUST free this memory.
- */
-static auto_partition_info *
-get_partition_by_tags(char *number_tag, char *action_tag,
-    char *start_tag, char *size_tag, char *type_tag, int *pstatus)
-{
-	auto_partition_info *api;
-	char *p;
-	char *endptr;
-
-	api = calloc(sizeof (auto_partition_info), 1);
-	if (api == NULL)
-		return (NULL);
-
-	/* Get the name (number) for this partition */
-	p = ai_get_manifest_element_value(number_tag);
-	if (p != NULL) {
-		errno = 0;
-		api->partition_number = (int) strtoul(p, &endptr, 10);
-		if (errno != 0 || endptr == p) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Partition name in manifest (%s) is "
-			    "not a valid value.\n",
-			    p);
-			*pstatus = 1;
-			free(api);
-			errno = 0;
-			return (NULL);
-		}
-	}
-
-	/* Get the action for this partition */
-	p = ai_get_manifest_element_value(action_tag);
-	if (p != NULL)
-		(void) strlcpy(api->partition_action, p,
-		    sizeof (api->partition_action));
-
-	/*
-	 * Get the start_sector for this partition
-	 *
-	 * set default for starting sector (unspecified)
-	 * stored as unsigned in C, * but signed in XML
-	 * so that -1 can be used in default value manifest
-	 * to tell AI to find best location when starting sector not specified
-	 * see om_create_partition()
-	 */
-	api->partition_start_sector = (uint64_t)-1LL;
-	p = ai_get_manifest_element_value(start_tag);
-	if (p != NULL) {
-		api->partition_start_sector =
-		    (uint64_t)strtoll(p, NULL, 0);
-	}
-
-	/*
-	 * Get the size (value + units) for this partition.
-	 * This is only used for "create" action.
-	 */
-	if (strcmp(api->partition_action, "create") == 0) {
-		p = ai_get_manifest_element_value(size_tag);
-		if (p != NULL) {
-			errno = 0;
-
-			/* Get the numerical portion of the size value */
-			api->partition_size = strtoull(p, &endptr, 0);
-
-			if (errno == 0 && endptr != p) {
-				/* Get the units portion of the size value */
-				api->partition_size_units =
-				    get_size_units(endptr);
-			} else {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Partition size in manifest (%s) is "
-				    "not a valid value.\n",
-				    p);
-				*pstatus = 1;
-				free(api);
-				errno = 0;
-				return (NULL);
-			}
-		} else {
-			/*
-			 * Default to 0mb.  This is not strictly necessary,
-			 * as both these values correspond to 0, which was
-			 * the value they were already set to when the struct
-			 * was calloc()ed.
-			 */
-			api->partition_size = (uint64_t) 0;
-			api->partition_size_units = AI_SIZE_UNITS_MEGABYTES;
-		}
-	}
-
-	/* Get the filesystem type for this partition */
-	p = ai_get_manifest_element_value(type_tag);
-	if (p != NULL) {
-		/* allow some common partition type names */
-		if (strcasecmp(p, "SOLARIS") == 0) {
-			api->partition_type = SUNIXOS2;
-			auto_log_print(
-			    "New Solaris2 partition requested\n");
-		} else if (strcasecmp(p, "DOS16") == 0) {
-			api->partition_type = DOSOS16;
-			auto_log_print(
-			    "New 16-bit DOS partition requested\n");
-		} else if (strcasecmp(p, "FAT32") == 0) {
-			api->partition_type = FDISK_WINDOWS;
-			auto_log_print(
-			    "New FAT32 partition requested\n");
-		} else if (strcasecmp(p, "DOSEXT") == 0) {
-			api->partition_type = EXTDOS;
-			auto_log_print(
-			    "New DOS extended partition requested\n");
-		} else if (strcasecmp(p, "DOSEXTLBA") == 0) {
-			api->partition_type = FDISK_EXTLBA;
-			auto_log_print(
-			    "New DOS extended LBA partition requested"
-			    "\n");
-		} else {
-            /*
-             * Use partition type number, eg "191" to
-             * represent a Solaris partition.
-             */
-			char *endptr;
-
-			errno = 0;
-			api->partition_type =
-			    strtoull(p, &endptr, 0);
-			if (errno != 0 || endptr == p) {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Partition type in manifest (%s) is "
-				    "not a valid number or partition type.\n",
-				    p);
-				*pstatus = 1;
-				free(api);
-				errno = 0;
-				return (NULL);
-			}
-		}
-	}
-
-	/*
-	 * Determine if this is a logical partition
-	 * This is inferred from the partition number.  Numbers of
-	 * 5 or greater imply the partition must be logical.
-	 */
-	if (api->partition_number >= 5) {
-		api->partition_is_logical = B_TRUE;
-	}
-
-	return (api);
-}
-
-/*
- * Retrieve the information about the partitions
- * that need to be configured
- *
- * pstatus - return status pointer, must point to valid storage
- *	If no problems in validating partition info,
- *		set to zero, otherwise set to non-zero value
- *
- * This function allocates memory for an array
- * of auto_partition_info. The caller MUST free this memory
- */
-auto_partition_info *
-ai_get_manifest_partition_info(int *pstatus)
-{
-	auto_partition_info *ret_api;
-	auto_partition_info *api;
-	int i, j;
-	int actions_len = 0;
-	int numbered_len = 0;
-	int unnumbered_len = 0;
-	char **partition_actions;
-	char **numbered_partitions;
-	char **unnumbered_partitions;
-	char *p;
-	char number_tag[MAXPATHLEN];
-	char action_tag[MAXPATHLEN];
-	char start_tag[MAXPATHLEN];
-	char size_tag[MAXPATHLEN];
-	char type_tag[MAXPATHLEN];
-
-	*pstatus = 0;	/* assume no parsing errors */
-
-	/*
-	 * The name (number) is not mandatory for partitions, but if a partition
-	 * does not have a name then it must have the action 'use_existing'.
-	 * There can only be one 'use_existing' partition specified and it may
-	 * or may not be named.
-	 * We will first see if there is an un-named 'use_existing' partition
-	 * in the manifest and if so, fetch its details.  Then we will fetch
-	 * details for all the named partitions, using name+action as a unique
-	 * key.
-	 */
-
-	unnumbered_partitions = ai_get_manifest_values(
-	    AIM_USE_EXISTING_PARTITIONS, &unnumbered_len);
-	numbered_partitions = ai_get_manifest_values(
-	    AIM_NUMBERED_PARTITIONS, &numbered_len);
-
-	if (unnumbered_partitions == NULL) {
-		/* ai_get_manifest_values sets len to -1 if none found */
-		unnumbered_len = 0;
-	} else {
-		if (unnumbered_len > 1) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Only one 'use_existing' partition is permitted, "
-			    "%d were specified.\n", unnumbered_len);
-			*pstatus = 1;
-			return (NULL);
-		}
-
-		p = ai_get_manifest_element_value(
-		    AIM_UNNUMBERED_PARTITION_NUMBER);
-		if (p != NULL) {
-			/*
-			 * There is one 'use_existing' partition but a
-			 * name (number) was specified with it, so it
-			 * will be handled along with numbered
-			 * partitions - no need for specical handling here.
-			 */
-			unnumbered_len = 0;
-		}
-	}
-
-	if (numbered_partitions == NULL) {
-		/* ai_get_manifest_values sets len to -1 if none found */
-		numbered_len = 0;
-	}
-
-	if ((unnumbered_len + numbered_len) == 0)
-		return (NULL);
-
-	/* len+1 -- '1' for the NULL entry */
-	ret_api = calloc(sizeof (auto_partition_info),
-	    numbered_len + unnumbered_len + 1);
-	if (ret_api == NULL)
-		return (NULL);
-
-	if (unnumbered_len) {
-		/*
-		 * We have exactly one partition whose action is 'use_existing'
-		 * which does not have a name (number) specified.  We need
-		 * to fetch its details seperately from the numbered partitions.
-		 */
-		(void) snprintf(number_tag, sizeof (number_tag),
-		    AIM_UNNUMBERED_PARTITION_NUMBER);
-		(void) snprintf(action_tag, sizeof (action_tag),
-		    AIM_UNNUMBERED_PARTITION_ACTION);
-		(void) snprintf(start_tag, sizeof (start_tag),
-		    AIM_UNNUMBERED_PARTITION_START_SECTOR);
-		(void) snprintf(size_tag, sizeof (size_tag),
-		    AIM_UNNUMBERED_PARTITION_SIZE);
-		(void) snprintf(type_tag, sizeof (type_tag),
-		    AIM_UNNUMBERED_PARTITION_TYPE);
-
-		api = get_partition_by_tags(number_tag, action_tag,
-		    start_tag, size_tag, type_tag, pstatus);
-
-		if (api == NULL) {
-			free(unnumbered_partitions);
-			free(ret_api);
-			return (NULL);
-		}
-
-		(void) memcpy(ret_api, api, sizeof (auto_partition_info));
-		free(api);
-
-		free(unnumbered_partitions);
-	}
-
-	if (numbered_len) {
-		partition_actions = ai_get_manifest_values(
-		    AIM_PARTITION_ACTIONS, &actions_len);
-
-		if (partition_actions == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Error fetching partition actions.\n");
-			*pstatus = 1;
-			free(numbered_partitions);
-			free(ret_api);
-			return (NULL);
-		}
-
-		if (unnumbered_len) {
-			/*
-			 * Remove the unnamed 'use_existing' partition from
-			 * partition_actions.
-			 */
-			for (i = 0; i < actions_len; i++) {
-				if (strcmp(partition_actions[i],
-				    "use_existing") == 0) {
-					/*
-					 * Shuffle the remaining items up
-					 * one position.
-					 */
-					for (j = i; j < (actions_len - 1);
-					    j++) {
-						partition_actions[j] =
-						    partition_actions[j+1];
-					}
-					partition_actions[actions_len] = NULL;
-					actions_len--;
-				}
-			}
-		}
-
-		if (numbered_len != actions_len) {
-			if (numbered_len < actions_len) {
-				/*
-				 * If this mismatch occurs, there must have
-				 * been an unnamed partion whose action is
-				 * not 'use_existing'.
-				 */
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Invalid unnamed partition specified in "
-				    "manifest. Only one unnamed partition "
-				    "allowed, whose action must be "
-				    "'use_existing'.\n");
-				*pstatus = 1;
-				free(numbered_partitions);
-				free(partition_actions);
-				free(ret_api);
-				return (NULL);
-			} else {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Error matching partition actions to "
-				    "names.\n");
-				*pstatus = 1;
-				free(numbered_partitions);
-				free(partition_actions);
-				free(ret_api);
-				return (NULL);
-			}
-		}
-
-		/*
-		 * One or more numbered partitions have been specified.
-		 * Fetch the necessary details for each.
-		 */
-		for (i = 0; i < numbered_len; i++) {
-			(void) snprintf(number_tag, sizeof (number_tag),
-			    AIM_NUMBERED_PARTITION_NUMBER,
-			    numbered_partitions[i], partition_actions[i]);
-			(void) snprintf(action_tag, sizeof (action_tag),
-			    AIM_NUMBERED_PARTITION_ACTION,
-			    numbered_partitions[i], partition_actions[i]);
-			(void) snprintf(start_tag, sizeof (start_tag),
-			    AIM_NUMBERED_PARTITION_START_SECTOR,
-			    numbered_partitions[i], partition_actions[i]);
-			(void) snprintf(size_tag, sizeof (size_tag),
-			    AIM_NUMBERED_PARTITION_SIZE,
-			    numbered_partitions[i], partition_actions[i]);
-			(void) snprintf(type_tag, sizeof (type_tag),
-			    AIM_NUMBERED_PARTITION_TYPE,
-			    numbered_partitions[i], partition_actions[i]);
-
-			api = get_partition_by_tags(number_tag, action_tag,
-			    start_tag, size_tag, type_tag, pstatus);
-
-			if (api == NULL) {
-				free(numbered_partitions);
-				free(ret_api);
-				return (NULL);
-			}
-
-			(void) memcpy((ret_api + unnumbered_len + i),
-			    api, sizeof (auto_partition_info));
-			free(api);
-		}
-
-		free(numbered_partitions);
-	}
-
-	/* Debug - print partition info out to log */
-	api = ret_api;
-	for (; api->partition_action[0] != '\0'; api++) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Partition details from Manifest:\n");
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_action\t\t: [%s]\n",
-		    api->partition_action);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_number\t\t: [%d]\n",
-		    api->partition_number);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_start_sector\t: [%lld]\n",
-		    api->partition_start_sector);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_size\t\t\t: [%lld]\n",
-		    api->partition_size);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_type\t\t\t: [%d]\n",
-		    api->partition_type);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_size_units\t: [%d] (= %s)\n",
-		    (int)api->partition_size_units,
-		    CONVERT_UNITS_TO_TEXT(api->partition_size_units));
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tpartition_is_logical\t: [%d] (= %s)\n",
-		    (int)api->partition_is_logical,
-		    api->partition_is_logical ? "true" : "false");
-	}
-
-	return (ret_api);
-}
-
-/*
- * Retrieve the vtoc slice information
- *
- * pstatus - return status pointer, must point to valid storage
- *	If no problems in validating slice info,
- *		set to zero, otherwise set to non-zero value
- *
- * This function allocates memory for an array
- * of auto_slice_info. The caller MUST free this memory
- */
-auto_slice_info *
-ai_get_manifest_slice_info(int *pstatus)
-{
-	auto_slice_info *asi;
-	auto_slice_info *tmp_asi;
-	int i, names_len = 0, actions_len = 0;
-	char *p;
-	char **slice_names;
-	char **slice_actions;
-	char tag[MAXPATHLEN];
-	char *endptr;
-
-	*pstatus = 0;	/* assume no parsing errors */
-
-	/*
-	 * The name (number) and action attributes are mandatory for slices, so
-	 * we will use these two values as the unique key for slice elements.
-	 * First we fetch all the slice numbers and actions, then we query the
-	 * manifest using these values to fetch the additional details for
-	 * each slice.
-	 */
-
-	slice_names = ai_get_manifest_values(AIM_SLICE_NUMBER, &names_len);
-
-	if (slice_names == NULL || names_len <= 0)
-		return (NULL);
-
-	slice_actions = ai_get_manifest_values(AIM_SLICE_ACTION, &actions_len);
-
-	if (actions_len != names_len) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "Error matching slice names to actions.\n");
-		*pstatus = 1;
-		free(slice_names);
-		free(slice_actions);
-		return (NULL);
-	}
-
-	/* len+1 -- '1' for end of array marker */
-	asi = calloc(sizeof (auto_slice_info), names_len + 1);
-
-	if (asi == NULL) {
-		free(slice_names);
-		free(slice_actions);
-		return (NULL);
-	}
-
-	for (i = 0; i < names_len; i++) {
-		/* Get the number for this slice */
-		(asi + i)->slice_number = atoi(slice_names[i]);
-
-		/* Get the action for this slice */
-		if (strlcpy((asi + i)->slice_action, slice_actions[i],
-		    AUTO_MAX_ACTION_LEN) >= AUTO_MAX_ACTION_LEN) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Slice action in manifest is too long.\n");
-			*pstatus = 1;
-			free(asi);
-			free(slice_names);
-			free(slice_actions);
-			return (NULL);
-		}
-
-		/* Get the size (value + units) for this slice */
-		(void) snprintf(tag, sizeof (tag), AIM_SLICE_SIZE,
-		    slice_names[i], slice_actions[i]);
-		p = ai_get_manifest_element_value(tag);
-		if (p == NULL) {
-			/*
-			 * Default to 0mb.  This is not strictly necessary,
-			 * as both these values correspond to 0, which was
-			 * the value they were already set to when the struct
-			 * was calloc()ed.
-			 */
-			(asi + i)->slice_size = (uint64_t) 0;
-			(asi + i)->slice_size_units = AI_SIZE_UNITS_MEGABYTES;
-		} else {
-			errno = 0;
-
-			/* Get the numerical portion of the size value */
-			(asi + i)->slice_size = strtoull(p, &endptr, 0);
-
-			if (errno == 0 && endptr != p) {
-				/* Get the units portion of the size value */
-				(asi + i)->slice_size_units =
-				    get_size_units(endptr);
-			} else {
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Slice size in manifest (%s) is "
-				    "not a valid number.\n",
-				    p);
-				*pstatus = 1;
-				free(asi);
-				free(slice_names);
-				free(slice_actions);
-				errno = 0;
-				return (NULL);
-			}
-		}
-
-		/*
-		 * Determine behavior for create action on existing slices.
-		 */
-		(void) snprintf(tag, sizeof (tag),
-		    AIM_SLICE_ON_EXISTING, slice_names[i], slice_actions[i]);
-		p = ai_get_manifest_element_value(tag);
-		if (p != NULL) {
-			/*
-			 * Since the slice information array is initialized
-			 * to zero, and the default enum value is also zero,
-			 * the "error" case will also be the default in the
-			 * slice information array.
-			 *
-			 * In the new schema, the slice attribute 'force'
-			 * controls this.  If force="false" (the default)
-			 * then we leave on_existing=0, which equates to
-			 * OM_ON_EXISTING_ERROR. If force="true", then we
-			 * set it to OM_ON_EXISTING_OVERWRITE.
-			 */
-			if (strcasecmp(p, "true") == 0)
-				(asi + i)->on_existing =
-				    OM_ON_EXISTING_OVERWRITE;
-		}
-	}
-
-	free(slice_names);
-	free(slice_actions);
-
-	/* Debug - print slice info out to log */
-	tmp_asi = asi;
-	for (; tmp_asi->slice_action[0] != '\0'; tmp_asi++) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Slice details from Manifest:\n");
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tslice_action\t\t: [%s]\n",
-		    tmp_asi->slice_action);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tslice_number\t\t: [%d]\n",
-		    tmp_asi->slice_number);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tslice_size\t\t\t: [%lld]\n",
-		    tmp_asi->slice_size);
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\tslice_size_units\t: [%d] (= %s)\n",
-		    (int)tmp_asi->slice_size_units,
-		    CONVERT_UNITS_TO_TEXT(tmp_asi->slice_size_units));
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "\ton_existing\t: [%d] (= %s)\n",
-		    (int)tmp_asi->on_existing,
-		    tmp_asi->on_existing ? "OVERWRITE" : "ERROR");
-	}
-
-	return (asi);
-}
-
-/*
- * Retrieve the URL for the default publisher
- */
-char *
-ai_get_manifest_default_url(int *len)
-{
-	char	**value;
-	char	*url;
-
-	value = ai_get_manifest_values(AIM_IPS_PUBLISHER_URL, len);
-
-	if (*len > 0) {
-		url = value[0];
-		free(value);
-		return (url);
-	}
-	return (NULL);
-}
-
-/*
- * Retrieve the URL(s) for the additional publisher(s)
- *
- * Default and additional (or primary and secondary) publishers
- * now use the same nodepaths, so this function repeats the same
- * search as ai_get_manifest_default_url() but the results are
- * handled differently.
- */
-char **
-ai_get_manifest_addl_url(int *len)
-{
-	char	**value;
-
-	value = ai_get_manifest_values(AIM_IPS_PUBLISHER_URL, len);
-
-	if (*len > 0) {
-		return (value);
-	}
-	return (NULL);
-}
-
-/*
- * Retrieve an publisher name from the manifest using url value
- */
-char *
-ai_get_manifest_repo_publisher(char *url)
-{
-	char	**value;
-	char	*publisher;
-	int	len;
-	char	tag[MAXPATHLEN];
-
-	(void) snprintf(tag, sizeof (tag),
-	    AIM_ADD_URL_PUBLISHER_NAME, url);
-	value = ai_get_manifest_values(tag, &len);
-
-	if (len > 0) {
-		publisher = value[0];
-		free(value);
-		return (publisher);
-	}
-	return (NULL);
-}
-
-/*
- * Retrieve the URL for an IPS repo mirrors
- */
-auto_mirror_repo_t *
-ai_get_manifest_repo_mirrors(char *url)
-{
-	int			i, len = 0;
-	char			**value;
-	char			buf[MAXPATHLEN];
-	auto_mirror_repo_t	*ptr, *tmp_ptr;
-	auto_mirror_repo_t	*mirror = NULL;
-
-	(void) snprintf(buf, sizeof (buf),
-	    AIM_ADD_URL_PUBLISHER_MIRROR, url);
-	value = ai_get_manifest_values(buf, &len);
-
-	if (len <= 0) {
-		return (NULL);
-	}
-
-	for (i = 0; i < len; i++) {
-		/*
-		 * Ignore the empty string
-		 */
-		if (strcmp(value[i], "") == 0) {
-			continue;
-		}
-		ptr = calloc(sizeof (auto_mirror_repo_t), 1);
-		if (ptr == NULL) {
-			goto get_out;
-		}
-		ptr->mirror_url = strdup(value[i]);
-		ptr->next_mirror = NULL;
-		if (mirror == NULL) {
-			mirror = ptr;
-			tmp_ptr = ptr;
-		} else {
-			tmp_ptr->next_mirror = ptr;
-			tmp_ptr = tmp_ptr->next_mirror;
-		}
-	}
-	free(value);
-	return (mirror);
-get_out:
-	free(value);
-	free_repo_mirror_list(mirror);
-	return (NULL);
-}
-
-/*
- * Collect the information about default publisher from
- * the manifest before processing them
- *
- * This function allocates memory for auto_repo_info_t and
- * the members publisher, url and mirror information.
- * The caller MUST free this memory
- */
-auto_repo_info_t *
-ai_get_default_repo_info()
-{
-	char			*p;
-	char			*current_url, *default_url;
-	int			num_url;
-	auto_repo_info_t 	*repo, *default_repo;
-
-	default_repo = NULL;
-
-	/*
-	 * Get the url of the default publisher
-	 */
-	current_url = ai_get_manifest_default_url(&num_url);
-	if (current_url == NULL) {
-		/*
-		 * If the publisher wasn't specified in the manifest,
-		 * provide a default value.
-		 */
-		current_url = AIM_FALLBACK_PUBLISHER_URL;
-	}
-
-	repo = calloc(sizeof (auto_repo_info_t), 1);
-	if (repo == NULL) {
-		return (NULL);
-	}
-
-	/*
-	 * Save the value before calling another ai_get_manifest_*()
-	 */
-	default_url = strdup(current_url);
-	p = ai_get_manifest_repo_publisher(default_url);
-	if (p == NULL) {
-		/*
-		 * If the primary publisher URL is AIM_FALLBACK_PUBLISHER_URL
-		 * and no name was specified, then provide a default value.
-		 * For all other URLs, if a name is not specified for the
-		 * publisher, then it is an error.
-		 */
-		if (strcasecmp(current_url, AIM_FALLBACK_PUBLISHER_URL) == 0)
-			p = AIM_FALLBACK_PUBLISHER_NAME;
-		else
-			goto get_out;
-	}
-	repo->publisher = strdup(p);
-	repo->url = strdup(default_url);
-	if (repo->publisher == NULL || repo->url == NULL) {
-		goto get_out;
-	}
-
-	/*
-	 * get the mirrors for this publishers
-	 */
-	repo->mirror_repo =
-	    ai_get_manifest_repo_mirrors(default_url);
-	repo->next_repo = NULL;
-	default_repo = repo;
-
-	free(default_url);
-	return (default_repo);
-get_out:
-	free(default_url);
-	if (repo != NULL)  {
-		free(repo->publisher);
-		free(repo->url);
-		free(repo);
-	}
-	return (NULL);
-}
-
-/*
- * Automated Installer allows specifying more than one additional
- * publishers. Collect all the additional publishers from
- * the manifest before processing them
- *
- * This function allocates memory for auto_repo_info_t and
- * the members publisher, url and mirror information.
- * The caller MUST free this memory
- */
-auto_repo_info_t *
-ai_get_additional_repo_info()
-{
-	char			*p;
-	char			**urls;
-	int			i,  num_url;
-	auto_repo_info_t 	*repo, *tmp_repo, *addl_repo;
-
-	addl_repo = NULL;
-	tmp_repo = NULL;
-
-	/*
-	 * This function will return one url per publisher
-	 * num_url contains the number of publishers
-	 */
-	urls = ai_get_manifest_addl_url(&num_url);
-	if (urls == NULL)
-		return (NULL);
-
-	/*
-	 * We start iterating through the urls at index 1
-	 * instead of index 0, because the first url returned
-	 * is the primary publisher.  All subsequent urls are
-	 * secondary, or additional, publishers, which is what
-	 * we want here.
-	 * Allocate space and save the urls because the next
-	 * call to ai_get_manifest_*() will overwrite them
-	 */
-	for (i = 1; i < num_url; i++) {
-		/*
-		 * Ignore the empty string
-		 */
-		if (strcmp(urls[i], "") == 0) {
-			continue;
-		}
-		repo = calloc(sizeof (auto_repo_info_t), 1);
-		if (repo == NULL) {
-			return (NULL);
-		}
-		repo->url = strdup(urls[i]);
-		if (repo->url == NULL) {
-			free(repo);
-			goto get_out;
-		}
-		repo->next_repo = NULL;
-
-		if (addl_repo == NULL) {
-			addl_repo = repo;
-			tmp_repo = repo;
-		} else {
-			tmp_repo->next_repo = repo;
-			tmp_repo = tmp_repo->next_repo;
-		}
-	}
-
-	/*
-	 * For each url (publisher), get the publisher name and
-	 * mirrors (if any).
-	 */
-	for (repo = addl_repo; repo != NULL; repo = repo -> next_repo) {
-		p = ai_get_manifest_repo_publisher(repo->url);
-		if (p == NULL) {
-			goto get_out;
-		}
-		repo->publisher = strdup(p);
-		if (repo->publisher == NULL) {
-			goto get_out;
-		}
-
-		/*
-		 * get the mirrors for this publisher
-		 */
-		repo->mirror_repo = ai_get_manifest_repo_mirrors(repo->url);
-	}
-
-	free(urls);
-	return (addl_repo);
-get_out:
-	free(urls);
-	free_repo_info_list(addl_repo);
-	return (NULL);
-}
-
-/*
- * Retrieve the proxy to use to access the IPS repo.
- */
-char *
-ai_get_manifest_http_proxy()
-{
-	int len = 0;
-	char **value;
-	char *proxy;
-
-	value = ai_get_manifest_values(AIM_PROXY_URL, &len);
-
-	if (len > 0) {
-		proxy = value[0];
-		free(value);
-		return (proxy);
-	}
-	return (NULL);
-}
-
-/*
- * Retrieve the list of packages to be installed
- *
- * Parameters:
- *    *num_packages - set to number of obtained packages
- *    pkg_list_tag - path to XML node which contents is to be obtained
- *
- * Returns:
- *    - array of strings specified for given tag
- *    - NULL, if tag is empty or not defined
- */
-char **
-ai_get_manifest_packages(int *num_packages_p, char *pkg_list_tag_p)
-{
-	char **package_list;
-
-	package_list = ai_get_manifest_values(pkg_list_tag_p, num_packages_p);
-
-	if (*num_packages_p > 0)
-		return (package_list);
-	return (NULL);
-}
-
-static int
-parse_property(char *str, char *keyword, char *value)
-{
-	char	*token;
-	char	*eol;
-
-	if (str == NULL) {
-		return (NULL);
-	}
-
-	if (*str == '#') {
-		return (NULL);
-	}
-	strcpy(value, "[not found]"); /* assume failure to parse value */
-
-	eol = str + strlen(str);
-	*keyword = '\0';
-	token = strtok(str, " ");
-
-	while ((token = strtok(NULL, " ")) != NULL) {
-		if (strstr(token, AUTO_PROPERTY_ROOTPASS) != NULL) {
-			strlcpy(keyword, AUTO_PROPERTY_ROOTPASS, KEYWORD_SIZE);
-			break;
-		} else if (strstr(token, AUTO_PROPERTY_TIMEZONE) != NULL) {
-			strlcpy(keyword, AUTO_PROPERTY_TIMEZONE, KEYWORD_SIZE);
-			break;
-		} else if (strstr(token, AUTO_PROPERTY_HOSTNAME) != NULL) {
-			strlcpy(keyword, AUTO_PROPERTY_HOSTNAME, KEYWORD_SIZE);
-			break;
-		}
-	}
-
-	/*
-	 * Tolerate unrecognized SMF properties, they might belong to SMF
-	 * services which will process those properties later during first boot.
-	 */
-
-	if (*keyword == '\0') {
-		return (AUTO_INSTALL_SUCCESS);
-	}
-
-	while ((token = strtok(NULL, " ")) != NULL) {
-		char	*pkeyword_value, *pbeg, *pend;
-
-		/* find keyword 'value=<something>' */
-		pkeyword_value = strstr(token, KEYWORD_VALUE);
-		if (pkeyword_value == NULL) {
-			continue;
-		}
-		/* find beginning value delimiter */
-		pbeg = strchr(pkeyword_value, '\'');
-		if (pbeg == NULL) {
-			pbeg = strchr(pkeyword_value, '\"');
-			if (pbeg == NULL) /* no starting delimiter */
-				return (AUTO_INSTALL_FAILURE);
-		}
-		if (eol > pbeg + strlen(pbeg)) /* if strtok inserted NULL */
-			*(pbeg + strlen(pbeg)) = ' '; /* restore orig delim */
-		/* find ending value delimiter */
-		pend = strchr(pbeg + 1, *pbeg);
-		if (pend == NULL) /* no ending delimiter */
-			return (AUTO_INSTALL_FAILURE);
-		*pend = '\0';
-		if (strlcpy(value, ++pbeg, VALUE_SIZE) >= VALUE_SIZE) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "SC manifest value for %s is too long (>%d bytes) "
-			    "and will be truncated to |%s|\n",
-			    keyword, VALUE_SIZE, pbeg);
-		}
-		return (AUTO_INSTALL_SUCCESS);
-	}
-	return (AUTO_INSTALL_FAILURE);
-}
-
-/*
- * Free the mirror list created while the parsing the manifest
- */
-void
-free_repo_mirror_list(auto_mirror_repo_t *mirror)
-{
-	auto_mirror_repo_t *mptr;
-	while (mirror != NULL) {
-		free(mirror->mirror_url);
-		mptr = mirror;
-		mirror = mirror->next_mirror;
-		free(mptr);
-	}
-}
-
-/*
- * Free the IPS repo list created while the parsing the manifest
- */
-void
-free_repo_info_list(auto_repo_info_t *repo)
-{
-	auto_repo_info_t  *rptr;
-
-	while (repo != NULL) {
-		free(repo->publisher);
-		free(repo->url);
-		free_repo_mirror_list(repo->mirror_repo);
-		rptr = repo;
-		repo = repo->next_repo;
-		free(rptr);
-	}
-}
--- a/usr/src/cmd/auto-install/auto_parse_manifest.c	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,347 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- */
-
-#include <Python.h>
-#include "auto_install.h"
-
-#define	AI_PARSE_MANIFEST_SCRIPT "osol_install.auto_install.ai_parse_manifest"
-#define	AI_CREATE_MANIFESTSERV "ai_create_manifestserv"
-#define	AI_SETUP_MANIFESTSERV "ai_setup_manifestserv"
-#define	AI_LOOKUP_MANIFEST_VALUES "ai_lookup_manifest_values"
-
-static PyThreadState * mainThreadState = NULL;
-static char *empty_argv[1] = { "" };
-
-/*
- * The C interface to ai_create_manifestserv (python module).
- * This function takes a manifest file and hands it off to
- * the ManifestServ. It returns the ManifestServ object thus
- * created which can then be used to lookup various paths.
- *
- * Note that the ManifestServ object created here has not been
- * validated before being returned.
- *
- * ai_destroy_manifestserv() must be called after all the
- * processing has been done to destroy the ManifestServ object
- *
- * This function can only be invoked from a single threaded
- * context.
- */
-PyObject *
-ai_create_manifestserv(char *filename)
-{
-	PyObject	*pFunc;
-	PyObject	*pName;
-	PyObject	*pArgs;
-	PyThreadState	*myThreadState;
-	PyObject	*pModule = NULL;
-	PyObject	*rv = NULL;
-	PyObject	*pRet = NULL;
-
-	if (!Py_IsInitialized()) {
-		Py_Initialize();
-
-		/*
-		 * sys.argv needs to be initialized, just in case other
-		 * modules access it.  It is not initialized automatically by
-		 * Py_Initialize().
-		 */
-		PySys_SetArgv(1, empty_argv); /* Init sys.argv[]. */
-	}
-
-	PyEval_InitThreads();
-	mainThreadState = PyThreadState_Get();
-	myThreadState = PyThreadState_New(mainThreadState->interp);
-	PyThreadState_Swap(myThreadState);
-
-	if ((pName = PyString_FromString(AI_PARSE_MANIFEST_SCRIPT)) != NULL) {
-		pModule = PyImport_Import(pName);
-		Py_DECREF(pName);
-	}
-	if (pModule != NULL) {
-		/* Load the ai_parse_manifest module */
-		pFunc = PyObject_GetAttrString(pModule, AI_CREATE_MANIFESTSERV);
-		/* pFunc is a new reference */
-		if (pFunc && PyCallable_Check(pFunc)) {
-
-			pArgs = PyTuple_New(1);
-			PyTuple_SetItem(pArgs, 0,
-			    PyString_FromString(filename));
-
-			/* Call the ai_parse_manifest */
-			pRet = PyObject_CallObject(pFunc, pArgs);
-			Py_DECREF(pArgs);
-			if ((pRet != NULL) && (!PyErr_Occurred())) {
-				/*
-				 * A reference is getting stolen here.
-				 * We intentionally don't do a DECREF
-				 * so that future calls using this object
-				 * have a valid ManifestServ object to work
-				 * with.
-				 */
-				if (pRet != Py_None)
-					rv = pRet;
-			} else {
-				PyErr_Print();
-				auto_debug_print(AUTO_DBGLVL_ERR,
-				    "Call failed: %s\n",
-				    AI_CREATE_MANIFESTSERV);
-			}
-		} else {
-			auto_debug_print(AUTO_DBGLVL_ERR, "Python function "
-			    "does not appear callable: %s\n",
-			    AI_CREATE_MANIFESTSERV);
-		}
-	}
-	Py_XDECREF(pFunc);
-	Py_XDECREF(pModule);
-	PyThreadState_Swap(mainThreadState);
-	PyThreadState_Clear(myThreadState);
-	PyThreadState_Delete(myThreadState);
-	return (rv);
-}
-
-/*
- * The C interface to ai_setup_manifestserv (python module).
- * Sets up and validates the data of a ManifestServ object.
- * Must be called after ai_create_manifestserv has set up a
- * ManifestServ object in memory.
- *
- * This function can only be invoked from a single threaded
- * context.
- */
-int
-ai_setup_manifestserv(PyObject *server_obj)
-{
-	PyObject	*pFunc;
-	PyObject	*pName;
-	PyObject	*pArgs;
-	PyThreadState	*myThreadState;
-	PyObject	*pRet;
-	PyObject	*pModule = NULL;
-	int		rval = AUTO_INSTALL_SUCCESS;
-
-	if (!Py_IsInitialized()) {
-		Py_Initialize();
-
-		/*
-		 * sys.argv needs to be initialized, just in case other
-		 * modules access it.  It is not initialized automatically by
-		 * Py_Initialize().
-		 */
-		PySys_SetArgv(1, empty_argv); /* Init sys.argv[]. */
-	}
-
-	PyEval_InitThreads();
-	mainThreadState = PyThreadState_Get();
-	myThreadState = PyThreadState_New(mainThreadState->interp);
-	PyThreadState_Swap(myThreadState);
-
-	if ((pName = PyString_FromString(AI_PARSE_MANIFEST_SCRIPT)) != NULL) {
-		pModule = PyImport_Import(pName);
-		Py_DECREF(pName);
-	}
-	if (pModule != NULL) {
-		/* Load the ai_parse_manifest module */
-		pFunc = PyObject_GetAttrString(pModule, AI_SETUP_MANIFESTSERV);
-		/* pFunc is a new reference */
-		if (pFunc && PyCallable_Check(pFunc)) {
-			pArgs = PyTuple_New(1);
-			Py_INCREF(server_obj);
-			PyTuple_SetItem(pArgs, 0, server_obj);
-
-			/* Call the ai_parse_manifest */
-			pRet = PyObject_CallObject(pFunc, pArgs);
-			rval = PyInt_AS_LONG(pRet);
-			Py_DECREF(pRet);
-			Py_DECREF(pArgs);
-		} else {
-			auto_debug_print(AUTO_DBGLVL_ERR, "Python function "
-			    "does not appear callable: %s\n",
-			    AI_SETUP_MANIFESTSERV);
-			rval = AUTO_INSTALL_FAILURE;
-		}
-	} else {
-		PyErr_Print();
-		auto_debug_print(AUTO_DBGLVL_ERR, "Call failed: %s\n",
-		    AI_SETUP_MANIFESTSERV);
-		rval = AUTO_INSTALL_FAILURE;
-	}
-	Py_XDECREF(pFunc);
-	Py_XDECREF(pModule);
-	PyThreadState_Swap(mainThreadState);
-	PyThreadState_Clear(myThreadState);
-	PyThreadState_Delete(myThreadState);
-	return (rval);
-}
-
-/*
- * Lookup a nodepath.
- *
- * The caller is responsible for freeing up
- * memory associated with the return value.
- * ai_free_manifest_values() is provided for this.
- */
-char **
-ai_lookup_manifest_values(PyObject *server_obj, char *path, int *len)
-{
-	PyObject	*pFunc;
-	PyObject	*pName;
-	PyObject 	*pArgs;
-	PyThreadState	*myThreadState;
-	PyObject 	*item;
-	char		**rv;
-	PyObject	*pModule = NULL;
-
-	if (!Py_IsInitialized()) {
-		Py_Initialize();
-
-		/*
-		 * sys.argv needs to be initialized, just in case other
-		 * modules access it.  It is not initialized automatically by
-		 * Py_Initialize().
-		 */
-		PySys_SetArgv(1, empty_argv); /* Init sys.argv[]. */
-	}
-
-	PyEval_InitThreads();
-	mainThreadState = PyThreadState_Get();
-	myThreadState = PyThreadState_New(mainThreadState->interp);
-	PyThreadState_Swap(myThreadState);
-
-	pName = PyString_FromString(AI_PARSE_MANIFEST_SCRIPT);
-	assert(pName != NULL);
-	if (pName == NULL) {
-		PyErr_Print();
-		auto_debug_print(AUTO_DBGLVL_INFO, "Call failed: %s\n",
-		    AI_LOOKUP_MANIFEST_VALUES);
-		Py_Finalize();
-		return (NULL);
-	}
-
-	pModule = PyImport_Import(pName);
-	assert(pModule != NULL);
-	if (pModule == NULL) {
-		Py_DECREF(pName);
-		PyErr_Print();
-		auto_debug_print(AUTO_DBGLVL_INFO, "Call failed: %s\n",
-		    AI_LOOKUP_MANIFEST_VALUES);
-		Py_Finalize();
-		return (NULL);
-	}
-
-	/* Load the ai_parse_manifest module */
-	pFunc = PyObject_GetAttrString(pModule, AI_LOOKUP_MANIFEST_VALUES);
-	/* pFunc is a new reference */
-	if (pFunc && PyCallable_Check(pFunc)) {
-		PyObject *pRet = NULL;
-
-
-		pArgs = PyTuple_New(2);
-		/*
-		 * INCREF server_obj as PyTuple_SetItem steals its reference.
-		 * A stolen reference here means that pArgs owns the reference,
-		 * and so when pArgs gets DECREFed, so does server_obj.
-		 * INCREF server_obj so it remains intact after DECREFing pArgs.
-		 *
-		 * Note: no INCREF is needed for the second arg, as the thing
-		 * which gets DECREFed via pArgs is an interim python object
-		 * created from a native C string.
-		 */
-		Py_INCREF(server_obj);
-		PyTuple_SetItem(pArgs, 0, server_obj);
-		PyTuple_SetItem(pArgs, 1, PyString_FromString(path));
-
-		pRet = PyObject_CallObject(pFunc, pArgs);
-		Py_DECREF(pArgs);
-		if (pRet != NULL) {
-			Py_ssize_t list_ln = PyList_Size(pRet);
-			Py_ssize_t i;
-
-			/* pass number of list elements to the caller */
-			*len = (int)list_ln;
-
-			if (list_ln > 0) {
-				rv = malloc((list_ln + 1) * sizeof (char *));
-				for (i = 0; i < list_ln; i++) {
-					item = PyList_GetItem(pRet, i);
-					rv[i] = strdup(PyString_AsString(item));
-				}
-				rv[list_ln] = NULL;
-			}
-			else
-				rv = NULL;
-			Py_DECREF(pRet);
-		} else {
-			Py_DECREF(pFunc);
-			Py_DECREF(pModule);
-			PyErr_Print();
-			auto_debug_print(AUTO_DBGLVL_INFO, "Call failed: %s\n",
-			    AI_LOOKUP_MANIFEST_VALUES);
-			rv = NULL;
-		}
-	} else {
-		assert(!PyErr_Occurred());
-		if (PyErr_Occurred())
-			PyErr_Print();
-		rv = NULL;
-	}
-	Py_XDECREF(pFunc);
-	Py_DECREF(pModule);
-	PyThreadState_Swap(mainThreadState);
-	PyThreadState_Clear(myThreadState);
-	PyThreadState_Delete(myThreadState);
-	return (rv);
-}
-
-/*
- * Free up memory associated with lists returned from ai_get_manifest_values()
- */
-void
-ai_free_manifest_value_list(char **value_list)
-{
-	char **curr;
-
-	if (value_list == NULL) {
-		return;
-	}
-
-	for (curr = value_list; *curr != NULL; curr++) {
-		free (*curr);
-	}
-
-	free(value_list);
-}
-
-/*
- * This function must be called to delete all
- * state created by ai_create_manifestserv
- */
-void
-ai_destroy_manifestserv(PyObject *server_obj)
-{
-	if (Py_IsInitialized())
-		Py_Finalize();
-}
--- a/usr/src/cmd/auto-install/auto_td.c	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,944 +0,0 @@
-/*
- * CDDL HEADER START
- *
- * The contents of this file are subject to the terms of the
- * Common Development and Distribution License (the "License").
- * You may not use this file except in compliance with the License.
- *
- * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- *
- * When distributing Covered Code, include this CDDL HEADER in each
- * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- * If applicable, add the following below this CDDL HEADER, with the
- * fields enclosed by brackets "[]" replaced with your own identifying
- * information: Portions Copyright [yyyy] [name of copyright owner]
- *
- * CDDL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- */
-
-#include <assert.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <libnvpair.h>
-#include <libintl.h>
-#include <locale.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <td_api.h>
-
-#include <auto_install.h>
-#include <orchestrator_api.h>
-
-#define	NULLCHK(ptr, alternate_text) ((ptr) == NULL ? (alternate_text) : (ptr))
-#define	TAG_IS_TRUE(tag) (strncasecmp(tag, "true", sizeof (tag)) == 0)
-#define	DISK_CRIT_SPECIFIED(crit) ((crit)[0] != '\0')
-#define	STRING_CRIT_MATCHES(crit, disk_par) (strcmp(crit, disk_par) == 0)
-
-static	boolean_t	discovery_done = B_FALSE;
-
-static boolean_t disk_type_match(const char *, om_disk_type_t);
-static disk_info_t *disk_criteria_match(disk_info_t *, auto_disk_info *);
-static disk_info_t *get_disk_info(om_handle_t handle);
-static disk_info_t *select_default_disk(disk_info_t *);
-static boolean_t disk_criteria_specified(auto_disk_info *);
-static uint64_t find_solaris_disk_size(disk_info_t *);
-static void dump_disk_criteria(auto_disk_info *adi);
-static boolean_t validate_IP(char *);
-
-static om_handle_t	handle;
-void	update_progress(om_callback_info_t *cb_data, uintptr_t app_data);
-
-/*
- * Handle target discovery callbacks from orchestrator
- */
-void
-update_progress(om_callback_info_t *cb_data, uintptr_t app_data)
-{
-	if (cb_data->curr_milestone == OM_UPGRADE_TARGET_DISCOVERY &&
-	    cb_data->percentage_done == 100) {
-		discovery_done = B_TRUE;
-	}
-}
-
-/*
- * Initiate the target discovery and wait until it is finished.
- *
- * Output:
- *	initializes module private variable 'handle' which is used to refer
- *	to the data collected by target discovery service
- *
- * Returns:
- *	 AUTO_TD_SUCCESS on success
- *	 AUTO_TD_FAILURE on failure
- */
-int
-auto_target_discovery(void)
-{
-	/*
-	 * Initiate target discovery process.
-	 * Return with failure if the process can't be started
-	 */
-	auto_log_print(gettext("Initiating Target Discovery...\n"));
-	handle = om_initiate_target_discovery(update_progress);
-
-	if (handle < 0) {
-		(void) auto_log_print(gettext("Could not start target "
-		    "discovery\n"));
-		return (AUTO_TD_FAILURE);
-	}
-
-	/*
-	 * Wait for target discovery to complete
-	 */
-	while (!discovery_done) {
-		sleep(2);
-	}
-
-	/*
-	 * Return with failure if there are no potential targets
-	 * for the installation.
-	 */
-
-	if (get_disk_info(handle) == NULL) {
-		auto_log_print(gettext("No disks found on the target"
-		    " system\n"));
-
-		return (AUTO_TD_FAILURE);
-	}
-
-	auto_log_print(gettext("Target Discovery finished successfully\n"));
-	return (AUTO_TD_SUCCESS);
-}
-
-#ifndef	__sparc
-/*
- * Get the partition information given the disk name
- */
-disk_parts_t *
-get_disk_partition_info(om_handle_t handle, char *disk_name)
-{
-	disk_parts_t	*dp;
-
-	if (disk_name == NULL) {
-		(void) auto_debug_print(AUTO_DBGLVL_INFO,
-		    "disk_name is NULL\n");
-		return (NULL);
-	}
-
-	dp = om_get_disk_partition_info(handle, disk_name);
-	if (dp == NULL) {
-		(void) auto_debug_print(AUTO_DBGLVL_INFO,
-		    "Could not find partitions for %s - Error = %d\n",
-		    disk_name, om_get_error());
-		return (NULL);
-	}
-
-	return (dp);
-}
-#endif
-
-/*
- * Try to find a target disk which matches specified criteria.
- *
- * Hierarchical set of rules is applied.
- * We stop processing them as soon as target disk is identified.
- * If particular rule is applied and matching disk is not found,
- * abort processing immediately and return with failure.
- *
- * [1] If boot disk is required, use that.
- * [2] Try to find a disk that matches criteria specified.
- * [3] If no criteria were specified in manifest, apply algorithm for
- *     selecting the default disk.
- *
- * Input:
- *	target disk criteria obtained from manifest. If not provided
- *	(set to NULL), it is assumed that c#t#d# disk name was directly
- *	specified instead of AI manifest
- *
- * Output:
- *	c#t#d# name of identified target disk
- *	    - memory allocation is done in this function. Caller is responsible
- *	      for freeing it
- *
- * Returns:
- *	 AUTO_TD_SUCCESS on success
- *	 AUTO_TD_FAILURE on failure
- */
-int
-auto_select_install_target(char **diskname, auto_disk_info *adi)
-{
-	disk_info_t	*disks, *di = NULL;
-	disk_slices_t	*ds = NULL;
-#ifndef	__sparc
-	disk_parts_t	*part = NULL;
-#endif
-	boolean_t	look_for_existing_slices = B_TRUE;
-	boolean_t	target_disk_identified = B_FALSE;
-
-	/*
-	 * check if there are potential installation targets.
-	 * There is no point to continue if there are no disks available.
-	 */
-	if ((disks = get_disk_info(handle)) == NULL) {
-		auto_log_print(gettext("No disks are available for the "
-		    "installation\n"));
-
-		return (AUTO_TD_FAILURE);
-	}
-
-	/*
-	 * If there is no AI manifest, disk name was specified directly.
-	 * In this case make sure that specified disk exists.
-	 */
-	if (adi == NULL) {
-		if ((*diskname != NULL) &&
-		    om_find_disk_by_ctd_name(disks, *diskname) != NULL)
-			return (AUTO_TD_SUCCESS);
-		else
-			return (AUTO_TD_FAILURE);
-	}
-
-	/* check if boot disk required as target for the installation */
-
-	if (strcasecmp(adi->diskkeyword, AIM_TARGET_DEVICE_BOOT_DISK) == 0) {
-		if (((di = om_get_boot_disk(disks)) == NULL) ||
-		    (di->disk_name == NULL)) {
-			auto_log_print(gettext("Boot disk specified as "
-			    "installation target, but the boot disk was not "
-			    "found\n"));
-
-			return (AUTO_TD_FAILURE);
-		}
-
-		target_disk_identified = B_TRUE;
-
-		auto_log_print(gettext("Boot disk specified as installation"
-		    " target\n"));
-	}
-
-	/*
-	 * If target disk has not been determined yet, try to find one
-	 * matching the criteria specified in AI manifest
-	 *
-	 * In case that no criteria were specified, apply algorithm for
-	 * selecting the default target disk.
-	 *
-	 */
-
-	if (!target_disk_identified) {
-		if (disk_criteria_specified(adi)) {
-			di = disk_criteria_match(disks, adi);
-
-			if (di == NULL) {
-				auto_log_print(gettext("Could not find a disk "
-				    "based on manifest criteria\n"));
-				return (AUTO_TD_FAILURE);
-			}
-
-			target_disk_identified = B_TRUE;
-		} else {
-			/*
-			 * if a disk criteria wasn't specified
-			 * try selecting a default disk
-			 */
-			di = select_default_disk(disks);
-			if (di == NULL) {
-				auto_log_print(gettext("Could not find a disk "
-				    "using default search. Specify a disk name "
-				    "or other search criteria in the "
-				    "manifest.\n"));
-				return (AUTO_TD_FAILURE);
-			}
-
-			target_disk_identified = B_TRUE;
-		}
-	}
-
-	/*
-	 * If we get to this point, target disk was successfully identified
-	 * and 'di' variable points to its disk_info_t structure.
-	 * Abort if those conditions are not met.
-	 */
-
-	assert(target_disk_identified);
-	assert(di != NULL);
-
-	/* Save the c#t#d# disk name for the consumer */
-
-	*diskname = strdup(di->disk_name);
-
-#ifndef	__sparc
-	/*
-	 * Obtain partition information for target disk
-	 */
-	part = get_disk_partition_info(handle, di->disk_name);
-
-	/*
-	 * Check whether there is a Solaris partition already there
-	 * Otherwise we will use the whole disk
-	 */
-	if (part == NULL) {
-		/*
-		 * If there is no Solaris fdisk partition,
-		 * don't bother looking for slices.
-		 */
-		look_for_existing_slices = B_FALSE;
-
-		auto_log_print(gettext("Cannot find the partitions for disk %s "
-		    "on the target system\n"), di->disk_name);
-		part = om_init_disk_partition_info(di);
-		if (part == NULL) {
-			auto_log_print(gettext("Cannot init partition info\n"));
-			return (AUTO_TD_FAILURE);
-		}
-	}
-
-	if (om_set_disk_partition_info(handle, part) != OM_SUCCESS) {
-		auto_log_print(gettext("Unable to set the disk partition "
-		    "info\n"));
-		return (AUTO_TD_FAILURE);
-	}
-#endif
-
-	/*
-	 * For x86, if we didn't find a Solaris fdisk partition,
-	 * we shouldn't bother looking for vtoc slices on the disk.
-	 * This flag is set above for this case.
-	 */
-	if (look_for_existing_slices) {
-		ds = om_get_slice_info(handle, di->disk_name);
-	}
-
-	if (ds == NULL) {
-		auto_debug_print(AUTO_DBGLVL_INFO,
-		    "no disk slice info found.\n");
-		ds = om_init_slice_info(di->disk_name);
-		if (ds == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "couldn't initialize disk slice info\n");
-			return (AUTO_TD_FAILURE);
-		}
-	}
-	if (om_set_slice_info(handle, ds) != OM_SUCCESS) {
-		auto_log_print(gettext("Unable to set the disk slice "
-		    "info\n"));
-		return (AUTO_TD_FAILURE);
-	}
-	return (AUTO_TD_SUCCESS);
-}
-
-static boolean_t
-disk_type_match(const char *disk, om_disk_type_t type)
-{
-	switch (type) {
-		case (OM_DTYPE_ATA):
-			return (strcasecmp(disk, "ATA") == 0);
-		case (OM_DTYPE_SCSI):
-			return (strcasecmp(disk, "SCSI") == 0);
-		case (OM_DTYPE_FIBRE):
-			return (strcasecmp(disk, "FIBER") == 0 ||
-			    strcasecmp(disk, "FIBRE") == 0);
-		case (OM_DTYPE_USB):
-			return (strcasecmp(disk, "USB") == 0);
-		case (OM_DTYPE_SATA):
-			return (strcasecmp(disk, "SATA") == 0);
-		case (OM_DTYPE_FIREWIRE):
-			return (strcasecmp(disk, "FIREWIRE") == 0);
-		default:
-			break;
-	}
-	return (B_FALSE);
-}
-
-/*
- * Get the information about all the disks on the system
- */
-static disk_info_t *
-get_disk_info(om_handle_t handle)
-{
-	disk_info_t	*disks;
-	int		total;
-
-	disks = om_get_disk_info(handle, &total);
-
-	if (disks == NULL || total == 0) {
-		(void) auto_debug_print(AUTO_DBGLVL_INFO,
-		    "No Disks found...\n");
-		return (NULL);
-	}
-
-	(void) auto_debug_print(AUTO_DBGLVL_INFO, "Number of disks = %d\n",
-	    total);
-	return (disks);
-}
-
-static disk_info_t *
-disk_criteria_match(disk_info_t *disks, auto_disk_info *adi)
-{
-	disk_info_t *di;
-	uint64_t find_disk_size_sec = adi->disksize;
-
-	/* Dump the list of disk criteria to be applied */
-	auto_log_print(gettext("Searching for a disk target matching the "
-	    "following criteria\n"));
-	dump_disk_criteria(adi);
-
-	for (di = disks; di != NULL; di = di->next) {
-		if (find_disk_size_sec > 0) {
-			uint64_t disk_size_sec = di->disk_size * MB_TO_SECTORS;
-
-			/*
-			 * for some reason, the disk_size_sec disk info
-			 * element is coming up zero, but disk_size element OK.
-			 * TODO: investigate - until then, use disk_size
-			 */
-			if (disk_size_sec < find_disk_size_sec) {
-				auto_log_print("Disk %s "
-				    "size %lld sectors smaller than requested "
-				    "%lld sectors\n",
-				    di->disk_name, disk_size_sec,
-				    find_disk_size_sec);
-				continue; /* disk too small */
-			}
-		}
-		if (DISK_CRIT_SPECIFIED(adi->disktype) &&
-		    !disk_type_match(adi->disktype, di->disk_type)) {
-			auto_log_print(
-			    "Disk %s type %s not requested type %s\n",
-			    di->disk_name,
-			    adi->disktype == NULL ? "(unknown)" : adi->disktype,
-			    adi->disktype);
-			continue; /* no type match */
-		}
-		if (DISK_CRIT_SPECIFIED(adi->diskvendor) &&
-		    (di->vendor == NULL ||
-		    strcasecmp(adi->diskvendor, di->vendor) != 0)) {
-			auto_log_print("Disk %s "
-			    "vendor (%s) not requested vendor %s\n",
-			    di->disk_name,
-			    NULLCHK(di->vendor, "name not available"),
-			    adi->diskvendor);
-			continue; /* vendor mismatch */
-		}
-
-		/* try to find match for c#t#d# name */
-		if (DISK_CRIT_SPECIFIED(adi->diskname) &&
-		    !STRING_CRIT_MATCHES(adi->diskname, di->disk_name)) {
-			auto_log_print(gettext("Disk %s doesn't match desired "
-			    "name %s\n"), di->disk_name, adi->diskname);
-
-			continue; /* c#t#d# name doesn't match */
-		}
-
-		/* try to find match for volume name */
-		if (DISK_CRIT_SPECIFIED(adi->diskvolname) &&
-		    (di->disk_volname == NULL ||
-		    !STRING_CRIT_MATCHES(adi->diskvolname, di->disk_volname))) {
-			if (di->disk_volname == NULL)
-				auto_log_print(gettext("Volume name not set for"
-				    " disk %s\n"), di->disk_name);
-			else
-				auto_log_print(gettext("Disk %s has volume name"
-				    " \"%s\" - doesn't match desired volume"
-				    " name\n"),
-				    di->disk_name, di->disk_volname);
-
-			continue; /* volume name doesn't match */
-		}
-
-		/* try to find match for device ID */
-		if (DISK_CRIT_SPECIFIED(adi->diskdevid) &&
-		    (di->disk_devid == NULL ||
-		    !STRING_CRIT_MATCHES(adi->diskdevid, di->disk_devid))) {
-			if (di->disk_devid == NULL)
-				auto_log_print(gettext("Device ID not available"
-				    " for disk %s\n"), di->disk_name);
-			else
-				auto_log_print(gettext("Disk %s has device ID"
-				    " \"%s\" - doesn't match desired device"
-				    " ID\n"),
-				    di->disk_name, di->disk_devid);
-
-			continue; /* device ID doesn't match */
-		}
-
-		/* try to find match for device path */
-		if (DISK_CRIT_SPECIFIED(adi->diskdevicepath) &&
-		    (di->disk_device_path == NULL ||
-		    !STRING_CRIT_MATCHES(adi->diskdevicepath,
-		    di->disk_device_path))) {
-			if (di->disk_device_path == NULL)
-				auto_log_print(gettext("Device path not "
-				    "available for disk %s\n"), di->disk_name);
-			else
-				auto_log_print(gettext("Disk %s has device path"
-				    " \"%s\" - doesn't match desired device"
-				    " path\n"),
-				    di->disk_name, di->disk_device_path);
-
-			continue; /* device path doesn't match */
-		}
-
-#ifndef	__sparc
-		/* require a disk with a Solaris partition if specified */
-		if (TAG_IS_TRUE(adi->diskusepart)) {
-			int ipr;
-			disk_parts_t	*part;
-
-			auto_log_print(gettext("Manifest indicates that Solaris"
-			    " fdisk partition must \n"
-			    " be on the target disk prior to installation.\n"));
-
-			part = get_disk_partition_info(handle, di->disk_name);
-			if (part == NULL) {
-				auto_log_print(
-				    "Disk %s has no partition information\n",
-				    di->disk_name);
-				continue;
-			}
-			for (ipr = 0; ipr < OM_NUMPART; ipr++)
-				if (part->pinfo[ipr].partition_type == SUNIXOS2)
-					break;
-			free(part);
-			if (ipr >= OM_NUMPART) { /* no Solaris partition */
-				auto_log_print(
-				    "Disk %s has no Solaris2 partitions\n",
-				    di->disk_name);
-				continue;
-			}
-		}
-#endif
-		break;
-	}
-	if (di == NULL) {
-		char *errmsg = gettext(
-		    "No disk that matches all manifest criteria was found\n");
-
-		printf(errmsg);
-		auto_log_print(errmsg);
-	} else
-		auto_log_print(gettext(
-		    "Disk %s selected based on manifest criteria\n"),
-		    di->disk_name);
-	return (di);
-}
-
-/*
- * This function selects a default disk to do
- * installation on.
- *
- * The first disk that has a Solaris2 partition
- * defined and has a big enough slice0 is selected.
- *
- * Returns:
- * 	disk_info_t for the matching disk
- * 	NULL if no matching disk is found
- */
-static disk_info_t *
-select_default_disk(disk_info_t *disks)
-{
-	disk_info_t *di;
-	uint64_t min_disk_size_MB;
-	uint64_t min_disk_size_secs;
-
-	/* get the minimum recommended disk size in sectors */
-	min_disk_size_MB = om_get_recommended_size(NULL, NULL);
-	auto_log_print(
-	    "Checking any disks for minimum recommended size of %lld MB\n",
-	    min_disk_size_MB);
-	min_disk_size_secs = min_disk_size_MB * MB_TO_SECTORS;
-	for (di = disks; di != NULL; di = di->next) {
-		uint64_t disk_size_secs = find_solaris_disk_size(di);
-
-		auto_log_print("Disk %s size listed as %lld MB\n",
-		    di->disk_name, disk_size_secs / MB_TO_SECTORS);
-		if (disk_size_secs >= min_disk_size_secs) {
-			auto_log_print("Default disk selected is %s\n",
-			    di->disk_name);
-			return (di);
-		}
-		/* disk is not big enough, so move on to the next disk */
-	}
-	auto_debug_print(AUTO_DBGLVL_INFO, "No default disk was selected\n");
-	return (NULL);
-}
-
-/*
- * get disk (SPARC) or partition (x86) size in sectors from target information
- */
-static uint64_t
-find_solaris_disk_size(disk_info_t *di)
-{
-	return (di->disk_size_sec > 0 ? di->disk_size_sec:
-	    ((uint64_t)di->disk_size * MB_TO_SECTORS));
-}
-
-/*
- * Check to see if the disk criteria was specified
- * at all in the manifest.
- *
- * Returns:
- * 	B_TRUE if any of the disk selection criteria were specified
- *	B_FALSE otherwise
- */
-static boolean_t
-disk_criteria_specified(auto_disk_info *adi)
-{
-	if (adi->diskkeyword[0] != '\0')
-		return (B_TRUE);
-	if (adi->diskname[0] != '\0')
-		return (B_TRUE);
-	if (adi->diskvolname[0] != '\0')
-		return (B_TRUE);
-	if (adi->diskdevicepath[0] != '\0')
-		return (B_TRUE);
-	if (adi->diskdevid[0] != '\0')
-		return (B_TRUE);
-	if (adi->disktype[0] != '\0')
-		return (B_TRUE);
-	if (adi->diskvendor[0] != '\0')
-		return (B_TRUE);
-	if (adi->disksize != 0)
-		return (B_TRUE);
-#ifndef	__sparc
-	if (adi->diskusepart[0] != '\0')
-		return (B_TRUE);
-#endif
-	return (B_FALSE);
-}
-
-/*
- * mount iSCSI target according to iSCSI target parameters obtained from:
- * - AI manifest, or if not found, from
- * - DHCP Rootpath parameter from network interface
- * adi - contains manifest info
- * devnam - output NULL-terminated device name for the iSCSI boot target
- *	if an iSCSI boot target is identified without fatal error
- * devnamlen - max length of devnam
- *
- * Returns -1 if fatal error encountered, 0 otherwise
- * Returns iSCSI boot disk name at devnam if found (max length devnamlen)
- *	without encountering fatal error.
- *
- * Attempts to mount using libima with iSCSI initiator
- * If iSCSI parameters are provided, target must be mounted,
- *	otherwise considered fatal
- * If no ISCSI parameters are found, return 0 with no change at devnam
- */
-int
-mount_iscsi_target_if_requested(auto_disk_info *adi, char *devnam,
-    int devnamlen)
-{
-	td_errno_t ret;
-	nvlist_t *attrs;
-	char *pdevnam;
-	char *diskiscsi_name;
-	char *diskiscsi_ip;
-	uint32_t diskiscsi_port = 0;
-	char *diskiscsi_lun = "";
-
-	/*
-	 * If the source of iSCSI boot parameters is set to DHCP,
-	 * check DHCP Rootpath and fetch iSCSI boot parameters if provided
-	 */
-	if (adi->diskiscsi.parm_src == AI_ISCSI_PARM_SRC_DHCP) {
-		FILE	*pipe_fp;
-		char	rootpath[MAXPATHLEN] = "";
-		char	cmd[] = "/sbin/dhcpinfo Rootpath";
-		char	*p;
-		int	ret;
-		char	*diskiscsi_porta;
-
-		auto_log_print("Manifest indicates that the source of iSCSI "
-		    "boot parameters is DHCP parameter Rootpath\n");
-		/*
-		 * check DHCP Rootpath for iSCSI target parameters
-		 */
-		errno = 0;
-		if ((pipe_fp = popen(cmd, "r")) == NULL) {
-			auto_log_print("Could not check DHCP info for iSCSI "
-			    "boot client, since piping command %s failed.\n",
-			    cmd);
-			return (0);
-		}
-		if (fgets(rootpath, sizeof (rootpath), pipe_fp) != NULL) {
-			/* remove the trailing new-line */
-			rootpath[strlen(rootpath) - 1] = '\0';
-		}
-		if ((ret = pclose(pipe_fp)) != 0)
-			auto_log_print("Error in command to check DHCP "
-			    "for iSCSI boot client. Command:%s\n", cmd);
-		/*
-		 * if problem, diagnose dhcpinfo exit status
-		 *	log and return - not critical
-		 */
-		switch (ret) {
-		case 0:	/* success */
-			break;
-		case 2:
-			auto_log_print("DHCP error (no client daemon, "
-			    "interface failed to configure, "
-			    "or no satisfactory DHCP responses received)\n");
-			return (0);
-		case 3:
-			auto_log_print("Bad arguments\n");
-			return (0);
-		case 4:
-			auto_log_print("Timeout\n");
-			return (0);
-		case 6:
-			auto_log_print("System error\n");
-			return (0);
-		case -1:
-		default:
-			auto_log_print("Unknown error %d errno %d\n",
-			    ret, errno);
-			return (0);
-		}
-		auto_log_print("DHCP Rootpath=%s\n", rootpath);
-		/*
-		 * RFC 4173 defines format of iSCSI boot target in Rootpath
-		 *	Rootpath=iscsi:<IP>:<protocol>:<port>:<LUN>:<target>
-		 */
-		if (rootpath[0] == '\0' ||
-		    strncmp(rootpath, "iscsi:", strlen("iscsi:")) != 0) {
-			goto iscsi_rootpath_usage;
-		}
-		/*
-		 * parse iSCSI Rootpath - parse errors will fail install
-		 */
-		if ((p = strchr(rootpath, ':')) == NULL)
-			goto iscsi_rootpath_usage;
-		*p++ = '\0';
-		diskiscsi_ip = p;	/* IP */
-		if ((p = strchr(p, ':')) == NULL)
-			goto iscsi_rootpath_usage;
-		*p++ = '\0'; /* protocol ignored - assumed TCP */
-		if ((p = strchr(p, ':')) == NULL)
-			goto iscsi_rootpath_usage;
-		*p++ = '\0';
-		diskiscsi_porta = p;	/* port */
-		if ((p = strchr(p, ':')) == NULL)
-			goto iscsi_rootpath_usage;
-		*p++ = '\0';
-		diskiscsi_lun = p;	/* LUN */
-		if ((p = strchr(p, ':')) == NULL)
-			goto iscsi_rootpath_usage;
-		*p++ = '\0';
-		diskiscsi_name = p;	/* target name */
-		if (*diskiscsi_name == '\0' || *diskiscsi_ip == '\0') {
-			auto_log_print("DHCP Rootpath must specify both iSCSI "
-			    "IP and target name.\n");
-			goto iscsi_rootpath_usage;
-		}
-		if (*diskiscsi_porta != '\0')
-			diskiscsi_port = atol(diskiscsi_porta);
-		auto_log_print("iSCSI boot target parameters will be taken "
-		    "from DHCP Rootpath.\n");
-	} else {
-		/*
-		 * use manifest information for iSCSI target parameters
-		 */
-
-		/*
-		 * if neither iSCSI name nor IP were found in manifest
-		 * then no iSCSI
-		 */
-		if (adi->diskiscsi.name[0] == '\0' ||
-		    adi->diskiscsi.ip[0] == '\0') {
-			return (0);
-		}
-
-		/*
-		 * iSCSI target name and IP address are both mandatory if
-		 * manifest is used to specify iSCSI target parameters
-		 * Providing one * and not * the other will be considered a
-		 * serious error.
-		 */
-		if (adi->diskiscsi.name[0] == '\0' ^
-		    adi->diskiscsi.ip[0] == '\0') {
-			auto_log_print("iSCSI target %s not specified\n",
-			    adi->diskiscsi.name[0] == '\0' ?
-			    "name" : "IP address");
-			auto_log_print("Manifest must specify both iSCSI IP "
-			    "and target name if either one is specified.\n");
-			return (-1);
-		}
-		diskiscsi_name = adi->diskiscsi.name;
-		diskiscsi_ip = adi->diskiscsi.ip;
-		diskiscsi_port = adi->diskiscsi.port;
-		diskiscsi_lun = adi->diskiscsi.lun;
-		auto_log_print("iSCSI boot target parameters will be taken "
-		    "from AI manifest.\n");
-	}
-	if (!validate_IP(diskiscsi_ip)) {
-		auto_log_print("iSCSI target IP address format is bad.\n");
-		auto_debug_print(AUTO_DBGLVL_INFO, "\tIP address=%s\n",
-		    diskiscsi_ip);
-		auto_log_print("\tIPv4 address must be numeric in the form: "
-		    "NNN.NNN.NNN.NNN where NNN is a decimal number.\n");
-		return (-1);
-	}
-	if (diskiscsi_port > 0xFFFF) {
-		auto_log_print("iSCSI port (%d) is too large. "
-		    "Maximum value is 65535.\n", diskiscsi_port);
-		return (-1);
-	}
-	auto_debug_print(AUTO_DBGLVL_INFO, "iSCSI target parameters:\n");
-	auto_debug_print(AUTO_DBGLVL_INFO,
-	    "\tTarget name=%s\n", diskiscsi_name);
-	auto_debug_print(AUTO_DBGLVL_INFO, "\tIP address=%s\n", diskiscsi_ip);
-	auto_debug_print(AUTO_DBGLVL_INFO, "\tport=%lu\n", diskiscsi_port);
-	auto_debug_print(AUTO_DBGLVL_INFO, "\tLUN=%s\n", diskiscsi_lun);
-
-	/*
-	 * allocate TD attributes
-	 */
-	if (nvlist_alloc(&attrs, NV_UNIQUE_NAME, 0) != 0) {
-		auto_log_print("Could not create target nvlist.\n");
-		return (-1);
-	}
-	if (nvlist_add_uint32(attrs, TD_ATTR_TARGET_TYPE,
-	    TD_TARGET_TYPE_ISCSI_STATIC_CONFIG) != 0) {
-		auto_log_print("iSCSI target type could not be added. \n");
-		goto error_exit;
-	}
-	if (nvlist_add_string(attrs, TD_ISCSI_ATTR_NAME, diskiscsi_name) != 0) {
-		auto_log_print("iSCSI target name could not be added. \n");
-		goto error_exit;
-	}
-	if (nvlist_add_string(attrs, TD_ISCSI_ATTR_IP, diskiscsi_ip) != 0) {
-		auto_log_print("iSCSI target IP could not be added. \n");
-		goto error_exit;
-	}
-	if (nvlist_add_uint32(attrs, TD_ISCSI_ATTR_PORT, diskiscsi_port) != 0) {
-		auto_log_print("iSCSI target port could not be added. \n");
-		goto error_exit;
-	}
-	if (nvlist_add_string(attrs, TD_ISCSI_ATTR_LUN, diskiscsi_lun) != 0) {
-		auto_log_print("iSCSI target LUN could not be added. \n");
-		goto error_exit;
-	}
-
-	ret = td_target_search(attrs);
-	if (ret != TD_E_SUCCESS) {
-		auto_debug_print(AUTO_DBGLVL_ERR, "iSCSI static "
-		    "configuration failed\n");
-		goto error_exit;
-	}
-	if (nvlist_lookup_string(attrs, TD_ISCSI_ATTR_DEVICE_NAME,
-	    &pdevnam)) {
-		auto_debug_print(AUTO_DBGLVL_ERR,
-		    "iSCSI target device not found.\n");
-		goto error_exit;
-	} else {
-		/*
-		 * convert "/dev/rdsk/cNtNtNs2" to "cNtNdN" in place
-		 */
-		char *ps;
-		char mydevname[MAXNAMELEN];
-		char rdsk[] = "/dev/rdsk/";
-
-		if (strncmp(pdevnam, rdsk, strlen(rdsk) != 0)) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "Failed to parse device name for iSCSI:%s\n",
-			    pdevnam);
-			goto error_exit;
-		}
-		(void) strlcpy(mydevname, &pdevnam[strlen(rdsk)],
-		    sizeof (mydevname));
-		/*
-		 * locate 's' in 'ctds' format
-		 */
-		if ((ps = strrchr(mydevname, 's')) == NULL) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "discovered iSCSI device name %s is not a valid "
-			    "slice name and will be considered invalid.\n",
-			    mydevname);
-			goto error_exit;
-		}
-		*ps = '\0';	/* trim slice number designation */
-		if (strlcpy(devnam, mydevname, devnamlen) >= devnamlen) {
-			auto_debug_print(AUTO_DBGLVL_ERR,
-			    "iSCSI device name buffer overflow=%s\n",
-			    mydevname);
-			goto error_exit;
-		}
-	}
-	auto_log_print("iSCSI boot target mounted: device %s\n", devnam);
-	auto_log_print("iSCSI boot target name %s IP %s\n", diskiscsi_name,
-	    diskiscsi_ip);
-	nvlist_free(attrs);
-	return (0);
-error_exit:
-	nvlist_free(attrs);
-	return (-1);
-iscsi_rootpath_usage:
-	auto_log_print("iSCSI target parameter parsing error.\n");
-	auto_log_print("Check DHCP server Rootpath syntax against RFC 4173.\n");
-	auto_log_print("Rootpath=iscsi:<IP>:<protocol>:<port>:<LUN>:<target>"
-	    "\n");
-	return (-1);
-}
-
-/*
- * validate NULL-terminated string as IPv4 address
- * Return B_TRUE if valid, B_FALSE otherwise
- */
-static boolean_t
-validate_IP(char *p)
-{
-	unsigned short val;
-	char c;
-
-	errno = 0;
-	if (sscanf(p, "%3hd.%3hd.%3hd.%3hd%c", /* IPv4 */
-	    &val, &val, &val, &val, &c) == 4 && errno == 0)
-		return (B_TRUE);
-	return (B_FALSE);
-}
-
-/*
- * Print target disk criteria specified in the manifest.
- *
- * Returns:
- * 	none
- */
-static void
-dump_disk_criteria(auto_disk_info *adi)
-{
-	if (adi->diskkeyword[0] != '\0')
-		auto_log_print(gettext(" Disk keyword:"
-		    " %s\n"), adi->diskkeyword);
-	if (adi->diskname[0] != '\0')
-		auto_log_print(gettext(" Disk name: %s\n"), adi->diskname);
-	if (adi->diskvolname[0] != '\0')
-		auto_log_print(gettext(" Volume name: %s\n"), adi->diskvolname);
-	if (adi->diskdevid[0] != '\0')
-		auto_log_print(gettext(" Device ID: %s\n"), adi->diskdevid);
-	if (adi->diskdevicepath[0] != '\0')
-		auto_log_print(gettext(" Device path: %s\n"),
-		    adi->diskdevicepath);
-	if (adi->disktype[0] != '\0')
-		auto_log_print(gettext(" Type: %s\n"), adi->disktype);
-	if (adi->diskvendor[0] != '\0')
-		auto_log_print(gettext(" Vendor: %s\n"), adi->diskvendor);
-	if (adi->disksize != 0)
-		auto_log_print(gettext(" Size [MiB]: %llu\n"),
-		    adi->disksize / MB_TO_SECTORS);
-#ifndef	__sparc
-	if (adi->diskusepart[0] != '\0')
-		auto_log_print(gettext(" Use existing Solaris partition:"
-		    " %s\n"), adi->diskusepart);
-#endif
-
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/checkpoints/Makefile	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,60 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+include ../../Makefile.cmd
+
+all:=		TARGET=	all
+clean:=		TARGET=	clean
+clobber:=	TARGET=	clobber
+install:=	TARGET=	install
+
+PYMODULES=	__init__.py \
+		dmm.py \
+		target_selection.py
+
+PYCMODULES=	$(PYMODULES:%.py=%.pyc)
+
+ROOTPYMODULES=	$(PYMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAICHKPT)/%)
+
+ROOTPYCMODULES=	$(PYCMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAICHKPT)/%)
+
+all:		python
+
+clean:
+	$(RM) $(PYCMODULES)
+
+clobber: clean
+
+install: all .WAIT $(ROOTPYTHONVENDOR) \
+	$(ROOTPYTHONVENDORSOLINSTALL) \
+	$(ROOTPYTHONVENDORSOLINSTALLAI) \
+	$(ROOTPYTHONVENDORSOLINSTALLAICHKPT) \
+	$(ROOTPYMODULES) \
+	$(ROOTPYCMODULES)
+
+python:
+		$(PYTHON) -m compileall -l $(@D)
+
+FRC:
+
+include ../../Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/checkpoints/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+'''Checkpoints specific to Automated Installer'''
--- a/usr/src/cmd/auto-install/checkpoints/dmm.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/checkpoints/dmm.py	Wed May 25 21:26:43 2011 +0100
@@ -46,7 +46,8 @@
 
 from lxml import etree
 
-from solaris_install import Popen, KSH93_SHEBANG, PYTHON_SHEBANG
+from solaris_install import Popen, KSH93_SHEBANG, PYTHON_SHEBANG, \
+    system_temp_path
 from solaris_install.auto_install.ai_get_manifest import AICriteriaNetwork
 from solaris_install.data_object import DataObject
 from solaris_install.engine import InstallEngine
@@ -64,10 +65,9 @@
 MSG_HEADER = "Derived Manifest Module: "
 
 # Other configurables
-WORKING_DIR = "/system/volatile/"
-DEFAULT_AIM_MANIFEST = WORKING_DIR + "manifest.xml"
-DEFAULT_AIM_LOGFILE = WORKING_DIR + "aimanifest_log"
-INSTALL_CONF = WORKING_DIR + "install.conf"
+DEFAULT_AIM_MANIFEST = system_temp_path("manifest.xml")
+DEFAULT_AIM_LOGFILE = system_temp_path("aimanifest_log")
+INSTALL_CONF = system_temp_path("install.conf")
 DEFAULT_AI_SCHEMA = "/usr/share/install/ai.dtd"
 
 # Commands
@@ -201,7 +201,7 @@
 
         # Set up name of logfile aimanifest command can use.
         # This log will be collected after the script completes.
-        tempfile.tempdir = WORKING_DIR
+        tempfile.tempdir = system_temp_path()
         self.aim_logfile = tempfile.mktemp()
 
     def get_progress_estimate(self):
@@ -510,7 +510,8 @@
             raise DMMValidationError(errmsg)
 
         if ((tree.docinfo is not None) and
-            (tree.docinfo.system_url is not None)):
+            (tree.docinfo.system_url is not None) and
+            os.access(tree.docinfo.system_url, os.R_OK)):
             dtd = tree.docinfo.system_url
             self.logger.info(MSG_HEADER + "Using DTD from header of manifest.")
         else:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/checkpoints/target_selection.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,3219 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+''' target_selection.py - Select Install Target(s)
+'''
+import copy
+import os
+import os.path
+import platform
+import re
+import traceback
+
+from operator import attrgetter
+import osol_install.errsvc as errsvc
+from solaris_install.engine import InstallEngine
+from solaris_install.engine.checkpoint import AbstractCheckpoint as Checkpoint
+from solaris_install.target import Target, vdevs
+from solaris_install.target.controller import TargetController, \
+    DEFAULT_VDEV_NAME, SwapDumpGeneralError, SwapDumpSpaceError
+from solaris_install.target.logical import Logical, Zpool, Vdev, BE, Zvol, \
+    Filesystem, DatasetOptions, PoolOptions
+from solaris_install.target.physical import Disk, Partition, Slice
+from solaris_install.target.shadow.physical import ShadowPhysical
+from solaris_install.target.size import Size
+
+DISK_RE = "c\d+(?:t\d+)?d\d+"
+
+
+class SelectionError(Exception):
+    '''Error generated when any selection problems occur'''
+
+    def __init__(self, msg):
+        Exception.__init__(self)
+        self.msg = msg
+
+    def __str__(self):
+        return self.msg
+
+
+class TargetSelection(Checkpoint):
+    '''TargetSelection - Checkpoint to select install target.
+
+       This checkpoint attempts to select the install target(s) based on
+       the information provided in the discovered targets and the target
+       information provided in the AI Manifest.
+
+       If it's not possible to determine a selection, then a SelectionError
+       exception will be raised, causing the installation to fail.
+    '''
+    RAIDZ_REDUNDANCY = ['raidz', 'raidz1', 'raidz2', 'raidz3']
+    TOPLEVEL_REDUNDANCY = ['none', 'mirror']
+    TOPLEVEL_REDUNDANCY.extend(RAIDZ_REDUNDANCY)
+    INVALID_ROOT_VDEV = ['log', 'logmirror']
+    PRESERVED = ['preserve', 'use_existing']
+
+    def __init__(self, name, be_mountpoint="/a"):
+        super(TargetSelection, self).__init__(name)
+
+        # instance attributes
+        self.be_mountpoint = be_mountpoint
+        self.doc = InstallEngine.get_instance().data_object_cache
+
+        # Initialize TargetController
+        self.controller = TargetController(self.doc)
+
+        # Cache of Discovered Tree, Disks  in the DOC, will be filled in later
+        self._discovered = None
+        self._discovered_disks = list()
+        self._discovered_zpools = list()
+        self._discovered_zpool_map = dict()
+        self._remaining_zpool_map = dict()
+
+        self.__reset_maps()
+
+    def __reset_maps(self):
+        '''Reset all local map information to default values
+        '''
+        # Whether swap/dump is to be created, noswap = Don't create swap
+        self._nozpools = False
+        self._noswap = False
+        self._nodump = False
+
+        # Cache of various information useful during parsing of manifest info
+        self._root_pool = None     # Should be only one root pool
+        self._be = None            # Should be only one BE
+
+        self._is_generated_root_pool = False
+        self._root_vdev = None     # Will only be set if we create a new rpool
+                                   # due to none existing in manifest
+
+        self._swap_zvol_map = dict()  # Map of zpool:swap to Zvol object
+        self._dump_zvol = None   # Can only have one dump device
+
+        self._no_logical_disks = list()  # List of disks with no zpool or vdev
+
+        # Assuming only one target object per manifest.
+        # If supporting more than one target, these will become lists
+        # of dictionaries accessed via a target index.
+        self._zpool_map = dict()   # Map of zpool names to Zpool object
+        self._vdev_map = dict()    # Map of zpool:vdev names to Vdev objects
+        self._vdev_added_map = dict()  # Map of zpool:vdev names we added
+        self._zvol_map = dict()    # Map of zpool:zvols names to Zvol objects
+        self._fs_map = dict()      # Map of zpool:datasets to Dataset objects
+        self._pool_options = dict()  # Map of zpool names to PoolOption objects
+        self._dataset_options = dict()  # Map of zpool names to DatasetOptions
+        self._disk_map = dict()      # Map of disks to Disk objects
+
+    def __find_disk(self, disk):
+        '''Find a disk matching some criteria
+
+           Disk can be identified in the following order of preference :
+               1. disk.ctd (device name)
+               2. disk.volid (volume name)
+               3. disk.devid (device id)
+               4. disk.devpath (device path)
+               5. disk.is_boot_disk() (contains keyword "boot_disk")
+
+               6. If None of the above are specified, a disk can be
+                  identified via any/all of the three disk properties:
+                    - dev_type
+                    - dev_vendor
+                    - dev_size
+                  In this scenario, first matching disk will be returned.
+        '''
+
+        for discovered_disk in self._discovered_disks:
+            # Attempt to match ctd/volid/devpath/devid first
+            if discovered_disk.name_matches(disk):
+                return discovered_disk
+
+            # Attempt to match disk_prop, only match disk properties if
+            # If all ctd/volid/devpath/devid are None, then attempt to match
+            # on boot disk or one of the disk properties if specified
+            if disk.ctd is None and disk.volid is None and \
+               disk.devpath is None and disk.devid is None:
+
+                # Attempt to match on boot_disk
+                if disk.is_boot_disk() and discovered_disk.is_boot_disk():
+                    return discovered_disk
+
+                # Attempt to match disk_prop. Any of the properties
+                # dev_type/dev_vendor/dev_size must been specified
+                if discovered_disk.disk_prop is not None and \
+                    disk.disk_prop is not None:
+                    if discovered_disk.disk_prop.prop_matches(disk.disk_prop):
+                        return discovered_disk
+
+        return None
+
+    @staticmethod
+    def __is_iterable(obj):
+        '''Test if an object is iterable'''
+        try:
+            i = iter(obj)
+            return True
+        except TypeError:
+            return False
+
+    def __pretty_print_disk(self, disk):
+        '''Print disk identifier rather than whole disk's str()'''
+        if self.__is_iterable(disk):
+            ret_str = ""
+            for _disk in disk:
+                if len(ret_str) != 0:
+                    ret_str += ", "
+                ret_str += self.__pretty_print_disk(_disk)
+            return ret_str
+        else:
+            if isinstance(disk, Disk):
+                if disk.ctd is not None:
+                    return "%s" % disk.ctd
+                if disk.volid is not None:
+                    return "[volid='%s']" % disk.volid
+                if disk.devpath is not None:
+                    return "[devpath='%s']" % disk.devpath
+                if disk.devid is not None:
+                    return "[devid='%s']" % disk.devid
+
+        return "UNKNOWN"
+
+    def __handle_vdev(self, vdev):
+        '''Create Vdev object.
+
+           Logs the vdev being processed and returns a shallow copy
+           (copy.copy) of the manifest Vdev for the caller to insert
+           into the desired tree.
+        '''
+        self.logger.debug("Processing Vdev '%s', redundancy='%s'" %
+            (vdev.name, vdev.redundancy))
+
+        new_vdev = copy.copy(vdev)
+        return new_vdev
+
+    def __handle_filesystem(self, fs):
+        '''Create Filesystem Object.
+
+           Logs the filesystem being processed and returns a shallow
+           copy (copy.copy) of the manifest Filesystem for the caller
+           to insert into the desired tree.
+        '''
+        self.logger.debug("Processing Filesystem '%s', action='%s', "
+            "mountpoint='%s'" % (fs.name, fs.action, fs.mountpoint))
+
+        new_fs = copy.copy(fs)
+        return new_fs
+
+    def __handle_zvol(self, zvol):
+        '''Create Zvol object.
+
+           Logs the zvol being processed and returns a shallow copy
+           (copy.copy) of the manifest Zvol for the caller to insert
+           into the desired tree.
+        '''
+        self.logger.debug("Processing Zvol '%s', action='%s', use='%s'" %
+            (zvol.name, zvol.action, zvol.use))
+
+        new_zvol = copy.copy(zvol)
+        return new_zvol
+
+    def __handle_pool_options(self, pool_options):
+        '''Create PoolOptions object.
+
+           Logs the pool options being processed and returns a shallow
+           copy (copy.copy) of the manifest PoolOptions.
+        '''
+        self.logger.debug("Processing Pool Options '%s'" %
+            (pool_options.options))
+
+        new_pool_options = copy.copy(pool_options)
+        return new_pool_options
+
+    def __handle_dataset_options(self, dataset_options):
+        '''Create DatasetOption object.
+
+           Logs the dataset options being processed and returns a
+           shallow copy (copy.copy) of the manifest DatasetOptions.
+        '''
+        self.logger.debug("Processing Dataset Options '%s'" %
+            (dataset_options.options))
+
+        new_dataset_options = copy.copy(dataset_options)
+        return new_dataset_options
+
+    def __handle_be(self, be):
+        '''Create BE object, set mountpoint to that passed in
+           to target_selection init.
+
+           Returns a shallow copy (copy.copy) of the manifest BE with
+           its mountpoint overridden by self.be_mountpoint.
+        '''
+        self.logger.debug("Processing BE '%s'" % (be.name))
+
+        new_be = copy.copy(be)
+        # Override any manifest mountpoint with the one supplied to
+        # this checkpoint at construction time.
+        new_be.mountpoint = self.be_mountpoint
+
+        return new_be
+
+    def __handle_zpool(self, zpool, logical):
+        '''Process all zpool children, handling each child object type
+           and returning Zpool object
+        '''
+
+        self.logger.debug("Processing Zpool '%s', action='%s', is_root='%s',"
+            " mountpoint='%s'" % \
+            (zpool.name, zpool.action, zpool.is_root, zpool.mountpoint))
+
+        this_be = None
+
+        if zpool.name in self._zpool_map:
+            raise SelectionError("Zpool '%s' specified twice" % (zpool.name))
+
+        vdev_count = 0
+        new_zpool = copy.copy(zpool)
+        logical.insert_children(new_zpool)
+        for child in zpool.children:
+            if isinstance(child, Vdev):
+                new_vdev = self.__handle_vdev(child)
+                if new_vdev is not None:
+                    vdev_key = new_zpool.name + ":" + new_vdev.name
+                    if vdev_key not in self._vdev_map:
+                        self._vdev_map[vdev_key] = new_vdev
+                        self.logger.debug("Adding Vdev '%s' to zpool" %
+                            (new_vdev.name))
+                        new_zpool.insert_children(new_vdev)
+                        vdev_count += 1
+                    else:
+                        raise SelectionError(
+                            "Vdev '%s' specified twice in zpool '%s'" % \
+                            (new_vdev.name, new_zpool.name))
+                else:
+                    raise SelectionError("Failed to copy Vdev.")
+
+            elif isinstance(child, Filesystem):
+                new_fs = self.__handle_filesystem(child)
+                if new_fs is not None:
+                    if new_fs.action == "preserve" and \
+                        new_zpool.action not in self.PRESERVED:
+                        raise SelectionError("Filesystem '%s' cannot be "
+                            "preserved in non-preserved zpool '%s'."
+                            (new_fs.name, new_zpool.name))
+
+                    fs_key = new_zpool.name + ":" + new_fs.name
+                    # Filesystem name must be unique within each pool
+                    if fs_key not in self._fs_map and \
+                        fs_key not in self._zvol_map:
+                        self._fs_map[fs_key] = new_fs
+                        self.logger.debug("Adding Filesystem '%s' to zpool" %
+                            (new_fs.name))
+                        new_zpool.insert_children(new_fs)
+                    else:
+                        raise SelectionError(
+                            "Filesystem '%s' specified twice in zpool '%s'" % \
+                            (new_fs.name, new_zpool.name))
+                else:
+                    raise SelectionError("Failed to copy Filesystem.")
+
+            elif isinstance(child, Zvol):
+                new_zvol = self.__handle_zvol(child)
+                if new_zvol is not None:
+                    if new_zvol.action in self.PRESERVED and \
+                        new_zpool.action not in self.PRESERVED:
+                        raise SelectionError("Zvol '%s' cannot be "
+                            "preserved in non-preserved zpool '%s'."
+                            (new_zvol.name, zpool.name))
+                    zvol_key = new_zpool.name + ":" + new_zvol.name
+                    # Zvol name must be unique within each pool
+                    if zvol_key not in self._zvol_map and \
+                        zvol_key not in self._fs_map:
+                        if new_zvol.use == "swap":
+                            # Cannot specify Swap Zvol and noswap == true
+                            if self._noswap:
+                                if zpool.action in self.PRESERVED:
+                                    raise SelectionError(
+                                        "Swap zvol already exists and "
+                                        "noswap specified in manifest.")
+                                else:
+                                    raise SelectionError(
+                                        "Both swap zvol and noswap specified "
+                                        "in manifest.")
+                            self._swap_zvol_map[zvol_key] = new_zvol
+                        elif new_zvol.use == "dump":
+                            # Can only specify one Dump Zvol
+                            if self._dump_zvol is not None:
+                                raise SelectionError(
+                                    "Dump zvol specified twice.")
+
+                            # Cannot specify Dump Zvol and nodump == true
+                            if self._nodump:
+                                if zpool.action in self.PRESERVED:
+                                    raise SelectionError(
+                                        "Dump zvol already exists and "
+                                        "nodump specified in manifest.")
+                                else:
+                                    raise SelectionError(
+                                        "Both dump zvol and nodump specified "
+                                        "in manifest.")
+
+                            # Cannot delete a dump zvol
+                            if new_zvol.action == "delete":
+                                self.__raise_dump_zvol_deletion_exception()
+                            self._dump_zvol = new_zvol
+
+                        self._zvol_map[zvol_key] = new_zvol
+                        self.logger.debug("Adding Zvol '%s' to zpool" %
+                            (new_zvol.name))
+                        new_zpool.insert_children(new_zvol)
+                    else:
+                        raise SelectionError(
+                            "Zvol '%s' specified twice in zpool '%s'" % \
+                            (new_zvol.name, new_zpool.name))
+                else:
+                    raise SelectionError("Failed to copy Zvol.")
+
+            elif isinstance(child, PoolOptions):
+                new_pool_options = self.__handle_pool_options(child)
+                if new_pool_options is not None:
+                    # Can only specify one pool_options per zpool
+                    if new_zpool.name not in self._pool_options:
+                        self._pool_options[new_zpool.name] = new_pool_options
+                        self.logger.debug("Adding Pool Options '%s' to zpool"
+                            % (new_pool_options.options))
+                        new_zpool.insert_children(new_pool_options)
+                    else:
+                        raise SelectionError(
+                            "More than one pool_options specified "
+                            "zpool '%s'" % (new_zpool.name))
+                else:
+                    raise SelectionError("Failed to copy PoolOptions.")
+
+            elif isinstance(child, DatasetOptions):
+                # Validate only one dataset options
+                new_dataset_options = self.__handle_dataset_options(child)
+                if new_dataset_options is not None:
+                    # Can only specify one dataset_options per zpool
+                    if new_zpool.name not in self._dataset_options:
+                        self._dataset_options[new_zpool.name] = \
+                            new_dataset_options
+                        self.logger.debug("Adding Dataset Options '%s' to "
+                            "zpool" % (new_dataset_options.options))
+                        new_zpool.insert_children(new_dataset_options)
+                    else:
+                        raise SelectionError(
+                            "More than one dataset_options specified "
+                            "in zpool '%s'" % (new_zpool.name))
+                else:
+                    raise SelectionError("Failed to copy DatasetOptions.")
+
+            elif isinstance(child, BE):
+                if not zpool.is_root:
+                    raise SelectionError("BE cannot be part of non root "
+                        "pool '%s'" % (zpool.name))
+                new_be = self.__handle_be(child)
+                if new_be is not None:
+                    # BE can only be specified once in entire manifest
+                    if self._be is None:
+                        self._be = new_be
+                        this_be = new_be
+
+                        self.logger.debug("Adding BE '%s' to zpool" %
+                            (new_be.name))
+                        new_zpool.insert_children(new_be)
+                    else:
+                        if this_be is not None:
+                            raise SelectionError(
+                                "More than one BE specified in zpool '%s'" % \
+                                (new_zpool.name))
+                        else:
+                            raise SelectionError(
+                                "Only one BE element allowed per logical.")
+                else:
+                    raise SelectionError("Failed to copy BE.")
+
+            else:
+                raise SelectionError("Invalid zpool sub element")
+
+        if vdev_count == 0:
+            # Zpool specified in manifest but no vdevs were specified
+            # Add a default vdev of type mirror to add any disks to later
+            self.logger.debug("No Vdevs found in zpool '%s', adding mirror."
+                % (new_zpool.name))
+            new_vdev = new_zpool.add_vdev(DEFAULT_VDEV_NAME, "mirror")
+            vdev_key = new_zpool.name + ":" + new_vdev.name
+            self._vdev_map[vdev_key] = new_vdev
+            self._vdev_added_map[vdev_key] = new_vdev
+
+        return new_zpool
+
+    def __handle_preserved_zpool(self, discovered_zpool, zpool):
+        '''
+            Process all zpool children, handling each child object type
+            and returning Zpool object.
+            Preserving a Zpool effectively means maintaining the physical
+            device structure of a zpool, so you cannot add new physical
+            devices via AI, you can however change what's contained on the
+            zpool, e.g. create/delete filesystems/Zvols.
+
+            Filesystems:
+                - Create, must not already exist
+                - Delete, must exist already
+                - Preserve, nothing to do, but must exist already.
+
+            Zvols:
+                - Create, must not already exist
+                - Delete, must exist already
+                - Preserve, nothing to do, but must exist already.
+                - use_existing, must exist, and usage must be changing
+
+            BE's:
+                - Can only be one BE, BE's are not copied from discovered
+                  as there could be laods.
+                - Ensure this BE does not exist already, if not specified
+                  Ensure default created BE does not already exist.
+
+            Dataset Options:
+                - Not allowed in AI, user can do this manually, requires
+                  parsing of string options and zfs knowledge of what options
+                  can be applied post pool creation.
+
+            Pool Options:
+                - Not allowed in AI, user can do this manually, requires
+                  parsing of string options and zfs knowledge of what options
+                  can be applied post pool creation.
+        '''
+        self.logger.debug("Processing %s Zpool '%s', action='%s',"
+            " is_root='%s', mountpoint='%s'" % (zpool.action, zpool.name,
+            zpool.action, zpool.is_root, zpool.mountpoint))
+
+        discovered_bes = self.__get_discovered_be(zpool)
+        this_be = None
+
+        discovered_zpool.action = zpool.action
+
+        for child in zpool.children:
+            if isinstance(child, Vdev):
+                raise SelectionError("Cannot specify vdev's for preserved "
+                    "zpool '%s'." % (zpool.name))
+
+            elif isinstance(child, Filesystem):
+                new_fs = self.__handle_filesystem(child)
+                if new_fs is not None:
+                    fs_key = zpool.name + ":" + new_fs.name
+                    # Check if this filesystem already exists as zvol
+                    if fs_key in self._zvol_map:
+                        raise SelectionError("Filesystem '%s' specified on "
+                            "preserved zpool '%s' exists as Zvol."
+                            (new_fs.name, zpool.name))
+                    elif fs_key in self._fs_map:
+                        # Only preserve and delete are allowed for existing
+                        if new_fs.action not in ["preserve", "delete"]:
+                            raise SelectionError("Filesystem '%s' specified on"
+                                " preserved zpool '%s' contains invalid action"
+                                " of '%s'."
+                                (new_fs.name, zpool.name, new_fs.action))
+                        # Remove discovered item in order to add user specified
+                        discovered_zpool.delete_children(new_fs)
+                    else:
+                        # Only create allowed for new filesystems
+                        if new_fs.action != "create":
+                            raise SelectionError("Filesystem '%s' specified on"
+                                " preserved zpool '%s' contains invalid action"
+                                " of '%s'."
+                                (new_fs.name, zpool.name, new_fs.action))
+                        self._fs_map[fs_key] = new_fs
+
+                    self.logger.debug("Adding Filesystem '%s' to zpool" %
+                         (new_fs.name))
+                    discovered_zpool.insert_children(new_fs)
+                else:
+                    raise SelectionError("Failed to process Filesystem.")
+
+            elif isinstance(child, Zvol):
+                new_zvol = self.__handle_zvol(child)
+                if new_zvol is not None:
+                    zvol_key = zpool.name + ":" + new_zvol.name
+
+                    # Check if This Zvol already exists as filesystem
+                    if zvol_key in self._fs_map:
+                        raise SelectionError("Zvol '%s' specified on "
+                            "preserved zpool '%s' exists as Filesystem."
+                            (new_zvol.name, zpool.name))
+                    elif zvol_key in self._zvol_map:
+                        # Only preserve, delete, use_existing are allowed
+                        if new_zvol.action not in \
+                            ["preserve", "delete", "use_existing"]:
+                            raise SelectionError("Zvol '%s' specified on "
+                                "preserved zpool '%s' contains invalid action "
+                                "of '%s'."
+                                (new_zvol.name, zpool.name, new_zvol.action))
+
+                        discovered_zvol = discovered_zpool.get_first_child(
+                            new_zvol.name, Zvol)
+
+                        if discovered_zvol is None:
+                            raise SelectionError("Zvol '%s' not found in "
+                                "discovered." % (new_zvol.name))
+
+                        if new_zvol.action == "use_existing":
+                            if discovered_zvol.use == new_zvol.use:
+                                raise SelectionError("Zvol '%s' marked as "
+                                    "use_existing but usage has not changed." %
+                                    (new_zvol.name))
+                            elif discovered_zvol.use == "dump":
+                                # Cannot delete a dump zvol
+                                self.__raise_dump_zvol_deletion_exception()
+
+                            elif new_zovl.use == "dump":
+                                # Can only specify one Dump Zvol
+                                if self._dump_zvol is not None:
+                                    raise SelectionError(
+                                        "Dump zvol specified twice.")
+
+                                # Cannot specify Dump Zvol and nodump == true
+                                if self._nodump:
+                                    raise SelectionError(
+                                        "Both dump zvol and nodump "
+                                        "specified in manifest.")
+                                # Make a copy of discovered zvol to ensure we
+                                # get same size specification
+                                new_zvol = copy.deepcopy(discovered_zvol)
+                                new_zvol.action = "create"
+                                new_zvol.use = "dump"
+                                self._dump_zvol = new_zvol
+
+                            elif new_zvol.use == "swap":
+                                # Cannot have Swap Zvol and noswap == true
+                                if self._noswap:
+                                    raise SelectionError(
+                                        "Both swap zvol and noswap "
+                                        "specified in manifest.")
+                                if new_zvol in self._swap_zvol_map:
+                                    raise SelectionError("Zvol '%s' specified "
+                                        "as swap twice in preserved zpool "
+                                        "'%s'" % (new_zvol.name, zpool.name))
+                                new_zvol = copy.deepcopy(discovered_zvol)
+                                new_zvol.action = "create"
+                                new_zvol.use = "swap"
+                                self._swap_zvol_map[zvol_key] = new_zvol
+                            else:
+                                new_zvol = copy.deepcopy(discovered_zvol)
+                                new_zvol.action = "create"
+                                new_zvol.use = "none"
+
+                            if discovered_zvol.use == "swap":
+                                # Remove this device from swap map
+                                if new_zvol in self._swap_zvol_map:
+                                    del self._swap_zvol_map[zvol_key]
+
+                        # Remove discovered item in order to add user specified
+                        discovered_zpool.delete_children(new_zvol)
+                    else:
+                        # Only create allowed for new zvol
+                        if new_zvol.action != "create":
+                            raise SelectionError("Zvol '%s' specified on "
+                                "preserved zpool '%s' contains invalid action "
+                                "of '%s'."
+                                (new_zvol.name, zpool.name, new_zvol.action))
+                        self._zvol_map[zvol_key] = new_zvol
+
+                    self._zvol_map[zvol_key] = new_zvol
+                    self.logger.debug("Adding Zvol '%s' to zpool" %
+                        (new_zvol.name))
+                    discovered_zpool.insert_children(new_zvol)
+                else:
+                    raise SelectionError("Failed to copy Zvol.")
+
+            elif isinstance(child, PoolOptions):
+                raise SelectionError("Cannot specify Pool Option's for "
+                    "preserved zpool '%s'." % (zpool.name))
+
+            elif isinstance(child, DatasetOptions):
+                raise SelectionError("Cannot specify Dataset Option's for "
+                    "preserved zpool '%s'." % (zpool.name))
+
+            elif isinstance(child, BE):
+                if not zpool.is_root:
+                    raise SelectionError("BE cannot be part of non root "
+                        "pool '%s'" % (zpool.name))
+                new_be = self.__handle_be(child)
+                if new_be is not None:
+                    # Ensure this boot environment does not exist on
+                    # this preserved/use_existing zpool
+                    if new_be.exists:
+                        raise SelectionError(
+                                "BE '%s' specified in preserved "
+                                "zpool '%s' already exists. To install to "
+                                "existing zpool you must specify a unique "
+                                "BE in the manifest." % \
+                                (new_be.name, zpool.name))
+
+                    # BE can only be specified once in entire manifest
+                    if self._be is None:
+                        self._be = new_be
+                        this_be = new_be
+
+                        self.logger.debug("Adding BE '%s' to zpool" %
+                            (new_be.name))
+                        discovered_zpool.insert_children(new_be)
+                    else:
+                        if this_be is not None:
+                            raise SelectionError(
+                                "More than one BE specified in zpool '%s'" % \
+                                (zpool.name))
+                        else:
+                            raise SelectionError(
+                                "Only one BE element allowed per logical.")
+                else:
+                    raise SelectionError("Failed to copy BE.")
+
+            else:
+                raise SelectionError("Invalid zpool sub element")
+
+    def __handle_logical(self, logical):
+        '''Create Logical structure from manifest, validating
+           zpools and contents in the process.
+
+           Returns a copy of the logical tree with validated zpool
+           children inserted, or None when no zpools are specified
+           (self._nozpools is then set). Raises SelectionError on
+           validation failures or errors reported by the error service.
+        '''
+        # Clear the Error service
+        errsvc.clear_error_list()
+
+        self.logger.debug("Processing Logical noswap : %s, nodump : %s" %
+            (logical.noswap, logical.nodump))
+
+        # Set whether to specifically not create swap/dump
+        self._noswap = logical.noswap
+        self._nodump = logical.nodump
+
+        new_logical = copy.copy(logical)
+
+        preserving_zpools = False
+        for zpool in logical.children:
+            # This will always be true as Logical can only contain zpools
+            if isinstance(zpool, Zpool):
+                if zpool.action in self.PRESERVED:
+                    # Preserved/use_existing pools are built from the
+                    # discovered tree, not from the manifest definition
+                    preserving_zpools = True
+                    disc_zpool = self.__get_discovered_zpool(zpool)
+                    if disc_zpool is None:
+                        raise SelectionError("Failed to find zpool '%s' in "
+                            "discovered logical tree." % (zpool.name))
+
+                    if zpool.action == "use_existing" and \
+                        not disc_zpool.is_root:
+                        raise SelectionError("Specified action of "
+                            "'use_existing' on pool '%s' is invalid. '%s' "
+                            "is not a root pool." % (zpool.name, zpool.name))
+
+                    new_zpool = self.__handle_zpool(disc_zpool, new_logical)
+
+                    # Copy physical devices to desired
+                    self.__copy_zpool_discovered_devices(new_zpool)
+                else:
+                    new_zpool = self.__handle_zpool(zpool, new_logical)
+
+                if new_zpool is not None:
+                    if new_zpool.name not in self._zpool_map:
+                        if new_zpool.is_root:
+                            # Only one root pool can be specified
+                            if self._root_pool is not None:
+                                raise SelectionError(
+                                    "Root pool specified twice")
+                            self.logger.debug("Root zpool found '%s'" %
+                                (new_zpool.name))
+                            self._root_pool = new_zpool
+                        self.logger.debug("Adding zpool '%s' to logical" %
+                            (new_zpool.name))
+                        self._zpool_map[new_zpool.name] = new_zpool
+                    else:
+                        raise SelectionError("Zpool '%s' specified twice" %
+                            (new_zpool.name))
+
+                # Merge the manifest's children into the preserved pool
+                # after it has been registered in the zpool map
+                if zpool.action in self.PRESERVED:
+                    self.__handle_preserved_zpool(new_zpool, zpool)
+            else:
+                raise SelectionError("Invalid logical child.")
+
+        if preserving_zpools:
+            # Need to update all devices, and remove any references to
+            # Zpools that do not exist.
+            self.__remove_invalid_zpool_references(new_logical)
+
+        if self._zpool_map is None or len(self._zpool_map) == 0:
+            self._nozpools = True
+            new_logical = None
+
+        # Check error service for errors
+        errors = errsvc.get_all_errors()
+        if errors:
+            existing_desired = \
+                self.doc.persistent.get_first_child(Target.DESIRED)
+            if existing_desired:
+                self.logger.debug("Desired =\n%s\n" % (str(existing_desired)))
+            errstr = "Following errors occurred processing logical :\n%s" % \
+                (str(errors[0]))
+            raise SelectionError(errstr)
+
+        return new_logical
+
+    def __raise_dump_zvol_deletion_exception(self):
+        '''
+            Dump zvol's cannot be unassigned from being a dump device.
+            Bug 6910925 is addressing this issue, however until resolved
+            we have to trap and tell the user that this is not possible.
+        '''
+        self.logger.debug("Workaround for releaseing Zvol as dump device.")
+        self.logger.debug("Create a new Zvol on another pool, and assign ")
+        self.logger.debug("this Zvol as the dump device. The original dump")
+        self.logger.debug("assigned Zvol and it's pool can now be destroyed.")
+
+        raise SelectionError("Dump device cannot be unassigned. Due to "
+            "RFE 6910925. See install_log for workaround.")
+
+    def __remove_invalid_zpool_references(self, logical):
+        '''
+            Process all physical devices, remove any references to in_zpool
+            in_vdev referencing pools not in this logical tree.
+
+            Whilst processing the disks add them to the internal maps.
+
+            At this stage self._zpool_map will have been populated.
+        '''
+        # Cycle through all desired physical disks checking what pool if
+        # any these disks/devices are assigned to.
+        # If the pool does not exist in this logical, then reset their
+        # in_zpool and in_vdev attributes to None
+        new_desired_target = self.__get_new_desired_target()
+        desired_disks = new_desired_target.get_descendants(class_type=Disk)
+
+        for disk in desired_disks:
+            if disk.in_zpool is not None and \
+                disk.in_zpool not in self._zpool_map:
+                disk.in_zpool = None
+                disk.in_vdev = None
+
+            # Process any children and ensure they are unset as well
+            for disk_kid in disk.children:
+                if (isinstance(disk_kid, Partition) or \
+                    isinstance(disk_kid, Slice)) and \
+                    disk_kid.in_zpool is not None and \
+                    disk_kid.in_zpool not in self._zpool_map:
+                    disk_kid.in_zpool = None
+                    disk_kid.in_vdev = None
+
+                # Partitions can contain slices; unset those too
+                if isinstance(disk_kid, Partition):
+                    for slc in disk_kid.children:
+                        if isinstance(slc, Slice) and \
+                            slc.in_zpool is not None and \
+                            slc.in_zpool not in self._zpool_map:
+                            slc.in_zpool = None
+                            slc.in_vdev = None
+
+        # Add all these disks to map
+        self.__add_disks_to_map(desired_disks)
+
+    def __add_disks_to_map(self, disks):
+        '''
+            Given a list of disks, add them to the internal disk map.
+            Throwing exception if disk has already been added.
+
+            Disks are keyed by their ctd identifier.
+            NOTE(review): disks with ctd of None would all collide on the
+            None key - confirm all discovered disks carry a ctd.
+        '''
+        for disk in disks:
+            if disk.ctd in self._disk_map:
+                # Seems that the disk is specified more than once in
+                # the manifest!
+                raise SelectionError(
+                    "Disk '%s' matches already used disk '%s'." %
+                    (self.__pretty_print_disk(disk), disk.ctd))
+            self._disk_map[disk.ctd] = disk
+
+    def __get_discovered_be(self, zpool):
+        '''
+            Retrieve the list of boot environments for a specific discovered
+            zpool.
+
+            Returns None when no discovered zpool matches zpool.name,
+            otherwise the (possibly empty) list of BE descendants of the
+            first matching discovered zpool.
+        '''
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+        discovered_zpools = discovered.get_descendants(name=zpool.name,
+            class_type=Zpool)
+
+        if not discovered_zpools:
+            return None
+
+        found_be = discovered_zpools[0].get_descendants(class_type=BE)
+        return found_be
+
+    def __get_discovered_zpool(self, zpool):
+        '''
+            Process list of discovered zpools for matching zpool.
+            Return None if not found or zpool object if found.
+
+            The returned object is a deep copy of the first matching
+            discovered zpool with its BE children stripped.
+        '''
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+        discovered_zpools = discovered.get_descendants(name=zpool.name,
+            class_type=Zpool)
+
+        if not discovered_zpools:
+            return None
+
+        # Deep copy so later edits do not disturb the discovered tree
+        found_zpool = copy.deepcopy(discovered_zpools[0])
+
+        # Remove BE's as they cannot be preserved/recreated via AI
+        found_zpool.delete_children(class_type=BE)
+
+        return found_zpool
+
+    def __copy_zpool_discovered_devices(self, zpool):
+        '''
+            For zpools being preserved, copy all devices associated with
+            this zpool into desired tree.
+            When copying devices, entire disk tree needs to be copied if
+            any part of the disk resides on this zpool, by default all elements
+            are set to "preserved", so nothing will get destroyed, TI needs
+            to have all elements of the disk tree structure or it will wipe
+            them.
+            Throw exception if after processing all physical devices, and
+            none were found for this zpool.
+        '''
+        new_desired_target = self.__get_new_desired_target()
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+        discovered_disks = discovered.get_descendants(class_type=Disk)
+
+        device_found = False
+        for disk in discovered_disks:
+            disk_copy = None
+            disk_found = False
+            if disk.in_zpool is not None and disk.in_zpool == zpool.name:
+                # Copy entire disk
+                disk_copy = copy.deepcopy(disk)
+                disk_found = True
+            else:
+                for disk_kid in disk.children:
+                    if isinstance(disk_kid, Partition):
+                        if disk_kid.in_zpool is not None and \
+                            disk_kid.in_zpool == zpool.name:
+                            disk_found = True
+                            break
+                        else:
+                            for slc in disk_kid.children:
+                                if isinstance(slc, Slice):
+                                    # Only copy if in this zpool
+                                    if slc.in_zpool is not None and \
+                                        slc.in_zpool == zpool.name:
+                                        disk_found = True
+                                        break
+
+                    elif isinstance(disk_kd, Slice):
+                        if disk_kid.in_zpool is not None and \
+                            disk_kid.in_zpool == zpool.name:
+                            disk_found = True
+                            break
+
+                if disk_found:
+                    disk_copy = copy.deepcopy(disk)
+
+            if disk_copy is not None and disk_found:
+                # If this is the root pool, make sure solaris partition
+                # is marked as Active, Final Validation fails otherwise
+                if zpool.is_root and platform.processor() == 'i386':
+                    solaris2_part_type = Partition.name_to_num("Solaris2")
+                    for disk_kid in disk_copy.children:
+                        if isinstance(disk_kid, Partition) and \
+                            disk_kid.part_type == solaris2_part_type:
+                            disk_kid.bootid = Partition.ACTIVE
+                            break
+                new_desired_target.insert_children(disk_copy)
+                device_found = True
+
+        if not device_found:
+            raise SelectionError("Failed to find any discovered devices for "
+                "zpool '%s'." % (zpool.name))
+
+    def __validate_zpool_actions(self, zpool):
+        '''Perform some validation on zpool actions against
+           root pool and existing zpool's.
+           Ensures the requested action is consistent with whether the pool
+           already exists, and that a newly created pool's name does not
+           clash with an existing root-level directory.
+           Raises SelectionError on any invalid combination.
+        '''
+
+        if zpool.exists:
+            # Get zpool discovered object
+            discovered_zpool = self.__get_discovered_zpool(zpool)
+
+            # Sanity check: exists flag and discovered tree must agree
+            if discovered_zpool is None:
+                raise SelectionError("zpool.exists() reports zpool '%s' "
+                    "exists, but zpool object not in Discovered tree." % \
+                    (zpool.name))
+
+            if zpool.action == "create":
+                # Recreating an existing pool; warn that it will be wiped
+                self.logger.warning("Existing zpool '%s' will be destroyed."
+                                    % (zpool.name))
+
+            elif zpool.action == "preserve" and \
+                (zpool.is_root or discovered_zpool.is_root):
+                # Manifest specifies to preserve pool, and either manifest
+                # or discovered specify this as root pool
+                raise SelectionError("Invalid preservation specified for "
+                    "root zpool '%s'. Use 'use_existing' action." % \
+                    (zpool.name))
+
+            elif zpool.action == "use_existing":
+                # Pool must be an existing root pool, both in the manifest
+                # and as discovered on the system
+                if not discovered_zpool.is_root:
+                    raise SelectionError("Cannot specify 'use_existing' "
+                        "action for already existing non root zpool '%s'."
+                        % (zpool.name))
+
+                if not zpool.is_root:
+                    raise SelectionError("Cannot specify 'use_existing' "
+                        "action on non root zpool '%s'." % (zpool.name))
+
+                # Preserving a root pool, let's make sure there is
+                # sufficient space left in this pool for a solaris install
+                zpool_available_size = self.__get_existing_zpool_size(zpool)
+                if zpool_available_size < self.controller.minimum_target_size:
+                    raise SelectionError("Preserved root pool '%s' has "
+                        "available space of '%s', which is insufficient "
+                        "space to install to. Minimim space "
+                        "required is '%s'."
+                        % (zpool.name, str(zpool_available_size),
+                        str(self.controller.minimum_target_size)))
+
+        else:
+            # Pool does not exist on the system
+            if zpool.action == "delete":
+                # Harmless, but flag the likely manifest mistake
+                self.logger.warning("Manifest specifies to delete non "
+                    "existent zpool '%s'." % (zpool.name))
+            elif zpool.action == "preserve":
+                raise SelectionError("Cannot 'preserve' non existent zpool "
+                    "'%s'." % (zpool.name))
+            elif zpool.action == "use_existing":
+                raise SelectionError("Cannot 'use_existing' non existent "
+                    "zpool '%s'." % (zpool.name))
+            else:
+                # Attempting to create a new Zpool, ensure pool name
+                # is not an existing directory as zpool create will fail.
+                if os.path.isdir(os.path.join("/", zpool.name)):
+                    raise SelectionError("Pool name '%s' is not valid, "
+                        "directory exists with this name." % (zpool.name))
+
+    def __get_existing_zpool_size(self, zpool):
+        '''Retrieve the size available for this zpool via the "size"
+           zpool property. Returned size is a Size object.
+        '''
+        if zpool.exists:
+            propdict = zpool.get("size")
+            retsize = Size(propdict.get("size", "0b"))
+        else:
+            retsize = Size("0b")
+
+        return retsize
+
+    def __get_zpool_available_size(self, zpool):
+        '''Process all the devices in the first toplevel of this zpool
+           returning what the available size for this zpool if created.
+
+           "none" : concatenate size of all devices
+           "mirror", "raidz*" : get size of smallest device
+
+           returns Size object
+           Raises SelectionError if no size could be determined.
+        '''
+        retsize = None
+
+        if zpool.action in self.PRESERVED:
+            # Pool is being preserved so no devices in desired,
+            # Get size from discovered disks instead
+            retsize = self.__get_existing_zpool_size(zpool)
+        else:
+            # Get available size from desired targets
+            for vdev in zpool.children:
+                if isinstance(vdev, Vdev):
+                    vdev_devices = self.__get_vdev_devices(zpool.name,
+                        vdev.name, self._disk_map)
+                    # Only data-bearing (toplevel) vdevs count towards
+                    # installable capacity, not log/cache/spare
+                    if vdev.redundancy in self.TOPLEVEL_REDUNDANCY and \
+                        vdev_devices:
+                        for device in vdev_devices:
+                            if retsize is None:
+                                retsize = copy.copy(device.size)
+                            else:
+                                devsize = copy.copy(device.size)
+                                if vdev.redundancy == "none":
+                                    # Concatenate device sizes together
+                                    retsize = Size(str(retsize.byte_value +
+                                        devsize.byte_value) + Size.byte_units)
+                                else:
+                                    # Get size of smallest device
+                                    if devsize < retsize:
+                                        retsize = devsize
+                        # Break after first Toplevel vdev
+                        break
+
+        if retsize is None:
+            raise SelectionError("Could not determine the available size in "
+                "pool '%s'." % (zpool.name))
+
+        return retsize
+
+    def __validate_swap_and_dump(self, desired):
+        '''Ensure at least one swap and one dump device exist in logical
+           tree.
+
+           If none exist, unless noswap or nodump are set to true we will, by
+           default, create one of each in the root pool if sufficient space
+           available.
+        '''
+        # Get Logical sections
+        logical = desired.get_first_child(class_type=Logical)
+
+        if logical is not None:
+            # swap :
+            if (not logical.noswap and len(self._swap_zvol_map) == 0) or \
+                (not logical.nodump and self._dump_zvol is None):
+
+                # One or both of swap and dump not already created, get
+                # default type/sizes if we are to create them
+
+                # Create swap/dump zvol in root pool
+                swap_added = False
+                dump_added = False
+                for zpool in [z for z in logical.children if z.is_root]:
+
+                    # This needs to be done for on the pool itself as we
+                    # need to process this pool to get the available size
+                    # To install to.
+                    try:
+                        (swap_type, swap_size, dump_type, dump_size) = \
+                            self.controller.calc_swap_dump_size(\
+                                self.controller.minimum_target_size,
+                                self.__get_zpool_available_size(zpool))
+                    except (SwapDumpGeneralError, SwapDumpSpaceError) as ex:
+                        raise SelectionError("Error determining swap/dump "
+                            "requirements.")
+
+                    # Only process root pools (should only be one either way)
+                    if not logical.noswap and len(self._swap_zvol_map) == 0:
+                        # Swap does not exist so attempt to create
+                        if swap_type == self.controller.SWAP_DUMP_ZVOL and \
+                            swap_size > Size("0b"):
+                            zvol_name = self.__get_unique_dataset_name(zpool,
+                                                                       "swap")
+                            self.__create_swap_dump_zvol(zpool, zvol_name,
+                                "swap", swap_size)
+                            swap_added = True
+
+                    if not logical.nodump and self._dump_zvol is None:
+                        # Dump does not exist so attempt to create
+                        if dump_type == self.controller.SWAP_DUMP_ZVOL and \
+                            dump_size > Size("0b"):
+                            zvol_name = self.__get_unique_dataset_name(zpool,
+                                                                       "dump")
+                            self.__create_swap_dump_zvol(zpool, zvol_name,
+                                "dump", dump_size)
+                            dump_added = True
+
+                if not swap_added and \
+                    not logical.noswap and len(self._swap_zvol_map) == 0:
+                    self.logger.warning("Failed to add default swap zvol to "
+                        "root pool")
+
+                if not dump_added and \
+                    not logical.nodump and self._dump_zvol is None:
+                    self.logger.warning("Failed to add default dump zvol to "
+                        "root pool")
+
+    def __get_unique_dataset_name(self, zpool, dataset_name):
+        '''
+            Ensure this dataset name does not exist in this zpool.
+            If it does, then append N to end until unique.
+        '''
+        unique = False
+        unique_name = dataset_name
+        append_digit = 1
+        while not unique:
+            unique = True
+            for child in zpool.children:
+                if (isinstance(child, Filesystem) or \
+                    isinstance(child, Zvol)) and \
+                    child.name == unique_name:
+                    unique_name = dataset_name + str(append_digit)
+                    append_digit = append_digit + 1
+                    unique = False
+                    break
+
+        return unique_name
+
+    def __create_swap_dump_zvol(self, zpool, zvol_name, zvol_use, zvol_size):
+        '''Create a swap or dump Zvol on a zpool.
+
+           Input:
+               zpool : zpool object to add zvol to
+               zvol_name : Str zvol name
+               zvol_use : Str zvol usage, "swap" or "dump"
+               zvol_size : Size Object
+        '''
+        zvol = zpool.add_zvol(zvol_name, int(zvol_size.get(Size.mb_units)),
+            Size.mb_units, use=zvol_use)
+
+        if zvol_use == "swap":
+            swap_key = zpool.name + ":swap"
+            self._swap_zvol_map[swap_key] = zvol
+        else:
+            self._dump_zvol = zvol
+
+    def __validate_logical(self, desired):
+        '''Process Logical components of desired tree ensuring:
+           - mirror, at least two devices
+           - logmirror, at least two devices
+           - raidz1, at least two devices
+           - raidz2, at least three devices
+           - raidz3, at least four devices
+           - Ensure at least one of none|mirror|raidz exists per pool
+           - Ensure only one BE specified per pool, if none add one.
+           - Ensure only one DatasetOptions specified per pool
+           - Ensure only one PoolOptions specified per pool
+           - Ensure only one pool is set to root pool
+           - root_pool, ensure single device or if multiple devices,
+             redundancy is set to mirror.
+           - Root pool cannot contain log or logmirror devices
+
+           Raises SelectionError on any violation.
+        '''
+
+        # Get Logical sections
+        logicals = desired.get_children(class_type=Logical)
+
+        # Validate correct number of devices per redundancy
+        for logical in logicals:
+            self.logger.debug("Validating desired  logical =\n%s\n" %
+                (str(logical)))
+            # Per-logical scan state, reset for each Logical section
+            be = None
+            found_be = False
+            found_dataset_options = False
+            found_pool_options = False
+            found_root_pool = False
+            found_swap = False
+            found_dump = False
+            # Devices already in use by pools not being (re)created here
+            self._remaining_zpool_map = \
+                self.__compile_remaining_existing_devices_map(logical)
+            for zpool in logical.children:
+                if zpool.is_root:
+                    if found_root_pool:
+                        raise SelectionError("Root pool specified twice")
+                    else:
+                        found_root_pool = True
+
+                        # Ensure Root pool size is large enough to install to
+                        zpool_available_size = \
+                            self.__get_zpool_available_size(zpool)
+                        if zpool_available_size < \
+                            self.controller.minimum_target_size:
+                            raise SelectionError("Root pool '%s' has "
+                                "available space of '%s', which is "
+                                "insufficient space to install to. "
+                                "Minimim space required is '%s'."
+                                % (zpool.name, str(zpool_available_size),
+                                str(self.controller.minimum_target_size)))
+
+                # Perform some validations on zpool
+                # Check if this zpool already exists etc
+                self.__validate_zpool_actions(zpool)
+
+                found_toplevel = False
+                for child in zpool.children:
+                    if isinstance(child, Vdev):
+                        self.__validate_vdev(zpool, child)
+
+                        # Ensure something other than log/cache/spare is set
+                        if child.redundancy in self.TOPLEVEL_REDUNDANCY and \
+                            not found_toplevel:
+                            found_toplevel = True
+
+                    elif isinstance(child, BE):
+                        # BEs are only meaningful on the root pool
+                        if not zpool.is_root:
+                            raise SelectionError("BE '%s' cannot be part of "
+                                "non root pool '%s'." %
+                                (child.name, zpool.name))
+
+                        # Verify BE does not already exist
+                        if child.exists:
+                            raise SelectionError("BE '%s' already exists."
+                                "BE must be unique." % (child.name))
+
+                        if found_be:
+                            raise SelectionError(
+                                "More than one BE specified in zpool '%s'." % \
+                                (zpool.name))
+                        else:
+                            found_be = True
+                            be = child
+
+                    elif isinstance(child, DatasetOptions):
+                        if found_dataset_options:
+                            raise SelectionError(
+                                "More than one dataset_options specified "
+                                "in zpool '%s'." % (zpool.name))
+                        else:
+                            found_dataset_options = True
+
+                    elif isinstance(child, PoolOptions):
+                        if found_pool_options:
+                            raise SelectionError(
+                                "More than one pool_options specified "
+                                "zpool '%s'." % (zpool.name))
+                        else:
+                            found_pool_options = True
+
+                    elif isinstance(child, Zvol):
+                        # Track whether swap/dump zvols were specified
+                        if child.use == "swap":
+                            found_swap = True
+                        elif child.use == "dump":
+                            found_dump = True
+
+                if not found_toplevel:
+                    raise SelectionError("Must specify at least one toplevel"
+                        " child redundancy in pool '%s'." % (zpool.name))
+
+                if zpool.is_root:
+                    if not found_be:
+                        # Root pool with no BE, so insert one
+                        self.logger.debug("Found root pool '%s' with no BE, "
+                            "inserting one." % (zpool.name))
+
+                        # Ensure BE name is unique and does not exist already
+                        self._be = BE()
+                        if self._be.exists:
+                            raise SelectionError(
+                                "BE '%s' specified in zpool '%s' already "
+                                "exists. You must specify a unique "
+                                "BE in the manifest." % \
+                                (self._be.name, zpool.name))
+
+                        self._be.mountpoint = self.be_mountpoint
+                        zpool.insert_children(self._be)
+                        found_be = True
+                    else:
+                        # Ensure mountpoint is set on the manifest's BE
+                        if be.mountpoint is None:
+                            be.mountpoint = self.be_mountpoint
+
+            # Final per-logical checks after scanning all zpools
+            if not logical.noswap and not found_swap:
+                raise SelectionError("At least one swap Zvol must exist.")
+
+            if not logical.nodump and not found_dump:
+                raise SelectionError("At least one dump Zvol must exist.")
+
+            if not found_root_pool:
+                raise SelectionError("No root pool specified.")
+
+            if not found_be:
+                raise SelectionError("No BE specified.")
+
+    def __validate_vdev(self, zpool, child):
+        '''
+            Validate Vdev
+            - Ensure correct number of devices for redundancy
+            - Root pool contains correct redundancy type
+            - Preserved pool contains no devices
+
+            May silently adjust redundancy ("mirror" <-> "none") where a
+            default was added or the root pool device count requires it;
+            raises SelectionError for unfixable combinations.
+        '''
+        vdev_devices = self.__get_vdev_devices(zpool.name,
+            child.name, self._disk_map)
+        self.__validate_vdev_devices(zpool, child,
+            vdev_devices)
+
+        # Device-count checks only apply when the pool is being (re)built
+        if not zpool.action in self.PRESERVED:
+            if ((not zpool.is_root and
+                child.redundancy == "mirror") or \
+                child.redundancy == "logmirror" or
+                child.redundancy == "raidz" or
+                child.redundancy == "raidz1") and \
+                len(vdev_devices) < 2:
+                vdev_key = zpool.name + ":" + child.name
+                if vdev_key in self._vdev_added_map:
+                    # Data pool where we added default child of
+                    # type mirror, reset to "none"
+                    self.logger.debug("Changing data redundancy"
+                        " from 'mirror' to 'none'.")
+                    child.redundancy = "none"
+                else:
+                    raise SelectionError(\
+                        "Invalid %s redundancy specified in pool"
+                        " '%s', child '%s'. "
+                        "Must contain at least 2 devices." % \
+                        (child.redundancy, zpool.name, child.name))
+
+            elif child.redundancy == "raidz2" and \
+                len(vdev_devices) < 3:
+                raise SelectionError(\
+                    "Invalid raidz2 redundancy specified in "
+                    "pool '%s', child '%s'. "
+                    "Must contain at least 3 devices." % \
+                    (zpool.name, child.name))
+
+            elif child.redundancy == "raidz3" and \
+                len(vdev_devices) < 4:
+                raise SelectionError(\
+                    "Invalid raidz3 redundancy specified in "
+                    "zpool '%s', child '%s'. "
+                    "Must contain at least 4 devices." % \
+                    (zpool.name, child.name))
+
+            elif not vdev_devices:
+                raise SelectionError(\
+                    "Invalid '%s' redundancy specified in "
+                    "zpool '%s', child '%s'. "
+                    "Must contain at least 1 device." % \
+                    (child.redundancy, zpool.name, child.name))
+
+            elif zpool.is_root and len(vdev_devices) > 1:
+                if child.redundancy == "none":
+                    # Root pool with more than one device cannot
+                    # have redundancy of "none", reset to "mirror"
+                    self.logger.debug("Changing root redundancy"
+                        " from 'none' to 'mirror'.")
+                    child.redundancy = "mirror"
+                elif child.redundancy in self.RAIDZ_REDUNDANCY:
+                    raise SelectionError("Root pool redundancy"
+                        " cannot be raidz*. "
+                        "zpool '%s', child '%s'" % \
+                        (zpool.name, child.name))
+                elif child.redundancy in self.INVALID_ROOT_VDEV:
+                    raise SelectionError("Root pool cannot"
+                        " contain '%s' vdevs. "
+                        "zpool '%s', child '%s'" % \
+                        (child.redundancy, zpool.name, child.name))
+
+            elif len(vdev_devices) == 1 and \
+                child.redundancy == "mirror":
+                vdev_key = zpool.name + ":" + child.name
+                if zpool.is_root:
+                    # Root pool with one device cannot
+                    # have redundancy of "mirror", reset to "none"
+                    self.logger.debug("Changing root redundancy"
+                        " from 'mirror' to 'none'.")
+                    child.redundancy = "none"
+                elif vdev_key in self._vdev_added_map:
+                    # Data pool where we added default child of
+                    # type mirror, reset to "none"
+                    self.logger.debug("Changing data redundancy"
+                        " from 'mirror' to 'none'.")
+                    child.redundancy = "none"
+
+        # Root-pool vdev type checks below run unconditionally, i.e.
+        # also when the pool action is in PRESERVED
+        if zpool.is_root and \
+            child.redundancy in self.RAIDZ_REDUNDANCY:
+            raise SelectionError("Root pool redundancy"
+                " cannot be raidz. "
+                "zpool '%s', child '%s'" % \
+                (zpool.name, child.name))
+
+        elif zpool.is_root and \
+            child.redundancy in self.INVALID_ROOT_VDEV:
+            raise SelectionError("Root pool cannot"
+                " contain '%s' vdevs. "
+                "zpool '%s', child '%s'" % \
+                (child.redundancy, zpool.name, child.name))
+
+    def __validate_vdev_devices(self, zpool, vdev, vdev_devices):
+        '''
+            Given a list of devices being used in this zpool/vdev, validate
+            that these devices are not already being used in another zpool
+            that exists and is not being destroyed or recreated by this
+            install.
+
+            self._remaining_zpool_map contains a list of devices that cannot
+            be used in an install. This method assumes this has already been
+            populated before calling this routine.
+
+            Also validate that each device being used is physically greater
+            than 64MB, or zpool create will fail.
+        '''
+        if vdev.redundancy in self.TOPLEVEL_REDUNDANCY:
+            size_64mb = Size("64" + Size.mb_units)
+
+        for device in vdev_devices:
+            tmp_slice = None
+            tmp_part = None
+            tmp_disk = None
+            if isinstance(device, Slice):
+                tmp_slice = device
+                if isinstance(device.parent, Partition):
+                    tmp_disk = device.parent.parent
+                else:
+                    tmp_disk = device.parent
+            elif isinstance(device, Partition):
+                tmp_part = device
+                tmp_disk = device.parent
+            else:
+                tmp_disk = device
+
+            # Construct what device would be in zpool create
+            ctd = ""
+            if tmp_part is not None and tmp_slice is None:
+                ctd = "p" + tmp_part.name
+            elif tmp_part is None and tmp_slice is not None:
+                ctd = "s" + tmp_slice.name
+            ctd = ":" + tmp_disk.ctd + ctd
+
+            matching_devs = [key for key in self._remaining_zpool_map \
+                             if key.endswith(ctd)]
+            if matching_devs:
+                key_parts = matching_devs[0].split(':')
+                raise SelectionError("Device '%s' already in use by zpool "
+                                     " '%s'. Cannot be reused in zpool '%s'." %
+                                     (key_parts[2], key_parts[0], zpool.name))
+            else:
+                # If just using disk, we need to append s0, when the pool
+                # is created libzfs actually uses device s0 to create the
+                # the pool, zpool status strips this s0 off when displaying
+                # back to the user, but libzfs correctly returns s0.
+                # So add s0 to end and check map again
+                if tmp_part is None and tmp_slice is None:
+                    ctd = ctd + "s0"
+                    matching_devs = [key for key in self._remaining_zpool_map \
+                                                    if key.endswith(ctd)]
+                    if matching_devs:
+                        key_parts = matching_devs[0].split(':')
+                        raise SelectionError("Device '%s' already in use by "
+                            "zpool '%s'. Cannot be reused in zpool '%s'." %
+                            (key_parts[2], key_parts[0], zpool.name))
+
+            if isinstance(device, Disk):
+                if device.disk_prop is not None:
+                    dev_size = device.disk_prop.dev_size
+                else:
+                    raise SelectionError("Disk device '%s' in vdev '%s' on "
+                        "pool '%s' missing size specification." % \
+                        (key_parts[2], vdev.name, key_parts[0]))
+            else:
+                dev_size = device.size
+
+            if vdev.redundancy in self.TOPLEVEL_REDUNDANCY and \
+                dev_size < size_64mb:
+                raise SelectionError("Device '%s' in toplevel vdev '%s' "
+                    " on zpool '%s' is less then minimum zpool device "
+                    "size of 64MB." % (key_parts[2], vdev.name, key_parts[0]))
+
+    def __compile_remaining_existing_devices_map(self, logical):
+        '''
+            Process list of self._discovered_zpool_map, along with
+            all the zpools being defined in desired.logical, and come
+            up with a unique list of devices that will exist on the system
+            and cannot be specified in logical.desired.
+        '''
+        remaining_map = copy.copy(self._discovered_zpool_map)
+
+        for zpool in logical.children:
+            zpool_key = zpool.name + ":"
+            if zpool.exists:
+                # Desired zpool exists, regardless of action simply
+                # remove all current devices relating to this zpool
+                for zpool_map_key in remaining_map:
+                    if zpool_map_key.startswith(zpool_key):
+                        del remaining_map[zpool_map_key]
+
+        return remaining_map
+
+    def __get_existing_zpool_map(self):
+        '''
+            For all zpools that have been discovered, get dictionary of
+            devices for each zpool and populate private variable
+            self._discovered_zpool_map.
+        '''
+        zpool_map = dict()
+
+        # Cycle through existing zpool
+        for zpool in self._discovered_zpools:
+            # Get list of physical devices on this existing pool
+
+            # Get dictionary of disk devices for this zpool
+            # Must check zpool.exists, if get_vdev_mapping is called
+            # with a pool that does not exist, zpool_get_config() cores
+            # and self._discovered_zpools could be populated from tests
+            if zpool.exists:
+                vdev_map = vdevs._get_vdev_mapping(zpool.name)
+            else:
+                vdev_map = dict()
+            for vdev_key in vdev_map:
+                for device in vdev_map[vdev_key]:
+                    # Get device ctd from end of device path
+                    device_ctd = device.split('/')[-1]
+
+                    # Create a dummy disk object for __find_disk() to work with
+                    disk = Disk("foo")
+
+                    # Retrieve the disk only portion of the ctd
+                    match = re.match(DISK_RE, device_ctd, re.I)
+                    if match is not None:
+                        disk.ctd = match.group()
+
+                        # Attempt to find a matching discovered disk
+                        # this should always be successful
+                        discovered_disk = self.__find_disk(disk)
+                        if discovered_disk is not None:
+                            devkey = zpool.name + ":" + disk.ctd + ":" + \
+                                     device_ctd
+                            zpool_map[devkey] = discovered_disk
+                        else:
+                            raise SelectionError("Unable to find discovered "
+                                "disk '%s', used in zpool '%s'." %
+                                (disk.ctd, zpool.name))
+                    else:
+                        # Zpool device is not a disk, must be a file
+                        disk.ctd = device_ctd
+                        devkey = zpool.name + ":" + device + ":" + \
+                                 device_ctd
+                        zpool_map[devkey] = discovered_disk
+        return zpool_map
+
+    def __validate_disks(self, desired):
+        '''Process Disk components of desired tree ensuring:
+           A device is uniquely identifiable as belonging to a zpool
+           either via in_zpool and/or in_vdev being specified. A parent
+           cannot be identifiable if it contains children, the identifiers
+           must reside on the lowest child.
+
+           - Disk validation :
+               Whole-Disk, has kids - fail
+               Whole-Disk, no kids, not identifiable - fail
+               Whole-Disk, in root pool - fail
+
+               Not Whole-Disk, no kids - fail
+               Not Whole-Disk, has kids, is identifiable - fail as kids should
+                   contain the identifying information not the disk
+               Not whole-disk, has kids, not identifiable - good, process kids
+
+               Two disks with same name, fail
+               If root pool disk, ensure label is VTOC not GPT
+
+           - Partition validation :
+               - To get to validate a partition, disk parent must be not
+                 whole disk, and parent disk is not identifiable
+               - Action other than create and use_existing are ignored.
+
+               - for each create/use_existing partition :
+                   - identifiable, has kids - fail should not have slices
+                   - identifiable, no kids - good for non root pool
+                    - identifiable, no kids - fail if root pool
+                   - not identifiable, no kids - fail
+                   - not identifiable, has kids - good, process kids(slices)
+
+               - two partitions with same name on same disk, fail
+               - Only one is_solaris partition exists on a disk
+
+           - Slice validation :
+               - To get to validate a slice on sparc, disk parent must be not
+                 whole disk. and disk parent is not identifiable
+
+               - Any action other than create is ignored
+               - For each sparc create slices :
+                   - identifiable - good
+                   - not identifiable - fail
+
+               - For X86 slices, to get this far, parent partition must be
+                 not identifiable and actioned create/use-existing
+               - For each i386 create slice:
+                   - identifiable - good
+                   - not identifiable - fail
+
+               - two slices with same name on same disk, or partition, fail
+
+        '''
+
+        # Get Disk sections
+        disks = desired.get_children(class_type=Disk)
+        self.logger.debug("Validating DESIRED Disks")
+        self.logger.debug("Disks(Desired) =\n%s\n" %
+            (self.__pretty_print_disk(disks)))
+
+        tmp_disk_map = list()       # List of disk ctd's
+        tmp_partition_map = list()  # list of disk:partiton
+        tmp_slice_map = list()  # list of disk:slice or disk:partition:slice
+
+        for disk in disks:
+            if self.__check_disk_in_root_pool(disk) and disk.label != "VTOC":
+                raise SelectionError(
+                    "Root pool Disk '%s' must contain VTOC label not '%s' ." %
+                    (self.__pretty_print_disk(disk), disk.label))
+
+            # Validate in_zpool specified and in_vdev not, but > 1 vdevs
+            if disk.ctd in tmp_disk_map:
+                raise SelectionError(
+                    "Disk '%s' specified more than once." %
+                    (self.__pretty_print_disk(disk)))
+
+            # Add disk to temporary list map
+            tmp_disk_map.append(disk.ctd)
+
+            if disk.whole_disk:
+                # Whole disk cannot be in root pool
+                if self.__check_in_root_pool(disk):
+                    raise SelectionError("Disk '%s', Using whole disk "
+                        "and located in root pool not valid." % \
+                        (self.__pretty_print_disk(disk)))
+
+                # Whole disk cannot have children specified.
+                if disk.has_children:
+                    raise SelectionError("Disk '%s', Using whole disk "
+                        "and has partition/slice specified is not valid." % \
+                        (self.__pretty_print_disk(disk)))
+
+                # Disk is not uniquely identifiable
+                if not self.__device_is_identifiable(disk):
+                    raise SelectionError("Disk '%s', Using whole disk "
+                        "and not logically uniquely identifiable. "
+                        "in_zpool '%s', in_vdev '%s'" % \
+                        (self.__pretty_print_disk(disk), disk.in_zpool,
+                        disk.in_vdev))
+            else:
+                # Not Whole-Disk, no kids - fail
+                if len(disk.children) == 0:
+                    raise SelectionError("Disk '%s' Not using whole disk "
+                        "and no partition/slice specified which ss invalid."
+                        % (self.__pretty_print_disk(disk)))
+
+                # Not Whole-Disk, has kids, is identifiable - fail
+                if disk.has_children and \
+                    self.__device_is_identifiable(disk):
+                    raise SelectionError("Disk '%s', Not using whole disk, "
+                        "has children and is logically uniquely identifiable"
+                        " which is invalid.  in_zpool '%s', in_vdev '%s'" % \
+                        (self.__pretty_print_disk(disk), disk.in_zpool,
+                        disk.in_vdev))
+
+                # Not whole-disk, has kids, not identifiable - good
+                if disk.has_children and \
+                    not self.__device_is_identifiable(disk):
+                    # Process kids, identify should be set there
+                    solaris_partition_found = False
+                    for disk_kid in disk.children:
+                        # Partition, check only exists once
+                        if isinstance(disk_kid, Partition):
+                            partkey = disk.ctd + ":" + disk_kid.name
+                            if partkey in tmp_partition_map:
+                                raise SelectionError("Partition '%s' "
+                                    "specified twice on disk '%s'." % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk)))
+                            tmp_partition_map.append(partkey)
+
+                            # Ensure only one solaris partition resides on disk
+                            if disk_kid.is_solaris and \
+                               disk_kid.action != "delete":
+                                if solaris_partition_found:
+                                    raise SelectionError("Disk '%s' cannot "
+                                        "contain multiple Solaris partitions."
+                                        % (self.__pretty_print_disk(disk)))
+                                solaris_partition_found = True
+
+                            # Slice check only exists once on this disk/part.
+                            for slc in disk_kid.children:
+                                slicekey = disk.ctd + ":" + disk_kid.name + \
+                                    ":" + slc.name
+                                if slicekey in tmp_slice_map:
+                                    raise SelectionError("Slice '%s' "
+                                        "specified twice within partiton '%s'"
+                                        " on disk '%s'." % \
+                                        (slc.name, disk_kid.name,
+                                        self.__pretty_print_disk(disk)))
+                                tmp_slice_map.append(slicekey)
+
+                        # Slice check only exists once on this disk
+                        elif isinstance(disk_kid, Slice):
+                            slicekey = disk.ctd + ":" + disk_kid.name
+                            if slicekey in tmp_slice_map:
+                                raise SelectionError("Slice '%s' "
+                                    "specified twice on disk '%s'." % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk)))
+                            tmp_slice_map.append(slicekey)
+                        # Not partition/slice throw error
+                        else:
+                            raise SelectionError("Invalid child element on "
+                                "disk '%s' : '%s'." %
+                                (self.__pretty_print_disk(disk), disk_kid))
+
+                        # Partition, create or use_existing action
+                        if isinstance(disk_kid, Partition) and \
+                           disk_kid.action in \
+                           ["create", "use_existing_solaris2"]:
+
+                            # identifiable, has kids
+                            if self.__device_is_identifiable(disk_kid) and \
+                                disk_kid.has_children:
+                                raise SelectionError("Partition '%s' on disk "
+                                    "'%s' is logically uniquely "
+                                    "identifiable and has slice children "
+                                    "which is invalid. "
+                                    "in_zpool '%s', in_vdev '%s'" % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk),
+                                    disk_kid.in_zpool, disk_kid.in_vdev))
+
+                            # identifiable, no kids, in root pool
+                            if self.__device_is_identifiable(disk_kid) and \
+                                len(disk_kid.children) == 0 and \
+                                 self.__check_in_root_pool(disk_kid):
+                                raise SelectionError("Partition '%s' on disk "
+                                    "'%s' is logically uniquely "
+                                    "identifiable with no slice children "
+                                    "but is on root pool which is invalid. "
+                                    "in_zpool '%s', in_vdev '%s'" % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk),
+                                    disk_kid.in_zpool, disk_kid.in_vdev))
+
+                            # not identifiable, no kids - fail
+                            if not self.__device_is_identifiable(disk_kid) \
+                               and len(disk_kid.children) == 0 \
+                               and disk_kid.is_solaris:
+                                raise SelectionError("Partition '%s' on disk "
+                                    "'%s' is not logically uniquely "
+                                    "identifiable and has no slice children "
+                                    "whish is invalid. "
+                                    "in_zpool '%s', in_vdev '%s'" % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk),
+                                    disk_kid.in_zpool, disk_kid.in_vdev))
+
+                            # has kids, at least 1 should be identifiable
+                            if not self.__device_is_identifiable(disk_kid) \
+                               and disk_kid.has_children:
+                                found_identifiable_slice = True
+                                for slc in disk_kid.children:
+                                    if isinstance(slc, Slice) and \
+                                        slc.action == "create":
+                                        # X86 Slice not identifiable
+                                        if self.__device_is_identifiable(slc):
+                                            found_identifiable_slice = True
+
+                                    if not isinstance(slc, Slice):
+                                        raise SelectionError("Invalid child "
+                                          "element on partition '%s' : "
+                                          "'%s'." % (disk_kid.name, str(slc)))
+
+                                if not found_identifiable_slice:
+                                    raise SelectionError("Slice '%s'"
+                                        " on partition '%s' is not "
+                                        "logically uniquely "
+                                        "identifiable. in_zpool '%s'"
+                                        ", in_vdev '%s'" % \
+                                        (slc.name, disk_kid.name,
+                                        slc.in_zpool, slc.in_vdev))
+
+                        # Slice, create action
+                        elif isinstance(disk_kid, Slice) and \
+                            disk_kid.action == "create":
+                            # Sparc Slice not identifiable
+                            if not self.__device_is_identifiable(disk_kid):
+                                raise SelectionError("Slice '%s' on disk "
+                                    "'%s' is not logically uniquely "
+                                    "identifiable. "
+                                    "in_zpool '%s', in_vdev '%s'" % \
+                                    (disk_kid.name,
+                                    self.__pretty_print_disk(disk),
+                                    disk_kid.in_zpool, disk_kid.in_vdev))
+
+    def __device_is_identifiable(self, device):
+        '''A device can be uniquely identified by using one or both of
+           it's in_zpool and in_vdev attributes.
+
+           Input :
+               device - A disk, partition or slice device Object
+
+           Output :
+               None : if not uniquely identifiable in logical section
+               zpool/vdev tuple : Unique logical location
+        '''
+        unique_zpool = None
+        unique_vdev = None
+
+        # Both in_zpool and in_vdev are specified
+        if device.in_zpool is not None and device.in_vdev is not None:
+            vdev_key = device.in_zpool + ":" + device.in_vdev
+            if vdev_key not in self._vdev_map:
+                self.logger.debug("Device '%s' identification failed : "
+                    "Combination does not exist. "
+                    "in_zpool '%s', in_vdev '%s'." % \
+                    (device.name, device.in_zpool, device.in_vdev))
+                return None
+            unique_zpool = self._zpool_map[device.in_zpool]
+            unique_vdev = self._vdev_map[vdev_key]
+
+        # in_zpool specified, in_vdev not specified
+        elif device.in_zpool is not None and device.in_vdev is None:
+            if device.in_zpool not in self._zpool_map:
+                self.logger.debug("Device '%s' identification failed. "
+                    "Pool does not exist. in_zpool '%s', in_vdev '%s'." % \
+                    (device.name, device.in_zpool, device.in_vdev))
+                return None
+
+            unique_zpool = self._zpool_map[device.in_zpool]
+
+            pool_vdevs = self.__get_vdevs_in_zpool(device.in_zpool)
+            if len(pool_vdevs) > 1:
+                self.logger.debug("Device '%s' identification failed. "
+                    "More than one vdev in zpool. "
+                    "in_zpool '%s', in_vdev '%s'." % \
+                    (device.name, device.in_zpool, device.in_vdev))
+                return None
+
+            elif len(pool_vdevs) == 1:
+                # Only one vdev in this zpool so therefore device is
+                # uniquely identifiable, set in_vdev on this device
+                # for completeness.
+                device.in_vdev = pool_vdevs[0].name
+                unique_vdev = pool_vdevs[0]
+
+            else:
+                # Pool has no vdevs, this should not be the case at
+                # this juncture, throw exception.
+                raise SelectionError("Zpool '%s' does not contain any "
+                    "Vdev's." % (device.in_zpool))
+
+        # in_zpool not specified, in_vdev specified
+        elif device.in_zpool is None and device.in_vdev is not None:
+            # The only way a device can be uniquely identified in this
+            # Scenario is if the vdev name specified exists uniquely
+            # when compared across all pools
+            for vdev_key in self._vdev_map:
+                if self._vdev_map[vdev_key].name == device.in_vdev:
+                    if not unique_vdev:
+                        zpool_name = vdev_key.split(":")[0]
+                        unique_zpool = self._zpool_map[zpool_name]
+                        unique_vdev = self._vdev_map[vdev_key]
+                    else:
+                        # 2nd Vdev with this name, so not unique
+                        self.logger.debug(
+                            "Device '%s' identification failed. Vdev not "
+                            "unique. in_zpool '%s', in_vdev '%s'." % \
+                            (device.name, device.in_zpool, device.in_vdev))
+                        return None
+
+            # Vdev not found at all
+            if not unique_vdev:
+                self.logger.debug("Device '%s' identification failed. "
+                    "Vdev not found. in_zpool '%s', in_vdev '%s'." % \
+                    (device.name, device.in_zpool, device.in_vdev))
+                return None
+            else:
+                # We've found one unique vdev, set the device in_zpool
+                # for completeness.
+                device.in_zpool = unique_zpool.name
+
+        # Neither are set so not identifiable
+        else:
+            return None
+
+        return (unique_zpool, unique_vdev)
+
+    def __get_vdevs_in_zpool(self, zpoolname):
+        '''Given a zpool name, retrieve the list of vdevs in this
+           pool from the vdev_map.
+        '''
+
+        pool_vdevs = list()
+        start_vdev_key = zpoolname + ":"
+        for vdev_key in self._vdev_map:
+            if vdev_key.startswith(start_vdev_key):
+                pool_vdevs.append(self._vdev_map[vdev_key])
+
+        return pool_vdevs
+
+    @staticmethod
+    def __get_zpool_redundancies(zpool):
+        '''Traverse all vdev children in a zpool returning a unique list
+           of redundancy types defined in this zpool.
+        '''
+        vdev_redundancies = dict()
+
+        for vdev in zpool.children:
+            if isinstance(vdev, Vdev):
+                vdev_redundancies[vdev.redundancy] = True
+
+        return vdev_redundancies.keys()
+
+    def __get_vdev_devices(self, zpool_name, vdev_name, device_map):
+        '''Get list of devices with this vdev name.
+           If not set then recursively check for in_vdev setting on children.
+
+           Cater for device_map being either a list or a dictionary.
+
+           If in_vdev set on both parent and children, throw exception.
+
+           Device can be identified as part of a vdev either via in_vdev or
+           in_zpool or both. As long as it's uniquely identifiable.
+
+           Remember a vdev name is only unique within its zpool; different
+           zpools can contain vdevs with the same name.
+        '''
+        vdev_devices = list()
+
+        # Normalize the input: a dict contributes its values, any other
+        # iterable is used as-is, anything else has nothing to traverse.
+        if isinstance(device_map, dict):
+            tmp_device_map = device_map.values()
+        elif self.__is_iterable(device_map):
+            tmp_device_map = device_map
+        else:
+            # Return now, as nothing to traverse
+            return vdev_devices
+
+        for device in tmp_device_map:
+            # (zpool, vdev) tuple when uniquely identifiable, else None
+            identity_tuple = self.__device_is_identifiable(device)
+            if identity_tuple:
+                if (isinstance(device, Disk) or
+                    isinstance(device, Partition)) and device.has_children:
+                    # If disk or partition and has children, and is
+                    # identifiable this is an error, as one/all of the
+                    # children should contain identifying information
+                    device_str = self.__get_device_type_string(device)
+                    raise SelectionError("%s '%s' is uniquely identifiable "
+                        "and it has children. Invalid, as children should "
+                        "contain identifying information. "
+                        "in_zpool '%s', in_vdev '%s'." % (device_str,
+                        device.name, device.in_zpool, device.in_vdev))
+
+                # Append this device if the found device matches both
+                # zpool and vdev names
+                if identity_tuple[0].name == zpool_name and \
+                    identity_tuple[1].name == vdev_name:
+                    vdev_devices.append(device)
+
+            else:
+                if device.in_zpool is not None or device.in_vdev is not None:
+                    # Throw exception, as cannot identify uniquely yet
+                    # some logical identification information present
+                    device_str = self.__get_device_type_string(device)
+
+                    raise SelectionError("Logical information present on "
+                        "%s '%s', but is not enough to uniquely identify it."
+                        " in_zpool '%s', in_vdev '%s'." % (device_str,
+                        device.name, device.in_zpool, device.in_vdev))
+
+                if (isinstance(device, Disk) or
+                    isinstance(device, Partition)) and device.has_children:
+                    # Device has children which should identify location;
+                    # recurse into them and collect any matches.
+                    tmp_devices = self.__get_vdev_devices(zpool_name,
+                        vdev_name, device.children)
+                    for dev in tmp_devices:
+                        vdev_devices.append(dev)
+
+        return vdev_devices
+
+    @staticmethod
+    def __get_device_type_string(device):
+        '''Get a descriptive string for the device'''
+        if isinstance(device, Disk):
+            device_str = "Disk"
+        elif isinstance(device, Partition):
+            device_str = "Partition"
+        elif isinstance(device, Slice):
+            device_str = "Slice"
+        return device_str
+
+    def __check_disk_in_root_pool(self, disk):
+        '''Tests if a specified disk is in the root pool
+           if the disk does not have in_zpool, in_vdev specified
+           then start processing children. If no chilcren throw
+           error.
+           Check all children, and if any one of them are in the root
+           pool return true, otherwise return false.
+        '''
+        if self.__check_in_root_pool(disk):
+            return True
+        elif disk.has_children:
+            for disk_kid in disk.children:
+                if self.__check_in_root_pool(disk_kid):
+                    return True
+
+                if isinstance(disk_kid, Partition) and disk_kid.children > 0:
+                    for slc in disk_kid.children:
+                        if self.__check_in_root_pool(slc):
+                            return True
+        return False
+
+    def __check_in_root_pool(self, device):
+        '''Tests if a device (disk/partition/slice) is in the root pool.
+        '''
+
+        if device.in_zpool == self._root_pool.name:
+            return True
+
+        rpool_vdev_id = "%s:%s" % \
+            (self._root_pool.name, device.in_vdev)
+
+        if device.in_zpool is None and \
+           rpool_vdev_id in self._vdev_map:
+            # Assumption is we've validated the uniqueness of vdev
+            # specification in the object earlier in __check_valid_zpool_vdev()
+            return True
+
+        return False
+
+    def __handle_partition(self, partition, new_disk, discovered_disk):
+        '''Handle the partition as specified in the manifest.
+
+           Will return a new Partition object to be inserted in the DESIRED
+           tree such that Target Instantiation is able to perform.
+        '''
+        # Ensure this is an i386 machine if partitions are specified.
+        if platform.processor() != "i386":
+            raise SelectionError(
+                "Cannot specify partitions on this machine architecture")
+
+        existing_partition = discovered_disk.get_first_child(
+            partition.name, class_type=Partition)
+
+        if partition.action in ["preserve", "delete", "use_existing_solaris2"]:
+            if existing_partition is None:
+                # If it's delete action, just do nothing since it's not really
+                # an issue to delete something that's not there.
+                if partition.action == "delete":
+                    self.logger.warn(
+                        "Attempt to delete non-existant partition"
+                        " %s on disk %s ignored" % (partition.name,
+                        self.__pretty_print_disk(discovered_disk)))
+                    # Return now since there's nothing to do.
+                    return
+                else:
+                    raise SelectionError(
+                        "Cannot %s partition %s that doesn't exist on disk %s"
+                        % (partition.action, partition.name,
+                           self.__pretty_print_disk(discovered_disk)))
+            else:
+                # Do nothing, just copy over.
+                new_partition = copy.copy(existing_partition)
+                new_partition.action = partition.action
+        elif partition.action == "create":
+            if existing_partition is not None:
+                self.logger.warn(
+                    "Creating partition %s on disk %s will "
+                    "destroy existing data." % \
+                    (partition.name,
+                     self.__pretty_print_disk(discovered_disk)))
+
+            new_partition = copy.copy(partition)
+        else:
+            raise SelectionError("Unexpected action '%s' on partition %s"
+                % (partition.name, partition.action))
+
+        if partition.action in ["use_existing_solaris2", "create"]:
+            # Mark partition as ACTIVE, if it's Solaris
+            if new_partition.is_solaris:
+                # Need to set attribute, since Partition.change_bootid
+                # manipulates the parent object, which there isn't yet...
+                # Only applies to primary partitions
+                if partition.is_primary:
+                    new_partition.bootid = Partition.ACTIVE
+
+            if new_partition.size.sectors == 0 or \
+               new_partition.start_sector is None:
+                # According to DTD, if size is not specified, should use
+                # parents size information.
+                # If start_sector is None, try to allocate size into a gap.
+                if partition.action == "create":
+                    if partition.is_logical:
+                        gaps = new_disk.get_logical_partition_gaps()
+                    else:
+                        gaps = new_disk.get_gaps()
+                    largest_gap = None
+                    for gap in gaps:
+                        if new_partition.start_sector is None and \
+                           new_partition.size.sectors != 0 and \
+                           gap.size >= new_partition.size:
+                            # Specified a size with only start_sector missing.
+                            new_partition.start_sector = gap.start_sector
+                            break
+                        if largest_gap is None or \
+                           gap.size.sectors > largest_gap.size.sectors:
+                            largest_gap = gap
+                    else:
+                        # Will be skipped if searching for a gap to insert a
+                        # partition of a given size succeeds.
+                        if largest_gap is None or \
+                           largest_gap.size < \
+                           self.controller.minimum_target_size or \
+                           largest_gap.size < new_partition.size:
+                            raise SelectionError("Failed to find gap on disk"
+                                " of sufficient size to put partition"
+                                " %s in to" % (new_partition.name))
+                        new_partition.start_sector = largest_gap.start_sector
+                        new_partition.size = largest_gap.size
+                else:
+                    raise SelectionError(
+                        "Cannot find size of existing partition '%s' on "
+                        "discovered_disk '%s'"
+                        % (new_partition.name, discovered_disk.name))
+
+            # Need to do more than just copy
+            if discovered_disk.disk_prop is not None and \
+               new_partition.size > discovered_disk.disk_prop.dev_size:
+                raise SelectionError(
+                    "Partition %s has a size larger than the disk %s" %
+                    (new_partition.name,
+                    self.__pretty_print_disk(discovered_disk)))
+
+            # Insert partition now, since shadowlist validation will require
+            # that the disk be known for partitions or children.
+            new_disk.insert_children(new_partition)
+
+            # Only process solaris partitions
+            if partition.is_solaris:
+                if not partition.has_children:
+                    # If it's a root pool, or we have a partition that's not in
+                    # a pool we assume it should be in the root pool.
+                    if self.__check_in_root_pool(partition) \
+                       or (partition.in_zpool is None and \
+                           partition.in_vdev is None and \
+                           self._root_pool is not None):
+
+                        # Need to add a slice since a root pool cannot exist on
+                        # with partition vdevs, must be slice vdevs
+                        start = 1  # Will be rounded up to cylinder by TI
+                        slice_size = new_partition.size.sectors
+                        new_slice = new_partition.add_slice("0", start,
+                            slice_size, Size.sector_units, force=True)
+
+                        if partition.in_zpool is None and \
+                           partition.in_vdev is None:
+
+                            # Assume it should be in the root pool since
+                            # nothing specific provided in the manifest
+                            new_slice.in_zpool = self._root_pool.name
+                            if self._root_vdev is not None:
+                                new_slice.in_vdev = self._root_vdev.name
+                        else:
+                            # Copy in_zpool/vdev to slice, and remove from
+                            # partition.
+                            new_slice.in_zpool = new_partition.in_zpool
+                            new_slice.in_vdev = new_partition.in_vdev
+                            new_partition.in_zpool = None
+                            new_partition.in_vdev = None
+
+                else:
+                    self.__handle_slices(partition.children,
+                                         new_partition, existing_partition)
+        else:
+            # Insert partition now, since shadowlist validation will require
+            # that the disk be known for partitions or children.
+            new_disk.insert_children(new_partition)
+
+        return new_partition
+
+    def __handle_slice(self, orig_slice, parent_object, existing_parent_obj):
+        '''Handle the slice as specified in the manifest.
+
+           Will return a new Slice object to be inserted in the DESIRED
+           tree such that Target Instantiation is able to perform.
+        '''
+        if orig_slice.name == "2":
+            # Skip s2 definition, shouldn't be there, and Target Instantiation
+            # will create
+            self.logger.warning("Skipping orig_slice 2 definition")
+            return None
+
+        existing_slice = existing_parent_obj.get_first_child(orig_slice.name,
+                                                    class_type=Slice)
+
+        if orig_slice.action in ["preserve", "delete"]:
+            if existing_slice is None:
+                # If it's delete action, just do nothing since it's not really
+                # an issue to delete something that's not there.
+                if orig_slice.action == "delete":
+                    if isinstance(parent_object, Disk):
+                        self.logger.warn(
+                            "Attempt to delete non-existant slice"
+                            " %s on disk %s ignored" %
+                            (orig_slice.name,
+                            self.__pretty_print_disk(parent_object)))
+                    else:
+                        self.logger.warn(
+                            "Attempt to delete non-existant slice"
+                            " %s on partition '%s', disk %s ignored" %
+                            (orig_slice.name, parent_object.name,
+                            self.__pretty_print_disk(parent_object.parent)))
+                    # Return now since there's nothing to do.
+                    return None
+                else:
+                    if isinstance(parent_object, Disk):
+                        raise SelectionError(
+                            "Cannot %s slice %s that doesn't exist on disk %s"
+                            % (orig_slice.action, orig_slice.name,
+                            self.__pretty_print_disk(discovered_disk)))
+                    else:
+                        raise SelectionError(
+                            "Cannot %s slice %s that doesn't exist on "
+                            "partition %s, disk %s" % (orig_slice.action,
+                            orig_slice.name, parent_object.name,
+                            self.__pretty_print_disk(parent_object.parent)))
+            else:
+                # Do nothing, just copy over.
+                new_slice = copy.copy(existing_slice)
+                new_slice.action = orig_slice.action
+        elif orig_slice.action == "create":
+            if existing_slice is not None:
+                if isinstance(parent_object, Disk):
+                    self.logger.warn(
+                        "Creating slice %s on disk %s will "
+                        "destroy existing data." % \
+                        (orig_slice.name,
+                        self.__pretty_print_disk(parent_object)))
+                else:
+                    self.logger.warn(
+                        "Creating slice %s on partition %s, disk %s will "
+                        "destroy existing data." % \
+                        (orig_slice.name, parent_object.name,
+                        self.__pretty_print_disk(parent_object.parent)))
+
+            new_slice = copy.copy(orig_slice)
+        else:
+            raise SelectionError("Unexpected action '%s' on slice %s"
+                % (orig_slice.name, orig_slice.action))
+
+        if new_slice.action == "create":
+
+            if isinstance(parent_object, Disk):
+                if parent_object.disk_prop is not None and \
+                   new_slice.size > parent_object.disk_prop.dev_size:
+                    raise SelectionError(
+                        "Slice %s has a size larger than the disk %s" %
+                        (new_slice.name,
+                         self.__pretty_print_disk(parent_object)))
+            else:
+                # It's a partition
+                if parent_object.size is not None and \
+                   new_slice.size > parent_object.size:
+                    raise SelectionError(
+                        "Slice %s has a size larger than the containing "
+                        "partition %s" % (new_slice.name,
+                        parent_object.name))
+
+        if new_slice.action == "create" and (new_slice.size.sectors == 0 or
+           new_slice.start_sector is None):
+            # According to DTD, if size is not specified, should use
+            # parents size information.
+
+            if new_slice.size.sectors == 0 or new_slice.start_sector is None:
+                # According to DTD, if size is not specified, should use
+                # parents size information.
+                # If start_sector is None, try to allocate size into a gap.
+                if orig_slice.action == "create":
+                    gaps = parent_object.get_gaps()
+                    largest_gap = None
+                    for gap in gaps:
+                        if new_slice.start_sector is None and \
+                           new_slice.size.sectors > 0 and \
+                           gap.size >= new_slice.size:
+                            # Specified a size with only start_sector missing.
+                            new_slice.start_sector = gap.start_sector
+                            break
+                        if largest_gap is None or \
+                           gap.size.sectors > largest_gap.size.sectors:
+                            largest_gap = gap
+                    else:
+                        # Will be skipped if searching for a gap to insert a
+                        # slice of a given size succeeds.
+                        if largest_gap is None or \
+                           largest_gap.size < \
+                           self.controller.minimum_target_size:
+                            raise SelectionError("Failed to find gap on disk"
+                                " of sufficient size to put slice"
+                                " %s in to" % (new_slice.name))
+                        new_slice.start_sector = largest_gap.start_sector
+                        new_slice.size = largest_gap.size
+
+        return new_slice
+
+    def __handle_slices(self, slices, new_parent_obj, existing_parent_obj):
+        '''Process list of slices and attach to new_parent_obj as children.
+
+           Returns a list of new slices.
+        '''
+        # Specifics in manifest take precedence, so
+        # if they exist already, ignore them.
+        if existing_parent_obj is not None:
+            tmp_slices = list()
+            for exist_slice in existing_parent_obj.children:
+                skip_slice = False
+                for mf_slice in slices:
+                    if mf_slice.name == exist_slice.name:
+                        # Already inserted, skip
+                        break
+                else:
+                    slice_copy = copy.copy(exist_slice)
+                    # Remove in_zpool/in_vdev values
+                    # because these should exist in
+                    # manifest but don't and will cause
+                    # validations to fail.
+                    slice_copy.in_zpool = None
+                    slice_copy.in_vdev = None
+
+                    tmp_slices.append(slice_copy)
+
+            # Temporarily skip validation since
+            # may cause validation failures
+            new_parent_obj.validate_children = False
+            new_parent_obj.insert_children(tmp_slices)
+            new_parent_obj.validate_children = True
+
+        # Need to handle all preserved slices first inserting them into
+        # new parent, this ensures get_gaps works for newly created slices
+        for orig_slice in [s for s in slices if s.action == "preserve"]:
+            new_slice = self.__handle_slice(orig_slice, new_parent_obj,
+                                            existing_parent_obj)
+            if new_slice is not None:
+                new_parent_obj.insert_children(new_slice)
+
+        # While processing slices, remember whether we found
+        # any slices with an in_zpool or in_vdev, and if not
+        # then we will use the first large enough slice.
+        first_large_slice = None
+        found_zpool_vdev_slice = False
+        for orig_slice in [s for s in slices if s.action != "preserve"]:
+            new_slice = self.__handle_slice(orig_slice, new_parent_obj,
+                                            existing_parent_obj)
+            if new_slice is not None:
+                new_parent_obj.insert_children(new_slice)
+                if new_slice.in_zpool is None and \
+                   new_slice.in_vdev is None:
+                    if first_large_slice is None and \
+                       new_slice.action == "create" and \
+                       new_slice.size >= self.controller.minimum_target_size:
+                        # Remember the first sufficiently large slice thats
+                        # got a create or use_existing action
+                        first_large_slice = new_slice
+                else:
+                    found_zpool_vdev_slice = True
+
+        # Check to see if we didn't find any specific references to vdevs
+        if not found_zpool_vdev_slice:
+            if first_large_slice is not None:
+                # Set in_zpool/in_vdev on slice, and remove from
+                # disk parent object (just in case)
+                first_large_slice.in_zpool = self._root_pool.name
+                if self._root_vdev is not None:
+                    first_large_slice.in_vdev = self._root_vdev.name
+                new_parent_obj.in_zpool = None
+                new_parent_obj.in_vdev = None
+            else:
+                raise SelectionError(
+                    "No slice large enough to install to was found on disk")
+
+    def __handle_disk(self, disk):
+        '''Handle a disk object.
+
+           Find the disk in the discovered tree, take a copy, and then if
+           necessary add/remove partitions based on version passed that came
+           from the manifest.
+
+           Returns: the new disk to be inserted into the DESIRED Tree
+        '''
+
+        self.logger.debug("Processing disk : %s" %
+            self.__pretty_print_disk(disk))
+
+        ret_disk = None
+        errsvc.clear_error_list()
+
+        discovered_disk = self.__find_disk(disk)
+
+        if discovered_disk is None:
+            raise SelectionError(
+               "Unable to locate the disk '%s' on the system." %
+                self.__pretty_print_disk(disk))
+
+        if discovered_disk.ctd in self._disk_map:
+            # Seems that the disk is specified more than once in the manifest!
+            raise SelectionError(
+                "Disk '%s' matches already used disk '%s'." %
+                (self.__pretty_print_disk(disk), discovered_disk.name))
+
+        # Check that in_zpool and in_vdev values from manifest are valid
+        self.__check_valid_zpool_vdev(disk)
+
+        if disk.whole_disk:
+            # Fail if we somehow get whole_disk = True and partitions/slices
+            if disk.has_children:
+                raise SelectionError("Invalid request to use whole disk when"
+                    " specifying partitions or slices on disk %s" %
+                    (self.__pretty_print_disk(disk)))
+
+            # Only copy the disk, not it's children.
+            disk_copy = copy.copy(discovered_disk)
+            self._disk_map[disk_copy.ctd] = disk_copy
+            self.logger.debug("Using Whole Disk")
+            if disk.in_zpool is None and disk.in_vdev is None:
+                self.logger.debug("Zpool/Vdev not specified")
+                if self._is_generated_root_pool:
+                    self.logger.debug("Assigning to temporary root pool")
+                    # Assign to temporary root pool if one exists
+                    disk.in_zpool = self._root_pool.name
+                    disk.in_vdev = self._root_vdev.name
+                elif self._root_pool is not None:
+                    # Assign to real root pool if one exists
+                    disk.in_zpool = self._root_pool.name
+
+            if not self.__check_in_root_pool(disk):
+                self.logger.debug("Disk Not in root pool")
+                # We can leave it for ZFS to partition itself optimally
+                disk_copy.whole_disk = True
+                # Add vdev/zpool values to disk_copy
+                disk_copy.in_zpool = disk.in_zpool
+                disk_copy.in_vdev = disk.in_vdev
+            else:
+                disk_copy.whole_disk = False
+                # When bug : 7037884 we can then add this back and support
+                # The Changing of a GPT disk to VTOC automatically
+                # disk_copy.label = "VTOC"
+                self.logger.debug("Disk in root pool")
+
+                # Layout disk using partitions since root pools can't use
+                # EFI/GPT partitioned disks to boot from yet.
+                self.logger.debug("Whole Disk, applying default layout")
+                root_vdev = None
+                if self._root_vdev is not None:
+                    root_vdev = self._root_vdev.name
+                elif disk.in_vdev is not None:
+                    # If the disk specified a vdev, copy it.
+                    root_vdev = disk.in_vdev
+                self.controller.apply_default_layout(disk_copy, False, True,
+                    in_zpool=self._root_pool.name, in_vdev=root_vdev)
+
+            ret_disk = disk_copy
+        else:
+            # Copy disk, and and try to merge partitions and slices from
+            # manifest and existing layouts.
+            disk_copy = copy.copy(discovered_disk)
+            self._disk_map[disk_copy.ctd] = disk_copy
+
+            # Get partitions and slices from manifest version of Disk
+            partitions = disk.get_children(class_type=Partition)
+            slices = disk.get_children(class_type=Slice)
+
+            # Do some basic sanity checks
+
+            # If not partitions, but have slices, fail now, if i386
+            if platform.processor() == "i386" and not partitions and slices:
+                raise SelectionError("Invalid specification of slices "
+                        "outside of partition on the %s platform"
+                        % (platform.processor()))
+
+            # If partitions, fail now, if not i386
+            if platform.processor() != "i386" and partitions:
+                raise SelectionError("Invalid specification of partitions "
+                        "on this %s platform" % (platform.processor()))
+
+            if (platform.processor() == "i386" and not partitions) or \
+               (platform.processor() != "i386" and not slices):
+                raise SelectionError(
+                    "If whole_disk is False, you need to provide"
+                    " information for partitions or slices")
+
+            if partitions:
+                for partition in partitions:
+                    # If there is no name specified only seek if no name
+                    # provided.
+                    if (partition.name is None or
+                       len(partition.name) != 0) and \
+                       partition.action == "use_existing_solaris2":
+                        # Pre-empt this by replacing it with a discovered
+                        # Solaris2 partition if one exists
+                        solaris_partition = None
+                        for existing_partition in discovered_disk.children:
+                            if existing_partition.is_solaris:
+                                solaris_partition = existing_partition
+
+                        if solaris_partition is None:
+                            raise SelectionError(
+                                "Cannot find a pre-existig Solaris "
+                                "partition on disk %s"
+                                % (self.__pretty_print_disk(discovered_disk)))
+                        else:
+                            tmp_partition = copy.copy(solaris_partition)
+                            # Ensure partition action maintained.
+                            tmp_partition.action = partition.action
+                            if not partition.has_children:
+                                if solaris_partition.has_children:
+                                    # Because no slices are being specified in
+                                    # the manifest we have to assume we are to
+                                    # create a slice for root.
+                                    # TODO: Should this be looking for a gap?
+                                    self.logger.warn(
+                                        "Existing partition's slices are not"
+                                        "being preserved")
+
+                                # Temporarily skip validation since
+                                # tmp_partition not yet in disk and will cause
+                                # failures.
+                                tmp_partition.validate_children = False
+
+                                # Add a slice 0 for the root pool
+                                # Size calculated automatically later.
+                                tmp_slice = tmp_partition.add_slice("0", 1, 0,
+                                    Size.sector_units, force=True)
+                                tmp_partition.validate_children = True
+
+                                # Assign to root pool
+                                tmp_slice.in_zpool = self._root_pool.name
+                                if self._root_vdev is not None:
+                                    tmp_slice.in_vdev = self._root_vdev.name
+
+                            else:
+                                # Specifics in manifest take precedence, so
+                                # copy first.
+                                tmp_slices = list()
+                                for mf_slice in partition.children:
+                                    tmp_slices.append(copy.copy(mf_slice))
+
+                                for exist_slice in solaris_partition.children:
+                                    skip_slice = False
+                                    for mf_slice in partition.children:
+                                        if mf_slice.name == exist_slice.name:
+                                            # Already inserted, skip
+                                            break
+                                    else:
+                                        slice_copy = copy.copy(exist_slice)
+                                        # Remove in_zpool/in_vdev values
+                                        # because these should exist in
+                                        # manifest but don't and will cause
+                                        # validations to fail.
+                                        slice_copy.in_zpool = None
+                                        slice_copy.in_vdev = None
+
+                                        tmp_slices.append(slice_copy)
+
+                                # Temporarily skip validation since
+                                # tmp_partition not yet in disk and will cause
+                                # failures.
+                                tmp_partition.validate_children = False
+                                tmp_partition.insert_children(tmp_slices)
+                                tmp_partition.validate_children = True
+
+                            # Do susbstitution
+                            partitions.remove(partition)
+                            partitions.insert(int(tmp_partition.name),
+                                tmp_partition)
+                            # Break now, since partitions list has changed.
+                            break
+
+                # Copy over any existing partitions, if they are not in the
+                # manifest, since the manifest takes priority.
+                # Sort byname to ensure we handle primaries first.
+                skip_existing_logicals = False
+                for existing_partition in \
+                    sorted(discovered_disk.children, key=attrgetter("name")):
+                    for mf_partition in partitions:
+                        if mf_partition.name == existing_partition.name:
+                            if existing_partition.is_extended and \
+                               mf_partition.action in ["create", "delete"]:
+                                # If we're replacing an extended partition then
+                                # we need to ensure any existing logicals are
+                                # not copied over, effectively deleting them.
+                                skip_existing_logicals = True
+                            break
+                    else:
+                        # Copy everything else, unless it's a logical and we're
+                        # supposed to be skipping them.
+                        if not (existing_partition.is_logical and 
+                                skip_existing_logicals):
+                            partitions.append(copy.copy(existing_partition))
+                            # Also insert to new disk so gaps calculations work.
+                            disk_copy.insert_children(
+                                copy.copy(existing_partition))
+
+                extended_partitions = [p for p in partitions if p.is_extended]
+                if len(extended_partitions) > 1:
+                    raise SelectionError(
+                        "It is only possible to have at most 1 extended"
+                        " partition defined")
+
+                for partition in partitions:
+                    if disk_copy.get_first_child(partition.name) is not None:
+                        # Skip this, we've already processed it above.
+                        continue
+
+                    # Ensure partition is set to be in temporary root pool
+                    # __handle_partition() relies on this.
+                    if partition.is_solaris:
+                        if self._is_generated_root_pool \
+                           and not partition.has_children \
+                           and partition.action == "create" \
+                           and partition.in_zpool is None \
+                           and partition.in_vdev is None:
+                            # Assign to temporary root pool
+                            partition.in_zpool = self._root_pool.name
+                            partition.in_vdev = self._root_vdev.name
+
+                    # Process partitions, and contained slices
+                    new_partition = self.__handle_partition(partition,
+                        disk_copy, discovered_disk)
+
+                    # Insertion to disk done in __handle_partition()
+
+            else:
+                # Can assume we're on SPARC machine now.
+                if len(slices) == 1 and slices[0].name == "2":
+                    # There is only one slice we need to check to see if
+                    # it's slice "2". If is it we need to add a slice "0"
+                    # and set in_vdev and in_zpool.
+                    # TODO: This will need to be updated for GPT when ready
+                    # Add a slice
+                    start = 1  # Will be rounded up to cylinder by TI
+                    slice_size = \
+                      disk_copy.disk_copy_prop.dev_size.sectors - start
+
+                    new_slice = disk_copy.add_slice("0", start,
+                        slice_size, Size.sector_units, force=True)
+                    if slices[0].in_vdev is not None:
+                        new_slice.in_vdev = slices[0].in_vdev
+                    if self._root_pool is not None:
+                        new_slice.in_zpool = self._root_pool.name
+                else:
+                    self.__handle_slices(slices, disk_copy, discovered_disk)
+
+            ret_disk = disk_copy
+
+        self.logger.debug("Finished processing disk : %s" %
+            self.__pretty_print_disk(disk))
+
+        # Check error service for errors
+        errors = errsvc.get_all_errors()
+
+        # Found errors and they cannot be ignored
+        if errors and not self.__can_ignore_errors(errors):
+            # Print desired contents to log
+            existing_desired = \
+                self.doc.persistent.get_first_child(Target.DESIRED)
+            if existing_desired:
+                self.logger.debug("Desired =\n%s\n" % (str(existing_desired)))
+            self.logger.debug("Disk =\n%s\n" % (str(ret_disk)))
+            errstr = "Following errors occurred processing disks :\n%s" % \
+                (str(errors[0]))
+            raise SelectionError(errstr)
+
+        return ret_disk
+
+    def __can_ignore_errors(self, errors):
+        '''
+            Process list of errors found in error service, and make a
+            judgement call on whether we can ignore them.
+
+            Add errors being ignored and reasoning here.
+
+            Errors to ignore for physical :
+            - SliceInUseError() we are overriding this existing slice
+              so we can ignore.
+        '''
+        # NOTE(review): this early return disables the error-ignoring
+        # logic entirely — every error from the error service is treated
+        # as fatal. The code below is currently unreachable; presumably
+        # kept so SliceInUseError filtering can be re-enabled later.
+        # Confirm this disable is still intended.
+        return False
+        can_ignore = True
+
+        for error in errors:
+            for key in error.error_data:
+                if error.mod_id == "physical validation" and \
+                    isinstance(error.error_data[key], \
+                        ShadowPhysical.SliceInUseError):
+                    # We can ignore, just pass onto next error
+                    pass
+                else:
+                    # Error does not match one we can ignore, so set return
+                    # value and break.
+                    # NOTE(review): this break only exits the inner loop
+                    # over error_data keys; the outer loop over errors
+                    # continues — harmless while the code is unreachable,
+                    # but worth fixing if ever re-enabled.
+                    can_ignore = False
+                    break
+
+        return can_ignore
+
+    def __create_temp_logical_tree(self, existing_logicals):
+        '''Create a set of logicals that we will use should there be no other
+           logicals defined in the manifest.
+        '''
+        if existing_logicals:
+            # Add Zpool to existing logical, pick first one
+            logical = existing_logicals[0]
+        else:
+            logical = None
+
+        logical = self.controller.apply_default_logical(logical,
+            self.be_mountpoint, redundancy="mirror")
+
+        zpool = logical.get_first_child(class_type=Zpool)
+        vdev = zpool.get_first_child(class_type=Vdev)
+        be = zpool.get_first_child(class_type=BE)
+
+        # Set instance variables
+        self._is_generated_root_pool = True
+        self._root_pool = zpool
+        self._root_vdev = vdev
+        self._be = be
+        # Add pool to maps so final validations will work
+        self._zpool_map[zpool.name] = zpool
+        vdev_key = zpool.name + ":" + vdev.name
+        self._vdev_map[vdev_key] = vdev
+
+        return logical
+
+    def __cleanup_temp_logical(self, logical, existing_logicals):
+        if not self._is_generated_root_pool:
+            return
+
+        if logical not in existing_logicals:
+            logical.delete()
+        else:
+            logical.delete_children(self._root_pool)
+
+        # Remove pool from maps since we're not using it.
+        del self._zpool_map[self._root_pool.name]
+        vdev_key = self._root_pool.name + ":" + self._root_vdev.name
+        del self._vdev_map[vdev_key]
+        # Reset instance variables
+        self._is_generated_root_pool = False
+        self._root_pool = None
+        self._root_vdev = None
+        self._be = None
+
+    def __handle_target(self, target):
+        '''Process target section in manifest'''
+
+        # Reset all local map information
+        self.__reset_maps()
+
+        new_desired_target = None
+        logical_inserted = False
+        skip_disk_processing = False
+
+        # Get Logical sections
+        logicals = target.get_children(class_type=Logical)
+
+        # It's possible that there are no logicial sections.
+        if logicals:
+            for logical in logicals:
+                new_logical = self.__handle_logical(logical)
+                if new_logical is not None:
+                    if new_desired_target is None:
+                        new_desired_target = self.__get_new_desired_target()
+                    new_desired_target.insert_children(new_logical)
+
+        if new_desired_target is None:
+            all_whole_disk = None
+            disks = target.get_children(class_type=Disk)
+            for disk in disks:
+                if disk.whole_disk and disk.in_zpool is None \
+                   and disk.in_vdev is None:
+                    if all_whole_disk is None:
+                        all_whole_disk = True
+                else:
+                    all_whole_disk = False
+                    break
+
+            if all_whole_disk is not None and all_whole_disk:
+                # Can use TargetController now
+                # Call initialize again to ensure logicals created, since
+                # previously called to not create logicals.
+                self.controller.initialize(no_initial_disk=True,
+                                           unique_zpool_name=True)
+
+                # Disk specified in target may not contain a name, find
+                # A matching disk in the discovered tree
+                discovered_disks = list()
+                for disk in disks:
+                    dd = self.__find_disk(disk)
+                    if dd is not None:
+                        discovered_disks.append(dd)
+
+                if not discovered_disks:
+                    raise SelectionError("Failed to match target disk(s) "
+                        "discovered disks.")
+
+                selected_disks = self.controller.select_disk(discovered_disks,
+                    use_whole_disk=True)
+
+                # When bug : 7037884 we can then add this back and support
+                # The Changing of a GPT disk to VTOC automatically
+                #for disk in selected_disks:
+                #    # Ensure we're using VTOC labelling until GPT integrated
+                #    disk.label = "VTOC"
+
+                # Target Controller will insert default root pool need to
+                # Set this for validation of name later on
+                self._is_generated_root_pool = True
+
+                # Need to update the logical map, as new one just created
+                new_desired_target = self.__get_new_desired_target()
+
+                # Get New Logical sections
+                logicals = new_desired_target.get_children(class_type=Logical)
+
+                if logicals is not None and logicals:
+                    for logical in logicals:
+                        # A logical may have been specified just for
+                        # the purposes of noswap and nodump, ensure
+                        # This information is passed onto handle_logical
+                        if self._nozpools:
+                            logical.noswap = self._noswap
+                            logical.nodump = self._nodump
+                        # No need to insert into new_desired, already there
+                        self.__handle_logical(logical)
+
+                # Update disk map
+                self.__add_disks_to_map(selected_disks)
+
+                skip_disk_processing = True
+
+        if not skip_disk_processing:
+            if self._root_pool is None:
+                # There is no zpool, so we will add one temporarily
+                # This will allow us to fill-in the in_zpool/in_vdev for disks
+                # that don't have any explicitly set already.
+                tmp_logical = self.__create_temp_logical_tree(logicals)
+                if new_desired_target is None:
+                    new_desired_target = self.__get_new_desired_target()
+                if tmp_logical not in logicals or self._nozpools:
+                    # Could be re-using existing logical section.
+                    new_desired_target.insert_children(tmp_logical)
+
+            # It's also possible to have no disks, if so install to first
+            # available disk that is large enough to install to.
+            # Should be cycling through manifest disks or discovered disks
+            # looking at __handle_disk
+            disks = target.get_children(class_type=Disk)
+
+            # If no Disks, but have a logical section, then we need to
+            # auto-select, and add disk.
+            if not disks and self._root_pool is not None and \
+                self._root_pool.action not in self.PRESERVED:
+                disk = self.controller.select_initial_disk()
+
+                if disk is not None:
+                    self.logger.info("Selected Disk(s) : %s" % \
+                        (self.__pretty_print_disk(disk)))
+                    # When bug 7037884 is fixed we can add this back and
+                    # support the changing of a GPT disk to VTOC automatically
+                    ## Ensure we're using a VTOC label
+                    #disk.label = "VTOC"
+                    pool_vdevs = self.__get_vdevs_in_zpool(
+                        self._root_pool.name)
+                    root_vdev_name = None
+                    if self._root_vdev is not None:
+                        root_vdev_name = self._root_vdev.name
+                    elif len(pool_vdevs) > 1:
+                        self.logger.debug(
+                            "Automatic disk selection failed. "
+                            "More than one possible vdev in zpool. "
+                            "in_zpool '%s'." % (self._root_pool.name))
+                    elif len(pool_vdevs) == 1:
+                        root_vdev_name = pool_vdevs[0].name
+
+                    # Ensure using whole-disk in a way suitable for root pool
+                    self.controller.apply_default_layout(disk, False, True,
+                        in_zpool=self._root_pool.name, in_vdev=root_vdev_name)
+
+                    if new_desired_target is None:
+                        new_desired_target = self.__get_new_desired_target()
+                    new_desired_target.insert_children(disk)
+
+                    self._disk_map[disk.ctd] = disk
+
+            for disk in disks:
+                if self._is_generated_root_pool:
+                    # Fail if manifest has disk references to temporary pool we
+                    # just created.
+                    if disk.in_zpool == self._root_pool.name or \
+                       disk.in_vdev == self._root_vdev.name:
+                        raise SelectionError(
+                            "Invalid selection of non-existent pool"
+                            " or vdev for disk")
+
+                new_disk = self.__handle_disk(disk)
+                if new_disk is not None:
+                    if new_desired_target is None:
+                        new_desired_target = self.__get_new_desired_target()
+                    new_desired_target.insert_children(new_disk)
+
+            # If disks were added to temporary root pool, make it permanent
+            if self._is_generated_root_pool:
+                vdev_devices = self.__get_vdev_devices(self._root_pool.name,
+                                                       self._root_vdev.name,
+                                                       self._disk_map)
+                if vdev_devices is not None and vdev_devices:
+                    # Keep root pool since we used it.
+                    self._is_generated_root_pool = False
+                else:
+                    self.__cleanup_temp_logical(logical, logicals)
+
+        if new_desired_target is not None:
+            self.logger.debug("Validating desired =\n%s\n" %
+                (str(new_desired_target)))
+            self.__validate_swap_and_dump(new_desired_target)
+            self.__validate_logical(new_desired_target)
+            self.__validate_disks(new_desired_target)
+            # Do final validation before passing to Target Instantiation
+            if not new_desired_target.final_validation():
+                errors = errsvc.get_all_errors()
+                if errors:
+                    if not self.__can_ignore_errors(errors):
+                        errstr = "Following errors occurred during final " \
+                            "validation :\n%s" % (str(errors[0]))
+                        raise SelectionError(errstr)
+                else:
+                    raise SelectionError("Final Validation Failed. See "
+                        "install_log for more details.")
+
+        return new_desired_target
+
+    def select_targets(self, from_manifest, discovered):
+        '''The starting point for selecting targets.
+
+           Arguments:
+
+           - from_manifest: A reference to a list of target objects that were
+                            imported from the manifest.
+
+           - discovered:    A reference to the root node of the discovered
+                            targets.
+
+           If there are no targets in the manifest, we will defer to Target
+           Controller to do the selection of the disk to install to.
+
+           If there are targets, we will process them by traversing the tree
+           using __handle_XXXX methods for each object type.
+        '''
+
+        if discovered is None:
+            raise SelectionError("No installation targets found.")
+
+        # Store list of discovered disks
+        self._discovered_disks = discovered.get_descendants(class_type=Disk)
+
+        # Store list of discovered zpools
+        self._discovered_zpools = discovered.get_descendants(class_type=Zpool)
+        if len(self._discovered_zpools) > 0:
+            # Store map of devices on discovered zpools
+            self._discovered_zpool_map = self.__get_existing_zpool_map()
+
+        if len(self._discovered_disks) == 0:
+            raise SelectionError("No installation target disks found.")
+
+        if from_manifest:
+            self.logger.debug("from_manifest =\n%s\n" %
+                              (str(from_manifest[0])))
+        else:
+            self.logger.debug("from_manifest = NONE\n")
+        self.logger.debug("discovered =\n%s\n" % (str(discovered)))
+
+        # Check if all Targets have children
+        targets_have_children = False
+        if from_manifest:
+            for target in from_manifest:
+                if target.has_children:
+                    targets_have_children = True
+                    break
+
+        selected_disks = None
+        new_target = None
+        if from_manifest is None or not targets_have_children:
+            # Default to TargetController's automatic mechanism
+            selected_disks = self.controller.initialize(unique_zpool_name=True)
+
+            # Occasionally initialize fails to select a disk because
+            # it cannot find a slice large enough to install to, however
+            # we are using whole disk, so just find first one large enough
+            if not selected_disks:
+                selected_disks = self.controller.select_initial_disk()
+
+            self.logger.info("Selected Disk(s) : %s" % \
+                (self.__pretty_print_disk(selected_disks)))
+
+            # Ensure whole-disk is selected for each disk.
+            desired_disks = self.controller.select_disk(selected_disks,
+                use_whole_disk=True)
+
+            # When bug 7037884 is fixed we can add this back and support
+            # the changing of a GPT disk to VTOC automatically
+            #for disk in desired_disks:
+            #    # Ensure we're using VTOC labelling until GPT integrated
+            #    disk.label = "VTOC"
+
+            # Target Controller will insert a default root pool; we need
+            # to set this flag for validation of the pool name later on.
+            self._is_generated_root_pool = True
+
+            # Target Controller is not setting the mountpoint for the default
+            # BE so need to set it here just in case.
+            # Also getting the desired tree and doing some validation here
+            # seems like a good idea too.
+
+            # As the desired tree has been configured, it makes sense to fill
+            # out the internal maps and call the various validate functions;
+            # doing this ensures both target controller and target selection
+            # populate the desired tree in the same manner.
+
+            existing_desired = \
+                self.doc.persistent.get_first_child(Target.DESIRED)
+
+            if existing_desired:
+                # Traverse Logicals until we get the BE
+
+                self.logger.debug("No targets specified in Manifest, "
+                    "Target Controller has selected default target.")
+
+                self.logger.debug("Target Controller Pre-Desired : \n%s\n" %
+                    (str(existing_desired)))
+
+                self.logger.debug("Target Selection ensuring BE "
+                    "mountpoint set to '%s'." % (self.be_mountpoint))
+
+                # Get New Logical sections
+                logicals = existing_desired.get_children(class_type=Logical)
+
+                # Update logical maps and set be mountpoint
+                if logicals is not None and logicals:
+                    be = None
+                    for logical in logicals:
+                        # A logical may have been specified just for
+                        # the purposes of noswap and nodump; ensure
+                        # this information is passed on to handle_logical
+                        if self._nozpools:
+                            logical.noswap = self._noswap
+                            logical.nodump = self._nodump
+                        # No need to insert into new_desired, already there
+                        self.__handle_logical(logical)
+
+                        # Get BE object from root pool
+                        for zpool in \
+                                [z for z in logical.children if z.is_root]:
+                            be = zpool.get_first_child(class_type=BE)
+
+                if be is not None:
+                    self.logger.debug("Setting BE mountpoint to '%s'" %
+                        (self.be_mountpoint))
+                    be.mountpoint = self.be_mountpoint
+
+                # Update disk map
+                for disk in selected_disks:
+                    if disk.ctd in self._disk_map:
+                        # Seems that the disk is specified more than once in
+                        # the manifest!
+                        raise SelectionError(
+                            "Disk '%s' matches already used disk '%s'." %
+                            (self.__pretty_print_disk(disk), disk.ctd))
+                    self._disk_map[disk.ctd] = disk
+
+                # As TC has configured the logical section we also need
+                # to ensure swap and dump zvols exist if required.
+                self.logger.debug("Target Selection ensuring swap/dump "
+                    "configured if required.")
+
+                self.__validate_swap_and_dump(existing_desired)
+
+                # Validate logical/disk portions of desired tree
+                self.__validate_logical(existing_desired)
+                self.__validate_disks(existing_desired)
+
+                self.logger.debug("Target Controller Post-Desired : \n%s\n" %
+                    (str(existing_desired)))
+
+        else:
+            # Can't rely on TargetController much here, so perform own
+            # selections and interpret values from the manifest.
+
+            if from_manifest:
+                # The AI DTD only allows for one <target> element.
+                new_target = self.__handle_target(from_manifest[0])
+                if new_target is not None:
+                    # Got a new DESIRED tree, so add to DOC
+                    existing_desired = \
+                        self.doc.persistent.get_first_child(Target.DESIRED)
+                    if existing_desired:
+                        self.doc.persistent.delete_children(existing_desired)
+                    self.doc.persistent.insert_children(new_target)
+
+        if not selected_disks and not new_target:
+            raise SelectionError("Unable to find suitable target for install.")
+
+        self.logger.debug("Selected disk(s): %s" % (repr(selected_disks)))
+
+    def __check_valid_zpool_vdev(self, disk):
+        '''Check that disk refers to known zpool and/or vdev
+           Will raise a SelectionError exception if anything is wrong.
+        '''
+        if disk.in_zpool is not None and len(disk.in_zpool) > 0:
+            if disk.in_zpool not in self._zpool_map:
+                raise SelectionError(
+                    "Disk %s specifies non-existent in_zpool: %s"
+                    % (self.__pretty_print_disk(disk), disk.in_zpool))
+            # Limit vdev match to specific zpool
+            zpool_list_to_match = [disk.in_zpool]
+        else:
+            # Need to compare vdev to all known zpools.
+            zpool_list_to_match = self._zpool_map.keys()
+
+        # If only specify vdev, then try see if it exists
+        # as a vdev in one of the known zpools
+        if disk.in_vdev is not None and len(disk.in_vdev) > 0:
+            vdev_matches = 0
+            for zpool in zpool_list_to_match:
+                vdev_id = "%s:%s" % \
+                    (zpool, disk.in_vdev)
+                if vdev_id in self._vdev_map:
+                    vdev_matches += 1
+            if vdev_matches == 0:
+                raise SelectionError(
+                    "Disk %s specifies non-existent in_vdev: %s"
+                    % (self.__pretty_print_disk(disk), disk.in_vdev))
+            elif vdev_matches > 1:
+                raise SelectionError(
+                    "Disk %s specifies non-unique in_vdev: %s"
+                    % (self.__pretty_print_disk(disk), disk.in_vdev))
+
+        # TODO : If disk.in_zpool and disk.in_vdev are none, should we
+        # be checking the children here
+
+        # If we got this far we're ok, otherwise an exception will be raised.
+        return True
+
+    def __get_new_desired_target(self):
+        '''Create a new DESIRED tree using Target Controller
+           initialize which performs the following :
+           - Sets minimum_target_size
+           - deletes any existing desired tree
+           - Calls Target(Target.DESIRED)
+           - Inserts Desired into DOC
+        '''
+        new_desired_target = \
+            self.doc.persistent.get_first_child(Target.DESIRED)
+        if new_desired_target is None:
+            self.controller.initialize(no_initial_logical=True,
+                                       unique_zpool_name=True)
+            new_desired_target = \
+                self.doc.persistent.get_first_child(Target.DESIRED)
+
+        if new_desired_target is None:
+            raise SelectionError("Failed to create new DESIRED tree.")
+
+        return new_desired_target
+
+    def parse_doc(self):
+        '''Method for locating objects in the data object cache (DOC) for
+           use by the checkpoint.
+
+           Will return a tuple of Data Object references for the Targets:
+
+           (from_manifest, discovered)
+        '''
+
+        from_manifest = self.doc.find_path(
+            "//[@solaris_install.auto_install.ai_instance.AIInstance?2]"
+            "//[@solaris_install.target.Target?2]")
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+
+        return (from_manifest, discovered)
+
+    # Implement AbstractCheckpoint methods.
+    def get_progress_estimate(self):
+        '''Returns an estimate of the time this checkpoint will take
+        '''
+        return 3
+
+    def execute(self, dry_run=False):
+        '''Primary execution method used by the Checkpoint parent class
+           to select the targets during an install.
+        '''
+        self.logger.info("=== Executing Target Selection Checkpoint ==")
+
+        try:
+            (from_manifest, discovered) = self.parse_doc()
+
+            self.select_targets(from_manifest, discovered)
+        except Exception:
+            self.logger.debug("%s" % (traceback.format_exc()))
+            raise
--- a/usr/src/cmd/auto-install/checkpoints/test/dmm_build_test.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/checkpoints/test/dmm_build_test.py	Wed May 25 21:26:43 2011 +0100
@@ -46,20 +46,20 @@
     # This environment variable defines the start of the gate's proto area.
     ROOT = os.environ.get("ROOT", "/")
 
-    LOGFILE = "/var/run/dmm_buildtest.out"
+    LOGFILE = "/tmp/dmm_buildtest.out"
 
-    BASE_MANIFEST = "/usr/share/auto_install/default.xml"
+    BASE_MANIFEST = ROOT + "/usr/share/auto_install/ai_manifest.xml"
 
     # Script run in the DMM
     SCRIPT = "/tmp/dmm_buildtest_script"
 
     # Names of the XML files which hold one section apiece.
-    SC_EMB_MAN_XML = "/tmp/test_sc_embedded_manifest.xml"
+    TARGET_XML = "/tmp/test_target.xml"
     SOFTWARE_XML = "/tmp/test_software.xml"
     ADD_DRIVER_XML = "/tmp/test_add_drivers.xml"
 
     # Paths to roots of each of the three sections.
-    SC_EMB_SUBTREE = "/auto_install/ai_instance/sc_embedded_manifests"
+    TARGET_SUBTREE = "/auto_install/ai_instance/target"
     SOFTWARE_SUBTREE = "/auto_install/ai_instance/software"
     ADD_DRIVER_SUBTREE = "/auto_install/ai_instance/add_drivers"
 
@@ -101,7 +101,7 @@
         self.logger.addHandler(self.file_handler)
 
         # Assume the manifest used has separate sibling sections for
-        # add_drivers, software and sc_embedded_manifest, and no others.
+        # add_drivers, software and target, and no others.
         # Create three files, each with one of the sections.
 
         # Read in base manifest, and write it out, stripping whitespace lines.
@@ -110,15 +110,15 @@
         # Generate the three files with subsections.
         self.prune(self.ADD_DRIVER_SUBTREE)
         self.prune(self.SOFTWARE_SUBTREE)
-        self.tree.write(self.SC_EMB_MAN_XML, pretty_print=True)
+        self.tree.write(self.TARGET_XML, pretty_print=True)
 
         self.tree = etree.parse(self.BASE_MANIFEST)
         self.prune(self.ADD_DRIVER_SUBTREE)
-        self.prune(self.SC_EMB_SUBTREE)
+        self.prune(self.TARGET_SUBTREE)
         self.tree.write(self.SOFTWARE_XML, pretty_print=True)
 
         self.tree = etree.parse(self.BASE_MANIFEST)
-        self.prune(self.SC_EMB_SUBTREE)
+        self.prune(self.TARGET_SUBTREE)
         self.prune(self.SOFTWARE_SUBTREE)
         self.tree.write(self.ADD_DRIVER_XML, pretty_print=True)
 
@@ -133,7 +133,7 @@
             script.write("${ROOT}/usr/bin/aimanifest load -i %s\n" %
                          self.ADD_DRIVER_XML)
             script.write("${ROOT}/usr/bin/aimanifest load -i %s\n" %
-                         self.SC_EMB_MAN_XML)
+                         self.TARGET_XML)
             script.write("${ROOT}/usr/bin/aimanifest validate\n")
             script.write("print \"Validated manifest is "
                          "at $AIM_MANIFEST !!!\"\n")
@@ -147,6 +147,13 @@
         # Cleans up engine and logging
         engine_test_utils.reset_engine()
 
+        os.unlink(self.TARGET_XML)
+        os.unlink(self.SOFTWARE_XML)
+        os.unlink(self.ADD_DRIVER_XML)
+
+        os.unlink(self.SCRIPT)
+        os.unlink(self.LOGFILE)
+
     def test_env_setup(self):
         '''
         Run the script, then verify.
--- a/usr/src/cmd/auto-install/checkpoints/test/dmm_env_test.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/checkpoints/test/dmm_env_test.py	Wed May 25 21:26:43 2011 +0100
@@ -41,10 +41,10 @@
     '''
 
     SCRIPT = "/tmp/dmm_env_test.ksh"
-    LOGFILE = "/var/run/dmm_env_test.out"
+    LOGFILE = "/tmp/dmm_env_test.out"
 
     # Same as the default manifest defined in the DMM checkpoint.
-    MANIFEST = "/var/run/manifest.xml"
+    MANIFEST = "/tmp/manifest.xml"
 
     def create_script_file(self):
         '''
@@ -119,6 +119,9 @@
         # Cleans up engine and logging
         engine_test_utils.reset_engine()
 
+        os.unlink(self.SCRIPT)
+        os.unlink(self.LOGFILE)
+
     def test_env_setup(self):
         '''
         Run the associated dmm_env_test script, then verify.
--- a/usr/src/cmd/auto-install/checkpoints/test/dmm_log_test.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/checkpoints/test/dmm_log_test.py	Wed May 25 21:26:43 2011 +0100
@@ -90,6 +90,9 @@
         # Cleans up engine and logging
         engine_test_utils.reset_engine()
 
+        os.unlink(self.SCRIPT)
+        os.unlink(self.LOGFILE)
+
     @staticmethod
     def check_output(lines, index, expected):
         '''
--- a/usr/src/cmd/auto-install/configuration.dtd	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-<!--
- CDDL HEADER START
-
- The contents of this file are subject to the terms of the
- Common Development and Distribution License (the "License").
- You may not use this file except in compliance with the License.
-
- You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- or http://www.opensolaris.org/os/licensing.
- See the License for the specific language governing permissions
- and limitations under the License.
-
- When distributing Covered Code, include this CDDL HEADER in each
- file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- If applicable, add the following below this CDDL HEADER, with the
- fields enclosed by brackets "[]" replaced with your own identifying
- information: Portions Copyright [yyyy] [name of copyright owner]
-
- CDDL HEADER END
-
- Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
-
--->
-
-<!ELEMENT configuration (validation?)>
-<!ATTLIST configuration source CDATA #REQUIRED>
-<!ATTLIST configuration dest CDATA #IMPLIED>
-
-<!--
-	Default to user configuration if type is not set. 
--->
-<!ATTLIST configuration type (network|sysconf|user) #IMPLIED>
-
-<!--
-	Configuration name should match the name of the checkpoint consuming
-	the configuration data.
--->
-<!ATTLIST configuration name CDATA #REQUIRED>
-
-<!ELEMENT validation EMPTY>
-<!ATTLIST validation path CDATA #IMPLIED>
-<!ATTLIST validation args CDATA #IMPLIED>
-<!ATTLIST validation on_error CDATA "stop">
-
--- a/usr/src/cmd/auto-install/default.xml	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/default.xml	Wed May 25 21:26:43 2011 +0100
@@ -22,10 +22,10 @@
  Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 
 -->
-<!DOCTYPE auto_install SYSTEM "file:///usr/share/auto_install/ai.dtd">
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
 <auto_install>
   <ai_instance name="default">
-    <software>
+    <software type="IPS">
       <source>
         <publisher name="solaris">
           <origin name="http://pkg.oracle.com/solaris/release"/>
@@ -39,7 +39,7 @@
 
 	<name>pkg:/[email protected]#</name>
       -->
-      <software_data action="install" type="IPS">
+      <software_data action="install">
         <name>pkg:/entire</name>
         <name>pkg:/server_install</name>
       </software_data>
--- a/usr/src/cmd/auto-install/software.dtd	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,105 +0,0 @@
-<!--
- CDDL HEADER START
-
- The contents of this file are subject to the terms of the
- Common Development and Distribution License (the "License").
- You may not use this file except in compliance with the License.
-
- You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- or http://www.opensolaris.org/os/licensing.
- See the License for the specific language governing permissions
- and limitations under the License.
-
- When distributing Covered Code, include this CDDL HEADER in each
- file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- If applicable, add the following below this CDDL HEADER, with the
- fields enclosed by brackets "[]" replaced with your own identifying
- information: Portions Copyright [yyyy] [name of copyright owner]
-
- CDDL HEADER END
-
- Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
-
--->
-
-<!ELEMENT software (destination?, source*, software_data*)>
-
-<!--
-	The software name is utilized to allow users to associate
-	a specific software instance with a specific checkpoint.
-	For example, we could have multiple transfer types during the
-	course of an installation process. If a software is
-	to be associated with a specific checkpoint the software inst
-	name must be utilized and must be the same as the associated
-	checkpoint. If no name provided the software elements
-	will be used in order based on the type provided.
--->
-
-<!ATTLIST software name CDATA #IMPLIED>
-
-<!ELEMENT software_data (name*)>
-<!ATTLIST software_data action (install|uninstall|unpack|noinstall) "install">
-<!ATTLIST software_data type (IPS|SVR4|ARCHIVE|IMAGE|P5I|DU|P5P|FILE|DIR) "IPS">
-
-<!ELEMENT name (#PCDATA)>
-
-<!--
-	Destination element is not required. If specified there can only
-	be one destination per software element. If not specified,
-	the destination is assumed to be an ipkg image and will be
-	discovered automatically. 
--->
-
-<!ELEMENT destination (image|dir)>
-
-<!ELEMENT image (facet*, img_type?, property*)>
-<!ATTLIST image action (create|use_existing) "create">
-<!ATTLIST image index (true|false) "false">
-<!ATTLIST image ssl_key CDATA #IMPLIED>
-<!ATTLIST image ssl_cert CDATA #IMPLIED>
-<!ATTLIST image img_root CDATA #IMPLIED>
-
-<!ELEMENT img_type EMPTY>
-<!ATTLIST img_type completeness (full|partial) #REQUIRED>
-<!ATTLIST img_type zone (true|false) "false">
-
-<!--
-	A property on an image is set via pkg set-property <propname>.
-	So, for use in this schema an example would be:
-		<image>
-		    <img_type completeness="partial" zone="true"/>
-		    <property val="true">send-uuid</property>
-		    <property val="false">flush-content-cache-on-success
-			</property>  
-		</image>
--->
-<!ELEMENT property (#PCDATA)>
-<!ATTLIST property val (true|false) #REQUIRED>
-
-<!--
-	A facet is an option that may be selected or not selected,
-	such as various locales, documentation, etc. This is per
-	image.
--->
-
-<!ELEMENT facet (#PCDATA)>
-<!ATTLIST facet set (true|false) "true">
-
-<!ELEMENT source (publisher+|dir)>
-
-<!--
-	If name is not specified, and this is an ips install,
-	then publishers known by the specified repository will be added to 
-	the image. Origin can be an uri, path to a file, archive, directory.
--->
-<!ELEMENT publisher (origin+, mirror*)>
-<!ATTLIST publisher name CDATA #IMPLIED>
-
-<!ELEMENT origin EMPTY>
-<!ATTLIST origin name CDATA #REQUIRED>
-
-<!ELEMENT mirror EMPTY>
-<!ATTLIST mirror name CDATA #REQUIRED>
-
-<!ELEMENT dir EMPTY>
-<!ATTLIST dir path CDATA #REQUIRED>
--- a/usr/src/cmd/auto-install/svc/auto-installer	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/svc/auto-installer	Wed May 25 21:26:43 2011 +0100
@@ -38,7 +38,14 @@
 AI_ENGINE_EXIT_REBOOT=64
 
 # Auto install manifest
-AI_MANIFEST=/tmp/ai.xml
+AI_MANIFEST=/system/volatile/ai.xml
+
+# Profile directory
+PROFILE_DIR=/system/volatile/profile
+
+# Profile used to enable SCI tool
+ENABLE_SCI_PROFILE=/usr/share/auto_install/sc_profiles/enable_sci.xml
+ENABLE_SCI_DEST=profile_enable_sci.xml
 
 ISA_INFO=`/usr/bin/uname -p`
 PRTCONF=/usr/sbin/prtconf
@@ -47,6 +54,8 @@
 NAWK=/usr/bin/nawk
 BEADM=/usr/sbin/beadm
 REBOOT=/usr/sbin/reboot
+CP=/usr/bin/cp
+MKDIR=/usr/bin/mkdir
 
 . /lib/svc/share/smf_include.sh
 
@@ -101,20 +110,30 @@
 # point XML validator to where these file are.
 #
 for dtd_file in "ai.dtd" "configuration.dtd" "software.dtd" "target.dtd" ; do
-	if [ ! -f "/usr/share/auto_install/$dtd_file" ] ; then
-		echo "Could not find /usr/share/auto_install/$dtd_file"
+	if [ ! -f "/usr/share/install/$dtd_file" ] ; then
+		echo "Could not find /usr/share/install/$dtd_file"
 		exit $SMF_EXIT_ERR_FATAL
 	fi
-	/usr/bin/cp "/usr/share/auto_install/$dtd_file" /tmp/
+	$CP "/usr/share/install/$dtd_file" /system/volatile/
 done
 
 echo "" | $TEE_LOGTOCONSOLE
 echo "Automated Installation started" | $TEE_LOGTOCONSOLE
-echo "The progress of the Automated Installation can be followed by viewing" |
+echo "The progress of the Automated Installation will be output to the console" |
     $TEE_LOGTOCONSOLE
-echo "the logfile at /tmp/install_log" | $TEE_LOGTOCONSOLE
+echo "Detailed logging is in the logfile at /system/volatile/install_log" | $TEE_LOGTOCONSOLE
+echo "Press RETURN to get a login prompt at any time." | $TEE_LOGTOCONSOLE
 echo "" | $TEE_LOGTOCONSOLE
 
+# If PROFILE_DIR does not exist, or does not contain any profiles,
+# copy into it the profile which will enable SCI tool.
+if [ ! -d $PROFILE_DIR ] ; then
+    $MKDIR $PROFILE_DIR
+    $CP $ENABLE_SCI_PROFILE $PROFILE_DIR/$ENABLE_SCI_DEST
+elif [ -z "$(ls -A $PROFILE_DIR)" ] ; then
+    $CP $ENABLE_SCI_PROFILE $PROFILE_DIR/$ENABLE_SCI_DEST
+fi
+
 #
 # Enable the installer to be run in debug mode if requested.
 #
@@ -127,9 +146,9 @@
 
 	# enable verbose mode for logging service and ICT
 	export LS_DBG_LVL=4
-	$AI_ENGINE -v -p $AI_MANIFEST
+	$AI_ENGINE -v -m $AI_MANIFEST
 else
-	$AI_ENGINE -p $AI_MANIFEST
+	$AI_ENGINE -m $AI_MANIFEST
 fi
 
 ret=$?
@@ -145,7 +164,7 @@
 		echo "Automated Installation finished successfully" |
 		    $TEE_LOGTOCONSOLE
 		echo "The system can be rebooted now" | $TEE_LOGTOCONSOLE
-		echo "Please refer to the /tmp/install_log file" \
+		echo "Please refer to the /system/volatile/install_log file" \
 		    "for details" | $TEE_LOGTOCONSOLE
 
 		echo "After reboot it will be located at" \
@@ -206,7 +225,7 @@
 	#
 	$AI_ENGINE_EXIT_FAILURE)
 		echo "Automated Installation failed" | $TEE_LOGTOCONSOLE
-		echo "Please refer to the /tmp/install_log file for" \
+		echo "Please refer to the /system/volatile/install_log file for" \
 		    "details" | $TEE_LOGTOCONSOLE
 
 		exit $SMF_EXIT_ERR_FATAL
--- a/usr/src/cmd/auto-install/svc/manifest-locator	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/svc/manifest-locator	Wed May 25 21:26:43 2011 +0100
@@ -60,10 +60,10 @@
 # Service Choosing Engine
 AISC_ENGINE=/usr/bin/ai_get_manifest
 # Auto install manifest to be used for installation
-AI_MANIFEST=/tmp/ai.xml
+AI_MANIFEST=/system/volatile/ai.xml
 # List of services which Service Discovery Engine will
 # contact for obtaining the manifest
-AI_SERVICE_LIST=/tmp/service_list.$$
+AI_SERVICE_LIST=/system/volatile/service_list.$$
 # debug mode
 AI_DBGLVL=4
 # timeout for service discovery process
@@ -88,7 +88,7 @@
 	#
 	# For SPARC, parameters are stored in 
 	# <install_media_root_dir>/install.conf
-	# This file is downloaded using HTTP protocol and saved in /tmp.
+	# This file is downloaded using HTTP protocol and saved in /system/volatile.
 	# For X86, parameters are in defined in GRUB menu.lst
 	#
 	# TODO: Unify the implementation - bug 7789
--- a/usr/src/cmd/auto-install/svc/manifest-locator.xml	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/svc/manifest-locator.xml	Wed May 25 21:26:43 2011 +0100
@@ -19,8 +19,7 @@
 
  CDDL HEADER END
 
- Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
- Use is subject to license terms.
+ Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 
  NOTE:  This service manifest is not editable; its contents will
  be overwritten by package or patch operations, including
@@ -40,15 +39,6 @@
 	<create_default_instance enabled='false' />
 	<single_instance/>
 
-	<!-- Must be able to access /tmp. -->
-	<dependency
-		name='filesystem-minimal'
-		grouping='require_all'
-		restart_on='none'
-		type='service'>
-		<service_fmri value='svc:/system/filesystem/minimal' />
-	</dependency>
-
 	<!-- dns/multicast is required for service discovery -->
 	<dependency
 		name='multicast'
--- a/usr/src/cmd/auto-install/target.dtd	Wed May 25 13:29:32 2011 -0600
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,196 +0,0 @@
-<!--
- CDDL HEADER START
-
- The contents of this file are subject to the terms of the
- Common Development and Distribution License (the "License").
- You may not use this file except in compliance with the License.
-
- You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- or http://www.opensolaris.org/os/licensing.
- See the License for the specific language governing permissions
- and limitations under the License.
-
- When distributing Covered Code, include this CDDL HEADER in each
- file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- If applicable, add the following below this CDDL HEADER, with the
- fields enclosed by brackets "[]" replaced with your own identifying
- information: Portions Copyright [yyyy] [name of copyright owner]
-
- CDDL HEADER END
-
- Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
-
--->
-
-<!ELEMENT target (target_device+)>
-
-<!--
-	A partition and slice element must be specified within a
-	containing element, such as a disk, zpool or vdev. There must
-	be one element, if disk or pool are specified, that is
-	tagged as the root device. If no target_devices are specified
-	the the application must choose the device based on its
-	specific criteria and assume this is the root device.
--->
-
-<!ELEMENT target_device (disk|zpool+|swap|dump)>
-
-<!--
-	If a disk is specified at the top level, that is not contained
-	within a zpool specification, this disk will be assumed
-	to be the root device. If a disk target is specified
-	at the top level, and then a zpool with the is_root attribute
-	set this is an error. The user can specify a specific
-	slice within the disk to be used as the root slice. If
-	no slice specified then root slice will be 0.
--->
-
-<!ELEMENT disk ((disk_name|disk_prop|disk_keyword|iscsi), partition*, slice*)>
-<!--
-	Disk name can be one of ctd, volid, devpath or devid name.
-	Default is "ctd".
--->
-<!ELEMENT disk_name EMPTY>
-<!ATTLIST disk_name name CDATA #REQUIRED>
-<!ATTLIST disk_name name_type (ctd|volid|devpath|devid) "ctd">
-
-<!ELEMENT disk_prop EMPTY>
-<!ATTLIST disk_prop dev_type CDATA #IMPLIED>
-<!ATTLIST disk_prop dev_vendor CDATA #IMPLIED>
-<!ATTLIST disk_prop dev_size CDATA #IMPLIED>
-
-<!ELEMENT disk_keyword EMPTY>
-<!ATTLIST disk_keyword key (boot_disk) #REQUIRED>
-
-<!--
-	A vdev must start with a disk element. The slice and partition
-	elements use numerical names, such as 0 or 1. A disk must
-	be named for a vdev, using the disk element notation. 
--->
-
-<!ELEMENT vdev (disk+, partition*, slice*)>
-<!ATTLIST vdev redundancy (mirror|raidz|raidz1|raidz2|raidz3|none) "mirror">
-
-<!ELEMENT dataset (zvol|filesystem)>
-
-<!--
-	No size specification means we create the slice the whole size of
-	the disk. If multiple slices specified for one disk, with
-	no sizes, this is an error. The attribute is_root is only
-	valid when a slice is part of a disk definition, outside of
-	a zpool definition. The user can request to format the disk
-	with multiple slices but specify one that they want to
-	be included in the root pool.
-
--->
-
-
-<!ELEMENT slice (size?)>
-<!ATTLIST slice action (create|delete|preserve) "create">
-<!ATTLIST slice name CDATA #REQUIRED>
-<!ATTLIST slice is_root (true|false) #IMPLIED>
-
-<!--
-	The use of the 'force' attribute on slice specifies that on
-	a 'create' of a slice that already exists we overwrite the
-	slice if force==true. Otherwise the application errors.
--->
-
-<!ATTLIST slice force (true|false) "false">
-
-<!--
-	If partition size is not provided the partition will be the
-	remaining free size left on the disk.
--->
-
-<!ELEMENT partition (slice*, size?)>
-<!ATTLIST partition action (create|delete|use_existing) "create">
-
-<!--
-	A partition name is a numeric value, e.g. 1, will be
-	interpreted as partition 1. If a name is not provided 
-	the user must specify the use_existing action, otherwise
-	this will be an invalid specification.
--->
-<!ATTLIST partition name CDATA #IMPLIED>
-<!ATTLIST partition part_type CDATA "191">
-
-<!--
-	Size must be suffixed with a size unit. i.e 100gb, 2secs, 2tb.
--->
-<!ELEMENT size EMPTY>
-<!ATTLIST size val CDATA #REQUIRED>
-<!ATTLIST size start_sector CDATA #IMPLIED>
-
-
-<!ELEMENT options (#PCDATA)>
-
-<!--
-	Option elements allow any string type, and this string is parsable
-	character data, should the application require it.
--->
-
-<!--
-	Filesystem options are for zfs filesystems. The format of these
-	is this: "-o property=value". Any editable ZFS filesystem property
-	can be set at creation time. Multiple -o options can be
-	specified. An error will occur if a propert is specified in
-	multiple -o options.
--->
-
-<!ELEMENT filesystem (options?)>
-<!ATTLIST filesystem name CDATA #REQUIRED>
-<!ATTLIST filesystem action (create|delete|preserve) "create">
-<!ATTLIST filesystem mountpoint CDATA #IMPLIED>
-
-<!--
-	Redundancy needs to be part of the vdev grouping,
-	not a property on zpool itself. There can be multiple
-	vdev groupings within one pool configuration.
--->
-
-<!ELEMENT zpool (vdev*, dataset*, pool_options?, dataset_options?)>
-<!ATTLIST zpool action (create|delete|preserve|use_existing) "create">
-<!ATTLIST zpool name CDATA #REQUIRED>
-<!ATTLIST zpool is_root (true|false) "false">
-
-<!--
-	The pool option string, which is also a parsable string, 
-	can include both pool options and filesystem options.
-	For pool options the format is: "-o property=value". For
-	filesystem properties the format is: "-O file-system-property=value"
-	Both of these typs of properties can be set in the option string.
-	An example of combining these in the option string:
-
-"-o altroot=/a -o autoexpand=off -o delegation=off -O atime=on -O compression=lzbj"
--->
-
-<!ELEMENT pool_options (options)>
-<!ELEMENT dataset_options (options)>
-
-
-<!ELEMENT zvol (options?, size) >
-<!ATTLIST zvol action (create|delete|preserve|use_existing) "create">
-<!ATTLIST zvol name CDATA #REQUIRED>
-
-<!-- 
-	ISCSI does not have an action attribute. We use iscsi devices but
-	we do not operate directly on them.
--->
-<!ELEMENT iscsi (ip)>
-<!ATTLIST iscsi name CDATA #REQUIRED>
-<!ATTLIST iscsi source CDATA #IMPLIED>
-<!ATTLIST iscsi target_lun CDATA #IMPLIED>
-<!ATTLIST iscsi target_port CDATA #IMPLIED>
-
-<!ELEMENT ip (#PCDATA)>
-
-<!--
-	Swap and dump are optional with Solaris install.
--->
-
-<!ELEMENT swap (zvol)>
-<!ATTLIST swap no_swap (true|false) "false">
-
-<!ELEMENT dump (zvol)>
-<!ATTLIST dump no_dump (true|false) "false">
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/manifest_auto_reboot_false.xml	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,236 @@
+<?xml version="1.0"?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+<!--
+===============================================================================
+DTD sample manifest for Automatic Installer input manifest specification.
+===============================================================================
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <!--
+	"auto_reboot" set to "true" may be an issue for x86 machines.
+	The boot order is not guaranteed and may cause unexpected
+	behavior. If auto_reboot is not desired, set auto_reboot="false".
+
+        The name of the manifest is obtained from (in this order):
+        1) the name from the installadm add-manifest command line "-m" option.
+        2) a name attribute in the manifest, e.g.: 
+           <ai_instance name="my_ai_manifest" auto_reboot="true">
+        3) manifest filename 
+    -->
+  <ai_instance auto_reboot="false" http_proxy="192.168.1.1" name="rebootfalse">
+    <!--
+      =======================================================================
+      <target/target_device> - selections for AI target Device specification
+
+      Disk criteria are divided into three mutually exclusive groups:
+
+      G1 - deterministic disk criteria
+      ................................
+        * target_device/disk/iscsi parameters
+        * target_device/disk/disk_name, with name_type attribute:
+          one of ctd, volid, devpath or devid
+
+      G2 - non-deterministic disk criteria
+      ..........................
+        * target_device/disk/disk_prop: Any of dev_type, dev_vendor or
+          dev_size
+
+      G3 - keyword disk criteria
+      ...........................
+        * target_device/disk/disk_keyword: "boot_disk"
+
+      Schema ai.dtd enforces following policy:
+
+      * criteria in group G1 are mutually exclusive - only
+        one can be specified at a time
+
+      * groups G1, G2 and G3 are mutually exclusive - i.e.
+        if criteria from G1 is specified, no criteria
+        from G2 or G3 are allowed and vice versa
+
+      * multiple criteria from G2 can be specified
+      =======================================================================
+    -->
+    <target>
+        <disk>
+          <!-- G1 -->
+          <!--
+            c#t#d# device name like c0t0d0 or 
+            MPXIO name like c0t2002037CD9F72d0
+          -->
+          <disk_name name="c7d0" name_type="ctd"/>
+          <!-- volume name set for instance by means
+            of format(1M) command
+          -->
+          <!--
+          <disk_name name="ai-disk" name_type="volid"/>
+          -->
+          <!-- device id - e.g. can be obtained by means of
+            iostat(1M) -iEn
+          -->
+          <!--
+          <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+          -->
+          <!-- device path under /devices directory, e.g.
+            /pci@1e,600/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+          -->
+          <!--
+          <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+          -->
+          <!--
+            ISCSI target device
+
+          <iscsi name="c0d2E0001010F68">
+            <ip>192.168.1.34</ip>
+          </iscsi> 
+          -->
+          <!-- G2 -->
+          <!--
+          <disk_prop dev_vendor="hitachi" dev_size="204801000"/>
+          -->
+          <!-- G3 -->
+          <!--
+          <disk_keyword key="boot_disk"/>
+          -->
+          <!--
+            Uncomment this to force AI to find an existing Solaris
+            partition instead of creating a new one.
+          -->
+          <!--
+          <partition action="use_existing"/>
+          -->
+	  <!--
+          <partition name="1" part_type="191">
+            <size start_sector="200" val="204801000"/>
+          </partition>
+          -->
+        </disk>
+    </target>
+    <software name="ips" type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the
+        specified IPS repository, is installed.
+        if another build is required, the build number has
+        to be appended to the 'entire' package in following
+        form:
+      <name="[email protected]#"/>
+      -->
+      <software_data>
+        <name>pkg:/entire</name>
+        <name>pkg:/babel_install</name>
+      </software_data>
+      <!--
+          babel_install and slim_install are group packages used to
+          define the default installation.  They are removed here so
+          that they do not inhibit removal of other packages on the installed
+          system
+      -->
+      <software_data action="uninstall">
+        <name>pkg:/babel_install</name>
+        <name>pkg:/slim_install</name>
+      </software_data>
+    </software>
+    <add_drivers>
+      <!--
+	    Driver Updates: This section is for adding driver packages to the
+            boot environment before the installation takes place.  The
+            installer can then access all devices on the system.  The
+            packages installed in the boot environment will also be installed
+            on the target.
+
+            A <search_all> entry performs a search for devices which are
+            missing their drivers.  A repository publisher and location
+            may be specified, and that repository and its database will
+            be used.  If no publisher and location is specified, the
+            configured repositories will be used.
+            (See pkg publisher command.)  If <addall> is specified as
+            "true", then drivers the database says are third-party drivers
+            will be added like all others; otherwise third-party drivers
+            will not be added.
+
+                <search_all addall="true">
+                    <source>
+                        <publisher name="solaris">
+                            <origin name="http://pkg.oracle.com/solaris/release"/>
+                        </publisher>
+                    </source>
+                </search_all>
+
+            <software> entries are user-provided specifications of packages
+            needed in order to perform the install.  types are P5I, SVR4, DU.
+            A <software_data> action of "noinstall" inhibits adding to target.
+
+            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
+            Path may be to a local file or an http or ftp specification.
+                <software>
+                    <source>
+                        <publisher>
+                            <origin
+				name=
+	"http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                        </publisher>
+                    </source>
+		    <software_data type="P5I"/>
+                </software>
+
+            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
+            to the directory containing the packages.  The 
+	    software/software_data/name refers to the package's top level
+	    directory or the package's datastream file.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/package_dir"/>
+                        </publisher>
+                    </source>
+                    <software_data type="SVR4">
+                        <name>my_disk_driver.d</name>
+                    </software_data>
+                </software>
+
+            DU: An ITU (Install Time Update) or Driver Update image.
+            The source/publisher/origin refers to the path just above the 
+	    image's DU directory (if expanded) or the name of the .iso image.  
+	    All packages in the image will be added.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/duimages/mydriver.iso"/>
+                        </publisher>
+                    </source>
+                    <software_data type="DU"/>
+                </software>	
+      -->
+      <search_all/>
+    </add_drivers>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/manifest_auto_reboot_invalid.xml	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,236 @@
+<?xml version="1.0"?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+<!--
+===============================================================================
+DTD sample manifest for Automatic Installer input manifest specification.
+===============================================================================
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <!--
+	"auto_reboot" set to "true" may be an issue for x86 machines.
+	The boot order is not guaranteed and may cause unexpected
+	behavior. If auto_reboot is not desired, set auto_reboot="false".
+
+        The name of the manifest is obtained from (in this order):
+        1) the name from the installadm add-manifest command line "-m" option.
+        2) a name attribute in the manifest, e.g.: 
+           <ai_instance name="my_ai_manifest" auto_reboot="true">
+        3) manifest filename 
+    -->
+  <ai_instance auto_reboot="invalid" http_proxy="192.168.1.1" name="rebootinvalid">
+    <!--
+      =======================================================================
+      <target/target_device> - selections for AI target Device specification
+
+      Disk criteria are divided into three mutually exclusive groups:
+
+      G1 - deterministic disk criteria
+      ................................
+        * target_device/disk/iscsi parameters
+        * target_device/disk/disk_name, with name_type attribute:
+          one of ctd, volid, devpath or devid
+
+      G2 - non-deterministic disk criteria
+      ..........................
+        * target_device/disk/disk_prop: Any of dev_type, dev_vendor or
+          dev_size
+
+      G3 - keyword disk criteria
+      ...........................
+        * target_device/disk/disk_keyword: "boot_disk"
+
+      Schema ai.dtd enforces following policy:
+
+      * criteria in group G1 are mutually exclusive - only
+        one can be specified at a time
+
+      * groups G1, G2 and G3 are mutually exclusive - i.e.
+        if criteria from G1 is specified, no criteria
+        from G2 or G3 are allowed and vice versa
+
+      * multiple criteria from G2 can be specified
+      =======================================================================
+    -->
+    <target>
+        <disk>
+          <!-- G1 -->
+          <!--
+            c#t#d# device name like c0t0d0 or 
+            MPXIO name like c0t2002037CD9F72d0
+          -->
+          <disk_name name="c7d0" name_type="ctd"/>
+          <!-- volume name set for instance by means
+            of format(1M) command
+          -->
+          <!--
+          <disk_name name="ai-disk" name_type="volid"/>
+          -->
+          <!-- device id - e.g. can be obtained by means of
+            iostat(1M) -iEn
+          -->
+          <!--
+          <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+          -->
+          <!-- device path under /devices directory, e.g.
+            /pci@1e,600/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+          -->
+          <!--
+          <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+          -->
+          <!--
+            ISCSI target device
+
+          <iscsi name="c0d2E0001010F68">
+            <ip>192.168.1.34</ip>
+          </iscsi> 
+          -->
+          <!-- G2 -->
+          <!--
+          <disk_prop dev_vendor="hitachi" dev_size="204801000"/>
+          -->
+          <!-- G3 -->
+          <!--
+          <disk_keyword key="boot_disk"/>
+          -->
+          <!--
+            Uncomment this to force AI to find an existing Solaris
+            partition instead of creating a new one.
+          -->
+          <!--
+          <partition action="use_existing"/>
+          -->
+	  <!--
+          <partition name="1" part_type="191">
+            <size start_sector="200" val="204801000"/>
+          </partition>
+          -->
+        </disk>
+    </target>
+    <software name="ips" type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the
+        specified IPS repository, is installed.
+        if another build is required, the build number has
+        to be appended to the 'entire' package in following
+        form:
+      <name="[email protected]#"/>
+      -->
+      <software_data>
+        <name>pkg:/entire</name>
+        <name>pkg:/babel_install</name>
+      </software_data>
+      <!--
+          babel_install and slim_install are group packages used to
+          define the default installation.  They are removed here so
+          that they do not inhibit removal of other packages on the installed
+          system
+      -->
+      <software_data action="uninstall">
+        <name>pkg:/babel_install</name>
+        <name>pkg:/slim_install</name>
+      </software_data>
+    </software>
+    <add_drivers>
+      <!--
+	    Driver Updates: This section is for adding driver packages to the
+            boot environment before the installation takes place.  The
+            installer can then access all devices on the system.  The
+            packages installed in the boot environment will also be installed
+            on the target.
+
+            A <search_all> entry performs a search for devices which are
+            missing their drivers.  A repository publisher and location
+            may be specified, and that repository and its database will
+            be used.  If no publisher and location is specified, the
+            configured repositories will be used.
+            (See pkg publisher command.)  If <addall> is specified as
+            "true", then drivers the database says are third-party drivers
+            will be added like all others; otherwise third-party drivers
+            will not be added.
+
+                <search_all addall="true">
+                    <source>
+                        <publisher name="solaris">
+                            <origin name="http://pkg.oracle.com/solaris/release"/>
+                        </publisher>
+                    </source>
+                </search_all>
+
+            <software> entries are user-provided specifications of packages
+            needed in order to perform the install.  types are P5I, SVR4, DU.
+            A <software_data> action of "noinstall" inhibits adding to target.
+
+            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
+            Path may be to a local file or an http or ftp specification.
+                <software>
+                    <source>
+                        <publisher>
+                            <origin
+				name=
+	"http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                        </publisher>
+                    </source>
+		    <software_data type="P5I"/>
+                </software>
+
+            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
+            to the directory containing the packages.  The 
+	    software/software_data/name refers to the package's top level
+	    directory or the package's datastream file.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/package_dir"/>
+                        </publisher>
+                    </source>
+                    <software_data type="SVR4">
+                        <name>my_disk_driver.d</name>
+                    </software_data>
+                </software>
+
+            DU: An ITU (Install Time Update) or Driver Update image.
+            The source/publisher/origin refers to the path just above the 
+	    image's DU directory (if expanded) or the name of the .iso image.  
+	    All packages in the image will be added.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/duimages/mydriver.iso"/>
+                        </publisher>
+                    </source>
+                    <software_data type="DU"/>
+                </software>	
+      -->
+      <search_all/>
+    </add_drivers>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/manifest_auto_reboot_not_set.xml	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,236 @@
+<?xml version="1.0"?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+<!--
+===============================================================================
+DTD sample manifest for Automatic Installer input manifest specification.
+===============================================================================
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <!--
+	"auto_reboot" set to "true" may be an issue for x86 machines.
+	The boot order is not guaranteed and may cause unexpected
+	behavior. If auto_reboot is not desired, set auto_reboot="false".
+
+        The name of the manifest is obtained from (in this order):
+        1) the name from the installadm add-manifest command line "-m" option.
+        2) a name attribute in the manifest, e.g.: 
+           <ai_instance name="my_ai_manifest" auto_reboot="true">
+        3) manifest filename 
+    -->
+  <ai_instance http_proxy="192.168.1.1" name="rebootnotset">
+    <!--
+      =======================================================================
+      <target/target_device> - selections for AI target Device specification
+
+      Disk criteria are divided into three mutually exclusive groups:
+
+      G1 - deterministic disk criteria
+      ................................
+        * target_device/disk/iscsi parameters
+        * target_device/disk/disk_name, with name_type attribute:
+          one of ctd, volid, devpath or devid
+
+      G2 - non-deterministic disk criteria
+      ..........................
+        * target_device/disk/disk_prop: Any of dev_type, dev_vendor or
+          dev_size
+
+      G3 - keyword disk criteria
+      ...........................
+        * target_device/disk/disk_keyword: "boot_disk"
+
+      Schema ai.dtd enforces following policy:
+
+      * criteria in group G1 are mutually exclusive - only
+        one can be specified at a time
+
+      * groups G1, G2 and G3 are mutually exclusive - i.e.
+        if criteria from G1 is specified, no criteria
+        from G2 or G3 are allowed and vice versa
+
+      * multiple criteria from G2 can be specified
+      =======================================================================
+    -->
+    <target>
+        <disk>
+          <!-- G1 -->
+          <!--
+            c#t#d# device name like c0t0d0 or 
+            MPXIO name like c0t2002037CD9F72d0
+          -->
+          <disk_name name="c7d0" name_type="ctd"/>
+          <!-- volume name set for instance by means
+            of format(1M) command
+          -->
+          <!--
+          <disk_name name="ai-disk" name_type="volid"/>
+          -->
+          <!-- device id - e.g. can be obtained by means of
+            iostat(1M) -iEn
+          -->
+          <!--
+          <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+          -->
+          <!-- device path under /devices directory, e.g.
+            /pci@1e,600/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+          -->
+          <!--
+          <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+          -->
+          <!--
+            ISCSI target device
+
+          <iscsi name="c0d2E0001010F68">
+            <ip>192.168.1.34</ip>
+          </iscsi> 
+          -->
+          <!-- G2 -->
+          <!--
+          <disk_prop dev_vendor="hitachi" dev_size="204801000"/>
+          -->
+          <!-- G3 -->
+          <!--
+          <disk_keyword key="boot_disk"/>
+          -->
+          <!--
+            Uncomment this to force AI to find an existing Solaris
+            partition instead of creating a new one.
+          -->
+          <!--
+          <partition action="use_existing"/>
+          -->
+	  <!--
+          <partition name="1" part_type="191">
+            <size start_sector="200" val="204801000"/>
+          </partition>
+          -->
+        </disk>
+    </target>
+    <software name="ips" type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the
+        specified IPS repository, is installed.
+        if another build is required, the build number has
+        to be appended to the 'entire' package in following
+        form:
+      <name="[email protected]#"/>
+      -->
+      <software_data>
+        <name>pkg:/entire</name>
+        <name>pkg:/babel_install</name>
+      </software_data>
+      <!--
+          babel_install and slim_install are group packages used to
+          define the default installation.  They are removed here so
+          that they do not inhibit removal of other packages on the installed
+          system
+      -->
+      <software_data action="uninstall">
+        <name>pkg:/babel_install</name>
+        <name>pkg:/slim_install</name>
+      </software_data>
+    </software>
+    <add_drivers>
+      <!--
+	    Driver Updates: This section is for adding driver packages to the
+            boot environment before the installation takes place.  The
+            installer can then access all devices on the system.  The
+            packages installed in the boot environment will also be installed
+            on the target.
+
+            A <search_all> entry performs a search for devices which are
+            missing their drivers.  A repository publisher and location
+            may be specified, and that repository and its database will
+            be used.  If no publisher and location is specified, the
+            configured repositories will be used.
+            (See pkg publisher command.)  If <addall> is specified as
+            "true", then drivers the database says are third-party drivers
+            will be added like all others; otherwise third-party drivers
+            will not be added.
+
+                <search_all addall="true">
+                    <source>
+                        <publisher name="solaris">
+                            <origin name="http://pkg.oracle.com/solaris/release"/>
+                        </publisher>
+                    </source>
+                </search_all>
+
+            <software> entries are user-provided specifications of packages
+            needed in order to perform the install.  types are P5I, SVR4, DU.
+            A <software_data> action of "noinstall" inhibits adding to target.
+
+            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
+            Path may be to a local file or an http or ftp specification.
+                <software>
+                    <source>
+                        <publisher>
+                            <origin
+				name=
+	"http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                        </publisher>
+                    </source>
+		    <software_data type="P5I"/>
+                </software>
+
+            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
+            to the directory containing the packages.  The 
+	    software/software_data/name refers to the package's top level
+	    directory or the package's datastream file.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/package_dir"/>
+                        </publisher>
+                    </source>
+                    <software_data type="SVR4">
+                        <name>my_disk_driver.d</name>
+                    </software_data>
+                </software>
+
+            DU: An ITU (Install Time Update) or Driver Update image.
+            The source/publisher/origin refers to the path just above the 
+	    image's DU directory (if expanded) or the name of the .iso image.  
+	    All packages in the image will be added.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/duimages/mydriver.iso"/>
+                        </publisher>
+                    </source>
+                    <software_data type="DU"/>
+                </software>	
+      -->
+      <search_all/>
+    </add_drivers>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/manifest_auto_reboot_true.xml	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,236 @@
+<?xml version="1.0"?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+<!--
+===============================================================================
+DTD sample manifest for Automatic Installer input manifest specification.
+===============================================================================
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <!--
+	"auto_reboot" set to "true" may be an issue for x86 machines.
+	The boot order is not guaranteed and may cause unexpected
+	behavior. If auto_reboot is not desired, set auto_reboot="false".
+
+        The name of the manifest is obtained from (in this order):
+        1) the name from the installadm add-manifest command line "-m" option.
+        2) a name attribute in the manifest, e.g.: 
+           <ai_instance name="my_ai_manifest" auto_reboot="true">
+        3) manifest filename 
+    -->
+  <ai_instance auto_reboot="true" http_proxy="192.168.1.1" name="reboottrue">
+    <!--
+      =======================================================================
+      <target/target_device> - selections for AI target Device specification
+
+      Disk criteria are divided into three mutually exclusive groups:
+
+      G1 - deterministic disk criteria
+      ................................
+        * target_device/disk/iscsi parameters
+        * target_device/disk/disk_name, with name_type attribute:
+          one of ctd, volid, devpath or devid
+
+      G2 - non-deterministic disk criteria
+      ..........................
+        * target_device/disk/disk_prop: Any of dev_type, dev_vendor or
+          dev_size
+
+      G3 - keyword disk criteria
+      ...........................
+        * target_device/disk/disk_keyword: "boot_disk"
+
+      Schema ai.dtd enforces following policy:
+
+      * criteria in group G1 are mutually exclusive - only
+        one can be specified at a time
+
+      * groups G1, G2 and G3 are mutually exclusive - i.e.
+        if criteria from G1 is specified, no criteria
+        from G2 or G3 are allowed and vice versa
+
+      * multiple criteria from G2 can be specified
+      =======================================================================
+    -->
+    <target>
+        <disk>
+          <!-- G1 -->
+          <!--
+            c#t#d# device name like c0t0d0 or 
+            MPXIO name like c0t2002037CD9F72d0
+          -->
+          <disk_name name="c7d0" name_type="ctd"/>
+          <!-- volume name set for instance by means
+            of format(1M) command
+          -->
+          <!--
+          <disk_name name="ai-disk" name_type="volid"/>
+          -->
+          <!-- device id - e.g. can be obtained by means of
+            iostat(1M) -iEn
+          -->
+          <!--
+          <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+          -->
+          <!-- device path under /devices directory, e.g.
+            /pci@1e,600/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+          -->
+          <!--
+          <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+          -->
+          <!--
+            ISCSI target device
+
+          <iscsi name="c0d2E0001010F68">
+            <ip>192.168.1.34</ip>
+          </iscsi> 
+          -->
+          <!-- G2 -->
+          <!--
+          <disk_prop dev_vendor="hitachi" dev_size="204801000"/>
+          -->
+          <!-- G3 -->
+          <!--
+          <disk_keyword key="boot_disk"/>
+          -->
+          <!--
+            Uncomment this to force AI to find an existing Solaris
+            partition instead of creating a new one.
+          -->
+          <!--
+          <partition action="use_existing"/>
+          -->
+	  <!--
+          <partition name="1" part_type="191">
+            <size start_sector="200" val="204801000"/>
+          </partition>
+          -->
+        </disk>
+    </target>
+    <software name="ips" type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the
+        specified IPS repository, is installed.
+        if another build is required, the build number has
+        to be appended to the 'entire' package in following
+        form:
+      <name>pkg:/entire@0.5.11-0.build#</name>
+      -->
+      <software_data>
+        <name>pkg:/entire</name>
+        <name>pkg:/babel_install</name>
+      </software_data>
+      <!--
+          babel_install and slim_install are group packages used to
+          define the default installation.  They are removed here so
+          that they do not inhibit removal of other packages on the installed
+          system
+      -->
+      <software_data action="uninstall">
+        <name>pkg:/babel_install</name>
+        <name>pkg:/slim_install</name>
+      </software_data>
+    </software>
+    <add_drivers>
+      <!--
+	    Driver Updates: This section is for adding driver packages to the
+            boot environment before the installation takes place.  The
+            installer can then access all devices on the system.  The
+            packages installed in the boot environment will also be installed
+            on the target.
+
+            A <search_all> entry performs a search for devices which are
+            missing their drivers.  A repository publisher and location
+            may be specified, and that repository and its database will
+            be used.  If no publisher and location is specified, the
+            configured repositories will be used.
+            (See pkg publisher command.)  If <addall> is specified as
+            "true", then drivers the database says are third-party drivers
+            will be added like all others; otherwise third-party drivers
+            will not be added.
+
+                <search_all addall="true">
+                    <source>
+                        <publisher name="solaris">
+                            <origin name="http://pkg.oracle.com/solaris/release"/>
+                        </publisher>
+                    </source>
+                </search_all>
+
+            <software> entries are user-provided specifications of packages
+            needed in order to perform the install.  types are P5I, SVR4, DU.
+            A <software_data> action of "noinstall" inhibits adding to target.
+
+            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
+            Path may be to a local file or an http or ftp specification.
+                <software>
+                    <source>
+                        <publisher>
+                            <origin
+				name=
+	"http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                        </publisher>
+                    </source>
+		    <software_data type="P5I"/>
+                </software>
+
+            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
+            to the directory containing the packages.  The 
+	    software/software_data/name refers to the package's top level
+	    directory or the package's datastream file.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/package_dir"/>
+                        </publisher>
+                    </source>
+                    <software_data type="SVR4">
+                        <name>my_disk_driver.d</name>
+                    </software_data>
+                </software>
+
+            DU: An ITU (Install Time Update) or Driver Update image.
+            The source/publisher/origin refers to the path just above the 
+	    image's DU directory (if expanded) or the name of the .iso image.  
+	    All packages in the image will be added.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/duimages/mydriver.iso"/>
+                        </publisher>
+                    </source>
+                    <software_data type="DU"/>
+                </software>	
+      -->
+      <search_all/>
+    </add_drivers>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_auto_install_manifest.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+'''
+Tests auto install to a specified XML Profile
+'''
+
+import os
+import sys
+import unittest
+
+from solaris_install.auto_install import auto_install
+from solaris_install.engine.test.engine_test_utils import reset_engine
+
+
+class TestAutoInstallProfile(unittest.TestCase):
+    '''Tests that auto installation succeeds with -m specified manifest '''
+    AI = None
+
+    def setUp(self):
+        '''
+        Create a auto_install client for testing with.
+        '''
+        self.AI = None
+
+    def tearDown(self):
+        '''
+        Clean Up
+        '''
+        if self.AI is not None:
+            # Reset the Engine for next test
+            if self.AI.engine is not None:
+                reset_engine(self.AI.engine)
+
+            # Remove install log as test has succeeded
+            if os.path.isfile(self.AI.INSTALL_LOG):
+                os.remove(self.AI.INSTALL_LOG)
+
+            self.AI = None
+
+    def test_manifest_install_auto_reboot_true(self):
+        '''
+        Test installation with manifest containing auto_reboot set to true
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testmanifest = os.environ['SRC'] + \
+            "/cmd/auto-install/test/manifest_auto_reboot_true.xml"
+        args = ["-n", "-s", "target-selection", "-m", testmanifest]
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except KeyboardInterrupt:
+            pass
+
+    def test_manifest_install_auto_reboot_false(self):
+        '''
+        Test installation with manifest containing auto_reboot set to false
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testmanifest = os.environ['SRC'] + \
+            "/cmd/auto-install/test/manifest_auto_reboot_false.xml"
+        args = ["-n", "-s", "target-selection", "-m", testmanifest]
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except KeyboardInterrupt:
+            pass
+
+    def test_manifest_install_auto_reboot_not_set(self):
+        '''
+        Test installation with manifest where auto_reboot is not set
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testmanifest = os.environ['SRC'] + \
+            "/cmd/auto-install/test/manifest_auto_reboot_not_set.xml"
+        args = ["-n", "-s", "target-selection", "-m", testmanifest]
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except KeyboardInterrupt:
+            pass
+
+    def test_manifest_auto_reboot_invalid(self):
+        '''
+        Test installation with a manifest that fails to parse.
+        Achieved by setting auto_reboot to an invalid value
+        exitval should be set to AI_EXIT_FAILURE
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testmanifest = os.environ['SRC'] + \
+            "/cmd/auto-install/test/manifest_auto_reboot_invalid.xml"
+        args = ["-n", "-s", "target-selection", "-m", testmanifest]
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, None)
+            self.assertEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except:
+            raise
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_auto_install_parse_args.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+'''
+Tests to ensure argument parsing errors are printed
+
+All these tests should throw a parsing error. AutoInstall instance will not
+be created and thus an InstallEngine instance is also not created.
+'''
+
+import os
+import unittest
+
+from solaris_install.auto_install import auto_install
+
+
+class TestAutoInstallParseArgs(unittest.TestCase):
+    '''Tests to ensure errors are reported for incorrect command line args '''
+
+    AI = None
+
+    def setUp(self):
+        '''
+        Nothing to really set up, so just pass
+        '''
+        self.AI = None
+
+    def tearDown(self):
+        '''
+        Clean up the engine and install log if an AutoInstall instance was created.
+        '''
+        if self.AI is not None:
+            # Reset the Engine for next test
+            if self.AI.engine is not None:
+                reset_engine(self.AI.engine)
+
+            # Remove install log as test has succeeded
+            if os.path.isfile(self.AI.INSTALL_LOG):
+                os.remove(self.AI.INSTALL_LOG)
+
+    def test_no_disk_manifest(self):
+        '''
+        Test if no disk or manifest is specified fail
+        '''
+        args = ["-n"]
+        try:
+            self.AI = auto_install.AutoInstall(args)
+        except:
+            pass
+
+        self.assertEqual(self.AI, None)
+
+    def test_break_before_after_ti(self):
+        '''
+        Test that both break before and after ti are not specified
+        '''
+        args = ["-n", "-m", "testmanifest", "-i", "-I"]
+        AI = None
+        try:
+            AI = auto_install.AutoInstall(args)
+        except:
+            pass
+
+        self.assertEqual(AI, None)
+
+    def test_invalid_argument(self):
+        '''
+        Test passing of unknown/invalid argument
+        '''
+        args = ["-n", "-z"]
+        try:
+            self.AI = auto_install.AutoInstall(args)
+        except:
+            pass
+
+        self.assertEqual(self.AI, None)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_auto_install_script.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+'''
+Tests auto install with script which derives the manifest to be used
+'''
+
+import os
+import unittest
+
+from solaris_install.auto_install import auto_install
+from solaris_install.engine.test.engine_test_utils import reset_engine
+
+
+class TestAutoInstallScript(unittest.TestCase):
+    '''Tests that auto installation succeeds with -m specified derived script '''
+
+    AI = None
+
+    def setUp(self):
+        '''
+        Create a auto_install client for testing with.
+        '''
+        self.AI = None
+
+    def tearDown(self):
+        '''
+        Clean Up
+        '''
+        if self.AI is not None:
+            # Reset the Engine for next test
+            if self.AI.engine is not None:
+                reset_engine(self.AI.engine)
+
+            # Remove install log as test has succeeded
+            if os.path.isfile(self.AI.INSTALL_LOG):
+                os.remove(self.AI.INSTALL_LOG)
+
+    def test_shell_script(self):
+        '''
+        Test installation with derived manifest shell script
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testscript = os.environ['SRC'] +  \
+            "/cmd/auto-install/test/test_shell_script.sh"
+        args = ["-n", "-m", testscript]
+
+        # TODO, write the testscript.sh
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except KeyboardInterrupt:
+            pass
+
+    def test_python_script(self):
+        '''
+        Test installation with derived manifest python script
+        '''
+        # To run tests bldenv script will have been run, thus we can assume
+        # that $SRC environment variable will be set.
+        testscript = os.environ['SRC'] +  \
+            "/cmd/auto-install/test/test_python_script"
+        args = ["-n", "-m", testscript]
+
+        # TODO, write the test_python_script
+
+        try:
+            self.AI = auto_install.AutoInstall(args)
+            self.assertNotEqual(self.AI, None)
+            self.AI.perform_autoinstall()
+            self.assertNotEqual(self.AI.exitval, self.AI.AI_EXIT_FAILURE)
+        except KeyboardInterrupt:
+            pass
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_python_script	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,8 @@
+#!/usr/bin/python2.6
+
+import os
+import sys
+
+sys_str = "pfexec cp -f " + os.environ['SRC'] + "/cmd/auto-install/test/profile_auto_reboot_true.xml /var/run/manifest.xml"
+ret = os.system(sys_str)
+sys.exit(ret)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_shell_script.sh	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,5 @@
+#!/usr/bin/bash
+
+pfexec cp -f ${SRC}/cmd/auto-install/test/profile_auto_reboot_true.xml /var/run/manifest.xml
+
+exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_target_selection_sparc.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,1109 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+import difflib
+import os
+import platform
+import re
+import unittest
+
+import osol_install.errsvc as errsvc
+from lxml import etree
+from solaris_install.auto_install.checkpoints.target_selection \
+    import TargetSelection
+from solaris_install.engine import InstallEngine
+from solaris_install.engine.test.engine_test_utils import \
+    get_new_engine_instance, reset_engine
+from solaris_install.target import Target
+from solaris_install.target.physical import Disk, Slice
+
+
+class  TestTargetSelectionTestCase(unittest.TestCase):
+    DISCOVERED_TARGETS_XML = '''
+    <root>
+      <target name="discovered">
+        <disk whole_disk="false">
+          <disk_name name="c2t0d0" name_type="ctd"/>
+          <disk_prop dev_type="scsi" dev_vendor="HITACHI" 
+           dev_size="143349312secs"/>
+          <disk_keyword key="boot_disk"/>
+          <slice name="0" action="preserve" force="false" is_swap="false" 
+           in_zpool="rpool_test" in_vdev="rpool_test-none">
+            <size val="16770048secs" start_sector="10176"/>
+          </slice>
+          <slice name="2" action="preserve" force="false" tag="5" is_swap="false">
+            <size val="143349312secs" start_sector="0"/>
+          </slice>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c2t1d0" name_type="ctd"/>
+          <disk_prop dev_type="scsi" dev_vendor="HITACHI" 
+           dev_size="143349312secs"/>
+          <slice name="0" action="preserve" force="false" is_swap="false">
+            <size val="41945472secs" start_sector="0"/>
+          </slice>
+          <slice name="1" action="preserve" force="false" is_swap="false">
+            <size val="4202688secs" start_sector="41945472"/>
+          </slice>
+          <slice name="2" action="preserve" force="false" tag="5" is_swap="false">
+            <size val="143349312secs" start_sector="0"/>
+          </slice>
+        </disk>
+        <logical noswap="false" nodump="false">
+          <zpool name="rpool_test" action="preserve" is_root="false" 
+           mountpoint="/rpool_test">
+            <vdev name="rpool_test-none" redundancy="none"/>
+            <filesystem name="ROOT" action="preserve" mountpoint="legacy" 
+             in_be="false"/>
+            <filesystem name="ROOT/solaris" action="preserve" mountpoint="/" 
+             in_be="false"/>
+            <filesystem name="ROOT/solaris/testing" action="preserve" 
+             mountpoint="/testing" in_be="false"/>
+            <zvol name="dump" action="preserve" use="dump">
+              <size val="1.03gb"/>
+            </zvol>
+            <zvol name="swap" action="preserve" use="swap">
+              <size val="2.06gb"/>
+            </zvol>
+            <be name="solaris"/>
+          </zpool>
+        </logical>
+      </target>
+    </root>
+    '''
+
+    def __gendiff_str(self, a, b):
+        a_lines = a.splitlines()
+        b_lines = b.splitlines()
+        return "\n".join(list(difflib.ndiff(a_lines, b_lines)))
+
+    def __run_simple_test(self, input_xml, expected_xml, fail_ex_str=None):
+        '''Run a simple test where given specific xml in the manifest, we
+        validate that the generated DESIRED tree is as expected.
+
+        'expected_xml' should have the values indented using '.' instead of
+        spaces to ensure that perfect match is made.
+        '''
+        errsvc.clear_error_list()
+
+        # Different processor to what these tests were written for.
+        if platform.processor() != 'sparc':
+            print "Skipping test on wrong arch"
+            return
+
+        if input_xml is not None:
+            manifest_dom = etree.fromstring(input_xml)
+            self.doc.import_from_manifest_xml(manifest_dom, volatile=True)
+            self.doc.logger.debug("DOC AFTER IMPORT TEST XML:\n%s\n\n" %
+                                  (str(self.doc)))
+            if len(errsvc._ERRORS) > 0:
+                self.fail(errsvc._ERRORS[0])
+
+        # Define expected string, compensate for indent. Using '.' in expected
+        # string to remove conflict with indent replacement.
+        indentation = '''\
+        '''
+        expected_xml = expected_xml.replace(indentation, "").replace(".", " ")
+
+        try:
+            self.target_selection.select_targets(
+                self.doc.volatile.get_descendants(class_type=Target,
+                                                  max_depth=2),
+                self.doc.persistent)
+            if (fail_ex_str is not None):
+                self.fail("Expected failure but test succeeded.")
+        except Exception, ex:
+            if (fail_ex_str is not None):
+                self.assertEquals(str(ex), fail_ex_str)
+            else:
+                import traceback
+                traceback.print_exc()
+                raise ex
+
+        try:
+            desired = \
+                self.doc.get_descendants(
+                    name=Target.DESIRED, class_type=Target, max_depth=2)[0]
+
+            xml_str = desired.get_xml_tree_str()
+
+            expected_re = re.compile(expected_xml)
+            if not expected_re.match(xml_str):
+                self.fail("Resulting XML doesn't match expected:\nDIFF:\n%s\n" %
+                          self.__gendiff_str(expected_xml, xml_str))
+
+            desired.final_validation()
+            if len(errsvc._ERRORS) > 0:
+                self.fail(errsvc._ERRORS[0])
+        except Exception, e:
+            import traceback
+            traceback.print_exc()
+            self.fail(e)
+
+    def setUp(self):
+        self.engine = get_new_engine_instance()
+
+        self.target_selection = TargetSelection("Test Checkpoint")
+        self.doc = InstallEngine.get_instance().data_object_cache
+        discovered_dom = etree.fromstring(self.DISCOVERED_TARGETS_XML)
+        self.doc.import_from_manifest_xml(discovered_dom, volatile=False)
+
+        # Ensure backup slices have tag = 5 (V_BACKUP)
+        slices = self.doc.get_descendants("2", Slice)
+        for s in slices:
+            s.tag = 5
+
+        # As we are not really discovering disks, label  will be set to "None"
+        # Ensure they set to VTOC
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+        self.disks = discovered.get_descendants(class_type=Disk)
+        for disk in self.disks:
+            if disk.label is None:
+                disk.label = "VTOC"
+
+    def tearDown(self):
+        if self.engine is not None:
+            reset_engine(self.engine)
+
+        self.doc = None
+        self.target_selection = None
+
+    def test_target_selection_no_target(self):
+        '''Test Success if no target in manifest'''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(None, expected_xml)
+
+    def test_target_selection_no_disk_target_with_logical(self):
+        '''Test Success if no disk targets, but with a logical section'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+             <target>
+               <logical>
+                 <zpool name="myrpool" is_root="true">
+                   <filesystem name="/export"/>
+                   <filesystem name="/export/home"/>
+                   <be name="solaris"/>
+                 </zpool>
+               </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="/export" action="create" in_be="false"/>
+        ......<filesystem name="/export/home" action="create" in_be="false"/>
+        ......<be name="solaris"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348800secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_single_whole_disk_target(self):
+        '''Test Success if single whole target in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="True">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_boot_disk_target(self):
+        '''Test Success if boot_disk target in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool" in_vdev="vdev">
+                <disk_keyword key="boot_disk" />
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="vdev" redundancy="none" />
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_multiple_whole_disk_targets(self):
+        '''Test Success if multiple disk targets, no zpool, in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="True">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="True">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_multiple_whole_disk_false_no_slices(self):
+        '''Test Success if whole disk false, no slices, in manifest.
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+                <size val="390714880secs" start_sector="512"/>
+                <slice name="0" action="create" force="false"
+                 is_swap="false">
+                  <size val="390714880secs" start_sector="512"/>
+                </slice>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml, fail_ex_str=
+            "If whole_disk is False, you need to provide information for"
+            " partitions or slices")
+
+    def test_target_selection_multiple_whole_disk_mixed_no_logicals(self):
+        '''Test Success if 1 whole & 1 partitioned disk, no logicals'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+                <slice name="1" action="delete"/>
+                <slice name="0" action="create" force="false"
+                 is_swap="false">
+                  <size val="143348736secs" start_sector="512"/>
+                </slice>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="1" action="delete" force="false" is_swap="false">
+        ......<size val="4202496secs" start_sector="41945600"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_true_and_rpool(self):
+        '''Test Success if 2 disks, whole_disk=True & root pool'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t0d0" name_type="ctd" />
+              </disk>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t1d0" name_type="ctd" />
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true"/>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_false_and_rpool(self):
+        '''Test Success If 2 Disks w/Whole-Disks = False & Root Pool Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+                  <slice name="1" action="delete"/>
+                  <slice name="0" action="create" force="false"
+                      is_swap="false" in_zpool="myrpool">
+                    <size val="143349312secs" start_sector="512"/>
+                  </slice>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+                  <slice name="1" action="delete"/>
+                  <slice name="0" action="create" force="false"
+                    is_swap="false" in_zpool="myrpool">
+                    <size val="143349312secs" start_sector="512"/>
+                  </slice>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true"/>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143349248secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="1" action="delete" force="false" is_swap="false">
+        ......<size val="4202496secs" start_sector="41945600"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143349248secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_mixed_whole_disk_and_rpool(self):
+        '''Test Success If 2 Disks, Mixed Whole-Disk Values & Root Pool
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+                <slice name="1" action="delete"/>
+                <slice name="0" action="create" force="false" is_swap="false"
+                 in_zpool="myrpool">
+                  <size val="143349312secs" start_sector="512"/>
+                </slice>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <zvol name="swap" action="create" use="swap">
+                    <size val="747m"/>
+                  </zvol>
+                  <zvol name="dump" action="create" use="dump">
+                    <size val="747m"/>
+                  </zvol>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="747m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="747m"/>
+        ......</zvol>
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="1" action="delete" force="false" is_swap="false">
+        ......<size val="4202496secs" start_sector="41945600"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143349248secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_true_and_rpool_w_vdev(self):
+        '''Test Success If 2 Disks w/Whole-Disk = True & Root Vdev Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_vdev="myvdev">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="true" in_vdev="myvdev">
+                <disk_name name="c2t1d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="myvdev" redundancy="mirror"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="myvdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_non_be_datasets(self):
+        '''Test Success If Root Pool Non-BE Datasets Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <filesystem name="to_share" mountpoint="/share"/>
+                  <filesystem name="export2"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="to_share" action="create" mountpoint="/share" \
+        in_be="false"/>
+        ......<filesystem name="export2" action="create" in_be="false"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_with_be_datasets(self):
+        '''Test Success If Root Pool with BE Datasets Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <filesystem name="to_share" mountpoint="/share"/>
+                  <filesystem name="export2"/>
+                  <filesystem name="opt" in_be="true"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="to_share" action="create" mountpoint="/share" \
+        in_be="false"/>
+        ......<filesystem name="export2" action="create" in_be="false"/>
+        ......<filesystem name="opt" action="create" in_be="true"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_with_be_specified(self):
+        '''Test Success If Root Pool With BE Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <be name="my_solaris_be"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<be name="my_solaris_be"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="143348736secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_slice_no_size(self):
+        '''Test Success If Have a Disk, containing 1 slice without size
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+                <slice name="0" action="create" force="false"
+                 is_swap="false"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="143349248secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_slice_no_start_sector(self):
+        '''Test Success If Have a Disk, containing 1 slice without start_sector
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c2t0d0" name_type="ctd"/>
+                <slice name="0" action="create" force="false"
+                 is_swap="false">
+                  <size val="30G"/>
+                </slice>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ......<size val="62914560secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_slice_too_large(self):
+        '''Test Fail if slice is specified with a value too large'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+            <target>
+             <disk>
+                <disk_name name_type="ctd" name="c2t0d0"/>
+                <slice name="0" action="create" is_swap="false"
+                 in_zpool="rpool" in_vdev="vdev">
+                  <size val="20000001mb"/>
+                </slice>
+              </disk>
+              <logical>
+                <zpool name="rpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml, fail_ex_str=
+            "Slice 0 has a size larger than the disk c2t0d0")
+
+    def test_target_selection_swap_and_dump_size(self):
+        '''Test Success In Calc of Swap and Dump Size'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name_type="ctd" name="c2t0d0"/>
+                <slice name="0" action="create" is_swap="false"
+                 in_zpool="myrpool" in_vdev="vdev">
+                  <size val="6GB"/>
+                </slice>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="682m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="341m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c2t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="scsi" dev_vendor="HITACHI" \
+        dev_size="143349312secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<slice name="2" action="preserve" force="false" is_swap="false">
+        ......<size val="143349312secs" start_sector="0"/>
+        ....</slice>
+        ....<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ......<size val="12582912secs" start_sector="512"/>
+        ....</slice>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/test/test_target_selection_x86.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,2881 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+import difflib
+import os
+import platform
+import re
+import unittest
+
+import osol_install.errsvc as errsvc
+from lxml import etree
+from solaris_install.auto_install.checkpoints.target_selection \
+    import TargetSelection
+from solaris_install.engine import InstallEngine
+from solaris_install.engine.test.engine_test_utils import \
+    get_new_engine_instance, reset_engine
+from solaris_install.target import Target
+from solaris_install.target.physical import Disk
+
+
+class  TestTargetSelectionTestCase(unittest.TestCase):
+    DISCOVERED_TARGETS_XML = '''
+    <root>
+      <target name="discovered">
+        <disk whole_disk="false">
+          <disk_name name="c10t2d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_vendor="Lenovo"
+           dev_size="625141760secs"/>
+          <partition action="preserve" name="1" part_type="191">
+            <size val="348144615secs" start_sector="0"/>
+            <slice name="1" action="preserve" force="false" is_swap="false"
+               in_zpool="rpool" in_vdev="rpool-none">
+              <size val="1060290secs" start_sector="48195"/>
+            </slice>
+            <slice name="3" action="preserve" force="false" is_swap="false"
+             in_zpool="myrpool" in_vdev="rpool-mirror-0">
+              <size val="43022070secs" start_sector="1108485"/>
+            </slice>
+            <slice name="7" action="preserve" force="false" is_swap="false">
+              <size val="190257795secs" start_sector="44130555"/>
+            </slice>
+            <slice name="8" action="preserve" force="false" is_swap="false">
+              <size val="16065secs" start_sector="0"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c10t1d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_vendor="Lenovo"
+           dev_size="625141760secs"/>
+          <partition action="preserve" name="1" part_type="191">
+            <size val="348144615secs" start_sector="0"/>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c10t0d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_vendor="Lenovo"
+           dev_size="625141760secs"/>
+          <partition action="preserve" name="1" part_type="131">
+            <size val="348144615secs" start_sector="0"/>
+          </partition>
+          <partition action="preserve" name="2" part_type="130">
+            <size val="276976665secs" start_sector="348160680"/>
+            <slice name="1" action="preserve" force="false" is_swap="false"
+               in_zpool="rpool" in_vdev="rpool-none">
+              <size val="1060290secs" start_sector="48195"/>
+            </slice>
+            <slice name="2" action="preserve" force="false" is_swap="false">
+              <size val="276944535secs" start_sector="0"/>
+            </slice>
+            <slice name="3" action="preserve" force="false" is_swap="false"
+             in_zpool="myrpool" in_vdev="rpool-mirror-0">
+              <size val="43022070secs" start_sector="1108485"/>
+            </slice>
+            <slice name="7" action="preserve" force="false" is_swap="false">
+              <size val="190257795secs" start_sector="44130555"/>
+            </slice>
+            <slice name="8" action="preserve" force="false" is_swap="false">
+              <size val="16065secs" start_sector="0"/>
+            </slice>
+            <slice name="9" action="preserve" force="false" is_swap="false">
+              <size val="32129secs" start_sector="16066"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c7d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+          <disk_keyword key="boot_disk"/>
+          <partition action="preserve" name="1" part_type="175">
+            <size val="401625secs" start_sector="0"/>
+          </partition>
+          <partition action="preserve" name="2" part_type="175">
+            <size val="133949970secs" start_sector="401625"/>
+          </partition>
+          <partition action="preserve" name="3" part_type="130">
+            <size val="234420480secs" start_sector="134367660"/>
+            <slice name="1" action="preserve" force="false" is_swap="false">
+              <size val="1060290secs" start_sector="48195"/>
+            </slice>
+            <slice name="2" action="preserve" force="false" is_swap="false">
+              <size val="234388350secs" start_sector="0"/>
+            </slice>
+            <slice name="3" action="preserve" force="false" is_swap="false">
+              <size val="43022070secs" start_sector="1108485"/>
+            </slice>
+            <slice name="7" action="preserve" force="false" is_swap="false">
+              <size val="190257795secs" start_sector="44130555"/>
+            </slice>
+            <slice name="8" action="preserve" force="false" is_swap="false">
+              <size val="16065secs" start_sector="0"/>
+            </slice>
+            <slice name="9" action="preserve" force="false" is_swap="false">
+              <size val="32130secs" start_sector="16065"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c7d1" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+            <partition action="create" name="1" part_type="191">
+            <size val="390714880secs" start_sector="512"/>
+            <slice name="0" action="create" force="false" is_swap="false">
+              <size val="390713344secs" start_sector="512"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c8d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+            <partition action="create" name="1" part_type="191">
+            <size val="390714880secs" start_sector="512"/>
+            <slice name="0" action="create" force="false" is_swap="false">
+              <size val="390713344secs" start_sector="512"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c8d1" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+            <partition action="create" name="1" part_type="191">
+            <size val="390714880secs" start_sector="512"/>
+            <slice name="0" action="create" force="false" is_swap="false">
+              <size val="390713344secs" start_sector="512"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c9d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+            <partition action="create" name="1" part_type="191">
+            <size val="390714880secs" start_sector="512"/>
+            <slice name="0" action="create" force="false" is_swap="false">
+              <size val="390713344secs" start_sector="512"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c9d1" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+            <partition action="create" name="1" part_type="191">
+            <size val="390714880secs" start_sector="512"/>
+            <slice name="0" action="create" force="false" is_swap="false">
+              <size val="390713344secs" start_sector="512"/>
+            </slice>
+          </partition>
+        </disk>
+        <disk whole_disk="false">
+          <disk_name name="c3d0" name_type="ctd"/>
+          <disk_prop dev_type="FIXED" dev_size="29.98gb"/>
+            <partition action="preserve" name="1" part_type="12">
+              <size val="2.99Gb" start_sector="16065"/>
+            </partition>
+            <partition action="preserve" name="2" part_type="131">
+              <size val="2.99Gb" start_sector="6286550"/>
+            </partition>
+            <partition action="preserve" name="3" part_type="12">
+              <size val="2.99Gb" start_sector="12557035"/>
+            </partition>
+            <partition action="preserve" name="4" part_type="5">
+              <size val="14.99Gb" start_sector="18827520"/>
+            </partition>
+            <partition action="preserve" name="5" part_type="12">
+              <size val="1.5Gb" start_sector="18827520"/>
+            </partition>
+            <partition action="preserve" name="6" part_type="191">
+              <size val="5.5Gb" start_sector="21973248"/>
+            </partition>
+        </disk>
+        <logical noswap="true" nodump="true">
+          <zpool name="export" action="preserve" is_root="false"
+           mountpoint="/export">
+            <vdev name="export-mirror-0" redundancy="mirror-0"/>
+            <filesystem name="home" action="preserve"
+             mountpoint="/export/home"/>
+            <filesystem name="synchronized" action="preserve"
+             mountpoint="/export/synchronized"/>
+            <filesystem name="test_install" action="preserve"
+             mountpoint="/a"/>
+          </zpool>
+          <zpool name="myrpool" action="preserve" is_root="true"
+           mountpoint="/rpool">
+            <vdev name="rpool-none" redundancy="none"/>
+            <vdev name="rpool-mirror-0" redundancy="mirror-0"/>
+            <filesystem name="ROOT" action="preserve" mountpoint="none"/>
+            <filesystem name="ROOT/os153" action="preserve" mountpoint="/"/>
+            <filesystem name="ROOT/os158" action="preserve" mountpoint="/"/>
+            <filesystem name="ROOT/os159" action="preserve" mountpoint="/"/>
+            <filesystem name="ROOT/os161" action="preserve" mountpoint="/"/>
+            <zvol name="dump" action="preserve" use="none">
+              <size val="1.50gb"/>
+            </zvol>
+            <zvol name="swap" action="preserve" use="none">
+              <size val="1.00gb"/>
+            </zvol>
+            <be name="os153"/>
+            <be name="os158"/>
+            <be name="os159"/>
+            <be name="os161"/>
+          </zpool>
+        </logical>
+      </target>
+    </root>
+    '''
+
+    def __gendiff_str(self, a, b):
+        a_lines = a.splitlines()
+        b_lines = b.splitlines()
+        return "\n".join(list(difflib.ndiff(a_lines, b_lines)))
+
+    def __run_simple_test(self, input_xml, expected_xml, fail_ex_str=None):
+        '''Run a simple test where given specific xml in the manifest, we
+        validate that the generated DESIRED tree is as expected.
+
+        'expected_xml' should have the values indented using '.' instead of
+        spaces to ensure that perfect match is made.
+        '''
+        errsvc.clear_error_list()
+
+        # Different processor to what these tests were written for.
+        if platform.processor() != 'i386':
+            print "Skipping test on wrong arch"
+
+            return
+
+        if input_xml is not None:
+            manifest_dom = etree.fromstring(input_xml)
+            self.doc.import_from_manifest_xml(manifest_dom, volatile=True)
+            self.doc.logger.debug("DOC AFTER IMPORT TEST XML:\n%s\n\n" %
+                                  (str(self.doc)))
+            if len(errsvc._ERRORS) > 0:
+                self.fail(errsvc._ERRORS[0])
+
+        # Define expected string, compensate for indent. Using '.' in expected
+        # string to remove conflict with indent replacement.
+        indentation = '''\
+        '''
+        expected_xml = expected_xml.replace(indentation, "").replace(".", " ")
+
+        try:
+            self.target_selection.select_targets(
+                self.doc.volatile.get_descendants(class_type=Target,
+                                                  max_depth=2),
+                self.doc.persistent)
+            if (fail_ex_str is not None):
+                self.fail("Expected failure but test succeeded.")
+        except Exception, ex:
+            if (fail_ex_str is not None):
+                self.assertEquals(str(ex), fail_ex_str)
+
+        desired = \
+            self.doc.get_descendants(
+                name=Target.DESIRED, class_type=Target, max_depth=2)[0]
+
+        xml_str = desired.get_xml_tree_str()
+
+        expected_re = re.compile(expected_xml)
+        if not expected_re.match(xml_str):
+            self.fail("Resulting XML doesn't match expected:\nDIFF:\n%s\n" %
+                      self.__gendiff_str(expected_xml, xml_str))
+
+        desired.final_validation()
+        if len(errsvc._ERRORS) > 0:
+            self.fail(errsvc._ERRORS[0])
+
+    def setUp(self):
+        self.engine = get_new_engine_instance()
+
+        self.target_selection = TargetSelection("Test Checkpoint")
+        self.doc = InstallEngine.get_instance().data_object_cache
+        discovered_dom = etree.fromstring(self.DISCOVERED_TARGETS_XML)
+        self.doc.import_from_manifest_xml(discovered_dom, volatile=False)
+
+        # As we are not really discovering disks, label will be set to "None".
+        # Ensure they are set to VTOC.
+        discovered = self.doc.persistent.get_first_child(Target.DISCOVERED)
+        self.disks = discovered.get_descendants(class_type=Disk)
+        for disk in self.disks:
+            if disk.label is None:
+                disk.label = "VTOC"
+
+    def tearDown(self):
+        if self.engine is not None:
+            reset_engine(self.engine)
+
+        self.doc = None
+        self.target_selection = None
+
+    def test_target_selection_no_target(self):
+        '''Test Success if no target in manifest'''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(None, expected_xml)
+
+    def test_target_selection_no_disk_target_with_logical(self):
+        '''Test Success if no disk targets, but with a logical section'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+             <target>
+               <logical>
+                 <zpool name="myrpool" is_root="true">
+                   <filesystem name="/export"/>
+                   <filesystem name="/export/home"/>
+                   <be name="solaris"/>
+                 </zpool>
+               </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="/export" action="create" in_be="false"/>
+        ......<filesystem name="/export/home" action="create" in_be="false"/>
+        ......<be name="solaris"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_single_whole_disk_target(self):
+        '''Test Success if single whole target in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="True">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_boot_disk_target(self):
+        '''Test Success if boot_disk target in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool" in_vdev="vdev">
+                <disk_keyword key="boot_disk" />
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="vdev" redundancy="none" />
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_multiple_whole_disk_targets(self):
+        '''Test Success if multiple disk targets, no zpool, in manifest'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="True">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="True">
+                <disk_name name="c7d0" name_type="ctd"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_multiple_whole_disk_false_no_logicals(self):
+        '''Test Success if multiple partitioned disks, no logical, in manifest.
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="625141248secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="625139712secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="390714880secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="390713344secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        . <logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="delete" name="2" part_type="130">
+        ......<size val="276976640secs" start_sector="348160512"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        . </disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_multiple_whole_disk_mixed_no_logicals(self):
+        '''Test Success if 1 whole & 1 partitioned disk, no logicals'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="390714880secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="390713344secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_true_and_rpool(self):
+        '''Test Success if 2 disks, whole_disk=True & root pool'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c10t0d0" name_type="ctd" />
+              </disk>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c7d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true"/>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_false_and_rpool(self):
+        '''Test Success If 2 Disks w/Whole-Disk = False & Root Pool Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="625141248secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                      is_swap="false" in_zpool="myrpool">
+                    <size val="625139712secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="390714880secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                    is_swap="false" in_zpool="myrpool">
+                    <size val="390713344secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true"/>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="delete" name="2" part_type="130">
+        ......<size val="276976640secs" start_sector="348160512"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_mixed_whole_disk_and_rpool(self):
+        '''Test Success If 2 Disks, Mixed Whole-Disk Values & Root Pool
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+            <disk_name name="c7d0" name_type="ctd"/>
+            <partition action="delete" name="2"/>
+            <partition action="delete" name="3"/>
+            <partition action="create" name="1" part_type="191">
+              <size val="390714880secs" start_sector="512"/>
+              <slice name="0" action="create" force="false" is_swap="false"
+               in_zpool="myrpool">
+                <size val="390713344secs" start_sector="512"/>
+              </slice>
+            </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <zvol name="swap" action="create" use="swap">
+                    <size val="747m"/>
+                  </zvol>
+                  <zvol name="dump" action="create" use="dump">
+                    <size val="747m"/>
+                  </zvol>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="747m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="747m"/>
+        ......</zvol>
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_true_and_rpool_w_vdev(self):
+        '''Test Success If 2 Disks w/Whole-Disk = True & Root Vdev Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_vdev="myvdev">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="true" in_vdev="myvdev">
+                <disk_name name="c7d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="myvdev" redundancy="mirror"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="myvdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_whole_disk_false_and_rpool_w_vdev(self):
+        '''Test Success If 2 Disks w/Whole-Disk = False & Root Pool w/Vdev
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="625141248secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false" in_vdev="myvdev">
+                    <size val="625139712secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="390714880secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false" in_vdev="myvdev">
+                    <size val="390713344secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="myvdev" redundancy="mirror"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="myvdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="delete" name="2" part_type="130">
+        ......<size val="276976640secs" start_sector="348160512"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_2_disks_mixed_whole_disk__and_rpool_w_vdev(self):
+        '''Test Success If 2 Disks, Mixed Whole-Disk Values & Root w/Vdev
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_vdev="myvdev">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="390714880secs" start_sector="512"/>
+                  <slice name="0" action="create" force="false" is_swap="false"
+                   in_vdev="myvdev">
+                    <size val="390713344secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="myvdev" redundancy="mirror"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="myvdev" redundancy="mirror"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="myvdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_non_be_datasets(self):
+        '''Test Success If Root Pool Non-BE Datasets Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <filesystem name="to_share" mountpoint="/share"/>
+                  <filesystem name="export2"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="to_share" action="create" mountpoint="/share" \
+        in_be="false"/>
+        ......<filesystem name="export2" action="create" in_be="false"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_with_be_datasets(self):
+        '''Test Success If Root Pool with BE Datasets Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <filesystem name="to_share" mountpoint="/share"/>
+                  <filesystem name="export2"/>
+                  <filesystem name="opt" in_be="true"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="to_share" action="create" mountpoint="/share" \
+        in_be="false"/>
+        ......<filesystem name="export2" action="create" in_be="false"/>
+        ......<filesystem name="opt" action="create" in_be="true"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_root_pool_with_be_specified(self):
+        '''Test Success If Root Pool With BE Specified
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="true" in_zpool="myrpool">
+                <disk_name name="c10t0d0" name_type="ctd"/>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <be name="my_solaris_be"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<be name="my_solaris_be"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_partition_no_size(self):
+        '''Test Success If Have a Disk, containing 1 partition without size
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="390714879secs" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical nodump="true" noswap="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_slice_no_size(self):
+        '''Test Success If Have a Disk, containing 1 slice without size
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <slice name="0" action="create" force="false"
+                   is_swap="false"/>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_partition_no_start_sector(self):
+        '''Test Success If Have a Disk, with 1 partition w/out start_sector
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="30G"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="25G" start_sector="512"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical nodump="true" noswap="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="62914560secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="52428800secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_slice_no_start_sector(self):
+        '''Test Success If Have a Disk, containing 1 slice without start_sector
+        '''
+        # Input manifest: partition 1 specifies start_sector="512" but the
+        # nested slice 0 omits it; target selection must fill it in.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="30G" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="30G"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected "desired" tree: the slice inherits start_sector 512 and its
+        # 30G request is rendered as 62914560 512-byte sectors.  Leading ".."
+        # pairs encode one level of nesting each in this comparison format.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="62914560secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="62914560secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_2_partition_no_start_sector(self):
+        '''Test Success If Have a Disk, with 2 partition w/out start_sector
+        '''
+        # Input manifest: neither created partition (25G and 4G) gives a
+        # start_sector; target selection must lay them out on the disk.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="25G"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="20G" start_sector="512"/>
+                  </slice>
+                </partition>
+                <partition action="create" name="2" part_type="11">
+                  <size val="4G"/>
+                </partition>
+              </disk>
+              <logical nodump="true" noswap="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: partition 1 starts at sector 512 (25G = 52428800 sectors)
+        # and partition 2 is placed immediately after it at 52429312.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="52428800secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="41943040secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ....<partition action="create" name="2" part_type="11">
+        ......<size val="8388608secs" start_sector="52429312"/>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_if_disk_with_2_slice_no_start_sector(self):
+        '''Test Success If Have a Disk, containing 2 slice without start_sector
+        '''
+        # Input manifest: two slices (20G and 9G) inside partition 1, neither
+        # giving a start_sector; both must be positioned within the partition.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name="c7d0" name_type="ctd"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="1" part_type="191">
+                  <size val="30G" start_sector="512"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false">
+                    <size val="20G"/>
+                  </slice>
+                  <slice name="1" action="create" force="false"
+                   is_swap="false">
+                    <size val="9G"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: slice 0 starts at 512 (20G = 41943040 sectors) and slice 1
+        # follows it directly at 41943552; only slice 0 joins the root pool.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="62914560secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="41943040secs" start_sector="512"/>
+        ......</slice>
+        ......<slice name="1" action="create" force="false" is_swap="false">
+        ........<size val="18874368secs" start_sector="41943552"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_rpool_1_disk_and_data_2_disk(self):
+        '''Test Success If Have 1 Disk in the Root Pool, Data Pool with 2 Disks
+        '''
+        # Input manifest: one whole disk for root pool "myroot", two whole
+        # disks for pool "data"; the data zpool element names no vdev.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create">
+                  </zpool>
+                  <zpool name="data"/>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: root disk is expanded to a full partition/slice layout,
+        # while the two data disks stay whole_disk="true" and their vdev gets
+        # redundancy "mirror".  No noswap/nodump flags in input, so swap and
+        # dump zvols are created.
+        # NOTE(review): the "\d+m" values look like regex placeholders for the
+        # computed swap/dump sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ....<zpool name="data" action="create" is_root="false">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="vdev">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="vdev" whole_disk="true">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="vdev" whole_disk="true">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_rpool_2_disk_mirror_and_2_disk_spare(self):
+        '''Test Success If Have 2 Disks in the Root Pool and 2 Disks spare
+        '''
+        # Input manifest: root pool "myroot" with a "mirrored" vdev (c7d0,
+        # c7d1) and a "spared" vdev (c8d0, c8d1), all given as whole disks.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mirrored">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mirrored">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="spared">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="spared">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create">
+                    <vdev name="mirrored" redundancy="mirror"/>
+                    <vdev name="spared" redundancy="spare"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: because the pool is a root pool, every member disk is
+        # converted to whole_disk="false" with a created Solaris partition and
+        # slice 0 carrying the disk's in_zpool/in_vdev assignment.
+        # NOTE(review): "\d+m" looks like a regex placeholder for the computed
+        # swap/dump zvol sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="mirrored" redundancy="mirror"/>
+        ......<vdev name="spared" redundancy="spare"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mirrored">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mirrored">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="spared">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="spared">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_data_pool_2_disk_mirror_and_2_logmirror(self):
+        '''Test Success If Have 2 Disks in a Data Pool and 2 Disks log-mirror
+        '''
+        # Input manifest: root pool on c10t0d0; data pool with a "mirrored"
+        # vdev (c7d0, c7d1) and a "mirrored-log" vdev (c8d0, c8d1).
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot">
+                  <disk_name name="c10t0d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mirrored">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mirrored">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mirrored-log">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mirrored-log">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create"/>
+                  <zpool name="data" is_root="false" action="create">
+                    <vdev name="mirrored" redundancy="mirror"/>
+                    <vdev name="mirrored-log" redundancy="logmirror"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: only the root disk is re-laid-out with partition/slice;
+        # the four data-pool disks remain whole_disk="true" and keep their
+        # vdev assignments.
+        # NOTE(review): "\d+m" looks like a regex placeholder for the computed
+        # swap/dump zvol sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ....<zpool name="data" action="create" is_root="false">
+        ......<vdev name="mirrored" redundancy="mirror"/>
+        ......<vdev name="mirrored-log" redundancy="logmirror"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mirrored" whole_disk="true">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mirrored" whole_disk="true">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mirrored-log" whole_disk="true">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mirrored-log" whole_disk="true">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_data_pool_3_disk_raid2(self):
+        '''Test Success If Have 3 Disks in a Data Pool with RAIDZ2
+        '''
+        # Input manifest: root pool on c10t0d0; data pool "data" with a
+        # single raidz2 vdev spanning c7d0, c7d1 and c8d0.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot">
+                  <disk_name name="c10t0d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="raid">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="raid">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="raid">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create"/>
+                  <zpool name="data" is_root="false" action="create">
+                    <vdev name="raid" redundancy="raidz2"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: root disk gets partition/slice layout; the three raidz2
+        # members stay whole_disk="true" with the "raid" vdev preserved.
+        # NOTE(review): "\d+m" looks like a regex placeholder for the computed
+        # swap/dump zvol sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ....<zpool name="data" action="create" is_root="false">
+        ......<vdev name="raid" redundancy="raidz2"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="raid" whole_disk="true">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="raid" whole_disk="true">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="raid" whole_disk="true">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_data_pool_2_disk_mirror_1_hot_spare_1_log(self):
+        '''Test Success If Have 2 Disks in a Data Pool with hot-spare and log
+        '''
+        # Input manifest: root pool on c10t0d0; data pool with three vdevs -
+        # mirror (c7d0, c7d1), hot spare (c8d0) and log (c8d1).
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot">
+                  <disk_name name="c10t0d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mymirrored">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mymirrored">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="myspare">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="data" in_vdev="mylog">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create"/>
+                  <zpool name="data" is_root="false" action="create">
+                    <vdev name="mymirrored" redundancy="mirror"/>
+                    <vdev name="myspare" redundancy="spare"/>
+                    <vdev name="mylog" redundancy="log"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: root disk gets partition/slice layout; all four data-pool
+        # disks remain whole_disk="true" with their vdev assignments intact.
+        # NOTE(review): "\d+m" looks like a regex placeholder for the computed
+        # swap/dump zvol sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ....<zpool name="data" action="create" is_root="false">
+        ......<vdev name="mymirrored" redundancy="mirror"/>
+        ......<vdev name="myspare" redundancy="spare"/>
+        ......<vdev name="mylog" redundancy="log"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t0d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="625141248secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="vdev">
+        ........<size val="625139712secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mymirrored" whole_disk="true">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mymirrored" whole_disk="true">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="myspare" whole_disk="true">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        ..<disk in_zpool="data" in_vdev="mylog" whole_disk="true">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_rpool_2_disk_mirror_1_hot_spare_1_cache(self):
+        '''Test Success If Have 2 Disks in a Root Pool with hot-spare and cache
+        '''
+        # Input manifest: root pool "myroot" with mirror (c7d0, c7d1),
+        # hot-spare (c8d0) and cache (c8d1) vdevs, all whole disks.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mymirrored">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mymirrored">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="myspare">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mycache">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create">
+                    <vdev name="mymirrored" redundancy="mirror"/>
+                    <vdev name="myspare" redundancy="spare"/>
+                    <vdev name="mycache" redundancy="cache"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: root-pool membership forces every disk (including spare
+        # and cache) to whole_disk="false" with a created partition and a
+        # slice 0 carrying its in_zpool/in_vdev assignment.
+        # NOTE(review): "\d+m" looks like a regex placeholder for the computed
+        # swap/dump zvol sizes - confirm __run_simple_test matches by regex.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="mymirrored" redundancy="mirror"/>
+        ......<vdev name="myspare" redundancy="spare"/>
+        ......<vdev name="mycache" redundancy="cache"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mymirrored">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mymirrored">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="myspare">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mycache">
+        ........<size val="390713344secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_use_existing_solaris2_target(self):
+        '''Test Success if use_existing_solaris2 target in manifest'''
+        # Input manifest: no partition geometry given - action
+        # "use_existing_solaris2" asks target selection to reuse the disk's
+        # existing Solaris2 partition.
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="False">
+                <disk_name name="c10t1d0" name_type="ctd"/>
+                <partition action="use_existing_solaris2"/>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        # Expected: the discovered partition's name, part_type and geometry
+        # are filled in, and a new slice 0 spanning it is created for the
+        # root pool.  ".." pairs encode nesting depth in this format.
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t1d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="use_existing_solaris2" name="1" part_type="191">
+        ......<size val="348144128secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="348144128secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_use_existing_solaris2_target_with_slices(self):
+        '''Test Success if use_existing_solaris2 target with slices in manifest
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="False">
+                <disk_name name="c10t2d0" name_type="ctd"/>
+                <partition action="use_existing_solaris2">
+                  <slice name="0" action="create" force="true"
+                   is_swap="false">
+                    <size val="9Gb" start_sector="48000"/>
+                  </slice>
+                  <slice name="1" action="delete" force="false"
+                   is_swap="false"/>
+                  <slice name="3" action="delete" force="false"
+                   is_swap="false"/>
+                  <slice name="7" action="delete" force="false"
+                   is_swap="false"/>
+                  <slice name="8" action="delete" force="false"
+                   is_swap="false"/>
+                </partition>
+              </disk>
+              <logical noswap="true" nodump="true"/>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="true" nodump="true">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="mirror"/>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c10t2d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_vendor="Lenovo" \
+        dev_size="625141760secs"/>
+        ....<partition action="use_existing_solaris2" name="1" part_type="191">
+        ......<size val="348144128secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false">
+        ........<size val="18874368secs" start_sector="48128"/>
+        ......</slice>
+        ......<slice name="1" action="delete" force="false" is_swap="false" \
+        in_zpool="rpool" in_vdev="rpool-none">
+        ........<size val="1059840secs" start_sector="48128"/>
+        ......</slice>
+        ......<slice name="3" action="delete" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="rpool-mirror-0">
+        ........<size val="43021824secs" start_sector="1108480"/>
+        ......</slice>
+        ......<slice name="7" action="delete" force="false" is_swap="false">
+        ........<size val="190257664secs" start_sector="44130304"/>
+        ......</slice>
+        ......<slice name="8" action="delete" force="false" is_swap="false">
+        ........<size val="15872secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_gpt_rpool_2_disk_mirror_1_spare_1_cache(self):
+        '''Test Success If Disks w/GPT labels, 2 Disks in rpool w/spare & cache
+        '''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+              <target>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mymirrored">
+                  <disk_name name="c7d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mymirrored">
+                  <disk_name name="c7d1" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="myspare">
+                  <disk_name name="c8d0" name_type="ctd"/>
+                </disk>
+                <disk whole_disk="true" in_zpool="myroot" in_vdev="mycache">
+                  <disk_name name="c8d1" name_type="ctd"/>
+                </disk>
+                <logical>
+                  <zpool name="myroot" is_root="true" action="create">
+                    <vdev name="mymirrored" redundancy="mirror"/>
+                    <vdev name="myspare" redundancy="spare"/>
+                    <vdev name="mycache" redundancy="cache"/>
+                  </zpool>
+                </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myroot" action="create" is_root="true">
+        ......<vdev name="mymirrored" redundancy="mirror"/>
+        ......<vdev name="myspare" redundancy="spare"/>
+        ......<vdev name="mycache" redundancy="cache"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mymirrored">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mymirrored">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="myspare">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c8d1" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myroot" in_vdev="mycache">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        for disk in self.disks:
+            disk.label = "GPT"
+
+        # When GPT support gets added and bug : 7037884 gets fixed we
+        # can re-enable this test
+        #self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_no_target_all_gpt_disks(self):
+        '''Test Success if no target in manifest with all GPT disks'''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="rpool1" in_vdev="vdev">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="rpool1" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<be name="solaris"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        </target>
+        '''
+
+        for disk in self.disks:
+            disk.label = "GPT"
+
+        # When GPT support gets added and bug : 7037884 gets fixed we
+        # can re-enable this test
+        #self.__run_simple_test(None, expected_xml)
+
+    def test_target_selection_no_disk_target_with_logical_all_gpt_disks(self):
+        '''Test Success if no disks, but with logical section and all GPT
+        '''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+             <target>
+               <logical>
+                 <zpool name="myrpool" is_root="true">
+                   <filesystem name="/export"/>
+                   <filesystem name="/export/home"/>
+                   <be name="solaris"/>
+                 </zpool>
+               </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<filesystem name="/export" action="create" in_be="false"/>
+        ......<filesystem name="/export/home" action="create" in_be="false"/>
+        ......<be name="solaris"/>
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="390714880secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        for disk in self.disks:
+            disk.label = "GPT"
+
+        # When GPT support gets added and bug : 7037884 gets fixed we
+        # can re-enable this test
+        #self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_slice_too_large(self):
+        '''Test Fail if slice is specified with a value too large'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+            <target>
+             <disk>
+                <disk_name name_type="ctd" name="c7d0"/>
+                <partition action="delete" name="1"/>
+                <partition action="delete" name="3"/>
+                <partition name="2" action="create" part_type="191">
+                  <size val="20gb" start_sector="100000"/>
+                  <slice name="0" action="create" is_swap="false"
+                   in_zpool="rpool" in_vdev="vdev">
+                    <size val="20000001mb"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="rpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml, fail_ex_str=
+            "Slice 0 has a size larger than the containing partition 2")
+
+    def test_target_selection_partition_too_large(self):
+        '''Test Fail if partition is specified with a value too large'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+            <target>
+             <disk>
+                <disk_name name_type="ctd" name="c7d0"/>
+                <partition action="delete" name="1"/>
+                <partition action="delete" name="3"/>
+                <partition name="2" action="create" part_type="191">
+                  <size val="200000000mb" start_sector="100000"/>
+                  <slice name="0" action="create" is_swap="false"
+                   in_zpool="rpool" in_vdev="vdev">
+                    <size val="200000001mb"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="rpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml,
+            fail_ex_str="Partition 2 has a size larger than the disk c7d0")
+
+    def test_target_selection_multiple_partitions_and_existing_partition(self):
+        '''Test Success if multiple partitions and existing partition'''
+
+        test_manifest_xml = '''
+        <auto_install>
+           <ai_instance name="orig_default">
+            <target>
+             <disk>
+                <disk_name name_type="ctd" name="c7d0"/>
+                <partition name="3" action="delete"/>
+                <partition name="2" action="create" part_type="191">
+                  <size val="20000mb"/>
+                  <slice name="0" action="create" is_swap="false"
+                   in_zpool="myrpool" in_vdev="vdev">
+                    <size val="19999mb"/>
+                  </slice>
+                </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+             </target>
+          </ai_instance>
+        </auto_install>
+        '''
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_swap_and_dump_size(self):
+        '''Test Success In Calc of Swap and Dump Size'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name_type="ctd" name="c7d0"/>
+                  <partition name="2" action="delete"/>
+                  <partition name="3" action="delete"/>
+                  <partition name="1" action="create" part_type="191">
+                    <slice name="0" action="create" is_swap="false"
+                     in_zpool="myrpool" in_vdev="vdev">
+                    <size val="6GB"/>
+                    </slice>
+                  </partition>
+              </disk>
+              <logical>
+                <zpool name="myrpool" is_root="true">
+                  <vdev name="vdev" redundancy="none"/>
+                </zpool>
+              </logical>
+            </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="682m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="341m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="1" part_type="191">
+        ......<size val="390714880secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="false" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="12582912secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_create_logical_partition(self):
+        '''Test Success Creating a logical partition'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk>
+                <disk_name name_type="ctd" name="c7d0"/>
+                <partition action="delete" name="1"/>
+                <partition action="delete" name="2"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="4" part_type="5">
+                  <size val="15000mb"/>
+                </partition>
+                <partition action="create" name="6" part_type="191">
+                  <size val="10240mb"/>
+                </partition>
+            </disk>
+            <logical>
+              <zpool name="myrpool" is_root="true">
+                <vdev name="vdev" redundancy="none"/>
+              </zpool>
+            </logical>
+          </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="delete" name="1" part_type="175">
+        ......<size val="401408secs" start_sector="512"/>
+        ....</partition>
+        ....<partition action="delete" name="2" part_type="175">
+        ......<size val="133949952secs" start_sector="401408"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="4" part_type="5">
+        ......<size val="30720000secs" start_sector="512"/>
+        ....</partition>
+        ....<partition action="create" name="6" part_type="191">
+        ......<size val="20971520secs" start_sector="512"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="20971520secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_too_many_extended_partitions(self):
+        '''Test Fail with too many extended partitions'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance name="ai_test_manifest" auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name_type="ctd" name="c3d0"/>
+                <partition action="delete" name="7" part_type="191">
+                  <size val="0mb"/>
+                </partition>
+                <partition action="create" name="3" part_type="12">
+                  <size val="2.99gb"/>
+                </partition>
+                <partition action="create" name="4" part_type="5">
+                  <size val="9000mb"/>
+                </partition>
+                <partition action="create" name="5" part_type="191">
+                  <size val="8000mb"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false"/>
+                </partition>
+              </disk>
+              <logical noswap="false" nodump="false">
+                <zpool name="myrpool" is_root="true" action="create">
+                  <vdev name="vdev" redundancy="none"/>
+                  <filesystem name="/testing" in_be="true" action="create"/>
+                </zpool>
+              </logical>
+            </target>
+         </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml,
+            fail_ex_str= "It is only possible to have at most 1 "
+                         "extended partition defined")
+
+    def test_target_selection_delete_non_existant(self):
+        '''Test Success deleting a non-existent partition'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance auto_reboot="false">
+            <target>
+              <disk>
+                <disk_name name_type="ctd" name="c7d0"/>
+                <partition action="delete" name="3"/>
+                <partition action="delete" name="4"/>
+                <partition action="create" name="2">
+                    <size val="10G"/>
+                </partition>
+            </disk>
+            <logical>
+              <zpool name="myrpool" is_root="true">
+                <vdev name="vdev" redundancy="none"/>
+              </zpool>
+            </logical>
+          </target>
+          </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = '''\
+        <target name="desired">
+        ..<logical noswap="false" nodump="false">
+        ....<zpool name="myrpool" action="create" is_root="true">
+        ......<vdev name="vdev" redundancy="none"/>
+        ......<zvol name="swap" action="create" use="swap">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<zvol name="dump" action="create" use="dump">
+        ........<size val="\d+m"/>
+        ......</zvol>
+        ......<be name="solaris"/>
+        ....</zpool>
+        ..</logical>
+        ..<disk whole_disk="false">
+        ....<disk_name name="c7d0" name_type="ctd"/>
+        ....<disk_prop dev_type="FIXED" dev_size="390715392secs"/>
+        ....<disk_keyword key="boot_disk"/>
+        ....<partition action="preserve" name="1" part_type="175">
+        ......<size val="401408secs" start_sector="512"/>
+        ....</partition>
+        ....<partition action="delete" name="3" part_type="130">
+        ......<size val="234420224secs" start_sector="134367744"/>
+        ....</partition>
+        ....<partition action="create" name="2" part_type="191">
+        ......<size val="20971520secs" start_sector="401920"/>
+        ......<slice name="0" action="create" force="true" is_swap="false" \
+        in_zpool="myrpool" in_vdev="vdev">
+        ........<size val="20971520secs" start_sector="512"/>
+        ......</slice>
+        ....</partition>
+        ..</disk>
+        </target>
+        '''
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+    def test_target_selection_delete_extended_includes_logical(self):
+        '''Test Success deleting an extended, deletes logicals'''
+        test_manifest_xml = '''
+        <auto_install>
+          <ai_instance name="ai_test_manifest" auto_reboot="false">
+            <target>
+              <disk whole_disk="false">
+                <disk_name name_type="ctd" name="c3d0"/>
+                <partition action="delete" name="1"/>
+                <partition action="delete" name="3"/>
+                <partition action="create" name="4" part_type="5">
+                  <size val="9000mb"/>
+                </partition>
+                <partition action="create" name="5" part_type="191">
+                  <size val="8000mb"/>
+                  <slice name="0" action="create" force="false"
+                   is_swap="false"/>
+                </partition>
+              </disk>
+              <logical noswap="false" nodump="false">
+                <zpool name="myrpool" is_root="true" action="create">
+                  <vdev name="vdev" redundancy="none"/>
+                  <filesystem name="/testing" in_be="true" action="create"/>
+                </zpool>
+              </logical>
+            </target>
+         </ai_instance>
+        </auto_install>
+        '''
+
+        expected_xml = ""
+
+        self.__run_simple_test(test_manifest_xml, expected_xml)
+
+if __name__ == '__main__':
+    unittest.main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/utmpx.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+'''Python CTypes interface to utmpx methods.
+
+Used to determine if someone is logged in to the console.
+'''
+
+import ctypes as C
+
+_LIBC = C.CDLL("/usr/lib/libc.so", use_errno=True)
+
+# Definitions for ut_type
+UT_TYPE = (
+    EMPTY,
+    RUN_LVL,
+    BOOT_TIME,
+    OLD_TIME,
+    NEW_TIME,
+    INIT_PROCESS,
+    LOGIN_PROCESS,
+    USER_PROCESS,
+    DEAD_PROCESS,
+    ACCOUNTING,
+    DOWN_TIME
+) = xrange(11)
+
+
+# Typedef the descriptor type.
+class StructUtmpx(C.Structure):
+    """ struct utmpx ctypes definition
+
+        struct utmpx {
+          char    ut_user[32];           /* user login name */
+          char    ut_id[4];              /* inittab id */
+          char    ut_line[32];           /* device name (console, lnxx) */
+          pid_t   ut_pid;                /* process id */
+          short   ut_type;               /* type of entry */
+          struct ut_exit_status ut_exit; /* process termination/exit status */
+          struct timeval ut_tv;          /* time entry was made */
+          int     ut_session;            /* session ID, used for windowing */
+          int     pad[5];                /* reserved for future use */
+          short   ut_syslen;             /* significant length of ut_host */
+                                         /*   including terminating null */
+          char    ut_host[257];          /* remote host name */
+        };
+    """
+    _fields_ = [
+        ("ut_user",     C.c_char * 32),
+        ("ut_id",       C.c_char * 4),
+        ("ut_line",     C.c_char * 32),
+        ("ut_pid",      C.c_uint),
+        ("ut_type",     C.c_short),
+        ("ut_exit",     C.c_short * 2),  # Shouldn't need it, so no need for
+                                         # struct here just yet.
+        ("ut_tv",       C.c_long * 2),   # or here either.
+        ("ut_session",  C.c_int),
+        ("pad",         C.c_int * 5),
+        ("ut_syslen",   C.c_short),
+        ("ut_host",     C.c_char * 257)
+    ]
+
+_FUNCS = [
+    ("setutxent", None, None),
+    ("endutxent", None, None),
+    ("getutxent", C.POINTER(StructUtmpx), None),
+    ("getutxid", C.POINTER(StructUtmpx), [C.POINTER(StructUtmpx)]),
+    ("getutxline", C.POINTER(StructUtmpx), [C.POINTER(StructUtmpx)])
+]
+
+# update the namespace of this module
+variables = vars()
+for (function, restype, args) in _FUNCS:
+    variables[function] = getattr(_LIBC, function)
+    variables[function].restype = restype
+    variables[function].argtypes = args
+
+
+def users_on_console(print_entry=False):
+    '''Check if there is any user logged in on the console'''
+    setutxent()
+    ret_val = False
+    while True:
+        entry = getutxent()
+        try:
+            # Just match on console or first VT.
+            if entry.contents.ut_type == USER_PROCESS and \
+               (entry.contents.ut_line.startswith("console") or \
+                entry.contents.ut_line == "vt/1"):
+                ret_val = True
+                if print_entry:
+                    print "%32s %4s %4s %32s" % (
+                        str(entry.contents.ut_user),
+                        str(entry.contents.ut_id),
+                        str(entry.contents.ut_type),
+                        str(entry.contents.ut_line))
+        except ValueError:  # Catch NULL reference
+            break
+    endutxent()
+
+    return ret_val
+
+if __name__ == '__main__':
+    users_on_console(True)
--- a/usr/src/cmd/auto-install/xslt/README	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/xslt/README	Wed May 25 21:26:43 2011 +0100
@@ -1,10 +1,22 @@
-This directory contains an XSLT file for transforming old-schema AI Manifests
-into new-schema equivalents and a Python wrapper script for executing the
-transformation via the xsltproc program.
+This directory contains some XSLT files for converting old-schema AI manifests
+to the newer formats.
+
+For Solaris 11 Express there was an update to the schema, which is handled by
+the old-to-new.xslt.
+
+Since Solaris 11 Express there has been another update to the XML format to
+handle multi-pool and multi-disk, and some other bugs found. This is handled
+by the new-to-newer.xslt.
+
+Two Python wrapper scripts are provided for executing the transformation via
+the xsltproc program.
 
 These files are expected to be of use to QE teams and users who have a number
 of custom AI Manifests that adhere to the AI Manifest schema in effect prior
-to build 147 and who wish to transform these manifests for use by AI in
-builds 147 and later.
+to build 147 (for Solaris 11 Express) or build 167 (for Solaris 11) and who
+wish to transform these manifests for use by AI in builds 167 and later.
+
+To migrate from pre-build 147, there is a two step process using old-to-new.py
+and then using new-to-newer.py on the converted files.
 
 These files are not currently delivered as part of any package.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/xslt/new-to-newer.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,297 @@
+#!/usr/bin/python2.6
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+import sys
+import os
+import getopt
+from subprocess import Popen, PIPE
+
+
+XSLT_PROC = "/usr/bin/xsltproc"
+XSLT_FILE = "new-to-newer.xslt"
+
+
+def usage(exitcode=1):
+    '''
+        Print help page and exit script.
+
+        Default exit code is 1, which indicates an error.  To exit
+        normally, set keyword param exitcode to 0.
+    '''
+    this_script = os.path.basename(sys.argv[0])
+
+    print ""
+    print "%s - Convert new-schema XML AI Manifests to the newer schema." % \
+        this_script
+    print ""
+    print "For convenience, the command-line interface has similar semantics "\
+        "to cp(1)."
+    print ""
+    print "Usage:"
+    print "\t%s [options] infile outfile" % this_script
+    print "\t%s [options] infile... outdir" % this_script
+    print "\t%s -r [options] indir... outdir" % this_script
+    print "\nOptions:"
+    print "\t-f        : force overwrite if output already exists"
+    print "\t-r        : recursively transform .xml files in named "\
+        "sub-directory"
+    print "\t-h --help : print this help page and exit"
+
+    sys.exit(exitcode)
+
+
+def run_cmd(cmd):
+    '''
+        Execute the given command in a subprocess.
+
+        On success, returns the stdout from running the command.
+        On failure, raises one of:
+            OSError
+            ValueError
+            Exception
+    '''
+
+    try:
+        cmd_popen = Popen(cmd, shell=True, stdout=PIPE)
+        (cmd_stdout, cmd_stderr) = cmd_popen.communicate()
+    except OSError, err:
+        print "ERROR running [%s] : [%s]" % \
+            (cmd, str(err))
+        raise
+    except ValueError, err:
+        print "ERROR running [%s] : [%s]" % \
+            (cmd, str(err))
+        raise
+
+    if cmd_popen.returncode != 0:
+        errstr = "ERROR: command [%s] returned [%d] : [%s]" % \
+            (cmd, cmd_popen.returncode, str(cmd_stderr))
+        print errstr
+        raise Exception(errstr)
+
+    if cmd_stderr is not None:
+        print "WARNING: command [%s] produced stderr output: [%s]" % \
+            (cmd, cmd_stderr)
+
+    return cmd_stdout
+
+
+def do_transform(xsltfile, infile, outfile, mkdirs=False, overwrite=False):
+    '''
+        Create the output directory, if appropriate, and run the xsltproc
+        command to transform infile to outfile.
+
+        On success, returns True.
+        On failure, returns False.
+    '''
+
+    # Normalize the paths so we can check if they are really the same file
+    # (doesn't always work, eg for paths beginning with "..")
+    infile = os.path.normpath(infile)
+    outfile = os.path.normpath(outfile)
+
+    if infile == outfile:
+        print "ERROR: source [%s] and target [%s] are the same" % \
+            (infile, outfile)
+        return False
+
+    outdir = os.path.dirname(outfile)
+    if (len(outdir)) and (not os.path.isdir(outdir)):
+        if os.path.exists(outdir):
+            print "ERROR: target dir [%s] is not a directory" % \
+                outdir
+            return False
+
+        if not mkdirs:
+            print "ERROR: target dir [%s] doesn't exist" % \
+                outdir
+            return False
+
+        try:
+            os.makedirs(outdir)
+        except OSError, err:
+            print "ERROR: failed to make dir [%s] : [%s]" % \
+                (outdir, str(err))
+            return False
+
+    if (os.path.exists(outfile)) and (not overwrite):
+        print "ERROR: target file [%s] already exists. Use -f." % \
+            outfile
+        return False
+
+    # Construct command
+    cmd = "%s -o %s %s %s" % (XSLT_PROC, outfile, xsltfile, infile)
+
+    try:
+        output = run_cmd(cmd)
+    except:
+        return False
+
+    return True
+
+
+def do_main():
+    '''
+        Process command line options and call do_transform() for
+        each file to be processed.
+
+        Returns: nothing.
+    '''
+
+    sources = []
+    target = None
+    force_overwrite = False
+    recursive = False
+    target_exists = False
+
+    # Check xsltproc is installed
+    if not os.access(XSLT_PROC, os.X_OK):
+        print "ERROR: Cannot find %s" % XSLT_PROC
+        print "You may be able to install it with:"
+        print "\tpfexec pkg install pkg:/library/libxslt"
+        sys.exit(1)
+
+    # Check xsl transform file is available in same dir this
+    # script was run from
+    xsltdir = os.path.dirname(sys.argv[0])
+    xsltfile = "%s/%s" % (xsltdir, XSLT_FILE)
+    if (not os.path.exists(xsltfile)):
+        print "XSLT file [%s] is missing from directory [%s]" % \
+            (XSLT_FILE, xsltdir)
+        sys.exit(1)
+
+    # Fetch and process command line params and options
+    try:
+        optlist, args = getopt.getopt(sys.argv[1:], "frh", ["help"])
+    except getopt.GetoptError:
+        usage()
+
+    for opt, arg in optlist:
+        if (opt == "-f"):
+            force_overwrite = True
+        if (opt == "-r"):
+            recursive = True
+        if (opt == "-h") or (opt == "--help"):
+            usage(exitcode=0)
+
+    # There must be at least 2 params.  The last param is the
+    # target; all the other params are the source(s).
+    if len(args) < 2:
+        usage()
+
+    sources = args[:len(args) - 1]
+    target = args[len(args) - 1]
+
+    # note whether the target existed before we started
+    if os.path.exists(target):
+        target_exists = True
+
+    # Check for invalid parameters (pt. 1)
+    if ((len(sources) > 1) and
+        (not os.path.isdir(target))):
+        # if there are multiple sources (files or dirs), then
+        # target must be an existing directory
+        print "ERROR: [%s] is not a directory" % \
+            target
+        sys.exit(1)
+
+    for source in sources:
+        # normalize source path
+        source = os.path.normpath(source)
+
+        # Check for invalid parameters (pt. 2)
+        if source == "/":
+            print "ERROR: '/' not allowed"
+            sys.exit(1)
+        if not os.path.exists(source):
+            print "ERROR: no such file or directory: [%s]" % \
+                source
+            sys.exit(1)
+        if (os.path.isdir(source)) and (not recursive):
+            print "ERROR: [%s] is a directory, but '-r' not specified" % \
+                source
+            sys.exit(1)
+        if (not os.path.isdir(source)) and (recursive):
+            print "ERROR: [%s] is not a directory, but '-r' was specified" % \
+                source
+            sys.exit(1)
+        if ((os.path.isdir(source)) and
+            (os.path.exists(target)) and
+            (not os.path.isdir(target))):
+            print "ERROR: [%s] is not a directory" % \
+                target
+            sys.exit(1)
+
+        if os.path.isdir(source):
+            # recursively iterate through source dir, processing each file
+            for dirpath, dirnames, filenames in os.walk(source):
+                # alter dirnames in-place to skip .*
+                dirnames[:] = [d for d in dirnames if not d.startswith('.')]
+
+                for name in filenames:
+                    srcfile = os.path.join(dirpath, name)
+
+                    partial_dstfile = os.path.join(dirpath, name)
+
+                    # replicate how cp -r treats sub-dirs:
+                    # 1. if source contains multiple sub-dirs, eg "a/b/c"
+                    # then only create rightmost one, eg "c", under target
+                    index = source.rfind("/", 1)
+                    if index != -1:
+                        # ensure partial_dstfile begins with source
+                        if partial_dstfile.find(source) == 0:
+                            partial_dstfile = partial_dstfile[index + 1:]
+
+                    # replicate how cp -r treats sub-dirs:
+                    # 2. if target already existed then chop off leftmost
+                    # dir of source from target
+                    if not target_exists:
+                        index = partial_dstfile.find("/", 1)
+                        if index != -1:
+                            partial_dstfile = partial_dstfile[index + 1:]
+
+                    dstfile = os.path.join(target, partial_dstfile)
+
+                    if not do_transform(xsltfile, srcfile, dstfile,
+                        mkdirs=True, overwrite=force_overwrite):
+                        print "ERROR: Transform failed."
+                        sys.exit(1)
+        elif os.path.isdir(target):
+            dstfile = os.path.join(target, os.path.basename(source))
+
+            if not do_transform(xsltfile, source, dstfile,
+                mkdirs=False, overwrite=force_overwrite):
+                print "ERROR: Transform failed."
+                sys.exit(1)
+        else:
+            # this must be a simple "single infile" -> "single outfile" job
+            if not do_transform(xsltfile, source, target,
+                mkdirs=False, overwrite=force_overwrite):
+                print "ERROR: Transform failed."
+                sys.exit(1)
+
+
+if __name__ == "__main__":
+    do_main()
+
+    sys.exit(0)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/xslt/new-to-newer.xslt	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,410 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License (the "License").
+ You may not use this file except in compliance with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+    <xsl:output method="xml" indent="yes" encoding="UTF-8" 
+     doctype-system="file:///usr/share/install/ai.dtd"/>
+    
+
+    <xsl:template match="/">
+        <xsl:if test="auto_install">
+            <auto_install>
+                <xsl:apply-templates select="auto_install/ai_instance"/>
+            </auto_install>
+        </xsl:if>    
+    </xsl:template>
+
+    <xsl:template match="auto_install/ai_instance">
+        <xsl:copy>
+            <xsl:if test="@name">
+                <xsl:attribute name="name">
+                    <xsl:value-of select="@name"/>
+                </xsl:attribute>
+            </xsl:if>    
+            <xsl:if test="@auto_reboot">
+                <xsl:attribute name="auto_reboot">
+                    <xsl:value-of select="@auto_reboot"/>
+                </xsl:attribute>
+            </xsl:if>    
+            <xsl:if test="@http_proxy">
+                <xsl:attribute name="http_proxy">
+                    <xsl:value-of select="@http_proxy"/>
+                </xsl:attribute>
+            </xsl:if>    
+            <xsl:if test="target">
+                <target>
+                    <xsl:for-each select="target/target_device">
+                        <xsl:apply-templates select="disk"/>
+                    </xsl:for-each>
+                    <logical>
+                        <xsl:choose>
+                          <xsl:when
+                           test="target/target_device/swap/zvol/size/@val = '0mb'">
+                            <xsl:attribute name="noswap">
+                                <xsl:text>true</xsl:text>
+                            </xsl:attribute>
+                          </xsl:when>
+                          <xsl:otherwise>
+                            <xsl:attribute name="noswap">
+                                <xsl:text>false</xsl:text>
+                            </xsl:attribute>
+                          </xsl:otherwise>
+                        </xsl:choose>    
+                        <xsl:choose>
+                          <xsl:when
+                           test="target/target_device/dump/zvol/size/@val = '0mb'">
+                            <xsl:attribute name="nodump">
+                                <xsl:text>true</xsl:text>
+                            </xsl:attribute>
+                          </xsl:when>
+                          <xsl:otherwise>
+                            <xsl:attribute name="nodump">
+                                <xsl:text>false</xsl:text>
+                            </xsl:attribute>
+                          </xsl:otherwise>
+                        </xsl:choose>    
+                        <zpool name="rpool" is_root="true">
+                            <vdev name="vdev" redundancy="none"/>
+                            <xsl:for-each select="target/target_device">
+                                <xsl:apply-templates select="swap"/>
+                                <xsl:apply-templates select="dump"/>
+                            </xsl:for-each>
+                        </zpool>
+                    </logical>
+                </target>
+            </xsl:if>
+            <xsl:apply-templates select="software"/>
+        </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="disk">
+        <xsl:copy>
+            <xsl:choose>
+                <xsl:when test="partition or slice"/>
+                <xsl:otherwise>
+                    <xsl:attribute name="whole_disk">
+                        <xsl:text>true</xsl:text>
+                    </xsl:attribute>
+                    <xsl:attribute name="in_zpool">
+                        <xsl:text>rpool</xsl:text>
+                    </xsl:attribute>
+                    <xsl:attribute name="in_vdev">
+                        <xsl:text>vdev</xsl:text>
+                    </xsl:attribute>
+                </xsl:otherwise>
+            </xsl:choose>        
+            <xsl:apply-templates select="disk_name|disk_prop|disk_keyword|iscsi"/>
+            <xsl:if test="partition or slice">
+                <xsl:choose>
+                    <xsl:when test="partition">
+                        <xsl:apply-templates select="partition"/>
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:apply-templates select="slice"/>
+                    </xsl:otherwise>
+                </xsl:choose>        
+            </xsl:if>
+        </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="disk_name|disk_keyword|iscsi">
+        <xsl:copy-of select="."/>
+    </xsl:template>
+
+    <xsl:template match="disk_prop">
+        <disk_prop>
+        <xsl:if test="@dev_type">
+            <xsl:attribute name="dev_type">
+                <xsl:value-of select="@dev_type"/>
+            </xsl:attribute>
+        </xsl:if>
+        <xsl:if test="@dev_vendor">
+            <xsl:attribute name="dev_vendor">
+                <xsl:value-of select="@dev_vendor"/>
+            </xsl:attribute>
+        </xsl:if>
+        <xsl:if test="@dev_size">
+            <xsl:attribute name="dev_size">
+                <xsl:choose>
+                    <xsl:when test="number(@dev_size) > 0">
+                        <xsl:value-of select="concat(@dev_size, 'mb')"/>
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:value-of select="@dev_size"/>
+                    </xsl:otherwise>
+                </xsl:choose>
+            </xsl:attribute>
+        </xsl:if>
+        </disk_prop>
+    </xsl:template>
+
+    <xsl:template match="size">
+        <size>
+        <xsl:if test="@start_sector">
+            <xsl:attribute name="start_sector">
+                <xsl:value-of select="@start_sector"/>
+            </xsl:attribute>
+        </xsl:if>
+        <xsl:if test="@val">
+            <xsl:attribute name="val">
+                <xsl:choose>
+                    <!-- Uppercase -->
+                    <xsl:when test="contains(@val, 'GIGABYTE')">
+                        <xsl:value-of select="concat(substring-before(@val, 'GIGABYTE'), 'gb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'GIGABYTES')">
+                        <xsl:value-of select="concat(substring-before(@val, 'GIGABYTES'), 'gb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'MEGABYTE')">
+                        <xsl:value-of select="concat(substring-before(@val, 'MEGABYTE'), 'mb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'MEGABYTES')">
+                        <xsl:value-of select="concat(substring-before(@val, 'MEGABYTES'), 'mb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'SEC')">
+                        <xsl:value-of select="concat(substring-before(@val, 'SEC'), 'secs')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'SECTORS')">
+                        <xsl:value-of select="concat(substring-before(@val, 'SECTORS'), 'secs')"/>
+                    </xsl:when>
+                    <!-- Lowercase -->
+                    <xsl:when test="contains(@val, 'gigabyte')">
+                        <xsl:value-of select="concat(substring-before(@val, 'gigabyte'), 'gb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'gigabytes')">
+                        <xsl:value-of select="concat(substring-before(@val, 'gigabytes'), 'gb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'megabyte')">
+                        <xsl:value-of select="concat(substring-before(@val, 'megabyte'), 'mb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'megabytes')">
+                        <xsl:value-of select="concat(substring-before(@val, 'megabytes'), 'mb')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'sec')">
+                        <xsl:value-of select="concat(substring-before(@val, 'sec'), 'secs')"/>
+                    </xsl:when>
+                    <xsl:when test="contains(@val, 'sectors')">
+                        <xsl:value-of select="concat(substring-before(@val, 'sectors'), 'secs')"/>
+                    </xsl:when>
+                    <xsl:when test="number(@val) > 0">
+                        <xsl:value-of select="concat(@val, 'mb')"/>
+                    </xsl:when>
+                    <xsl:otherwise>
+                        <xsl:value-of select="@val"/>
+                    </xsl:otherwise>
+                </xsl:choose>
+            </xsl:attribute>
+        </xsl:if>
+        </size>
+    </xsl:template>
+
+    <xsl:template match="partition">
+        <xsl:copy>
+            <xsl:if test="@name">
+                <xsl:attribute name="name">
+                    <xsl:value-of select="@name"/>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:if test="@action">
+                <xsl:attribute name="action">
+                    <xsl:choose>
+                        <xsl:when test="@action='use_existing'">
+                            <xsl:text>use_existing_solaris2</xsl:text>
+                        </xsl:when>
+                        <xsl:otherwise>
+                            <xsl:value-of select="@action"/>
+                        </xsl:otherwise>
+                    </xsl:choose>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:if test="@part_type">
+                <xsl:attribute name="part_type">
+                    <xsl:value-of select="@part_type"/>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:apply-templates select="size"/>
+            <xsl:if test="@part_type = '191' or @part_type = '130' or @action='use_existing'">
+                <xsl:if test="../slice">
+                    <xsl:apply-templates select="../slice"/>
+                </xsl:if>
+            </xsl:if>
+       </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="slice">
+        <xsl:copy>
+            <xsl:attribute name="name">
+                <xsl:value-of select="@name"/>
+            </xsl:attribute>
+            <xsl:if test="@action">
+                <xsl:attribute name="action">
+                    <xsl:value-of select="@action"/>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:if test="@is_swap">
+                <xsl:attribute name="is_swap">
+                    <xsl:value-of select="@is_swap"/>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:if test="@is_root = 'true' or count(../slice) = 1">
+                <xsl:attribute name="in_zpool">
+                    <xsl:text>rpool</xsl:text>
+                </xsl:attribute>
+                <xsl:attribute name="in_vdev">
+                    <xsl:text>vdev</xsl:text>
+                </xsl:attribute>
+            </xsl:if>
+            <xsl:apply-templates select="size"/>
+        </xsl:copy>
+    </xsl:template>
+
+    <xsl:template match="swap">
+        <xsl:if test="zvol/size/@val != '0mb'">
+            <xsl:choose>
+                <xsl:when test="starts-with(zvol/@name, 'rpool')">
+                    <xsl:variable name="swap_name" 
+                     select="substring-after(zvol/@name, '/')"/>
+                    <zvol name="{$swap_name}" action="create" 
+                     use="swap">
+                        <xsl:apply-templates select=".//size"/>
+                    </zvol>
+                </xsl:when>
+                <xsl:otherwise>
+                    <xsl:variable name="swap_name" 
+                     select="zvol/@name"/>
+                    <zvol name="{$swap_name}" action="create" 
+                     use="swap">
+                        <xsl:apply-templates select=".//size"/>
+                    </zvol>
+                </xsl:otherwise>
+            </xsl:choose>
+        </xsl:if>
+    </xsl:template>
+
+    <xsl:template match="dump">
+        <xsl:if test="zvol/size/@val != '0mb'">
+            <xsl:choose>
+                <xsl:when test="starts-with(zvol/@name, 'rpool')">
+                    <xsl:variable name="dump_name" 
+                     select="substring-after(zvol/@name, '/')"/>
+                    <zvol name="{$dump_name}" action="create" 
+                     use="dump">
+                        <xsl:apply-templates select=".//size"/>
+                    </zvol>
+                </xsl:when>
+                <xsl:otherwise>
+                    <xsl:variable name="dump_name" 
+                     select="zvol/@name"/>
+                    <zvol name="{$dump_name}" action="create" 
+                     use="dump">
+                        <xsl:apply-templates select=".//size"/>
+                    </zvol>
+                </xsl:otherwise>
+            </xsl:choose>
+        </xsl:if>
+    </xsl:template>
+
+    <xsl:template name="generate-software-node">
+        <xsl:param name="trans_type"/>
+        <software type="{$trans_type}">
+            <xsl:copy-of select="source"/>
+            <xsl:for-each select="software_data">
+                <software_data action="{@action}">
+                    <xsl:copy-of select="name"/>
+                </software_data>
+            </xsl:for-each>
+        </software>
+    </xsl:template>
+
+    <xsl:template match="software">
+            <xsl:choose>
+                <xsl:when test="software_data/@type">
+                    <xsl:choose>
+                        <xsl:when test="software_data/@type = 'ips'">
+                            <xsl:variable name="trans_type">IPS</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'svr4'">
+                            <xsl:variable name="trans_type">SVR4</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'cpio'">
+                            <xsl:variable name="trans_type">ARCHIVE</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'archive'">
+                            <xsl:variable name="trans_type">CPIO</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'image'">
+                            <xsl:variable name="trans_type">IMAGE</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'p5i'">
+                            <xsl:variable name="trans_type">P5I</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'du'">
+                            <xsl:variable name="trans_type">DU</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:when test="software_data/@type = 'p5p'">
+                            <xsl:variable name="trans_type">P5P</xsl:variable>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:when>
+                        <xsl:otherwise>
+                            <xsl:variable name="trans_type" 
+                             select="software_data/@type"/>
+                            <xsl:call-template name="generate-software-node">
+                                <xsl:with-param name="trans_type" select="$trans_type"/>
+                            </xsl:call-template>
+                        </xsl:otherwise>
+                    </xsl:choose>
+                </xsl:when>
+                <xsl:otherwise>
+                    <xsl:copy-of select="source"/>
+                    <xsl:copy-of select="software_data"/>
+                </xsl:otherwise>
+            </xsl:choose>
+    </xsl:template>
+</xsl:stylesheet>
+
--- a/usr/src/cmd/auto-install/xslt/old-to-new.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/auto-install/xslt/old-to-new.py	Wed May 25 21:26:43 2011 +0100
@@ -27,8 +27,8 @@
 from subprocess import Popen, PIPE
 
 
-XSLT_PROC="/usr/bin/xsltproc"
-XSLT_FILE="old-to-new.xslt"
+XSLT_PROC = "/usr/bin/xsltproc"
+XSLT_FILE = "old-to-new.xslt"
 
 
 def usage(exitcode=1):
@@ -41,9 +41,11 @@
     this_script = os.path.basename(sys.argv[0])
 
     print ""
-    print "%s - Convert old-style XML AI Manifests to the new schema." % this_script
+    print "%s - Convert old-style XML AI Manifests to the new schema." % \
+        this_script
     print ""
-    print "For convenience, the command-line interface has similar semantics to cp(1)."
+    print "For convenience, the command-line interface has similar semantics "\
+        "to cp(1)."
     print ""
     print "Usage:"
     print "\t%s [options] infile outfile" % this_script
@@ -51,7 +53,8 @@
     print "\t%s -r [options] indir... outdir" % this_script
     print "\nOptions:"
     print "\t-f        : force overwrite if output already exists"
-    print "\t-r        : recursively transform .xml files in named sub-directory"
+    print "\t-r        : recursively transform .xml files in named "\
+        "sub-directory"
     print "\t-h --help : print this help page and exit"
 
     sys.exit(exitcode)
@@ -84,7 +87,7 @@
         errstr = "ERROR: command [%s] returned [%d] : [%s]" % \
             (cmd, cmd_popen.returncode, str(cmd_stderr))
         print errstr
-        raise Exception, (errstr)
+        raise Exception(errstr)
 
     if cmd_stderr is not None:
         print "WARNING: command [%s] produced stderr output: [%s]" % \
@@ -257,7 +260,7 @@
                     if index != -1:
                         # ensure partial_dstfile begins with source
                         if partial_dstfile.find(source) == 0:
-                            partial_dstfile = partial_dstfile[index+1:]
+                            partial_dstfile = partial_dstfile[index + 1:]
 
                     # replicate how cp -r treats sub-dirs:
                     # 2. if target already existed then chop off leftmost
@@ -265,7 +268,7 @@
                     if not target_exists:
                         index = partial_dstfile.find("/", 1)
                         if index != -1:
-                            partial_dstfile = partial_dstfile[index+1:]
+                            partial_dstfile = partial_dstfile[index + 1:]
 
                     dstfile = os.path.join(target, partial_dstfile)
 
--- a/usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py	Wed May 25 21:26:43 2011 +0100
@@ -328,7 +328,15 @@
         os.chdir(os.path.join(self.pkg_img_path, "usr/share"))
         # set destination path
         pkg_ai_path = os.path.join(self.pkg_img_path, "auto_install")
+        # Copy files from /usr/share/auto_install
         shutil.copytree("auto_install", pkg_ai_path, symlinks=True)
+        # Copy files from /usr/share/install too
+        old_wd = os.getcwd()
+        os.chdir(os.path.join(self.pkg_img_path, "usr/share/install"))
+        for dtd_file in [f for f in os.listdir(".") if f.endswith(".dtd")]:
+            shutil.copy(dtd_file, pkg_ai_path)
+        os.chdir(old_wd)  # Restore Working Directory
+
         # move in service_bundle(4) for AI server profile validation
         shutil.copy("lib/xml/dtd/service_bundle.dtd.1", pkg_ai_path)
 
--- a/usr/src/cmd/installadm/Makefile	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/installadm/Makefile	Wed May 25 21:26:43 2011 +0100
@@ -43,7 +43,8 @@
 
 SVCMETHODSRC =	svc-install-server
 
-PYMODULES=	ai_smf_service.py \
+PYMODULES=	__init__.py \
+		ai_smf_service.py \
 		aimdns_mod.py \
 		create_client.py \
 		create_service.py \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/installadm/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+#
+# This file is installed into
+# usr/lib/python2.6/vendor-packages/osol_install/auto_install/ directory
+# and lets the Python interpreter know that this directory contains valid
+# Python modules which can be imported using following command:
+# from osol_install.auto_install.<module_name> import <object>
+#
--- a/usr/src/cmd/system-config/svc/svc-system-config	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/cmd/system-config/svc/svc-system-config	Wed May 25 21:26:43 2011 +0100
@@ -668,10 +668,10 @@
 		# inherit the mountpoint from parent dataset
 		#
 		if [[ -n "$home_mntpoint" ]] ; then
-			zfs create -o mountpoint="$home_mntpoint" \
+			zfs create -p -o mountpoint="$home_mntpoint" \
 			    "$home_zfs_fs"
 		else
-			zfs create "$home_zfs_fs"
+			zfs create -p "$home_zfs_fs"
 		fi
 
 		if (( $? != 0 )) ; then
--- a/usr/src/lib/install_common/__init__.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_common/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -38,6 +38,8 @@
 import sys
 import subprocess
 
+from data_object import DataObject
+
 _ = gettext.translation('AI', '/usr/share/locale', fallback=True).gettext
 
 # Shebang lines to tell a (derived manifest) script apart from an XML manifest.
@@ -47,6 +49,12 @@
 
 # Useful common directories and path pieces
 
+# System Temporary Directory - for secure processes
+SYSTEM_TEMP_DIR = '/system/volatile'
+
+# Post-Install Logs Location
+POST_INSTALL_LOGS_DIR = '/var/sadm/system/logs'
+
 # Directory for per service information
 AI_SERVICE_DIR_PATH = '/var/ai/'
 
@@ -355,3 +363,62 @@
         else:
             stderr = None
         return stdout, stderr
+
+
+class ApplicationData(DataObject):
+    """Application Data class
+
+    Provides a location for CUD applications to store application specific data
+    that checkpoints, etc. may require access to.
+
+    Currently stores:
+    - Application Name
+    - Work Directory, defaulting to /system/volatile
+    """
+
+    def __init__(self, application_name, work_dir="/system/volatile/"):
+        super(ApplicationData, self).__init__(application_name)
+
+        self._application_name = application_name
+        self._work_dir = work_dir
+
+    @property
+    def application_name(self):
+        """Read-only Application Name - set at initialisation"""
+        return self._application_name
+
+    @property
+    def work_dir(self):
+        """Read-only Work Directory - set at initialisation"""
+        return self._work_dir
+
+    # Implement no-op XML methods
+    def to_xml(self):
+        return None
+
+    @classmethod
+    def can_handle(cls, element):
+        return False
+
+    @classmethod
+    def from_xml(cls, element):
+        return None
+
+# Utility methods to generate paths given files
+
+def system_temp_path(file=None):
+    ''' Return System Temporary Directory, with file string appended
+    '''
+    if file is not None:
+        return os.path.sep.join([SYSTEM_TEMP_DIR, file])
+    else:
+        return SYSTEM_TEMP_DIR
+
+def post_install_logs_path(file=None):
+    ''' Return Post-Install Logs Directory, with file string appended
+    '''
+    if file is not None:
+        return os.path.sep.join([POST_INSTALL_LOGS_DIR, file])
+    else:
+        return POST_INSTALL_LOGS_DIR
+
--- a/usr/src/lib/install_engine/__init__.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_engine/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -735,7 +735,7 @@
                      str(normalized_prog),
                      str(self.__current_completed + normalized_prog)))
 
-        return(str(self.__current_completed + normalized_prog))
+        return(str(int(self.__current_completed + normalized_prog)))
 
     def __blocking_callback(self, status, failed_checkpoint_list):
         ''' Callback used for the blocking case of execute_checkpoints '''
--- a/usr/src/lib/install_ict/Makefile	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_ict/Makefile	Wed May 25 21:26:43 2011 +0100
@@ -27,11 +27,12 @@
 		  apply_sysconfig.py \
 		  boot_archive.py \
 		  cleanup_cpio_install.py \
-                  create_snapshot.py \
- 		  device_config.py \
+		  create_snapshot.py \
+		  device_config.py \
 		  generate_sc_profile.py \
 		  initialize_smf.py \
-                  ips.py \
+		  ips.py \
+		  setup_swap.py \
                   transfer_files.py \
 		  update_dumpadm.py
 
--- a/usr/src/lib/install_ict/__init__.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_ict/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -29,7 +29,7 @@
 __all__ = ["apply_sysconfig", "boot_archive", "cleanup_cpio_install",
            "common", "create_snapshot", "device_config",
            "generate_sc_profile", "initialize_smf",
-           "ips", "transfer_files", "update_dumpadm"]
+           "ips", "setup_swap", "transfer_files", "update_dumpadm"]
 
 from solaris_install.engine import InstallEngine
 from solaris_install.engine.checkpoint import AbstractCheckpoint
@@ -65,6 +65,7 @@
 SVC_BUNDLE = 'usr/share/lib/xml/dtd/service_bundle.dtd.1'
 SVC_REPO = 'etc/svc/repository.db'
 SYS = 'sys'
+VFSTAB = 'etc/vfstab'
 
 # Variables associated with the package image
 CLIENT_API_VERSION = 57
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_ict/setup_swap.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+import grp
+import os
+import shutil
+import solaris_install.ict as ICT
+
+from solaris_install import Popen
+from solaris_install.target.physical import Disk, Slice, Partition
+from solaris_install.target.logical import Zvol
+
+
+class SetupSwap(ICT.ICTBaseClass):
+    '''ICT checkpoint that sets up the swap in /etc/vfstab on the target
+       during an AI installation
+    '''
+
+    def __init__(self, name):
+        '''Initializes the class
+           Parameters:
+               -name - this arg is required by the AbstractCheckpoint
+                       and is not used by the checkpoint.
+        '''
+        super(SetupSwap, self).__init__(name)
+
+    def _get_swap_devices(self):
+        '''Get a list of swap devices from DESIRED tree'''
+
+        self.logger.debug("Searching for swap devices")
+
+        swap_path = ""
+        swap_devices = list()
+
+        # Find swap slices
+        slices = self.target.get_descendants(class_type=Slice)
+        swap_slices = [s for s in slices if s.is_swap]
+        for swap_slice in swap_slices:
+            # Construct path
+            if swap_slice.parent is not None and \
+               swap_slice.parent.parent is not None and \
+               isinstance(swap_slice.parent, Partition):
+                slice_ctd = "%ss%s" % (swap_slice.parent.parent.ctd,
+                    swap_slice.name)
+            elif swap_slice.parent is not None and \
+                 isinstance(swap_slice.parent, Disk):
+                slice_ctd = "%ss%s" % (swap_slice.parent.ctd,
+                    swap_slice.name)
+            else:
+                # Can't figure out path, so log and move on.
+                self.logger.debug("Unable to determine path to slice %s",
+                    str(swap_slice))
+                continue
+            
+            swap_path = "/dev/dsk/%s" %(slice_ctd)
+            self.logger.debug("Found swap slice %s", swap_path)
+            swap_devices.append(swap_path)
+
+        # Find swap zvol
+        zvols = self.target.get_descendants(class_type=Zvol)
+        swap_zvols = [zvol for zvol in zvols if zvol.use == "swap"]
+        for swap_zvol in swap_zvols:
+            if swap_zvol.parent is not None:
+                zvol_path = "/dev/zvol/dsk/%s/%s" % \
+                    (swap_zvol.parent.name, swap_zvol.name)
+                self.logger.debug("Found swap zvol %s", swap_path)
+                swap_devices.append(zvol_path)
+            else:
+                # Can't figure out path, so log and move on.
+                self.logger.debug("Unable to determine path to zvol %s",
+                    str(swap_zvol))
+
+        return swap_devices
+
+    def execute(self, dry_run=False):
+        '''
+            The AbstractCheckpoint class requires this method
+            in sub-classes.
+
+            Looks for swap devices in the DESIRED target tree, and adds each of
+            these to the /etc/vfstab.
+
+            Parameters:
+            - the dry_run keyword parameter. The default value is False.
+              If set to True, the log message describes the checkpoint tasks.
+
+            Returns:
+            - Nothing
+              On failure, errors raised are managed by the engine.
+        '''
+        self.logger.debug('ICT current task: Setting up swap devices')
+
+        # parse_doc populates variables necessary to execute the checkpoint
+        self.parse_doc()
+
+        vfstab = os.path.join(self.target_dir, ICT.VFSTAB)
+
+        swap_devices = self._get_swap_devices()
+
+        if not dry_run:
+            try:
+                with open(vfstab, "a+") as vf:
+                    for device in swap_devices:
+                        vf.write("%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\n" %
+                            (device, "-", "-", "swap", "-", "no", "-"))
+            except IOError, ioe:
+                self.logger.debug(
+                    "Failed to open file %s for writing." % (vfstab))
+                self.logger.debug(str(ioe))
+                raise RuntimeError(
+                    "Unable to setup swap devices in file %s" % (vfstab))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_ict/test/test_setup_swap.py	Wed May 25 21:26:43 2011 +0100
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+'''test_setup_swap
+   Test program for setup_swap checkpoint
+'''
+
+import os
+import os.path
+import shutil
+import tempfile
+import unittest
+
+from lxml import etree
+from solaris_install.ict.setup_swap import SetupSwap
+from solaris_install.engine.test.engine_test_utils import reset_engine, \
+    get_new_engine_instance
+from solaris_install.target.logical import BE
+
+
+class TestSetupSwap(unittest.TestCase):
+    '''test the functionality for SetupSwap Class'''
+
+    def populate_doc(self):
+        DESIRED_XML='''
+        <root>
+          <target name="desired">
+            <logical noswap="false" nodump="false">
+              <zpool name="myrpool" action="create" is_root="true">
+                <filesystem name="export" action="create" in_be="false"/>
+                <filesystem name="export/home" action="create" in_be="false"/>
+                <be name="solaris"/>
+                <vdev name="vdev" redundancy="none"/>
+                <zvol name="myswap" action="create" use="swap">
+                  <size val="512m"/>
+                </zvol>
+                <zvol name="mydump" action="create" use="dump">
+                  <size val="512m"/>
+                </zvol>
+              </zpool>
+            </logical>
+            <disk whole_disk="false">
+              <disk_name name="c7d0" name_type="ctd"/>
+              <partition action="create" name="1" part_type="191">
+                <size val="30Gb" start_sector="512"/>
+                <slice name="0" action="create" force="true" is_swap="false" 
+                 in_zpool="myrpool" in_vdev="vdev">
+                  <size val="20Gb" start_sector="512"/>
+                </slice>
+                <slice name="1" action="create" force="true" is_swap="true">
+                  <size val="1Gb"/>
+                </slice>
+              </partition>
+            </disk>
+          </target>
+        </root>
+        '''
+        desired_dom = etree.fromstring(DESIRED_XML)
+
+        self.doc.import_from_manifest_xml(desired_dom, volatile=False)
+
+        # Set BE mountpoints
+        be_list = self.doc.get_descendants(class_type=BE)
+        for be in be_list:
+            be.mountpoint = self.test_target
+
+    def setUp(self):
+        # Set up the Target directory
+        self.test_target = tempfile.mkdtemp(dir="/tmp",
+                                            prefix="ict_test_")
+        os.chmod(self.test_target, 0777)
+        os.mkdir(os.path.join(self.test_target, "etc"))
+
+        self.engine = get_new_engine_instance()
+        self.doc = self.engine.data_object_cache
+
+        self.populate_doc()
+
+        # Instantiate the checkpoint
+        self.setup_swap = SetupSwap("setup_swap")
+
+        # Create a test file name
+        self.test_file = os.path.join(self.test_target, 'etc/vfstab')
+
+    def tearDown(self):
+        reset_engine()
+        self.doc = None
+
+        if os.path.isfile(self.test_file):
+            os.unlink(self.test_file)
+
+        if os.path.exists(self.test_target):
+            shutil.rmtree(self.test_target)
+
+    def test_update_vfstab(self):
+        '''Test update setup_swap'''
+
+        # Call the execute command for the checkpoint
+        self.setup_swap.execute()
+
+        # Check to see if the test vfstab file exists
+        self.assertTrue(os.path.isfile(self.test_file))
+
+        # Read in the contents of the test file
+        with open(self.test_file, "r") as fh:
+            vfstab_data = fh.readlines()
+                
+        expected_lines  = [
+            '/dev/dsk/c7d0s1\t-\t\t-\t\tswap\t-\tno\t-\n', 
+            '/dev/zvol/dsk/myrpool/myswap\t-\t\t-\t\tswap\t-\tno\t-\n']
+        self.assertEqual(vfstab_data, expected_lines)
+
+    def test_update_setup_swap_dry(self):
+        '''Test update setup_swap dry run'''
+
+        # Call the execute command for the checkpoint
+        # with dry_run set to true.
+        try:
+            self.setup_swap.execute(dry_run=True)
+        except Exception as e:
+            self.fail(str(e))
+
+        # Check to see if the test vfstab file exists
+        self.assertFalse(os.path.isfile(self.test_file))
+
+    def test_get_progress_estimate(self):
+        '''Test get progress estimate return value'''
+
+        # Check the return value for the progress estimate
+        self.assertEquals(self.setup_swap.get_progress_estimate(), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
--- a/usr/src/lib/install_ict/update_dumpadm.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_ict/update_dumpadm.py	Wed May 25 21:26:43 2011 +0100
@@ -85,13 +85,10 @@
             if not os.path.exists(os.path.dirname(dumpadmfile_dest)):
                 os.makedirs(os.path.dirname(dumpadmfile_dest))
 
-            # copy the dumpadm.conf file to the destination
-            shutil.copy2(self.dumpadmfile, dumpadmfile_dest)
-
             # Read the contents into a list and remove the
             # line containing DUMPADM_SAVDIR.
             with open(dumpadmfile_dest, "w+") as dest_fhndl:
-                with open(dumpadmfile_dest, "r") as fhndl:
+                with open(self.dumpadmfile, "r") as fhndl:
                     for line in fhndl:
                         if ICT.DUMPADM_SAVDIR not in line:
                             dest_fhndl.write(line)
--- a/usr/src/lib/install_manifest/dtd/ai.dtd	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_manifest/dtd/ai.dtd	Wed May 25 21:26:43 2011 +0100
@@ -18,7 +18,7 @@
 
  CDDL HEADER END
 
- Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 
 -->
 
@@ -27,6 +27,9 @@
 <!ENTITY % target SYSTEM "target.dtd">
 %target;
 
+<!ENTITY % boot_mods SYSTEM "boot_mods.dtd">
+%boot_mods;
+
 <!ENTITY % configuration SYSTEM "configuration.dtd">
 %configuration;
 
@@ -40,14 +43,7 @@
 	will be added as secondary sources.
 -->
 
-<!ELEMENT sc_manifest_file EMPTY>
-<!ATTLIST sc_manifest_file name CDATA #REQUIRED>
-<!ATTLIST sc_manifest_file URI CDATA #REQUIRED>
-
-<!ELEMENT sc_embedded_manifest (#PCDATA)>
-<!ATTLIST sc_embedded_manifest name CDATA #REQUIRED>
-
-<!ELEMENT ai_instance (target?, software+, add_drivers?, (configuration*|sc_manifest_file|sc_embedded_manifest), source*)>
+<!ELEMENT ai_instance (boot_mods?, target?, software+, add_drivers?, (configuration*), source*)>
 <!ATTLIST ai_instance name CDATA #IMPLIED>
 <!ATTLIST ai_instance http_proxy CDATA #IMPLIED>
 <!ATTLIST ai_instance auto_reboot (true|false) "false">
--- a/usr/src/lib/install_target/__init__.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/__init__.py	Wed May 25 21:26:43 2011 +0100
@@ -137,7 +137,7 @@
 
                     # verify this root pool has one BE associated with it
                     if not zpool.get_children(class_type=logical.BE):
-                        raise Target.InvalidError("%s " % disk.ctd +
+                        raise Target.InvalidError("%s " % zpool.name +
                             "has no Boot Environments associated with it")
 
                     # if the code has gotten this far, the zpool is a valid
--- a/usr/src/lib/install_target/controller.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/controller.py	Wed May 25 21:26:43 2011 +0100
@@ -79,11 +79,13 @@
     '''
     pass
 
+
 class SwapDumpGeneralError(Exception):
     ''' General exception for errors computing swap and dump values.
     '''
     pass
 
+
 class SwapDumpSpaceError(Exception):
     ''' Not enough space in the target disk for successful installation.
     '''
@@ -244,7 +246,7 @@
                 object.  TC will use this value to compute the minimum
                 disk size that can be selected.
             no_initial_logical=False. If set to True, then initialize will
-                not set up a default rpool structure. Will also ensure 
+                not set up a default rpool structure. Will also ensure
                 no default disk is selected.
             no_initial_disk=False.  If set to True, then initialize will
                 not select an initial disk.  This may be useful for non-
@@ -334,6 +336,7 @@
                     "instead." % (DEFAULT_ZPOOL_NAME, self._zpool._name))
 
         self._vdev = self._zpool.add_vdev(DEFAULT_VDEV_NAME, redundancy)
+
         self._be = BE()
         self._be.mountpoint = mountpoint
         self._zpool.insert_children(self._be)
@@ -544,11 +547,10 @@
                     bootid=Partition.ACTIVE)
 
                 new_slice = new_partition.add_slice("0", start, slice_size,
-                    Size.sector_units)
+                    Size.sector_units, force=True)
             else:
                 new_slice = disk.add_slice("0", start, slice_size,
-                    Size.sector_units)
-            new_slice.force = True
+                    Size.sector_units, force=True)
         else:
             # Compile a list of the usable slices, if any
             slice_list = list()
@@ -568,8 +570,7 @@
                 # No useable slices. Clear the slices and add a root slice
                 disk.delete_children(class_type=Slice)
                 new_slice = disk.add_slice("0", start, slice_size,
-                    Size.sector_units)
-                new_slice.force = True
+                    Size.sector_units, force=True)
             else:
                 for partition in partitions:
                     if partition.is_solaris and disk.label == "VTOC":
@@ -590,8 +591,7 @@
                         # root slice
                         partition.delete_children(class_type=Slice)
                         new_slice = partition.add_slice("0", start,
-                            slice_size, Size.sector_units)
-                        new_slice.force = True
+                            slice_size, Size.sector_units, force=True)
                         break
                 else:
                     return
@@ -631,7 +631,7 @@
             The following rules are used for determining the type of
             swap to be created, whether swap zvol is required and the
             size of swap to be created.
- 
+
             memory        type           required    size
             --------------------------------------------------
             <900mb        zvol           yes          0.5G (MIN_SWAP_SIZE)
@@ -654,7 +654,7 @@
             space as available will be utilized for swap/dump
 
             Size of all calculation is done in MB
-      
+
             Parameters:
             - installation_size: Size object.  The size required for
               the installation
@@ -670,7 +670,7 @@
                 string, Size object, string, Size object
 
             Raise:
-                SwapDumpSpaceError 
+                SwapDumpSpaceError
         '''
 
         if self._swap_dump_computed:
@@ -709,14 +709,14 @@
                         "with required swap: %s", required_size_mb)
                     LOGGER.error("Total available space: %s", available_size)
                     raise SwapDumpSpaceError
-            
+
             dump_size_mb = self._calc_swap_or_dump_size(
                 available_size_mb - required_size_mb,
                 MIN_DUMP_SIZE, MAX_DUMP_SIZE)
         else:
             free_space_mb = available_size_mb - installation_size_mb
             swap_size_mb = self._calc_swap_or_dump_size(
-                ((free_space_mb * MIN_SWAP_SIZE) / 
+                ((free_space_mb * MIN_SWAP_SIZE) /
                 (MIN_SWAP_SIZE + MIN_DUMP_SIZE)),
                 MIN_SWAP_SIZE, MAX_SWAP_SIZE)
             dump_size_mb = self._calc_swap_or_dump_size(
@@ -752,8 +752,8 @@
 
         fname = os.path.join(basedir, VFSTAB_FILE)
         try:
-            with open (fname, 'a+') as vf:
-                vf.write("%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\n" % 
+            with open(fname, 'a+') as vf:
+                vf.write("%s\t%s\t\t%s\t\t%s\t%s\t%s\t%s\n" %
                     (swap_device, "-", "-", "swap", "-", "no", "-"))
         except IOError, ioe:
             LOGGER.error("Failed to write to %s", fname)
@@ -766,7 +766,7 @@
 
             This takes into account MIN_SWAP_SIZE required for
             low-memory system.
-        
+
             Returns: Size object
         '''
 
@@ -1000,10 +1000,9 @@
                         # Slice (sparc)
                         parent = slices[0].parent
                         new_slice = parent.add_slice("0", start, slice_size,
-                            Size.sector_units)
+                            Size.sector_units, force=True)
 
                         new_slice.tag = V_ROOT
-                        new_slice.force = True
 
                         if self._vdev is not None:
                             new_slice.in_vdev = self._vdev.name
@@ -1101,8 +1100,8 @@
 
             Parameters:
             - available_space: Space that can be dedicated to swap (MB)
-	        - min_size: Minimum size to use (MB)
-	        - max_size: Maximum size to use (MB)
+            - min_size: Minimum size to use (MB)
+            - max_size: Maximum size to use (MB)
 
             Returns:
                size of swap in MB
@@ -1131,7 +1130,7 @@
             If system memory is less than 900mb, swap is required.
             Minimum required space for swap is 0.5G (MIN_SWAP_SIZE).
         '''
-   
+
         if self._mem_size < ZVOL_REQ_MEM:
             return MIN_SWAP_SIZE
 
@@ -1209,6 +1208,7 @@
 #------------------------------------------------------------------------------
 # Module private functions
 
+
 def _get_system_memory():
     ''' Returns the amount of memory available in the system '''
 
--- a/usr/src/lib/install_target/discovery.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/discovery.py	Wed May 25 21:26:43 2011 +0100
@@ -627,6 +627,19 @@
             cmd = [ISCSIADM, "modify", "discovery", "--static", "enable"]
             Popen.check_call(cmd, stdout=Popen.STORE, stderr=Popen.STORE,
                              logger=ILN)
+
+            cmd = [ISCSIADM, "list", "target", "-S", discovery_str]
+            p = Popen.check_call(cmd, stdout=Popen.STORE, stderr=Popen.STORE,
+                                 logger=ILN)
+
+            # Device path (/dev/rdsk/...) is last thing output when split over
+            # whitespace
+            dev_path = p.stdout.split()[-1]
+            # Extract ctds from the devpath
+            ctd = dev_path.split("/")[-1]
+            # Extract ctd from the ctds, and set the ctd of the disk so
+            # matching works correctly later.
+            iscsi.parent.ctd = ctd.partition("s2")[0]
         else:
             # set up discovery of sendtargets targets
             discovery_str = ip.address
--- a/usr/src/lib/install_target/instantiation.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/instantiation.py	Wed May 25 21:26:43 2011 +0100
@@ -337,12 +337,13 @@
             be_list = zpool.get_children(class_type=BE)
             be_fs_list = [fs.name for fs in fs_list if fs.in_be]
             for be in be_list:
-                # create the new BE.  If filesystems were specified with
-                # "in_be" set to True, add those filesystems to the create call
+                # Initialize the new BE.  If filesystems were specified with
+                # "in_be" set to True, add those filesystems to the init call
                 if be_fs_list:
-                    be.create(self.dry_run, zpool.name, be_fs_list)
+                    be.init(self.dry_run, pool_name=zpool.name,
+                            fs_list=be_fs_list)
                 else:
-                    be.create(self.dry_run, zpool.name)
+                    be.init(self.dry_run, pool_name=zpool.name)
 
     def execute(self, dry_run=False):
         """ Primary execution method use by the Checkpoint parent class
--- a/usr/src/lib/install_target/libbe/be.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/libbe/be.py	Wed May 25 21:26:43 2011 +0100
@@ -72,35 +72,77 @@
     return name_list
 
 
-def be_create(new_be_name=None, new_be_pool=None, fs_list=const.ZFS_FS_NAMES):
-    """ be_create() - function to create a new BE layout.  Creates default zfs
-    datasets as well.
+def be_init(new_be_name, new_be_pool, zfs_properties=None, nested_be=False,
+        fs_list=None, fs_zfs_properties=None,
+        shared_fs_list=None, shared_fs_zfs_properties=None):
+    """ be_init() - function to initialize a new BE layout.  Creates default
+    zfs datasets as well.
 
-    new_be_name - optional name for the new BE
-    new_be_pool - optional pool to use for the BE layout
+    new_be_name - name for the new BE
+    new_be_pool - pool to use for the BE layout
+    zfs_properties - properties applicable to the BE's root dataset.
+    nested_be - flag to specify if we're initializing a nested BE.
     fs_list - list of paths to convert to datasets within the BE.
+    fs_zfs_properties - properties TODO
+    shared_fs_list - list of paths to convert to datasets in the shared area.
+    shared_fs_zfs_properties - properties TODO
+
+    Returns - for nested BEs, the created name of the BE if different from
+              the BE's original name.  None otherwise.
     """
     # create a new NVList object
     nvlist = nvl.NVList()
 
-    if new_be_name is not None:
-        nvlist.add_string(const.BE_ATTR_NEW_BE_NAME, new_be_name)
+    # Add BE name and pool.
+    nvlist.add_string(const.BE_ATTR_NEW_BE_NAME, new_be_name)
+    nvlist.add_string(const.BE_ATTR_NEW_BE_POOL, new_be_pool)
 
-    if new_be_pool is not None:
-        nvlist.add_string(const.BE_ATTR_NEW_BE_POOL, new_be_pool)
+    # If zfs properties are provided for the BE, add them (these apply to
+    # the root dataset of the BE.)
+    if zfs_properties is not None:
+        nvlist.add_nvlist(const.BE_ATTR_ZFS_PROPERTIES, zfs_properties)
+
+    # Add whether or not we're initializing a nested BE.
+    nvlist.add_boolean_value(const.BE_ATTR_NEW_BE_NESTED_BE, nested_be)
+
+    # If initializing a nested BE, pass in flag to allow
+    # auto naming if there is a naming conflict.
+    if nested_be:
+        nvlist.add_boolean_value(const.BE_ATTR_NEW_BE_ALLOW_AUTO_NAMING, True)
 
-    # add the BE datasets
-    nvlist.add_uint16(const.BE_ATTR_FS_NUM, len(fs_list))
-    nvlist.add_string_array(const.BE_ATTR_FS_NAMES, fs_list)
-    nvlist.add_uint16(const.BE_ATTR_SHARED_FS_NUM,
-        len(const.ZFS_SHARED_FS_NAMES))
-    nvlist.add_string_array(const.BE_ATTR_SHARED_FS_NAMES,
-        const.ZFS_SHARED_FS_NAMES)
+    # Add the BE datasets
+    if fs_list is not None and len(fs_list) > 0:
+        nvlist.add_uint16(const.BE_ATTR_FS_NUM, len(fs_list))
+        nvlist.add_string_array(const.BE_ATTR_FS_NAMES, fs_list)
+
+        if fs_zfs_properties is not None and len(fs_zfs_properties) > 0:
+            nvlist.add_nvlist_array(const.BE_ATTR_FS_ZFS_PROPERTIES,
+                                    fs_zfs_properties)
+
+    # Add the shared datasets
+    if shared_fs_list is not None and len(shared_fs_list) > 0:
+        nvlist.add_uint16(const.BE_ATTR_SHARED_FS_NUM, len(shared_fs_list))
+        nvlist.add_string_array(const.BE_ATTR_SHARED_FS_NAMES, shared_fs_list)
+
+        if shared_fs_zfs_properties is not None and \
+            len(shared_fs_zfs_properties) > 0:
+            nvlist.add_nvlist_array(const.BE_ATTR_SHARED_FS_ZFS_PROPERTIES,
+                                    shared_fs_zfs_properties)
 
     # pylint: disable-msg=E1101
     err = cfunc.be_init(nvlist)
     if err != 0:
-        raise RuntimeError("be_create failed:  %s" % const.BE_ERRNO_MAP[err])
+        raise RuntimeError("be_init failed:  %s" % const.BE_ERRNO_MAP[err])
+
+    # For nested BEs, the initialized BE might have been created with a
+    # different name than requested (it was auto named to something else).
+    # If so, return new name.
+    if nested_be:
+        created_be_name = nvlist.lookup_string(const.BE_ATTR_NEW_BE_NAME)
+        if (created_be_name != new_be_name):
+            return created_be_name
+
+    return None
 
 
 def be_destroy(be_name):
@@ -131,16 +173,19 @@
         raise RuntimeError("be_activate failed:  %s" % const.BE_ERRNO_MAP[err])
 
 
-def be_mount(be_name, mountpoint):
+def be_mount(be_name, mountpoint, altpool=None):
     """ be_mount() - function to mount a BE
 
     be_name - BE to mount
     mounpoint - where to mount the BE
+    altpool - alternate pool area from which to find the BE
     """
     # create a new NVList object
     nvlist = nvl.NVList()
     nvlist.add_string(const.BE_ATTR_ORIG_BE_NAME, be_name)
     nvlist.add_string(const.BE_ATTR_MOUNTPOINT, mountpoint)
+    if altpool is not None:
+        nvlist.add_string(const.BE_ATTR_ALT_POOL, altpool)
     nvlist.add_uint16(const.BE_ATTR_MOUNT_FLAGS, 0)
     # pylint: disable-msg=E1101
     err = cfunc.be_mount(nvlist)
@@ -148,30 +193,36 @@
         raise RuntimeError("be_mount failed:  %s" % const.BE_ERRNO_MAP[err])
 
 
-def be_unmount(be_name):
+def be_unmount(be_name, altpool=None):
     """ be_unmount() - function to unmount a BE
 
     be_name - BE to unmount
+    altpool - alternate pool area from which to find the BE
     """
     # create a new NVList object
     nvlist = nvl.NVList()
     nvlist.add_string(const.BE_ATTR_ORIG_BE_NAME, be_name)
+    if altpool is not None:
+        nvlist.add_string(const.BE_ATTR_ALT_POOL, altpool)
     # pylint: disable-msg=E1101
     err = cfunc.be_unmount(nvlist)
     if err != 0:
         raise RuntimeError("be_unmount failed:  %s" % const.BE_ERRNO_MAP[err])
 
 
-def be_create_snapshot(be_name, snapshot_name):
+def be_create_snapshot(be_name, snapshot_name, altpool=None):
     """ be_create_snapshot() - function to create a snapshot of the BE
 
     be_name - BE to snapshot
     snapshot_name - name of the snapshot to create
+    altpool - alternate pool area from which to find the BE
     """
     # create a new NVList object
     nvlist = nvl.NVList()
     nvlist.add_string(const.BE_ATTR_ORIG_BE_NAME, be_name)
     nvlist.add_string(const.BE_ATTR_SNAP_NAME, snapshot_name)
+    if altpool is not None:
+        nvlist.add_string(const.BE_ATTR_ALT_POOL, altpool)
     # pylint: disable-msg=E1101
     err = cfunc.be_create_snapshot(nvlist)
     if err != 0:
--- a/usr/src/lib/install_target/libbe/const.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/libbe/const.py	Wed May 25 21:26:43 2011 +0100
@@ -248,14 +248,20 @@
 BE_ATTR_NEW_BE_NAME = "new_be_name"
 BE_ATTR_NEW_BE_POOL = "new_be_pool"
 BE_ATTR_NEW_BE_DESC = "new_be_desc"
+BE_ATTR_NEW_BE_NESTED_BE = "new_be_nested_be"
+BE_ATTR_NEW_BE_PARENTBE = "new_be_parentbe"
+BE_ATTR_NEW_BE_ALLOW_AUTO_NAMING = "new_be_allow_auto_naming"
 BE_ATTR_POLICY = "policy"
 BE_ATTR_ZFS_PROPERTIES = "zfs_properties"
 
 BE_ATTR_FS_NAMES = "fs_names"
+BE_ATTR_FS_ZFS_PROPERTIES = "fs_zfs_properties"
 BE_ATTR_FS_NUM = "fs_num"
 BE_ATTR_SHARED_FS_NAMES = "shared_fs_names"
+BE_ATTR_SHARED_FS_ZFS_PROPERTIES = "shared_fs_zfs_properties"
 BE_ATTR_SHARED_FS_NUM = "shared_fs_num"
 
+BE_ATTR_ALT_POOL = "alt_pool"
 BE_ATTR_MOUNTPOINT = "mountpoint"
 BE_ATTR_MOUNT_FLAGS = "mount_flags"
 BE_ATTR_UNMOUNT_FLAGS = "unmount_flags"
@@ -271,5 +277,5 @@
 BE_ATTR_DATE = "date"
 BE_ATTR_MOUNTED = "mounted"
 
-ZFS_FS_NAMES = ["/"]
-ZFS_SHARED_FS_NAMES = ["/export", "/export/home"]
+ZFS_FS_NAMES = []
+ZFS_SHARED_FS_NAMES = ["export", "export/home"]
--- a/usr/src/lib/install_target/logical.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/logical.py	Wed May 25 21:26:43 2011 +0100
@@ -39,9 +39,9 @@
 from solaris_install.target.size import Size
 from solaris_install.target.shadow.logical import ShadowLogical
 from solaris_install.target.shadow.zpool import ShadowZpool
-from solaris_install.target.libbe.be import be_list, be_create, be_destroy, \
+from solaris_install.target.libbe.be import be_list, be_init, be_destroy, \
     be_activate, be_mount, be_unmount
-from solaris_install.target.libbe.const import ZFS_FS_NAMES
+from solaris_install.target.libbe.const import ZFS_FS_NAMES, ZFS_SHARED_FS_NAMES
 
 DUMPADM = "/usr/sbin/dumpadm"
 LOFIADM = "/usr/sbin/lofiadm"
@@ -231,7 +231,7 @@
 
     def get(self, propname="all"):
         """ get() - method to return a specific zpool property.
-        
+
         propname - name of the property to return.  If the user does not
         specify a propname, return all pool properties
         """
@@ -851,11 +851,17 @@
 class BE(DataObject):
     """ be DOC node definition
     """
-    def __init__(self, name="solaris"):
-        super(BE, self).__init__(name)
+    def __init__(self, initial_name="solaris"):
+        super(BE, self).__init__(initial_name)
 
+        self.created_name = None
+        self.initial_name = initial_name
         self.mountpoint = None
 
+    @property
+    def name(self):
+        return self.created_name or self.initial_name
+
     def to_xml(self):
         element = etree.Element("be")
         element.set("name", self.name)
@@ -894,49 +900,63 @@
             s += "; mountpoint=%s" % self.mountpoint
         return s
 
-    def create(self, dry_run, pool_name="rpool", zfs_fs_list=ZFS_FS_NAMES):
-        """ method to create a BE.
+    def init(self, dry_run, pool_name="rpool", nested_be=False,
+            fs_list=None, fs_zfs_properties=None,
+            shared_fs_list=None, shared_fs_zfs_properties=None):
+        """ method to initialize a BE by creating the empty datasets for the BE.
         """
-        if not self.exists:
-            if not dry_run:
-                be_create(self.name, pool_name, zfs_fs_list)
-                # if a mountpoint was specified, mount the freshly
-                # created BE and create the mountpoint in the process
-                # if it does not exist
-                if self.mountpoint is not None:
+
+        if not dry_run:
+            new_name = be_init(self.name, pool_name, nested_be=nested_be,
+                fs_list=fs_list, fs_zfs_properties=fs_zfs_properties,
+                shared_fs_list=shared_fs_list,
+                shared_fs_zfs_properties=shared_fs_zfs_properties)
+
+            # For a nested BE, the process of initializing a new BE
+            # may have ended up creating a different name.  We reap that
+            # here and update the stored name in this BE object accordingly.
+            if nested_be and new_name is not None:
+                logger = logging.getLogger(ILN)
+                logger.debug("Initialized nested BE with auto name: %s" % \
+                            new_name)
+                self.created_name = new_name
+
+            # if a mountpoint was specified, mount the freshly
+            # created BE and create the mountpoint in the process
+            # if it does not exist
+            if self.mountpoint is not None:
+                if nested_be:
+                    self.mount(self.mountpoint, dry_run, pool_name)
+                else:
                     self.mount(self.mountpoint, dry_run)
 
     def destroy(self, dry_run):
         """ method to destroy a BE.
         """
-        if self.exists:
-            if not dry_run:
-                be_destroy(self.name)
+        if not dry_run:
+            be_destroy(self.name)
 
     def activate(self, dry_run):
         """ method to activate a BE.
         """
-        if self.exists:
-            if not dry_run:
-                be_activate(self.name)
+        if not dry_run:
+            be_activate(self.name)
 
-    def mount(self, mountpoint, dry_run):
+    def mount(self, mountpoint, dry_run, altpool=None):
         """ method to mount a BE.
         """
-        if self.exists:
-            if not dry_run:
-                if not os.path.exists(mountpoint):
-                    os.makedirs(mountpoint)
-                be_mount(self.name, mountpoint)
-                self.mountpoint = mountpoint
+        if not dry_run:
+            if not os.path.exists(mountpoint):
+                os.makedirs(mountpoint)
+            be_mount(self.name, mountpoint, altpool)
+            self.mountpoint = mountpoint
 
-    def unmount(self, dry_run):
+    def unmount(self, dry_run, altpool=None):
         """ method to unmount a BE.
         """
-        if self.exists:
-            if not dry_run:
-                be_unmount(self.name)
-                self.mountpoint = None
+        if not dry_run:
+            be_unmount(self.name, altpool)
+            self.mountpoint = None
 
 
 class Lofi(object):
--- a/usr/src/lib/install_target/physical.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/physical.py	Wed May 25 21:26:43 2011 +0100
@@ -149,7 +149,7 @@
         return partition
 
     def add_slice(self, index, start_sector, size, size_units=Size.gb_units,
-                  in_zpool=None, in_vdev=None):
+                  in_zpool=None, in_vdev=None, force=False):
         """ add_slice() - method to create a Slice object and add it as a child
         of the Partition object
         """
@@ -160,6 +160,7 @@
         new_slice.start_sector = start_sector
         new_slice.in_zpool = in_zpool
         new_slice.in_vdev = in_vdev
+        new_slice.force = force
 
         # add the new Slice object as a child
         self.insert_children(new_slice)
@@ -674,9 +675,14 @@
         if disk_keyword is not None:
             disk.disk_keyword = DiskKeyword()
 
+        # Check for iSCSI information, will be handled in DOC as a child Iscsi
+        # object. Left this way so it's easy to locate the Iscsi information in
+        # the DOC for pre-discovery setup.
+        iscsi = element.find("iscsi")
+
         # at least one of the disk criteria must be specified
         if disk_name is None and disk_prop is None and \
-            disk_keyword is None:
+            disk_keyword is None and iscsi is None:
             raise ParsingError("No Disk identification provided")
 
         return disk
@@ -713,7 +719,7 @@
         self.delete_children(name=partition.name, class_type=Partition)
 
     def add_slice(self, index, start_sector, size, size_units=Size.gb_units,
-                  in_zpool=None, in_vdev=None):
+                  in_zpool=None, in_vdev=None, force=False):
         """ add_slice() - method to create a Slice object and add it as a child
         of the Disk object
         """
@@ -723,6 +729,7 @@
         new_slice.start_sector = start_sector
         new_slice.in_zpool = in_zpool
         new_slice.in_vdev = in_vdev
+        new_slice.force = force
 
         # add the new Slice object as a child
         self.insert_children(new_slice)
--- a/usr/src/lib/install_target/shadow/physical.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_target/shadow/physical.py	Wed May 25 21:26:43 2011 +0100
@@ -426,7 +426,7 @@
         # size
         if value.is_extended:
             for partition in self._shadow:
-                if partition.is_extended:
+                if partition.is_extended and partition.action != "delete":
                     self.set_error(self.TooManyExtPartitionsError())
             if value.size.sectors < 63:
                 self.set_error(self.ExtPartitionTooSmallError())
--- a/usr/src/lib/install_transfer/cpio.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_transfer/cpio.py	Wed May 25 21:26:43 2011 +0100
@@ -526,7 +526,7 @@
                     self.run_exec_file(trans.get(CONTENTS))
                 continue
             if trans.get(ACTION) == "install":
-                self.logger.debug("Transferring files to %s", self.dst)
+                self.logger.info("Transferring files to %s", self.dst)
                 self.transfer_filelist(trans.get(CONTENTS),
                                        trans.get(CPIO_ARGS))
 
--- a/usr/src/lib/install_transfer/ips.py	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/lib/install_transfer/ips.py	Wed May 25 21:26:43 2011 +0100
@@ -593,7 +593,7 @@
                     pkg_client_name=PKG_CLIENT_NAME,
                     version_id=self.CLIENT_API_VERSION, root=self.dst,
                     imgtype=self.completeness, is_zone=self.is_zone,
-                    **self._image_args)
+                    force=True, **self._image_args)
             except api_errors.VersionException, ips_err:
                 self.logger.exception("Error creating the IPS image")
                 raise ValueError("The IPS API version specified, "
--- a/usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf	Wed May 25 21:26:43 2011 +0100
@@ -33,11 +33,13 @@
 dir path=usr/lib
 dir path=usr/lib/python2.6
 dir path=usr/lib/python2.6/vendor-packages
-dir path=usr/lib/python2.6/vendor-packages/osol_install
-dir path=usr/lib/python2.6/vendor-packages/osol_install/auto_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install
 dir path=usr/share group=sys
 dir path=usr/share/auto_install group=sys
 dir path=usr/share/auto_install/sc_profiles group=sys
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/__init__.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/__init__.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/__init__.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/__init__.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/_libaiscf.so mode=0444
@@ -48,10 +50,6 @@
 file path=usr/lib/python2.6/vendor-packages/osol_install/libaimdns.so mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/libaiscf.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/netif.so mode=0444
-file path=usr/share/auto_install/ai.dtd mode=0444 group=sys
-file path=usr/share/auto_install/target.dtd mode=0444 group=sys
-file path=usr/share/auto_install/configuration.dtd mode=0444 group=sys
-file path=usr/share/auto_install/software.dtd mode=0444 group=sys
 file path=usr/share/auto_install/ai_manifest.xml mode=0444 group=sys
 file path=usr/share/auto_install/default.xml mode=0444 group=sys
 file path=usr/share/auto_install/sc_profiles/enable_sci.xml mode=0444 group=sys
--- a/usr/src/pkg/manifests/system-install-auto-install.mf	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/pkg/manifests/system-install-auto-install.mf	Wed May 25 21:26:43 2011 +0100
@@ -43,8 +43,10 @@
 dir path=usr/lib/install
 dir path=usr/lib/python2.6
 dir path=usr/lib/python2.6/vendor-packages
-dir path=usr/lib/python2.6/vendor-packages/osol_install
-dir path=usr/lib/python2.6/vendor-packages/osol_install/auto_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input
 dir path=usr/share group=sys
 dir path=usr/share/auto_install group=sys
 file path=etc/user_attr.d/system%2Finstall%2Fauto-install group=sys
@@ -55,8 +57,20 @@
 file path=usr/bin/aimanifest mode=0555
 file path=usr/bin/auto-install mode=0555
 file path=usr/lib/install/sc_conv.ksh mode=0555
-file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/ai_parse_manifest.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/ai_parse_manifest.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/mim.py mode=0444
@@ -67,5 +81,5 @@
 file path=lib/svc/manifest/application/auto-installer.xml mode=0444 group=sys
 file path=lib/svc/manifest/application/manifest-locator.xml mode=0444 group=sys
 license cr_Sun license=cr_Sun
-link path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/ai_get_manifest.py target=../../../../../bin/ai_get_manifest
+link path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_get_manifest.py target=../../../../../bin/ai_get_manifest
 
--- a/usr/src/pkg/manifests/system-library-install.mf	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/pkg/manifests/system-library-install.mf	Wed May 25 21:26:43 2011 +0100
@@ -103,6 +103,8 @@
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/initialize_smf.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/ips.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/ips.pyc
+file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/setup_swap.py
+file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/setup_swap.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/transfer_files.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/transfer_files.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/ict/update_dumpadm.py
--- a/usr/src/tools/tests/tests.nose	Wed May 25 13:29:32 2011 -0600
+++ b/usr/src/tools/tests/tests.nose	Wed May 25 21:26:43 2011 +0100
@@ -30,4 +30,4 @@
 # the files in that directory should begin with "test_". Files
 # containing in-line doc-tests should be added explicitly.
 
-tests=lib/install_common/test/,lib/liberrsvc_pymod/test/,cmd/ai-webserver/test/,cmd/text-install/test/,cmd/installadm/test/,cmd/installadm/installadm_common.py,lib/install_utils/test/,lib/libict_pymod/test/,lib/install_logging_pymod/test,lib/install_doc/test,lib/install_engine/test,lib/install_manifest/test/,lib/install_transfer/test,cmd/distro_const/checkpoints/test,cmd/js2ai/modules/test/test_suite.py,lib/terminalui/test,cmd/system-config/profile/test/,cmd/system-config/test/,lib/install_manifest_input/test/,lib/install_target/test/
+tests=lib/install_common/test/,lib/liberrsvc_pymod/test/,cmd/ai-webserver/test/,cmd/text-install/test/,cmd/installadm/test/,cmd/installadm/installadm_common.py,lib/install_utils/test/,lib/libict_pymod/test/,lib/install_logging_pymod/test,lib/install_doc/test,lib/install_engine/test,lib/install_manifest/test/,lib/install_transfer/test,cmd/distro_const/checkpoints/test,cmd/js2ai/modules/test/test_suite.py,lib/terminalui/test,cmd/system-config/profile/test/,cmd/system-config/test/,cmd/auto-install/test,cmd/auto-install/checkpoints/dmm/test,lib/install_manifest_input/test,lib/install_target/test/