16257 Support for zones configuration and installation should be included in AI
author     Ethan Quach <Ethan.Quach@sun.com>
Tue, 31 May 2011 14:21:09 -0700
changeset 1160 6f7e708c38ec
parent 1159 fbde90ccfae9
child 1161 5c1b6d445efc
16257 Support for zones configuration and installation should be included in AI
7041915 TransferFiles ICT should support transferring a directory that is more than one level deep.
7049824 System installed via AI ends up with incorrect mountpoints for shared ZFS datasets
usr/src/Makefile.master
usr/src/Targetdirs
usr/src/cmd/Makefile.targ
usr/src/cmd/ai-webserver/AI_database.py
usr/src/cmd/ai-webserver/Makefile
usr/src/cmd/ai-webserver/cgi_get_manifest.py
usr/src/cmd/ai-webserver/common_profile.py
usr/src/cmd/ai-webserver/create_profile.py
usr/src/cmd/ai-webserver/criteria_schema.rng
usr/src/cmd/ai-webserver/publish_manifest.py
usr/src/cmd/ai-webserver/set_criteria.py
usr/src/cmd/ai-webserver/test/test_ai_database.py
usr/src/cmd/ai-webserver/test/test_create_profile.py
usr/src/cmd/ai-webserver/test/test_publish_manifest.py
usr/src/cmd/ai-webserver/verifyXML.py
usr/src/cmd/auto-install/Makefile
usr/src/cmd/auto-install/__init__.py
usr/src/cmd/auto-install/ai_get_manifest.py
usr/src/cmd/auto-install/ai_manifest.xml
usr/src/cmd/auto-install/auto_install.py
usr/src/cmd/auto-install/checkpoints/Makefile
usr/src/cmd/auto-install/checkpoints/ai_configuration.py
usr/src/cmd/auto-install/checkpoints/target_selection_zone.py
usr/src/cmd/auto-install/default.xml
usr/src/cmd/auto-install/enable_sci.xml
usr/src/cmd/auto-install/manifest/Makefile
usr/src/cmd/auto-install/manifest/ai_manifest.xml
usr/src/cmd/auto-install/manifest/default.xml
usr/src/cmd/auto-install/manifest/zone_default.xml
usr/src/cmd/auto-install/profile/Makefile
usr/src/cmd/auto-install/profile/enable_sci.xml
usr/src/cmd/auto-install/profile/sc_sample.xml
usr/src/cmd/auto-install/profile/static_network.xml
usr/src/cmd/auto-install/sc_sample.xml
usr/src/cmd/auto-install/static_network.xml
usr/src/cmd/auto-install/svc/auto-installer
usr/src/cmd/auto-install/svc/manifest-locator
usr/src/cmd/auto-install/test/test_auto_install_manifest.py
usr/src/cmd/distro_const/Makefile
usr/src/cmd/distro_const/__init__.py
usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py
usr/src/cmd/distro_const/configuration.py
usr/src/cmd/installadm/list.py
usr/src/cmd/installadm/setup-service.sh
usr/src/cmd/system-config/__init__.py
usr/src/lib/Makefile
usr/src/lib/Makefile.targ
usr/src/lib/install_common/__init__.py
usr/src/lib/install_configuration/Makefile
usr/src/lib/install_configuration/__init__.py
usr/src/lib/install_configuration/configuration.py
usr/src/lib/install_ict/__init__.py
usr/src/lib/install_ict/apply_sysconfig.py
usr/src/lib/install_ict/create_snapshot.py
usr/src/lib/install_ict/initialize_smf.py
usr/src/lib/install_ict/ips.py
usr/src/lib/install_ict/transfer_files.py
usr/src/lib/install_manifest/dtd/configuration.dtd
usr/src/lib/install_target/Makefile
usr/src/lib/install_target/instantiation_zone.py
usr/src/lib/install_transfer/ips.py
usr/src/lib/libict/Makefile
usr/src/man/installadm.1m.txt
usr/src/pkg/manifests/install-distribution-constructor.mf
usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf
usr/src/pkg/manifests/system-install-auto-install.mf
usr/src/pkg/manifests/system-library-install.mf
--- a/usr/src/Makefile.master	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/Makefile.master	Tue May 31 14:21:09 2011 -0700
@@ -100,7 +100,8 @@
 ROOTPYTHONVENDORINSTALLPROF=    $(ROOTPYTHONVENDORINSTALL)/profile
 ROOTPYTHONVENDORSOLINSTALL=	$(ROOTPYTHONVENDOR)/solaris_install
 ROOTPYTHONVENDORSOLINSTALLBOOT =	$(ROOTPYTHONVENDORSOLINSTALL)/boot
-ROOTPYTHONVENDORSOLINSTALLDATACACHE= $(ROOTPYTHONVENDORSOLINSTALL)/data_object
+ROOTPYTHONVENDORSOLINSTALLCONFIGURATION =	$(ROOTPYTHONVENDORSOLINSTALL)/configuration
+ROOTPYTHONVENDORSOLINSTALLDATACACHE=	$(ROOTPYTHONVENDORSOLINSTALL)/data_object
 ROOTPYTHONVENDORINSTALLDC=	$(ROOTPYTHONVENDORSOLINSTALL)/distro_const
 ROOTPYTHONVENDORINSTALLDCCHKPT= $(ROOTPYTHONVENDORINSTALLDC)/checkpoints
 ROOTPYTHONVENDORSOLINSTALLAI= \
@@ -135,6 +136,7 @@
 ROOTPYTHONVENDORBOOTMGMTBKNDFW=	$(ROOTPYTHONVENDORBOOTMGMTBKND)/fw
 ROOTPYTHONVENDORBOOTMGMTBKNDLOADER=	$(ROOTPYTHONVENDORBOOTMGMTBKND)/loader
 ROOTAUTOINST=		$(ROOT)/usr/share/auto_install
+ROOTAUTOINSTMANIFEST=	$(ROOTAUTOINST)/manifest
 ROOTAUTOINSTSCPROFILES=	$(ROOTAUTOINST)/sc_profiles
 ROOTSBIN=		$(ROOT)/sbin
 ROOTUSRBIN=		$(ROOT)/usr/bin
--- a/usr/src/Targetdirs	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/Targetdirs	Tue May 31 14:21:09 2011 -0700
@@ -72,6 +72,7 @@
 	/usr/lib/python2.6/vendor-packages/solaris_install/distro_const/checkpoints/defaultfiles \
 	/usr/lib/python2.6/vendor-packages/solaris_install \
 	/usr/lib/python2.6/vendor-packages/solaris_install/boot \
+	/usr/lib/python2.6/vendor-packages/solaris_install/configuration \
 	/usr/lib/python2.6/vendor-packages/solaris_install/data_object \
 	/usr/lib/python2.6/vendor-packages/solaris_install/engine \
 	/usr/lib/python2.6/vendor-packages/solaris_install/engine/test \
@@ -93,6 +94,7 @@
 	/usr/lib/python2.6/vendor-packages/terminalui \
 	/usr/sbin \
 	/usr/share/auto_install \
+	/usr/share/auto_install/manifest \
 	/usr/share/auto_install/sc_profiles \
 	/usr/share/distro_const \
 	/usr/share/distro_const/profile \
--- a/usr/src/cmd/Makefile.targ	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/Makefile.targ	Tue May 31 14:21:09 2011 -0700
@@ -177,6 +177,9 @@
 $(ROOTAUTOINST)/%: %
 	$(INS.file)
 
+$(ROOTAUTOINSTMANIFEST)/%: %
+	$(INS.file)
+
 $(ROOTAUTOINSTSCPROFILES)/%: %
 	$(INS.file)
 
--- a/usr/src/cmd/ai-webserver/AI_database.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/AI_database.py	Tue May 31 14:21:09 2011 -0700
@@ -40,6 +40,9 @@
 MANIFESTS_TABLE = 'manifests'  # DB table name for manifests
 PROFILES_TABLE = 'profiles'  # DB table name for profiles
 
+# Defined list of criteria that we treat as case sensitive.
+CRIT_LIST_CASE_SENSITIVE = ['zonename']
+
 
 class DB:
     ''' Class to connect to, and look-up entries in the SQLite database '''
@@ -194,9 +197,16 @@
                         _("Database open error."))
             self._con.close()
             return
+
+        sqlite.enable_callback_tracebacks(1)
+
+        # register our user-defined function
+        self._con.create_function("is_in_list", 4, is_in_list)
+
         # allow access by both index and column name
         self._con.row_factory = sqlite.Row
         self._cursor = self._con.cursor()
+
         # iterate over each DBrequest object in the queue
         while True:
             request = self._requests.get()
@@ -243,6 +253,41 @@
 # Functions below here
 #
 
+def is_in_list(crit_name, value, value_list, list_separator=None):
+    ''' All non-range type criteria fields will be considered as a
+        separated list of values.  This function will be registered
+        as a user-defined function to be used as a comparator in
+        selection queries for non-range criteria fields.
+
+        Parameters: crit_name      - name of criteria being evaluated
+                    value          - string to find in value_list
+                    value_list     - string of separated values
+                    list_separator - separator used in value_list
+
+        Returns: True  - if value is in value_list
+                 False - otherwise
+    '''
+    if value is None or value_list is None:
+        return 0
+
+    # Because we use this function as a callback from sqlite, we can't
+    # get it to pass a None object as an argument for the list separator.
+    # We specially look for the string 'None' to mean the None object.
+    if list_separator is not None and list_separator == 'None':
+        list_separator = None
+
+    # If the criteria being evaluated is in the list of criteria we've
+    # defined that are case sensitive, compare it without lowering.
+    if crit_name.lower() in CRIT_LIST_CASE_SENSITIVE:
+        if value in value_list.split(list_separator):
+            return 1
+    else:
+        if value.lower() in \
+            [val.lower() for val in value_list.split(list_separator)]:
+            return 1
+
+    return 0
+
 
 def sanitizeSQL(text):
     ''' Use to remove special SQL characters which could cause damage or
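
A minimal standalone sketch of how the new comparator plugs into sqlite (the
table, data, and zone names below are made up; the real schema is the AI.db
layout created by the ai-webserver Makefile):

    import sqlite3 as sqlite

    def is_in_list(crit_name, value, value_list, list_separator):
        # Same idea as the comparator above: case-insensitive match, except
        # for criteria treated as case sensitive (here just 'zonename').
        if value is None or value_list is None:
            return 0
        sep = None if list_separator == 'None' else list_separator
        values = value_list.split(sep)
        if crit_name.lower() == 'zonename':
            return int(value in values)
        return int(value.lower() in [v.lower() for v in values])

    con = sqlite.connect(':memory:')
    con.create_function("is_in_list", 4, is_in_list)
    con.execute("CREATE TABLE manifests (name TEXT, zonename TEXT)")
    con.execute("INSERT INTO manifests VALUES ('zone_manifest', 'z1 z2 Z3')")
    row = con.execute("SELECT name FROM manifests WHERE zonename IS NULL "
                      "OR is_in_list('zonename', 'z2', zonename, 'None') == 1"
                      ).fetchone()
    print(row[0])   # zone_manifest
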
@@ -517,18 +562,18 @@
 def findManifest(criteria, db):
     ''' Used to find a non-default manifest.
     Provided a criteria dictionary, findManifest returns a query
-    response containing a single manifest (or 0 if there are no matching
+    response containing a single manifest (or None if there are no matching
     manifests).  Manifests with no criteria set (as they are either
     inactive or the default) are screened out.
     '''
     # If we didn't get any criteria, bail providing no manifest
     if len(criteria) == 0:
-        return 0
+        return None
 
     # create list of criteria in use that are set in the db
     criteria_set_in_db = list(getCriteria(db.getQueue(), strip=False))
     if len(criteria_set_in_db) == 0:
-        return 0
+        return None
 
     # create list of all criteria in the db
     all_criteria_in_db = list(getCriteria(db.getQueue(), strip=False,
@@ -539,7 +584,7 @@
     query_str = build_query_str(criteria, criteria_set_in_db,
                                 all_criteria_in_db)
     if not query_str:
-        return 0
+        return None
     query = DBrequest(query_str)
     db.getQueue().put(query)
     query.waitAns()
@@ -549,7 +594,7 @@
     if response and len(response) == 1:    # got a manifest
         return response[0]['name']
     else:                     # didn't get a manifest
-        return 0
+        return None
 
 
 def build_query_str(criteria, criteria_set_in_db, all_criteria_in_db):
@@ -591,35 +636,49 @@
     for crit in criteria_set_in_db:
         try:
             if crit.startswith("MIN"):
-                critval = sanitizeSQL(criteria[crit.replace('MIN', '', 1)])
-                if crit.endswith("mac"):
-                    # setup a clause like (HEX(MINmac) <= HEX(x'F00')) OR
-                    # MINMAC is NULL
-                    query_str += "(HEX(" + crit + ") <= HEX(x'" + \
-                                 critval + "') OR " + crit + " IS NULL) AND "
+                if crit.replace('MIN', '', 1) in criteria:
+                    critval = sanitizeSQL(criteria[crit.replace('MIN', '', 1)])
+                    if crit.endswith("mac"):
+                        # setup a clause like (HEX(MINmac) <= HEX(x'F00')) OR
+                        # MINMAC is NULL
+                        query_str += "(HEX(" + crit + ") <= HEX(x'" + \
+                            critval + "') OR " + crit + " IS NULL) AND "
+                    else:
+                        # setup a clause like crit <= value OR crit IS NULL AND
+                        query_str += "(" + crit + " <= " + critval + \
+                                     " OR " + crit + " IS NULL) AND "
                 else:
-                    # setup a clause like crit <= value OR crit IS NULL AND
-                    query_str += "(" + crit + " <= " + critval + \
-                                 " OR " + crit + " IS NULL) AND "
-
+                    query_str += "(" + crit + " IS NULL) AND "
             elif crit.startswith("MAX"):
-                critval = sanitizeSQL(criteria[crit.replace('MAX', '', 1)])
-                if crit.endswith("mac"):
-                    # setup a clause like (HEX(MAXmac) >= HEX(x'F00')) OR
-                    # MAXmac is NULL
-                    query_str += "(HEX(" + crit + ") >= HEX(x'" + critval + \
-                                 "') OR " + crit + " IS NULL) AND "
+                if crit.replace('MAX', '', 1) in criteria:
+                    critval = sanitizeSQL(criteria[crit.replace('MAX', '', 1)])
+                    if crit.endswith("mac"):
+                        # setup a clause like (HEX(MAXmac) >= HEX(x'F00')) OR
+                        # MAXmac is NULL
+                        query_str += "(HEX(" + crit + ") >= HEX(x'" + \
+                            critval + "') OR " + crit + " IS NULL) AND "
+                    else:
+                        # setup a clause like crit <= value
+                        query_str += "(" + crit + " >= " + critval + \
+                                     " OR " + crit + " IS NULL) AND "
                 else:
-                    # setup a clause like crit <= value
-                    query_str += "(" + crit + " >= " + critval + \
-                                 " OR " + crit + " IS NULL) AND "
-
+                    query_str += "(" + crit + " IS NULL) AND "
             else:
-                # store single values in lower case
-                # setup a clause like crit = lower(value)
-                query_str += "(" + crit + " " + "= LOWER('" + \
-                             sanitizeSQL(criteria[crit]) + "') OR " + \
-                             crit + " IS NULL) AND "
+                if crit in criteria:
+                    # For non-range criteria, the value stored in the DB
+                    # may be a whitespace separated list of single values.
+                    # We use a special user-defined function to determine
+                    # if the given criteria is in that textual list.
+                    #
+                    # setup a clause like:
+                    #    crit IS NULL OR \
+                    #        is_in_list('crit', 'value', crit, 'None') == 1
+                    query_str += "(" + crit + " IS NULL OR is_in_list('" + \
+                                 crit + "', '" + sanitizeSQL(criteria[crit]) + \
+                                 "', " + crit + ", 'None') == 1) AND "
+                else:
+                    query_str += "(" + crit + " IS NULL) AND "
+
         except KeyError:
             print >> sys.stderr, _("Missing criteria: %s; returning 0") % crit
             return 0
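
For illustration only, with a hypothetical client posting arch=i86pc and
zonename=z1, the non-range branch above emits clauses of this shape (the
surrounding SELECT, the range clauses, and the NOT-all-NULL screen are
omitted):

    # Illustrative fragment of the WHERE clause built above for the
    # hypothetical criteria {'arch': 'i86pc', 'zonename': 'z1'}.
    where_fragment = (
        "(arch IS NULL OR is_in_list('arch', 'i86pc', arch, 'None') == 1) AND "
        "(zonename IS NULL OR "
        "is_in_list('zonename', 'z1', zonename, 'None') == 1)")
    print(where_fragment)
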
@@ -646,7 +705,6 @@
                   "net_val desc, mem_val desc LIMIT 1")
     return query_str
 
-
 def formatValue(key, value):
     ''' Format and stringify database values.
 
--- a/usr/src/cmd/ai-webserver/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -83,7 +83,7 @@
 # except for the MIN or MAX prefixes used in range names.
 #
 AI.db:
-	$(ECHO) 'CREATE TABLE manifests (name TEXT, instance INTEGER, arch TEXT, MINmac INTEGER, MAXmac INTEGER, MINipv4 INTEGER, MAXipv4 INTEGER, cpu TEXT, platform TEXT, MINnetwork INTEGER, MAXnetwork INTEGER, MINmem INTEGER, MAXmem INTEGER);' | /usr/bin/sqlite3 ./AI.db
-	$(ECHO) 'CREATE TABLE profiles (name TEXT, file TEXT, arch TEXT, hostname TEXT, MINmac INTEGER, MAXmac INTEGER, MINipv4 INTEGER, MAXipv4 INTEGER, cpu TEXT, platform TEXT, MINnetwork INTEGER, MAXnetwork INTEGER, MINmem INTEGER, MAXmem INTEGER);' | /usr/bin/sqlite3 ./AI.db
+	$(ECHO) 'CREATE TABLE manifests (name TEXT, instance INTEGER, arch TEXT, MINmac INTEGER, MAXmac INTEGER, MINipv4 INTEGER, MAXipv4 INTEGER, cpu TEXT, platform TEXT, MINnetwork INTEGER, MAXnetwork INTEGER, MINmem INTEGER, MAXmem INTEGER, zonename TEXT);' | /usr/bin/sqlite3 ./AI.db
+	$(ECHO) 'CREATE TABLE profiles (name TEXT, file TEXT, arch TEXT, hostname TEXT, MINmac INTEGER, MAXmac INTEGER, MINipv4 INTEGER, MAXipv4 INTEGER, cpu TEXT, platform TEXT, MINnetwork INTEGER, MAXnetwork INTEGER, MINmem INTEGER, MAXmem INTEGER, zonename TEXT);' | /usr/bin/sqlite3 ./AI.db
 
 include ../Makefile.targ
--- a/usr/src/cmd/ai-webserver/cgi_get_manifest.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/cgi_get_manifest.py	Tue May 31 14:21:09 2011 -0700
@@ -66,6 +66,9 @@
         protocol_version   - the request version number, 0.5 indicates that the
                              original mechanisms are being used.
         service_name       - the service name
+        no_default         - boolean flag to signify whether or not we should
+                             hand back the default manifest and profiles if one
+                             cannot be matched based on the client criteria.
         post_data          - the POST-ed client criteria
 
     Raises
@@ -73,6 +76,7 @@
     '''
     protocol_version = COMPATIBILITY_VERSION  # assume original client
     service_name = None
+    no_default = False
     post_data = None
     if 'version' in form:
         protocol_version = form['version'].value  # new client
@@ -93,10 +97,13 @@
                 logging.warning(_(
                         "Unrecognized logging level from POST REQUEST:  ")
                         + str(sol_dbg))
+    if 'no_default' in form:
+        # Treat any capitalization of 'true' as enabling the flag.
+        no_default = (str(True).lower() == (form['no_default'].value).lower())
     if 'postData' in form:
         post_data = form['postData'].value
 
-    return (protocol_version, service_name, post_data)
+    return (protocol_version, service_name, no_default, post_data)
 
 
 def get_environment_information():
@@ -179,7 +186,7 @@
 
 
 def send_manifest(form_data, port=0, servicename=None,
-        protocolversion=COMPATIBILITY_VERSION):
+        protocolversion=COMPATIBILITY_VERSION, no_default=False):
     '''Replies to the client with matching service for a service.
 
     Args
@@ -187,6 +194,9 @@
         port        - the port of the old client
         servicename - the name of the service being used
         protocolversion - the version of the AI service RE: handshake
+        no_default  - boolean flag to signify whether or not we should hand
+                      back the default manifest and profiles if one cannot
+                      be matched based on the client criteria.
 
     Returns
         None
@@ -289,69 +299,57 @@
         print '</pre>'
         return
 
-    if str(manifest).isdigit() and manifest > 0:
-        web_page = \
-            E.HTML(
-                   E.HEAD(
-                          E.TITLE(_("Error!"))
-                   ),
-                   E.BODY(
-                          E.P(_("Criteria indeterminate -- this "
-                                "should not happen! Got %s matches.") %
-                              str(manifest))
-                   )
-            )
-        print "Content-Type: text/html"     # HTML is following
-        print                               # blank line, end of headers
-        print lxml.etree.tostring(web_page, pretty_print=True)
-        return
-
-    # check if findManifest() returned a number equal to 0
-    # (means we got no manifests back -- thus we serve the default)
-    elif manifest == 0:
+    # check if findManifest() returned None
+    # (means we got no manifests back -- thus we serve the default if desired)
+    if manifest is None and not no_default:
         manifest = get_default(servicename)
 
-    # findManifest() returned the name of the manifest to serve
-    # (or it is now set to default.xml)
-    try:
-        # construct the fully qualified filename
-        path = os.path.join(os.path.dirname(path), AI_DATA)
-        filename = os.path.abspath(os.path.join(path, manifest))
-        # open and read the manifest
-        with open(filename, 'rb') as mfp:
-            manifest_str = mfp.read()
-        # maintain compability with older AI client
-        if servicename is None or \
-                float(protocolversion) < float(PROFILES_VERSION):
-            content_type = mimetypes.types_map.get('.xml', 'text/plain')
-            print 'Content-Length:', len(manifest_str)  # Length of the file
-            print 'Content-Type:', content_type         # XML is following
-            print                                 # blank line, end of headers
-            print manifest_str
-            logging.info('Manifest sent from %s.' % filename)
+    # if we have a manifest to return, prepare its return
+    if manifest is not None:
+        try:
+            # construct the fully qualified filename
+            path = os.path.join(os.path.dirname(path), AI_DATA)
+            filename = os.path.abspath(os.path.join(path, manifest))
+            # open and read the manifest
+            with open(filename, 'rb') as mfp:
+                manifest_str = mfp.read()
+            # maintain compatibility with older AI client
+            if servicename is None or \
+                    float(protocolversion) < float(PROFILES_VERSION):
+                content_type = mimetypes.types_map.get('.xml', 'text/plain')
+                print 'Content-Length:', len(manifest_str)  # Length of the file
+                print 'Content-Type:', content_type         # XML is following
+                print                               # blank line, end of headers
+                print manifest_str
+                logging.info('Manifest sent from %s.' % filename)
+                return
+
+        except OSError, err:
+            print 'Content-Type: text/html'     # HTML is following
+            print                               # blank line, end of headers
+            print '<pre>'
+            # report the internal error to error_log and requesting client
+            sys.stderr.write(_('error:manifest (%s) %s\n') % (str(manifest), err))
+            sys.stdout.write(_('error:manifest (%s) %s\n') % (str(manifest), err))
+            print '</pre>'
             return
 
-    except OSError, err:
-        print 'Content-Type: text/html'     # HTML is following
-        print                               # blank line, end of headers
-        print '<pre>'
-        # report the internal error to error_log and requesting client
-        sys.stderr.write(_('error:manifest (%s) %s\n') % (str(manifest), err))
-        sys.stdout.write(_('error:manifest (%s) %s\n') % (str(manifest), err))
-        print '</pre>'
-        return
 
     # get AI service image path
     service_info = get_service_info(servicename)
     # construct object to contain MIME multipart message
     outermime = MIMEMultipart()
     client_msg = list()  # accumulate message output for AI client
-    # add manifest as attachment
-    msg = MIMEText(manifest_str, 'xml')
-    # indicate manifest using special name
-    msg.add_header('Content-Disposition', 'attachment',
-                   filename=sc.AI_MANIFEST_ATTACHMENT_NAME)
-    outermime.attach(msg)  # add manifest as an attachment
+
+    # If we have a manifest, attach it to the return message
+    if manifest is not None:
+        # add manifest as attachment
+        msg = MIMEText(manifest_str, 'xml')
+        # indicate manifest using special name
+        msg.add_header('Content-Disposition', 'attachment',
+                      filename=sc.AI_MANIFEST_ATTACHMENT_NAME)
+        outermime.attach(msg)  # add manifest as an attachment
+
 
     # search for any profiles matching client criteria
     # formulate database query to profiles table
@@ -362,8 +360,8 @@
     for crit in AIdb.getCriteria(aisql.getQueue(), table=AIdb.PROFILES_TABLE,
                                  onlyUsed=False):
         if crit not in criteria:
-            msgtxt = _("Warning: expected client criteria \"%s\" " \
-                       "missing from post-data. Profiles may be missing.") \
+            msgtxt = _("Warning: client criteria \"%s\" not provided in "
+                       "request.  Setting value to NULL for profile lookup.") \
                        % crit
             client_msg += [msgtxt]
             logging.warn(msgtxt)
@@ -375,98 +373,127 @@
             else:
                 nvpairs += [crit + " IS NULL"]
             continue
+
         # prepare criteria value to add to query
         envval = AIdb.sanitizeSQL(criteria[crit])
         if AIdb.isRangeCriteria(aisql.getQueue(), crit, AIdb.PROFILES_TABLE):
-            if crit == "mac":
-                nvpairs += ["(MIN" + crit + " IS NULL OR "
-                    "HEX(MIN" + crit + ")<=HEX(X'" + envval + "'))"]
-                nvpairs += ["(MAX" + crit + " IS NULL OR HEX(MAX" +
+            # If no default profiles are requested, then we mustn't allow
+            # this criteria to be NULL.  It must match the client's given
+            # value for this criteria.
+            if no_default:
+                if crit == "mac":
+                    nvpairs += ["(HEX(MIN" + crit + ")<=HEX(X'" + envval + \
+                        "'))"]
+
+                    nvpairs += ["(HEX(MAX" + crit + ")>=HEX(X'" + envval + \
+                        "'))"]
+                else:
+                    nvpairs += ["(MIN" + crit + "<='" + envval + "')"]
+                    nvpairs += ["(MAX" + crit + ">='" + envval + "')"]
+            else:
+                if crit == "mac":
+                    nvpairs += ["(MIN" + crit + " IS NULL OR "
+                        "HEX(MIN" + crit + ")<=HEX(X'" + envval + "'))"]
+                    nvpairs += ["(MAX" + crit + " IS NULL OR HEX(MAX" +
                         crit + ")>=HEX(X'" + envval + "'))"]
-            else:
-                nvpairs += ["(MIN" + crit + " IS NULL OR MIN" +
+                else:
+                    nvpairs += ["(MIN" + crit + " IS NULL OR MIN" +
                         crit + "<='" + envval + "')"]
-                nvpairs += ["(MAX" + crit + " IS NULL OR MAX" +
+                    nvpairs += ["(MAX" + crit + " IS NULL OR MAX" +
                         crit + ">='" + envval + "')"]
         else:
-            nvpairs += ["(" + crit + " IS NULL OR " +
-                    crit + "='" + envval + "')"]
-    q_str += " AND ".join(nvpairs)
+            # If no default profiles are requested, then we mustn't allow
+            # this criteria to be NULL.  It must match the client's given
+            # value for this criteria.
+            #
+            # Also, since this is a non-range criteria, the value stored
+            # in the DB may be a whitespace separated list of single
+            # values.  We use a special user-defined function to
+            # determine if the given criteria is in that textual list.
+            if no_default:
+                nvpairs += ["(is_in_list('" + crit + "', '" + envval + "', " + \
+                    crit + ", 'None') == 1)"]
+            else:
+                nvpairs += ["(" + crit + " IS NULL OR is_in_list('" + crit + \
+                    "', '" + envval + "', " + crit + ", 'None') == 1)"]
+
+    if len(nvpairs) > 0:
+        q_str += " AND ".join(nvpairs)
 
-    # issue database query
-    logging.info("Profile query: " + q_str)
-    query = AIdb.DBrequest(q_str)
-    aisql.getQueue().put(query)
-    query.waitAns()
-    if query.getResponse() is None or len(query.getResponse()) == 0:
-        msgtxt = _("No profiles found.")
-        client_msg += [msgtxt]
-        logging.info(msgtxt)
-    else:
-        for row in query.getResponse():
-            profpath = row['file']
-            profname = row['name']
-            if profname is None:  # should not happen
-                profname = 'unnamed'
-            try:
-                if profpath is None:
-                    msgtxt = "Database record error - profile path is empty."
+        # issue database query
+        logging.info("Profile query: " + q_str)
+        query = AIdb.DBrequest(q_str)
+        aisql.getQueue().put(query)
+        query.waitAns()
+        if query.getResponse() is None or len(query.getResponse()) == 0:
+            msgtxt = _("No profiles found.")
+            client_msg += [msgtxt]
+            logging.info(msgtxt)
+        else:
+            for row in query.getResponse():
+                profpath = row['file']
+                profname = row['name']
+                if profname is None:  # should not happen
+                    profname = 'unnamed'
+                try:
+                    if profpath is None:
+                        msgtxt = "Database record error - profile path is empty."
+                        client_msg += [msgtxt]
+                        logging.error(msgtxt)
+                        continue
+                    msgtxt = _('Processing profile %s') % profname
+                    client_msg += [msgtxt]
+                    logging.info(msgtxt)
+                    with open(profpath, 'r') as pfp:
+                        raw_profile = pfp.read()
+                    # do any template variable replacement {{AI_xxx}}
+                    tmpl_profile = sc.perform_templating(raw_profile,
+                                                         validate_only=False)
+                    # precautionary validation of profile, logging only
+                    sc.validate_profile_string(tmpl_profile, service_info[2],
+                                               dtd_validation=True,
+                                               warn_if_dtd_missing=True)
+                except IOError, err:
+                    msgtxt = _("Error:  I/O error: ") + str(err)
                     client_msg += [msgtxt]
                     logging.error(msgtxt)
                     continue
-                msgtxt = _('Processing profile %s') % profname
+                except OSError:
+                    msgtxt = _("Error:  OS error on profile ") + profpath
+                    client_msg += [msgtxt]
+                    logging.error(msgtxt)
+                    continue
+                except KeyError:
+                    msgtxt = _('Error:  could not find criteria to substitute in '
+                            'template: ') + profpath
+                    client_msg += [msgtxt]
+                    logging.error(msgtxt)
+                    logging.error('Profile with template substitution error:' +
+                            raw_profile)
+                    continue
+                except lxml.etree.XMLSyntaxError, err:
+                    # log validation error and proceed
+                    msgtxt = _(
+                            'Warning:  syntax error found in profile: ') \
+                            + profpath
+                    client_msg += [msgtxt]
+                    logging.error(msgtxt)
+                    for error in err.error_log:
+                        msgtxt = _('Error:  ') + error.message
+                        client_msg += [msgtxt]
+                        logging.error(msgtxt)
+                    logging.info([_('Profile failing validation:  ') +
+                                 lxml.etree.tostring(root)])
+                # build MIME message and attach to outer MIME message
+                msg = MIMEText(tmpl_profile, 'xml')
+                # indicate in header that this is an attachment
+                msg.add_header('Content-Disposition', 'attachment',
+                               filename=profname)
+                # attach this profile to the manifest and any other profiles
+                outermime.attach(msg)
+                msgtxt = _('Parsed and loaded profile: ') + profname
                 client_msg += [msgtxt]
                 logging.info(msgtxt)
-                with open(profpath, 'r') as pfp:
-                    raw_profile = pfp.read()
-                # do any template variable replacement {{AI_xxx}}
-                tmpl_profile = sc.perform_templating(raw_profile,
-                                                     validate_only=False)
-                # precautionary validation or profile, logging only
-                sc.validate_profile_string(tmpl_profile, service_info[2],
-                                           dtd_validation=True,
-                                           warn_if_dtd_missing=True)
-            except IOError, err:
-                msgtxt = _("Error:  I/O error: ") + str(err)
-                client_msg += [msgtxt]
-                logging.error(msgtxt)
-                continue
-            except OSError:
-                msgtxt = _("Error:  OS error on profile ") + profpath
-                client_msg += [msgtxt]
-                logging.error(msgtxt)
-                continue
-            except KeyError:
-                msgtxt = _('Error:  could not find criteria to substitute in '
-                        'template: ') + profpath
-                client_msg += [msgtxt]
-                logging.error(msgtxt)
-                logging.error('Profile with template substitution error:' +
-                        raw_profile)
-                continue
-            except lxml.etree.XMLSyntaxError, err:
-                # log validation error and proceed
-                msgtxt = _(
-                        'Warning:  syntax error found in profile: ') \
-                        + profpath
-                client_msg += [msgtxt]
-                logging.error(msgtxt)
-                for error in err.error_log:
-                    msgtxt = _('Error:  ') + error.message
-                    client_msg += [msgtxt]
-                    logging.error(msgtxt)
-                logging.info([_('Profile failing validation:  ') +
-                             lxml.etree.tostring(root)])
-            # build MIME message and attach to outer MIME message
-            msg = MIMEText(tmpl_profile, 'xml')
-            # indicate in header that this is an attachment
-            msg.add_header('Content-Disposition', 'attachment',
-                           filename=profname)
-            # attach this profile to the manifest and any other profiles
-            outermime.attach(msg)
-            msgtxt = _('Parsed and loaded profile: ') + profname
-            client_msg += [msgtxt]
-            logging.info(msgtxt)
 
     # any profiles and AI manifest have been attached to MIME message
     # specially format list of messages for display on AI client console
@@ -669,8 +696,9 @@
 if __name__ == '__main__':
     gettext.install("ai", "/usr/lib/locale")
     DEFAULT_PORT = libaimdns.getinteger_property(SRVINST, PORTPROP)
-    (PARAM_VERSION, SERVICE, FORM_DATA) = get_parameters(cgi.FieldStorage())
-    print >> sys.stderr, PARAM_VERSION, SERVICE, FORM_DATA
+    (PARAM_VERSION, SERVICE, NO_DEFAULT, FORM_DATA) = \
+        get_parameters(cgi.FieldStorage())
+    print >> sys.stderr, PARAM_VERSION, SERVICE, NO_DEFAULT, FORM_DATA
     if PARAM_VERSION == COMPATIBILITY_VERSION or SERVICE is None:
         # Old client
         (REQUEST_METHOD, REQUEST_PORT) = get_environment_information()
@@ -697,7 +725,8 @@
         # do manifest criteria match
         try:
             send_manifest(FORM_DATA, servicename=SERVICE,
-                          protocolversion=PARAM_VERSION)
+                          protocolversion=PARAM_VERSION,
+                          no_default=NO_DEFAULT)
         except StandardError:
             # send error report to client (through stdout), log
             print "Content-Type: text/html"     # HTML is following
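
A small standalone sketch of the parsing path for the new flag; the field
names ('version', 'service', 'no_default') come from get_parameters() above,
while the query string itself is only an illustration, not a documented
client request:

    import cgi
    import os

    # Simulate a CGI GET request that carries the new flag.
    os.environ['REQUEST_METHOD'] = 'GET'
    os.environ['QUERY_STRING'] = 'version=1.0&service=my_service&no_default=True'

    form = cgi.FieldStorage()
    no_default = False
    if 'no_default' in form:
        # Same comparison as get_parameters(): any capitalization of "true".
        no_default = (str(True).lower() == form['no_default'].value.lower())
    print(no_default)   # True
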
--- a/usr/src/cmd/ai-webserver/common_profile.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/common_profile.py	Tue May 31 14:21:09 2011 -0700
@@ -146,6 +146,10 @@
     intol = list() # for INTO clause
     vals = list() # for VALUES clause
     for crit in AIdb.getCriteria(queue, table, onlyUsed=False, strip=True):
+
+        # Determine if this crit is a range criteria or not.
+        is_range_crit = AIdb.isRangeCriteria(queue, crit, table)
+
         # Get the value from the manifest
         values = criteria[crit]
         # the critera manifest didn't specify this criteria
@@ -153,7 +157,7 @@
             # if the criteria we're processing is a range criteria, fill in
             # NULL for two columns, MINcrit and MAXcrit
             vals += ["NULL"]
-            if AIdb.isRangeCriteria(queue, crit, table):
+            if is_range_crit:
                 where += ["MIN" + crit + " IS NULL"]
                 where += ["MAX" + crit + " IS NULL"]
                 intol += ["MIN" + crit]
@@ -163,14 +167,14 @@
             else:
                 where += [crit + " IS NULL"]
                 intol += [crit]
-        # this is a single criteria (not a range)
-        elif isinstance(values, basestring):
-            # use lower case for text strings
+        # This is a value criteria (not a range).  'values' is a list
+        # with one or more items.
+        elif not is_range_crit:
             intol += [crit]
-            val = AIdb.format_value(crit, values).lower()
+            val = AIdb.format_value(crit, " ".join(values))
             where += [crit + "=" + val]
             vals += [val]
-        # Else the values are a list this is a range criteria
+        # Else this is a range criteria.  'values' is a two-item list
         else:
             # Set the MIN column for this range criteria
             if values[0] == 'unbounded':
@@ -317,7 +321,7 @@
         # gather this criteria's values
         man_criterion = criteria[crit]
         # check "value" criteria here (check the criteria exists in DB
-        if isinstance(man_criterion, basestring):
+        if not AIdb.isRangeCriteria(dbo.getQueue(), crit, table):
             # only check criteria in use in the DB
             if crit not in critlist:
                 raise SystemExit(_(
--- a/usr/src/cmd/ai-webserver/create_profile.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/create_profile.py	Tue May 31 14:21:09 2011 -0700
@@ -238,9 +238,19 @@
             val = criteria[crit]
             if not val:
                 continue
-            # MAC specified in criteria - also set client-ID in environment
-            if crit == 'mac':
-                if val[0] == val[1]:  # assume single client specified
+
+            # Determine if this crit is a range criteria or not.
+            is_range_crit = AIdb.isRangeCriteria(queue, crit,
+                table=AIdb.PROFILES_TABLE)
+
+            if is_range_crit:
+                # Range criteria must be specified as a single value to be
+                # supported for templating.
+                if val[0] != val[1]:
+                    continue
+
+                # MAC specified in criteria - also set client-ID in environment
+                if crit == 'mac':
                     val = val[0]
                     os.environ["AI_MAC"] = \
                         "%x:%x:%x:%x:%x:%x" % (
@@ -251,9 +261,8 @@
                                 int(val[8:10], 16),
                                 int(val[10:12], 16))
                     os.environ["AI_CID"] = "01" + str(val)
-            # IP or NETWORK specified in criteria
-            elif crit == 'network' or crit == 'ipv4':
-                if val[0] == val[1]:  # assume single IP or network specified
+                # IP or NETWORK specified in criteria
+                elif crit == 'network' or crit == 'ipv4':
                     val = val[0]
                     os.environ["AI_" + crit.upper()] = \
                         "%d.%d.%d.%d" % (
@@ -261,8 +270,13 @@
                                 int(val[3:6]),
                                 int(val[6:9]),
                                 int(val[9:12]))
+                else:
+                    os.environ["AI_" + crit.upper()] = val[0]
             else:
-                os.environ["AI_" + crit.upper()] = val[0]
+                # Value criteria must be specified as a single value to be
+                # supported for templating.
+                if len(val) == 1:
+                    os.environ["AI_" + crit.upper()] = val[0]
 
         tmpl_profile = raw_profile  # assume templating succeeded
         try:
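
The environment variables prepared above feed the {{AI_xxx}} template
substitution performed by sc.perform_templating(); the snippet below is only
a rough standalone illustration of that idea, not the real implementation,
and the SMF fragment is invented for the example:

    import os
    import re

    # As set by the loop above for a single-valued zonename criterion.
    os.environ["AI_ZONENAME"] = "z1"

    raw_profile = '<propval name="nodename" value="{{AI_ZONENAME}}"/>'

    # Rough idea only: swap each {{AI_xxx}} token for the matching
    # AI_xxx environment variable, leaving unknown tokens untouched.
    templated = re.sub(r'{{(AI_\w+)}}',
                       lambda m: os.environ.get(m.group(1), m.group(0)),
                       raw_profile)
    print(templated)   # <propval name="nodename" value="z1"/>
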
--- a/usr/src/cmd/ai-webserver/criteria_schema.rng	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/criteria_schema.rng	Tue May 31 14:21:09 2011 -0700
@@ -18,7 +18,7 @@
 
 CDDL HEADER END
 
-Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 -->
 
 <!--
@@ -57,9 +57,16 @@
 				<text/>
 			</attribute>
 			<choice>
-				<!-- Criteria can be range or single value -->
+				<!--
+				    Criteria can be a single value,
+				    a list of values, or a range.
+				-->
 				<element name="value">
-					<data type="string"/>
+					<list>
+						<oneOrMore>
+							<data type="string"/>
+						</oneOrMore>
+					</list>
 				</element>
 				<element name="range">
 					<ref name="nm_range_criteria"/>
--- a/usr/src/cmd/ai-webserver/publish_manifest.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/publish_manifest.py	Tue May 31 14:21:09 2011 -0700
@@ -195,27 +195,28 @@
     called by a main function, or the options parser, so it can potentially
     raise the SystemExit exception.
     Args: criteria in list format: [ criteria=value, criteria=value, ... ]
-          where value can be a:  single value
+          where value can be a:  single string value
+                                 space-separated string value (list of values)
                                  range (<lower>-<upper>)
     Returns: dictionary of criteria { criteria: value, criteria: value, ... }
-             with all keys and values in lower case
+             with all keys in lower case; values retain their case.
     Raises: ValueError on malformed name=value strings in input list.
     """
     cri_dict = {}
     for entry in criteria:
-        entries = entry.lower().partition("=")
+        entries = entry.partition("=")
 
         if entries[1]:
             if not entries[0]:
                 raise ValueError(_("Missing criteria name in "
                                    "'%s'\n") % entry)
-            elif entries[0] in cri_dict:
+            elif entries[0].lower() in cri_dict:
                 raise ValueError(_("Duplicate criteria: '%s'\n") %
                              entries[0])
             elif not entries[2]:
                 raise ValueError(_("Missing value for criteria "
                                    "'%s'\n") % entries[0])
-            cri_dict[entries[0]] = entries[2]
+            cri_dict[entries[0].lower()] = entries[2]
         else:
             raise ValueError(_("Criteria must be of the form "
                                "<criteria>=<value>\n"))
@@ -266,9 +267,13 @@
         # gather this criteria's values from the manifest
         man_criterion = criteria[crit]
 
-        # check "value" criteria here (check the criteria exists in DB, and
-        # then find collisions)
-        if isinstance(man_criterion, basestring):
+        # Determine if this crit is a range criteria or not.
+        is_range_crit = AIdb.isRangeCriteria(db.getQueue(), crit,
+            AIdb.MANIFESTS_TABLE)
+
+        # Process "value" criteria here (check if the criteria exists in
+        # DB, and then find collisions)
+        if not is_range_crit:
             # only check criteria in use in the DB
             if crit not in AIdb.getCriteria(db.getQueue(),
                                             onlyUsed=False, strip=False):
@@ -285,16 +290,19 @@
             # will iterate over a list of the form [manName, manInst, crit,
             # None]
             for row in db_criteria:
-                # check if the database and manifest values differ
-                if(str(row[Fields.CRIT]).lower() ==
-                   str(man_criterion).lower()):
-                    # record manifest name, instance and criteria name
-                    try:
-                        collisions[row[Fields.MANNAME],
-                                   row[Fields.MANINST]] += crit + ","
-                    except KeyError:
-                        collisions[row[Fields.MANNAME],
-                                   row[Fields.MANINST]] = crit + ","
+                # check if a value in the list of values to be added is equal
+                # to a value in the list of values for this criteria for this
+                # row
+                for value in man_criterion:
+                    if AIdb.is_in_list(crit, value, str(row[Fields.CRIT]),
+                        None):
+                        # record manifest name, instance and criteria name
+                        try:
+                            collisions[row[Fields.MANNAME],
+                                       row[Fields.MANINST]] += crit + ","
+                        except KeyError:
+                            collisions[row[Fields.MANNAME],
+                                       row[Fields.MANINST]] = crit + ","
 
         # This is a range criteria.  (Check that ranges are valid, that
         # "unbounded" gets set to 0/+inf, ensure the criteria exists
@@ -484,14 +492,31 @@
                                       crit.replace('MIN', '', 1).
                                       replace('MAX', '', 1)))
 
-            # the range did not collide or this is a single value (if we
-            # differ we can break out knowing we diverge for this
+            # Either the range did not collide or this is not a range
+            # criteria.  (If the value of this criteria in the db does
+            # not equal the value of this criteria for the set of criteria
+            # to check, we can break out knowing we diverge for this
             # manifest/instance)
-            elif str(db_criterion).lower() != str(man_criterion).lower():
-                # manifests diverge (they don't collide)
+            elif not db_criterion and not man_criterion:
+                # Neither the value for this criteria in the db nor
+                # the value for this criteria in the given set of
+                # criteria to check are populated.  Loop around to
+                # check the next criteria.
+                continue
+            elif not db_criterion or not man_criterion:
+                # One of the two are not populated, we can break knowing
+                # they're different.
                 break
+            else:
+                # Both are populated.  If none of the values in the list for
+                # this criteria to be added are equal to any of the values
+                # in the list for this criteria from the db, there will be
+                # no collision.  We can break out.
+                if not [value for value in man_criterion if \
+                    AIdb.is_in_list(crit, value, str(db_criterion), None)]:
+                    break
 
-        # end of for loop and we never broke out (diverged)
+        # end of for loop and we never broke out (collision)
         else:
             raise SystemExit(_("Error:\tManifest has same criteria as " +
                                "manifest: %s/%i!") %
@@ -545,15 +570,12 @@
             else:
                 query += "NULL,"
 
-        # this is a single criteria (not a range)
-        elif isinstance(values, basestring):
-            # translate "unbounded" to a database NULL
-            if values == "unbounded":
-                query += "NULL,"
-            else:
-                # use lower case for text strings
-                query += "'" + AIdb.sanitizeSQL(str(values).lower()) + "',"
-
+        # Else if this is a value criteria (not a range), insert the value
+        # as a space-separated list of values, which accounts for the case
+        # where a list of values has been given.
+        elif not crit.startswith('MAX'):
+            # Join the values of the list with a space separator.
+            query += "'" + AIdb.sanitizeSQL(" ".join(values)) + "',"
         # else values is a range
         else:
             for value in values:
@@ -695,8 +717,9 @@
     logging.debug('criteria file passed RNG validation')
 
     if errors:
-        raise ValueError(_("Error:\tFile %s failed validation:\n\t%s") %
-                         (criteria_path, root.message))
+        raise ValueError(_("Error:\tFile %s failed validation:\n"
+                           "\tline %s: %s") % (criteria_path, errors.line,
+                           errors.message))
     try:
         verifyXML.prepValuesAndRanges(root, db, table)
     except ValueError, err:
@@ -817,8 +840,8 @@
     def get_criterion(self, criterion):
         """
         Return criterion out of the criteria DOM
-        Returns: A list for range criterion with a min and max entry
-                 A string for value criterion
+        Returns: A two-item list for range criterion with a min and max entry
+                 A list of one or more values for value criterion
         """
 
         if self._criteria_root is None:
@@ -830,14 +853,11 @@
             # compare criteria name case-insensitive
             if crit.lower() == criterion.lower():
                 for child in tag.getchildren():
-                    if child.tag == "range":
-                        # this is a range response (split on white space)
+                    if child.text is not None:
+                        # split on white space for both values and ranges
                         return child.text.split()
-                    elif child.tag == "value":
-                        # this is a value response (strip white space)
-                        return child.text.strip()
                     # should not happen according to schema
-                    elif child.text is None:
+                    else:
                         raise AssertionError(_("Criteria contains no values"))
         return None
 
--- a/usr/src/cmd/ai-webserver/set_criteria.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/set_criteria.py	Tue May 31 14:21:09 2011 -0700
@@ -173,6 +173,9 @@
     for crit in AIdb.getCriteria(dbn.getQueue(), table=table, onlyUsed=False,
             strip=True):
 
+        # Determine if this crit is a range criteria or not.
+        is_range_crit = AIdb.isRangeCriteria(dbn.getQueue(), crit)
+
         # Get the value from the manifest
         values = criteria[crit]
 
@@ -184,22 +187,18 @@
             if not append:
                 # if the criteria we're processing is a range criteria, fill in
                 # NULL for two columns, MINcrit and MAXcrit
-                if AIdb.isRangeCriteria(dbn.getQueue(), crit):
+                if is_range_crit:
                     nvpairs.append("MIN" + crit + "=NULL")
                     nvpairs.append("MAX" + crit + "=NULL")
                 # this is a single value
                 else:
                     nvpairs.append(crit + "=NULL")
 
-        # this is a single criteria (not a range)
-        elif isinstance(values, basestring):
-            # translate "unbounded" to a database NULL
-            if values == "unbounded":
-                nvstr = crit + "=NULL"
-            else:
-                # use lower case for text strings
-                nvstr = crit + "='" + AIdb.sanitizeSQL(str(values).lower()) \
-                        + "'"
+        # Else if this is a value criteria (not a range), insert the
+        # value as a space-separated list of values in case a list of
+        # values has been given.
+        elif not is_range_crit:
+            nvstr = crit + "='" + AIdb.sanitizeSQL(" ".join(values)) + "'"
             nvpairs.append(nvstr)
 
         # Else the values are a list this is a range criteria
--- a/usr/src/cmd/ai-webserver/test/test_ai_database.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/test/test_ai_database.py	Tue May 31 14:21:09 2011 -0700
@@ -306,8 +306,10 @@
         self.assertTrue("MINnetwork <= 010000002000" in query_str)
         self.assertTrue("HEX(MINmac) <= HEX(x'aabbccddeeff'" in query_str)
         self.assertTrue("HEX(MAXmac) >= HEX(x'aabbccddeeff'" in query_str)
-        self.assertTrue("arch = LOWER('i86pc')" in query_str)
-        self.assertTrue("platform = LOWER('myplatform')" in query_str)
+        self.assertTrue("is_in_list('arch', 'i86pc', arch, 'None')" \
+            in query_str)
+        self.assertTrue("is_in_list('platform', 'myplatform', platform, " +
+            "'None')" in query_str)
         self.assertTrue("NOT ((arch IS NULL)" in query_str)
         self.assertFalse("(cpu IS NULL)" in query_str)
         self.assertTrue(query_str.endswith("LIMIT 1"))
@@ -488,7 +490,34 @@
                         'mac': 'bbbbccddeef0'
                        }
         manifest = AIdb.findManifest(my_crit_dict, self.aidb)
-        self.assertEquals(manifest, 0)
+        self.assertEquals(manifest, None)
+
+class is_in_list(unittest.TestCase):
+    '''Tests for is_in_list'''
+
+    def test_match_list_first_value(self):
+        ''' Test that we match a value that is the first in the list '''
+        value = "foo"
+        value_list = "foo bar bum"
+        self.assertTrue(AIdb.is_in_list('dummy_crit', value, value_list, None))
+
+    def test_match_list_not_first_value(self):
+        ''' Test that we match a value in the middle of the list '''
+        value = "foo"
+        value_list = "bar foo bum"
+        self.assertTrue(AIdb.is_in_list('dummy_crit', value, value_list, None))
+
+    def test_match_case_sensitive(self):
+        ''' Test that we match a value based on a case sensitive criteria '''
+        value = "FoO"
+        value_list = "bar FoO blah"
+        self.assertTrue(AIdb.is_in_list('zonename', value, value_list, None))
+
+    def test_no_match_case_sensitive(self):
+        ''' Test that we don't match a value on the wrong case.'''
+        value = "FoO"
+        value_list = "bar foo blah"
+        self.assertFalse(AIdb.is_in_list('zonename', value, value_list, None))
 
 
 if __name__ == '__main__':
--- a/usr/src/cmd/ai-webserver/test/test_create_profile.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/test/test_create_profile.py	Tue May 31 14:21:09 2011 -0700
@@ -203,12 +203,12 @@
 class CriteriaToDict(unittest.TestCase):
     '''Tests for criteria_to_dict'''
 
-    def test_lower_case_conversion(self):
-        '''Ensure keys and values converted to lower case'''
-        criteria = ['ARCH=SPARC']
+    def test_case_conversion(self):
+        '''Ensure keys converted to lower case, values kept as input'''
+        criteria = ['ARCH=Sparc']
         cri_dict = publish_manifest.criteria_to_dict(criteria)
         self.assertEquals(len(cri_dict), 1)
-        self.assertEquals(cri_dict['arch'], 'sparc')
+        self.assertEquals(cri_dict['arch'], 'Sparc')
 
     def test_range_values(self):
         '''Ensure ranges saved correctly'''
@@ -217,6 +217,14 @@
         self.assertEquals(len(cri_dict), 1)
         self.assertTrue(cri_dict['mem'], '1048-2096')
 
+    def test_list_values(self):
+        '''Ensure lists are saved correctly'''
+        criteria = ['zonename=z1 z2 Z3']
+        cri_dict = publish_manifest.criteria_to_dict(criteria)
+        self.assertEquals(len(cri_dict), 1)
+        self.assertEquals(cri_dict['zonename'], 'z1 z2 Z3')
+
+
     def test_multiple_entries(self):
         '''Ensure multiple criteria handled correctly'''
         criteria = ['ARCH=i86pc', 'MEM=1024', 'IPV4=129.224.45.185',
--- a/usr/src/cmd/ai-webserver/test/test_publish_manifest.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/test/test_publish_manifest.py	Tue May 31 14:21:09 2011 -0700
@@ -278,12 +278,12 @@
 class CriteriaToDict(unittest.TestCase):
     '''Tests for criteria_to_dict'''
 
-    def test_lower_case_conversion(self):
-        '''Ensure keys and values converted to lower case'''
-        criteria = ['ARCH=SPARC']
+    def test_case_conversion(self):
+        '''Ensure keys are converted to lower case, values kept as input'''
+        criteria = ['ARCH=Sparc']
         cri_dict = publish_manifest.criteria_to_dict(criteria)
         self.assertEquals(len(cri_dict), 1)
-        self.assertEquals(cri_dict['arch'], 'sparc')
+        self.assertEquals(cri_dict['arch'], 'Sparc')
 
     def test_range_values(self):
         '''Ensure ranges saved correctly'''
@@ -292,6 +292,13 @@
         self.assertEquals(len(cri_dict), 1)
         self.assertTrue(cri_dict['mem'], '1048-2096')
 
+    def test_list_values(self):
+        '''Ensure lists are saved correctly'''
+        criteria = ['zonename=z1 z2 Z3']
+        cri_dict = publish_manifest.criteria_to_dict(criteria)
+        self.assertEquals(len(cri_dict), 1)
+        self.assertEquals(cri_dict['zonename'], 'z1 z2 Z3')
+
     def test_multiple_entries(self):
         '''Ensure multiple criteria handled correctly'''
         criteria = ['ARCH=i86pc', 'MEM=1024', 'IPV4=129.224.45.185',
--- a/usr/src/cmd/ai-webserver/verifyXML.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/ai-webserver/verifyXML.py	Tue May 31 14:21:09 2011 -0700
@@ -192,7 +192,8 @@
     Returns: Nothing.  However, data may be checked and modified per above.
 
     Raises:
-    - Exception: Exactly 1 value (no spaces) expected for cpu criteria tag
+    - Exception: ValueError - a range criterion provided as a list of values
+                            - a non-range criterion provided as a range pair
     - Exceptions raised by database calls, and calls to
             - checkIPv4()
             - checkMAC()
@@ -204,7 +205,7 @@
     # Assume that MINxxx is a good enough check.
     # All criteria names in database are stored as lower case, except
     # for their "MIN" and "MAX" prefixes.
-    range_crit = []
+    range_crit = list()
     for crit_name in AIdb.getCriteria(database.getQueue(), table,
         onlyUsed=False, strip=False):
         if (crit_name.startswith("MIN")):
@@ -218,31 +219,31 @@
         # <range>'s here are a single element with a single
         # string containing two space-separated values for MIN and MAX
         # <value>'s here are a single element with a single
-        # string containing one value.
+        # string containing one value or a space-separated list of values.
         value_list = val_range.text.split()
         num_values = len(value_list)
 
-        # Val_range.tag will be either value or range.
-        # This is checked by the schema.
+        # val_range.tag will be either 'value' or 'range'.
+        # This is syntactically validated by the schema.
         if val_range.tag == "value":
-
-            # Allow values with spaces (which here look like
-            # multiple values), except for CPU items.  Non-CPU
-            # items are "arch" and "platform".
-            if num_values != 1 and crit_name == "cpu":
-                raise StandardError("Exactly 1 value " +
-                    "(no spaces) expected for cpu criteria tag")
+            # Allow a list of values to be provided with the 'value' tag.
+            # However, for criteria that can be provided as a range, we
+            # currently do not support lists for them.
+            if num_values > 1 and crit_name in range_crit:
+                raise ValueError("Criteria '" + crit_name + "' is not "
+                    "supported to be provided as a list of values")
         else:
-            if range_crit.count(crit_name) == 0:
-                raise StandardError("Range pair passed to " +
-                    "non-range criterion \"" + crit_name + "\"")
+            # For ranges, make sure it is indeed a range criteria
+            if crit_name not in range_crit:
+                raise ValueError("Criteria '" + crit_name + "' can not "
+                    "be passed as a range pair")
 
         # For value criteria, there is no need to do anything to store
         # single value into val_range.text.  It is already there.
         #
         # For some types supported by range criteria, some additional
-        # format checking is needed.  Also, single values passed as
-        # range criteria need to be split into a range where min=max.
+        # format checking is needed.  Also, range criteria that are passed
+        # as single values need to be split into a range where min=max.
 
         # Current criterion is a range criterion.
         if range_crit.count(crit_name) > 0:
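
The rewritten check above boils down to two rules: a <value> element may carry a space-separated list only for criteria that are not range-capable, and a <range> element is only legal for range-capable criteria. A standalone sketch of that rule, assuming range_crit is the list of range-capable names collected from the database as in the hunk above (hypothetical helper; in verifyXML.py the check runs inside the element loop):

    def check_criterion(crit_name, tag, text, range_crit):
        values = text.split()
        if tag == "value":
            # lists of values are allowed, but not for range criteria
            if len(values) > 1 and crit_name in range_crit:
                raise ValueError("Criteria '%s' is not supported to be "
                                 "provided as a list of values" % crit_name)
        else:
            # a range pair is only valid for a range criterion
            if crit_name not in range_crit:
                raise ValueError("Criteria '%s' can not be passed as a "
                                 "range pair" % crit_name)

    # 'zonename' is not range-capable, so a list is fine:
    check_criterion("zonename", "value", "z1 z2 z3", range_crit=["mem", "ipv4"])
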
--- a/usr/src/cmd/auto-install/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -28,7 +28,7 @@
 clobber:=	TARGET=	clobber
 install:=	TARGET=	install
 
-SUBDIRS=	svc config checkpoints
+SUBDIRS=	svc config checkpoints manifest profile
 
 PROGS=		ai_get_manifest ai_sd auto-install
 
@@ -44,25 +44,16 @@
 
 PYCMODULES =	$(PYMODULES:%.py=%.pyc)
 
-MANIFEST_FILES = ai_manifest.xml \
-		 default.xml
-
 VERSION_FILE = version
 
-SC_PROFILE_FILES = static_network.xml enable_sci.xml sc_sample.xml
-
 ROOTPROGS=	$(PROGS:%=$(ROOTUSRBIN)/%)
 
 ROOTPYMODULES=	$(PYMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAI)/%)
 
 ROOTPYCMODULES= $(PYCMODULES:%=$(ROOTPYTHONVENDORSOLINSTALLAI)/%)
 
-ROOTMANIFESTS= $(MANIFEST_FILES:%=$(ROOTAUTOINST)/%)
-
 ROOTVERSION= $(VERSION_FILE:%=$(ROOTAUTOINST)/%)
 
-ROOTSCPROFILES= $(SC_PROFILE_FILES:%=$(ROOTAUTOINSTSCPROFILES)/%)
-
 all: python $(PROGS)
 
 install: all .WAIT $(ROOTPROGS) \
@@ -71,10 +62,8 @@
 	$(ROOTPYTHONVENDOR) \
 	$(ROOTPYTHONVENDORSOLINSTALL) \
 	$(ROOTPYTHONVENDORSOLINSTALLAI) \
-	$(ROOTMANIFESTS) \
 	$(ROOTVERSION) \
 	$(ROOTPYMODULES) $(ROOTPYCMODULES) \
-	$(ROOTSCPROFILES) \
 	$(SUBDIRS)
 
 python:
--- a/usr/src/cmd/auto-install/__init__.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -35,6 +35,9 @@
 from solaris_install.data_object.cache import DataObjectCache
 import ai_instance
 
+# AI TransferFiles checkpoint name
+TRANSFER_FILES_CHECKPOINT = "transfer-ai-files"
+
 # Register local Data Objects, use relative module reference.
 DataObjectCache.register_class(ai_instance)
 
--- a/usr/src/cmd/auto-install/ai_get_manifest.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/ai_get_manifest.py	Tue May 31 14:21:09 2011 -0700
@@ -47,7 +47,6 @@
 VERSION_FILE = '/usr/share/auto_install/version'
 
 # constants for generating temporary files with unique names for SMF profiles
-SC_OUTPUT_DIRECTORY = system_temp_path('profile')  # work dir for profiles
 SC_PREFIX = 'profile_'
 SC_EXTENSION = '.xml'
 
@@ -602,7 +601,8 @@
     'mac': (AICriteriaMAC, "Client MAC address"),
     'mem': (AICriteriaMemSize, "Physical memory size"),
     'network': (AICriteriaNetwork, "Client network address"),
-    'platform': (AICriteriaPlatform, "Client platform")
+    'platform': (AICriteriaPlatform, "Client platform"),
+    'zonename': (None, "Zonename")
 }
 
 
@@ -612,7 +612,9 @@
     """
     sys.stderr.write(_("Usage:\n"
                        "    %s -s service_list -o destination"
-                       " [-d debug_level] [-l] [-h]\n") %
+                       " -p profile_destination_dir"
+                       " [-c criteria=value ... ]"
+                       " [-d debug_level] [-l] [-h] [-e]\n") %
                        os.path.basename(sys.argv[0]))
     sys.exit(1)
 
@@ -647,7 +649,8 @@
 
 
 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-def ai_get_http_file(address, service_name, file_path, method, nv_pairs):
+def ai_get_http_file(address, service_name, file_path, method, nv_pairs,
+                     no_default=False):
     """		Description: Downloads file from url using HTTP protocol
 
         Parameters:
@@ -657,6 +660,8 @@
             method       - 'POST' or 'GET'
             nv_pairs     - dictionary containing name-value pairs to be sent
                            to the server using 'POST' method
+            no_default   - if True, do not fall back to requesting a default
+                           manifest when the criteria fail to match one.
 
         Returns:
             file
@@ -688,6 +693,7 @@
                                       'version': version,
                                       'service': service_name,
                                       'logging': AIGM_LOG.get_debug_level(),
+                                      'no_default': no_default,
                                       'postData': post_data})
             else:
                 # compatibility mode only needs to send the data
@@ -817,18 +823,23 @@
     opts_args = cli_opts_args[1:]
 
     try:
-        opts = getopt.getopt(opts_args, "s:o:d:lh")[0]
+        opts = getopt.getopt(opts_args, "c:d:ehlo:p:s:")[0]
     except getopt.GetoptError:
         AIGM_LOG.post(AILog.AI_DBGLVL_ERR,
                       "Invalid options or arguments provided")
         usage()
 
-    service_list = system_temp_path("service_list")
-    manifest_file = system_temp_path("manifest.xml")
+    criteria_list = list()
+    service_list = None
+    manifest_file = None
+    profile_dir = None
     list_criteria_only = False
+    no_default = False
 
     for option, argument in opts:
-        if option == "-s":
+        if option == "-c":
+            criteria_list.append(argument)
+        elif option == "-s":
             service_list = argument
         elif option == "-o":
             manifest_file = argument
@@ -836,22 +847,65 @@
             AIGM_LOG.set_debug_level(int(argument))
         elif option == "-l":
             list_criteria_only = True
+        elif option == "-p":
+            profile_dir = argument
+        elif option == "-e":
+            no_default = True
         elif option == "-h":
             usage()
 
+    if service_list is None or manifest_file is None or profile_dir is None:
+        AIGM_LOG.post(AILog.AI_DBGLVL_ERR,
+            "Invalid options or arguments provided")
+        usage()
+
     AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
                   "Service list: %s", service_list)
 
     AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
                   "Manifest file: " + manifest_file)
 
+    AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
+                  "Profile directory: " + profile_dir)
+
+    if len(criteria_list) > 0:
+        AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
+                      "Criteria list: " + str(criteria_list))
+
     ai_criteria_known = {}
 
-    # Obtain all available information about client
-    for key in AI_CRITERIA_SUPPORTED.keys():
-        ai_crit = AI_CRITERIA_SUPPORTED[key][0]()
-        if ai_crit.is_known():
-            ai_criteria_known[key] = ai_crit.get()
+    # If criteria specified on the command line, use that as
+    # our known criteria, otherwise get criteria from system.
+    if len(criteria_list) > 0:
+        for entry in criteria_list:
+            entries = entry.partition("=")
+
+            if entries[1]:
+                if not entries[0]:
+                    raise ValueError(_("Missing criteria name in '%s'\n") %
+                                       entry)
+                elif entries[0].lower() in ai_criteria_known:
+                    raise ValueError(_("Duplicate criteria: '%s'\n") %
+                                       entries[0])
+                elif not entries[2]:
+                    raise ValueError(_("Missing value for criteria '%s'\n") %
+                                       entries[0])
+
+                if entries[0] not in AI_CRITERIA_SUPPORTED:
+                    raise ValueError(_("Unsupported criteria: '%s'\n") %
+                                       entries[0])
+
+                ai_criteria_known[entries[0].lower()] = entries[2]
+            else:
+                raise ValueError(_("Criteria must be of the form "
+                               "<criteria>=<value>\n"))
+    else:
+        # Obtain all available information about client
+        for key in AI_CRITERIA_SUPPORTED.keys():
+            if AI_CRITERIA_SUPPORTED[key][0] is not None:
+                ai_crit = AI_CRITERIA_SUPPORTED[key][0]()
+                if ai_crit.is_known():
+                    ai_criteria_known[key] = ai_crit.get()
 
     # List all criteria which client can understand and provide
     AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
@@ -907,7 +961,8 @@
         http_resp, ret, content_type = \
                 ai_get_http_file(ai_service, ai_name,
                                  "/cgi-bin/cgi_get_manifest.py",
-                                 'POST', ai_criteria_known)
+                                 'POST', ai_criteria_known,
+                                 no_default=no_default)
         #
         # If valid manifest was provided, it is not necessary
         # to connect next AI service,
@@ -928,14 +983,15 @@
             mime_response = "Content-Type: %s\n%s" % (content_type, http_resp)
             # by design, response is MIME-encoded, multipart
             if mime_response is not None:
-                cleanup_earlier_run()  # delete any profiles from previous runs
+                # delete any profiles from previous runs
+                cleanup_earlier_run(profile_dir)
                 # parse the MIME response
                 parse = Parser()
                 msg = parse.parsestr(mime_response)
                 # handle each self-identifying part
                 for imsg in msg.walk():
                     # write out manifest, any profiles, console messages
-                    if handle_mime_payload(imsg, manifest_file):
+                    if handle_mime_payload(imsg, manifest_file, profile_dir):
                         ai_manifest_obtained = True
             if ai_manifest_obtained:  # manifest written by MIME handler
                 service_list_fh.close()
@@ -965,7 +1021,12 @@
     if not ai_manifest_obtained:
         AIGM_LOG.post(AILog.AI_DBGLVL_ERR,
                       "None of contacted AI services provided valid manifest")
-        return 2
+        if no_default:
+            # If a default manifest is not requested, it's OK that we didn't
+            # obtain a manifest; return 0.
+            return 0
+        else:
+            return 2
 
     # Save the manifest
     AIGM_LOG.post(AILog.AI_DBGLVL_INFO,
@@ -984,7 +1045,7 @@
 
 
 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-def handle_mime_payload(msg, manifest_file):
+def handle_mime_payload(msg, manifest_file, profile_dir):
     """ Given a MIME part of the manifest locator response,
     identify it and handle appropriately
 
@@ -1015,7 +1076,7 @@
 
         # not manifest, assume the attachment is a profile, and output it
         pname = write_profile_file(msg.get_filename(), payload,
-                                   SC_OUTPUT_DIRECTORY, SC_EXTENSION,
+                                   profile_dir, SC_EXTENSION,
                                    SC_PREFIX)
         if pname:
             AIGM_LOG.post(AILog.AI_DBGLVL_INFO, 'Wrote profile %s.' % pname)
@@ -1030,20 +1091,20 @@
 
 
 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-def cleanup_earlier_run():
+def cleanup_earlier_run(profile_dir):
     """
     From the designated profile output directory,
     purge any profiles left over from a previous run.
     """
     try:
-        cleanlist = os.listdir(SC_OUTPUT_DIRECTORY)
+        cleanlist = os.listdir(profile_dir)
     except OSError, err:
         if err.errno == ENOENT:  # exists
             return
         raise
     for fclean in cleanlist:
         if fclean.startswith(SC_PREFIX):  # uniquely identify profiles
-            os.unlink(os.path.join(SC_OUTPUT_DIRECTORY, fclean))
+            os.unlink(os.path.join(profile_dir, fclean))
 
 
 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
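
With the changes above, ai_get_manifest.py no longer assumes it is probing the local client: -s, -o and -p are now required, -c supplies criteria (such as zonename) explicitly, and -e suppresses the fall-back to a default manifest. An illustrative invocation under those assumptions (the paths and zone name are made up; the ai_configuration checkpoint added later in this changeset builds essentially this command):

    from subprocess import check_call

    check_call(["/usr/bin/ai_get_manifest",
                "-e",                                   # no default-manifest fallback
                "-s", "/system/volatile/service_list",  # AI service list file
                "-o", "/tmp/zone1/ai_manifest.xml",     # manifest destination
                "-p", "/tmp/zone1/profiles",            # profile output directory
                "-c", "zonename=zone1"])                # explicit criteria
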
--- a/usr/src/cmd/auto-install/ai_manifest.xml	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,268 +0,0 @@
-<?xml version="1.0"?>
-<!--
-CDDL HEADER START
-
-The contents of this file are subject to the terms of the
-Common Development and Distribution License (the "License").
-You may not use this file except in compliance with the License.
-
-You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-or http://www.opensolaris.org/os/licensing.
-See the License for the specific language governing permissions
-and limitations under the License.
-
-When distributing Covered Code, include this CDDL HEADER in each
-file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-If applicable, add the following below this CDDL HEADER, with the
-fields enclosed by brackets "[]" replaced with your own identifying
-information: Portions Copyright [yyyy] [name of copyright owner]
-
-CDDL HEADER END
-
-Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
--->
-<!--
-===============================================================================
-DTD sample manifest for Automatic Installer input manifest specification.
-===============================================================================
--->
-<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
-<auto_install>
-  <!--
-        "auto_reboot" set to "true" may be an issue for x86 machines.
-        The boot order is not guaranteed and may cause unexpected
-        behavior. If auto_reboot is not desired, set auto_reboot="false".
-
-        The name of the manifest is obtained from (in this order):
-        1) the name from the installadm add-manifest command line "-m" option.
-        2) a name attribute in the manifest, e.g.: 
-           <ai_instance name="my_ai_manifest" auto_reboot="true">
-        3) manifest filename 
-    -->
-  <ai_instance auto_reboot="true">
-    <!--
-      =======================================================================
-      <target> - selections for AI target Device specification
-
-      Disk criteria are divided into three mutually exclusive groups:
-
-      G1 - deterministic disk criteria
-      ................................
-        * disk/iscsi parameters
-        * disk/disk_name, with name_type attribute:
-          one of ctd, volid, devpath or devid
-
-      G2 - non-deterministic disk criteria
-      ..........................
-        * disk/disk_prop: Any of dev_type, dev_vendor or
-          dev_size
-
-      G3 - keyword disk criteria
-      ...........................
-        * disk/disk_keyword: "boot_disk"
-
-      Schema ai.dtd enforces following policy:
-
-      * criteria in group G1 are mutually exclusive - only
-        one can be specified at a time
-
-      * groups G1, G2 and G3 are mutually exclusive - i.e.
-        if criteria from G1 is specified, no criteria
-        from G2 or G3 are allowed and vice versa
-
-      * multiple criteria from G2 can be specified
-      =======================================================================
-    -->
-    <target>
-      <disk>
-        <!-- G1 -->
-        <!--
-          c#t#d# device name like c0t0d0 or 
-          MPXIO name like c0t2000002037CD9F72d0
-        -->
-        <disk_name name="c1t0d0" name_type="ctd"/>
-        <!-- volume name set for instance by means
-          of format(1M) command
-        -->
-        <!--
-        <disk_name name="ai-disk" name_type="volid"/>
-        -->
-        <!-- device id - e.g. can be obtained by means of
-          iostat(1M) -iEn
-        -->
-        <!--
-        <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
-        -->
-        <!-- device path under /devices directory, e.g.
-          /pci@1e,600000/pci@0/pci@9/pci@0/scsi@1/sd@0,0
-        -->
-        <!--
-        <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
-        -->
-        <!--
-          ISCSI target device
-
-        <iscsi name="c0d2E0001010F68">
-          <ip>192.168.1.34</ip>
-        </iscsi> 
-        -->
-        <!-- G2 -->
-        <!--
-        <disk_prop dev_vendor="hitachi" dev_size="20480mb"/>
-
-        or 
-
-        <disk_prop dev_vendor="hitachi"/>
-
-        or
-
-        <disk_prop dev_size="20480mb"/>
-        -->
-        <!-- G3 -->
-        <!--
-        <disk_keyword key="boot_disk"/>
-        -->
-        <!--
-          On X86 machines, Slices exist within partitions only
-        -->
-        <!--
-          Uncomment this to force AI to find an existing Solaris
-          partition.
-        -->
-        <!--
-        <partition action="use_existing_solaris2">
-          <slice name="0">
-            <size val="20480mb"/>
-          </slice>
-          <slice name="4">
-            <size val="20480mb"/>
-          </slice>
-        </partition>
-
-        or, use the following to create a Solaris partition
-        -->
-        <partition name="1" part_type="191">
-          <size start_sector="200" val="40960mb"/>
-          <slice name="0">
-            <size val="20480mb"/>
-          </slice>
-          <slice name="4">
-            <size val="20480mb"/>
-          </slice>
-        </partition>
-        <!-- Define some other partitions to create too -->
-        <partition name="2" part_type="99">
-          <size start_sector="200" val="20480mb"/>
-        </partition>
-        <partition name="4" part_type="99">
-          <size start_sector="2000" val="20480mb"/>
-        </partition>
-        <!--
-        On SPARC systems, only specify the Slice layout.
-        -->
-        <!--
-            <slice name="0">
-              <size val="20480mb"/>
-            </slice>
-            <slice name="4">
-              <size val="20480mb"/>
-            </slice>
-        -->
-      </disk>
-    </target>
-    <software name="ips" type="IPS">
-      <source>
-        <publisher name="solaris">
-          <origin name="http://pkg.oracle.com/solaris/release"/>
-        </publisher>
-      </source>
-      <!--
-        By default the latest build available, in the
-        specified IPS repository, is installed.
-        if another build is required, the build number has
-        to be appended to the 'entire' package in following
-        form:
-      <name="[email protected]#"/>
-      -->
-      <software_data>
-        <name>pkg:/entire</name>
-        <name>pkg:/server_install</name>
-      </software_data>
-    </software>
-    <add_drivers>
-      <!--
-            Driver Updates: This section is for adding driver packages to the
-            boot environment before the installation takes place.  The
-            installer can then access all devices on the system.  The
-            packages installed in the boot environment will also be installed
-            on the target.
-
-            A <search_all> entry performs a search for devices which are
-            missing their drivers.  A repository publisher and location
-            may be specified, and that repository and its database will
-            be used.  If no publisher and location is specified, the
-            configured repositories will be used.
-            (See pkg publisher command.)  If <addall> is specified as
-            "true", then drivers the database says are third-party drivers
-            will be added like all others; otherwise third-party drivers
-            will not be added.
-
-                <search_all addall="true">
-                    <source>
-                        <publisher name="solaris">
-                            <origin name="http://pkg.oracle.com/solaris/release"/>
-                        </publisher>
-                    </source>
-                </search_all>
-
-            <software> entries are user-provided specifications of packages
-            needed in order to perform the install.  types are P5I, SVR4, DU.
-            A <software_data> action of "noinstall" inhibits adding to target.
-
-            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
-            Path may be to a local file or an http or ftp specification.
-                <software>
-                    <source>
-                        <publisher>
-                            <origin
-                                name=
-        "http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
-                        </publisher>
-                    </source>
-                    <software_data type="P5I"/>
-                </software>
-
-            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
-            to the directory containing the packages.  The 
-            software/software_data/name refers tp the package's top level
-            directory or the package's datastream file.
-
-                <software>
-                    <source>
-                        <publisher>
-                            <origin name="/export/package_dir"/>
-                        </publisher>
-                    </source>
-                    <software_data type="SVR4">
-                        <name>my_disk_driver.d</name>
-                    </software_data>
-                </software>
-
-            DU: An ITU (Install Time Update) or Driver Update image.
-            The source/publisher/origin refers to the path just above the 
-            image's DU directory (if expanded) or the name of the .iso image.  
-            All packages in the image will be added.
-
-                <software>
-                    <source>
-                        <publisher>
-                            <origin name="/export/duimages/mydriver.iso"/>
-                        </publisher>
-                    </source>
-                    <software_data type="DU"/>
-                </software>     
-      -->
-      <search_all/>
-    </add_drivers>
-  </ai_instance>
-</auto_install>
--- a/usr/src/cmd/auto-install/auto_install.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/auto_install.py	Tue May 31 14:21:09 2011 -0700
@@ -43,11 +43,16 @@
 
 from solaris_install import \
     ApplicationData, system_temp_path, post_install_logs_path, Popen
+from solaris_install.auto_install import TRANSFER_FILES_CHECKPOINT
 from solaris_install.auto_install.ai_instance import AIInstance
 from solaris_install.auto_install.checkpoints.dmm import \
     DERIVED_MANIFEST_DATA, DerivedManifestData
 from solaris_install.auto_install.checkpoints.target_selection import \
     SelectionError, TargetSelection
+from solaris_install.auto_install.checkpoints.target_selection_zone import \
+    TargetSelectionZone
+from solaris_install.auto_install.checkpoints.ai_configuration import \
+    AI_SERVICE_LIST_FILE, AIConfigurationError
 from solaris_install.auto_install.utmpx import users_on_console
 from solaris_install.boot import boot
 from solaris_install.data_object import ParsingError, \
@@ -59,11 +64,14 @@
 from solaris_install.ict import initialize_smf, update_dumpadm, ips, \
     device_config, apply_sysconfig, boot_archive, transfer_files, \
     create_snapshot, setup_swap
+from solaris_install.ict.apply_sysconfig import APPLY_SYSCONFIG_DICT, \
+    APPLY_SYSCONFIG_PROFILE_KEY
 from solaris_install.logger import FileHandler, ProgressHandler, MAX_INT
 from solaris_install.logger import INSTALL_LOGGER_NAME
 from solaris_install.manifest.parser import ManifestError, \
     MANIFEST_PARSER_DATA
 from solaris_install.target import Target, discovery, instantiation
+from solaris_install.target.instantiation_zone import ALT_POOL_DATASET
 from solaris_install.target.logical import BE, Logical
 from solaris_install.transfer import create_checkpoint
 from solaris_install.transfer.info import Software, Destination, Image, \
@@ -85,17 +93,17 @@
     TARGET_INSTANTIATION_CHECKPOINT = 'target-instantiation'
     FIRST_TRANSFER_CHECKPOINT = 'first-transfer'
     MANIFEST_CHECKPOINTS = ["derived-manifest", "manifest-parser"]
-    CHECKPOINTS_BEFORE_TI = ["target-discovery", "target-selection",
-                             TARGET_INSTANTIATION_CHECKPOINT]
+    CHECKPOINTS_BEFORE_TI = ["target-discovery", "target-selection", \
+        "ai-configuration", TARGET_INSTANTIATION_CHECKPOINT]
     CHECKPOINTS_BEFORE_TI.extend(MANIFEST_CHECKPOINTS)
     CHECKPOINTS_BEFORE_IPS = list(CHECKPOINTS_BEFORE_TI)
-    TRANSFER_FILES_CHECKPOINT = 'transfer-ai-files'
     INSTALLED_ROOT_DIR = "/a"
 
     def __init__(self, args=None):
         """
         Class constructor
         """
+        self.installed_root_dir = self.INSTALLED_ROOT_DIR
         self.auto_reboot = False
         self.doc = None
         self.exitval = self.AI_EXIT_SUCCESS
@@ -112,8 +120,45 @@
         self.engine = InstallEngine(debug=True, stop_on_error=True)
         self.doc = self.engine.data_object_cache
 
-        # Add ApplicationData to the DOC
-        self._app_data = ApplicationData("auto-install")
+        if self.options.zone_pool_dataset is not None:
+            # If we're installing a zone root, generate a work_dir
+            # location based on the current PID.
+            work_dir = "/system/volatile/install." + str(os.getpid())
+
+            # Add ApplicationData to the DOC
+            self._app_data = ApplicationData("auto-install", work_dir=work_dir)
+            self._app_data.data_dict[ALT_POOL_DATASET] = \
+                self.options.zone_pool_dataset
+
+            # Set installed_root_dir to be based off work_dir
+            self.installed_root_dir = work_dir + self.INSTALLED_ROOT_DIR
+        else:
+            # Add ApplicationData to the DOC
+            self._app_data = ApplicationData("auto-install")
+
+        # Add profile location to the ApplySysconfig checkpoint's data dict.
+        if self.options.profile is not None:
+            # Try to find the ApplySysconfig data dict from
+            # the DOC in case it already exists.
+            as_doc_dict = None
+            as_doc_dict = self.doc.volatile.get_first_child( \
+                name=APPLY_SYSCONFIG_DICT)
+
+            if as_doc_dict is None:
+                # Initialize new dictionary in DOC
+                as_dict = {APPLY_SYSCONFIG_PROFILE_KEY : self.options.profile}
+                as_doc_dict = DataObjectDict(APPLY_SYSCONFIG_DICT, as_dict)
+                self.doc.volatile.insert_children(as_doc_dict)
+            else:
+                # Add to existing dictionary in DOC
+                as_doc_dict.data_dict[APPLY_SYSCONFIG_PROFILE_KEY] = \
+                    self.options.profile
+
+        # Add service list file to ApplicationData
+        if self.options.service_list_file is not None:
+            self._app_data.data_dict[AI_SERVICE_LIST_FILE] = \
+                self.options.service_list_file
+
         self.doc.persistent.insert_children(self._app_data)
 
         # Clear error service
@@ -133,16 +178,24 @@
                 (self.options.stop_checkpoint))
 
         if not self.options.list_checkpoints:
-
-            if self.options.manifest:
-                self.logger.info("Using Profile: %s" % (self.options.manifest))
+            if self.manifest:
+                self.logger.info("Using XML Manifest: %s" % (self.manifest))
 
             if self.derived_script:
                 self.logger.info("Using Derived Script: %s" % \
                     (self.derived_script))
 
-            if self.manifest:
-                self.logger.info("Using Manifest: %s" % (self.manifest))
+            if self.options.profile:
+                self.logger.info("Using profile specification: %s" % \
+                    (self.options.profile))
+
+            if self.options.service_list_file:
+                self.logger.info("Using service list file: %s" % \
+                    (self.options.service_list_file))
+
+            if self.options.zone_pool_dataset:
+                self.logger.info("Installing zone under dataset: %s" % \
+                    (self.options.zone_pool_dataset))
 
             if self.options.dry_run:
                 self.logger.info("Dry Run mode enabled")
@@ -152,16 +205,21 @@
         Method to parse command line arguments
         """
 
-        usage = "%prog -m <manifest>\n" + \
+        usage = "%prog -m <manifest> [-c <profile/dir>]\n"\
             "\t[-i - Stop installation before Target Instantiation |\n" + \
             "\t -I - Stop installation after Target Instantiation]\n" + \
-            "\t[-n - Enable dry run mode]"
+            "\t[-n - Enable dry run mode]\n" + \
+            "\t[-r <service_list_file>]" + \
+            "\t[-Z <zone_pool_dataset]"
 
         parser = optparse.OptionParser(usage=usage)
 
         parser.add_option("-m", "--manifest", dest="manifest",
             help="Specify script or XML manifest to use")
 
+        parser.add_option("-c", "--profile", dest="profile",
+            help="Specify a profile or directory of profiles")
+
         parser.add_option("-i", "--break-before-ti", dest="break_before_ti",
             action="store_true", default=False,
             help="Break execution before Target Instantiation, testing only")
@@ -174,16 +232,23 @@
             action="store_true", default=False,
             help="Enable dry-run mode for testing")
 
-        parser.add_option("-l", "--list-checkpoints",  dest="list_checkpoints",
+        parser.add_option("-l", "--list-checkpoints", dest="list_checkpoints",
             action="store_true", default=False,
             help=optparse.SUPPRESS_HELP)
 
         parser.add_option("-s", "--stop-checkpoint", dest="stop_checkpoint",
             help=optparse.SUPPRESS_HELP)
 
+        parser.add_option("-r", "--service-list-file",
+            dest="service_list_file", help="Specify service list file")
+
+        parser.add_option("-Z", "--zone-pool-dataset",
+            dest="zone_pool_dataset",
+            help="Specify zone pool dataset to install into")
+
         (options, args) = parser.parse_args(args)
 
-        # If manifest argument provided, determine if script or manifest
+        # If manifest argument provided, determine if script or XML manifest
         if options.manifest:
             (self.derived_script, self.manifest) =  \
                 self.determine_manifest_type(options.manifest)
@@ -217,7 +282,7 @@
     @staticmethod
     def determine_manifest_type(manifest):
         """
-        Determine of manifest file argument is a script or xml manifest.
+        Determine if manifest file argument is a script or xml manifest.
         Simply check reading first two characters of file for #!
         """
         derived_script = None
@@ -280,7 +345,8 @@
 
         # Log progress and info messages to the console.
         self.progress_ph = AIProgressHandler(self.logger,
-            skip_console_msg=self.options.list_checkpoints)
+            skip_console_msg=(self.options.list_checkpoints or \
+                              self.options.zone_pool_dataset))
         self.progress_ph.start_progress_server()
         self.logger.addHandler(self.progress_ph)
 
@@ -295,7 +361,7 @@
         # create a install_log file handler and add it to the ai_logger
 
         # set the logfile names
-        install_log = self._app_data.work_dir + self.INSTALL_LOG
+        install_log = os.path.join(self._app_data.work_dir, self.INSTALL_LOG)
         self.install_log_fh = FileHandler(install_log)
 
         self.install_log_fh.setLevel(logging.DEBUG)
@@ -327,16 +393,11 @@
         new_be = self.be
 
         if new_be is not None:
-            if new_be.exists:
-                # Assumes BE is still mounted, should be, if it exists.
-                self.logger.info("Transferring log to %s" %
-                    new_be.mountpoint + self.BE_LOG_DIR)
-                self.install_log_fh.transfer_log(
-                    new_be.mountpoint + self.BE_LOG_DIR, isdir=True)
-            else:
-                self.logger.error(
-                    "Unable to determine BE mountpoint")
-                return False
+            # Assumes BE is still mounted, should be, if it exists.
+            self.logger.debug("Transferring log to %s" %
+                new_be.mountpoint + self.BE_LOG_DIR)
+            self.install_log_fh.transfer_log(
+                new_be.mountpoint + self.BE_LOG_DIR, isdir=True)
         else:
             self.logger.error(
                 "Unable to determine location to transfer logs to")
@@ -348,16 +409,19 @@
         """Do some clean up and set exit code.
         """
 
+        unmount_be = False
+
         self.exitval = error_val
         if not self.options.list_checkpoints:
             if error_val in [self.AI_EXIT_SUCCESS, self.AI_EXIT_AUTO_REBOOT]:
-                if error_val == self.AI_EXIT_AUTO_REBOOT:
-                    self.logger.info("Automated Installation succeeded.")
-                    self.logger.info("System will be rebooted now")
-                else:
-                    self.logger.info("Automated Installation succeeded.")
-                    self.logger.info("You may wish to reboot the system at "
-                                     "this time.")
+                self.logger.info("Automated Installation succeeded.")
+                if self.options.zone_pool_dataset is None:
+                    if error_val == self.AI_EXIT_AUTO_REBOOT:
+                        self.logger.info("System will be rebooted now")
+                    else:
+                        self.logger.info("You may wish to reboot the system at "
+                                         "this time.")
+                unmount_be = True
             else:
                 # error_val == self.AI_EXIT_FAILURE:
                 self.logger.info("Automated Installation Failed")
@@ -370,13 +434,16 @@
 
         # Only attempt to unmount BE if Target Instantiation has completed
         if self.options.stop_checkpoint not in self.CHECKPOINTS_BEFORE_TI:
-            # Unmount the BE now.
-            if self.be is not None:
-                try:
-                    self.be.unmount(self.options.dry_run)
-                except (RuntimeError) as ex:
-                    print ex  # Print since logger is closed now.
-                    self.exitval = self.AI_EXIT_FAILURE
+            # If we didn't fail, unmount the BE now.
+            if unmount_be:
+                if self.be is not None:
+                    try:
+                        self.be.unmount(self.options.dry_run,
+                            altpool=self.options.zone_pool_dataset)
+                    except RuntimeError as ex:
+                        # Use print since logger is now closed.
+                        print >> sys.stderr, str(ex)
+                        self.exitval = self.AI_EXIT_FAILURE
 
     def import_preserved_zpools(self):
         '''
@@ -495,7 +562,7 @@
                 self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
                 return
 
-        # specifying to list checkpoints, do so then exit
+        # If specifying to list checkpoints, do so then exit
         # List of checkpoints available depend on what has just been
         # registered.
         if self.options.list_checkpoints:
@@ -514,7 +581,7 @@
                 # Set the HTTP Proxy environment variable
                 os.environ["http_proxy"] = ai_instance.http_proxy
 
-        self.logger.info("Auto Reboot set to : %s" % (self.auto_reboot))
+        self.logger.debug("Auto Reboot set to: %s" % (self.auto_reboot))
 
         # Ensure preserved zpools are online (imported)
         if not self.import_preserved_zpools():
@@ -533,7 +600,8 @@
                 if not self.__transfer_install_log():
                     self.__cleanup_before_exit(self.AI_EXIT_FAILURE)
                 else:
-                    if self.auto_reboot:
+                    if self.auto_reboot and \
+                        self.options.zone_pool_dataset is None:
                         self.__cleanup_before_exit(self.AI_EXIT_AUTO_REBOOT)
                     else:
                         self.__cleanup_before_exit(self.AI_EXIT_SUCCESS)
@@ -611,7 +679,7 @@
                 return True
 
             if not self.options.list_checkpoints:
-                self.logger.info("Registering Manifest Parser Checkpoint")
+                self.logger.debug("Registering Manifest Parser Checkpoint")
 
             self.engine.register_checkpoint("manifest-parser",
                                     "solaris_install.manifest.parser",
@@ -633,10 +701,10 @@
         # Execute Checkpoints
         if not self.options.list_checkpoints:
             if self.derived_script:
-                self.logger.info("Executing Derived Manifest and Manifest " \
+                self.logger.debug("Executing Derived Manifest and Manifest " \
                         "Parser Checkpoints")
             else:
-                self.logger.info("Executing Manifest Parser Checkpoint")
+                self.logger.debug("Executing Manifest Parser Checkpoint")
 
         if self.options.stop_checkpoint in self.MANIFEST_CHECKPOINTS:
             pause_cp = self.options.stop_checkpoint
@@ -709,6 +777,10 @@
             self.logger.error("Value errors occured :")
             print "\t\t%s" % str(ex)
             return False
+        except (AIConfigurationError) as ex:
+            self.logger.error("AI Configuration checkpoint error :")
+            print "\t\t%s" % str(ex)
+            return False
         except (RollbackError, UnknownChkptError, UsageError) as ex:
             self.logger.error("RollbackError, UnknownChkptError, UsageError :")
             print "\t\t%s" % str(ex)
@@ -737,37 +809,66 @@
         Wrapper to configure required checkpoints for performing an
         automated installation
         """
-        # Need to set following Checkpoints for installation
-        #   Derived Manifest (If script passed as argument)
-        #   Manifest Parser (If manifest passed or derived)
-        #   Target Discovery
-        #   Target Selection
-        #   Device Driver Update - Install Root
-        #   Target Instantiation
-        #   Transfer
-        #   Target Configuration
-        #   Device Driver Update - New BE
+        # Need to set following Checkpoints for installation.  Checkpoints
+        # marked with a 'G' are applicable when installing a global zone.
+        # Checkpoints marked with an 'N' are applicable when installing a
+        # non-global zone.
+        #   G- -- Derived Manifest (If script passed as argument)
+        #   GN -- Manifest Parser (If manifest passed or derived)
+        #   G- -- Target Discovery
+        #   G- -- Target Selection
+        #   -N -- Target Selection Zone
+        #   GN -- AI Configuration
+        #   G- -- Device Driver Update - Install Root
+        #   G- -- Target Instantiation
+        #   -N -- Target Instantiation Zone
+        #   GN -- Transfer
+        #   GN -- Target Configuration
+        #   G- -- Device Driver Update - New BE
 
         try:
             if not self.options.list_checkpoints:
                 self.logger.info("Configuring Checkpoints")
 
             # Register TargetDiscovery
-            self.engine.register_checkpoint("target-discovery",
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("target-discovery",
                                 "solaris_install.target.discovery",
                                 "TargetDiscovery", args=None, kwargs=None)
 
             # Register TargetSelection
-            self.logger.debug("Adding Target Selection Checkpoint")
-            self.engine.register_checkpoint("target-selection",
-                "solaris_install.auto_install.checkpoints.target_selection",
-                "TargetSelection", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.logger.debug("Adding Target Selection Checkpoint")
+                self.engine.register_checkpoint("target-selection",
+                    "solaris_install.auto_install.checkpoints."
+                    "target_selection", "TargetSelection", args=None,
+                    kwargs=None)
+            else:
+                self.logger.debug("Adding Target Selection Zone Checkpoint")
+                self.engine.register_checkpoint("target-selection",
+                    "solaris_install.auto_install.checkpoints."
+                    "target_selection_zone", "TargetSelectionZone", args=None,
+                    kwargs={"be_mountpoint": self.installed_root_dir})
+
+            # Register AIConfiguration
+            self.logger.debug("Adding AI Configuration Checkpoint")
+            self.engine.register_checkpoint("ai-configuration",
+                "solaris_install.auto_install.checkpoints.ai_configuration",
+                "AIConfiguration", args=None, kwargs=None)
 
             # Register TargetInstantiation
-            self.logger.debug("Adding Target Instantiation Checkpoint")
-            self.engine.register_checkpoint(self.TARGET_INSTANTIATION_CHECKPOINT,
+            if self.options.zone_pool_dataset is None:
+                self.logger.debug("Adding Target Instantiation Checkpoint")
+                self.engine.register_checkpoint(self.TARGET_INSTANTIATION_CHECKPOINT,
                                 "solaris_install.target.instantiation",
                                 "TargetInstantiation", args=None, kwargs=None)
+            else:
+                self.logger.debug("Adding Target Instantiation Zone "
+                    "Checkpoint")
+                self.engine.register_checkpoint("target-instantiation",
+                                "solaris_install/target/instantiation_zone",
+                                "TargetInstantiationZone", args=None,
+                                kwargs=None)
 
             # Add destination for transfer nodes, and register checkpoints.
             sw_nodes = self.doc.volatile.get_descendants(class_type=Software)
@@ -789,7 +890,6 @@
                             self.FIRST_TRANSFER_CHECKPOINT:
                             self.options.stop_checkpoint = sw.name
 
-
                 # Ensure there is at least one software_data element with
                 # Install action exists, and that all software_data elements
                 # contain at least one 'name' sub element.
@@ -802,12 +902,12 @@
                         if sw_child.action == IPSSpec.INSTALL:
                             found_install_sw_data = True
                     elif tran_type == "CPIO" and \
-                         isinstance(sw_child, CPIOSpec):
+                        isinstance(sw_child, CPIOSpec):
                         found_sw_data = True
                         if sw_child.action == CPIOSpec.INSTALL:
                             found_install_sw_data = True
                     elif tran_type == "SVR4" and \
-                         isinstance(sw_child, SVR4Spec):
+                        isinstance(sw_child, SVR4Spec):
                         found_sw_data = True
                         if sw_child.action == SVR4Spec.INSTALL:
                             found_install_sw_data = True
@@ -825,18 +925,23 @@
                     return False
 
                 self.logger.debug("Setting destination for transfer: %s to %s"
-                    % (sw.name, self.INSTALLED_ROOT_DIR))
+                    % (sw.name, self.installed_root_dir))
                 dst = sw.get_first_child(class_type=Destination)
                 if dst is None:
                     dst = Destination()
                     if sw.tran_type.upper() == "IPS":
-                        image = Image(self.INSTALLED_ROOT_DIR, image_action)
-                        img_type = ImType("full")
+                        image = Image(self.installed_root_dir, image_action)
+
+                        if self.options.zone_pool_dataset is None:
+                            img_type = ImType("full", zone=False)
+                        else:
+                            img_type = ImType("full", zone=True)
+
                         image.insert_children(img_type)
                         dst.insert_children(image)
                         image_action = AbstractIPS.EXISTING
                     else:
-                        directory = Dir(self.INSTALLED_ROOT_DIR)
+                        directory = Dir(self.installed_root_dir)
                         dst.insert_children(directory)
                     sw.insert_children(dst)
                     # Next images are use_existing, not create.
@@ -856,19 +961,26 @@
             # Register ICT Checkpoints
             #=========================
             # 1. Initialize SMF Repository
-            self.engine.register_checkpoint("initialize-smf",
-                "solaris_install.ict.initialize_smf",
-                "InitializeSMF", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("initialize-smf",
+                    "solaris_install.ict.initialize_smf",
+                    "InitializeSMF", args=None, kwargs=None)
+            else:
+                self.engine.register_checkpoint("initialize-smf-zone",
+                    "solaris_install.ict.initialize_smf",
+                    "InitializeSMFZone", args=None, kwargs=None)
 
             # 2. Boot Configuration
-            self.engine.register_checkpoint("boot-configuration",
-                "solaris_install.boot.boot",
-                "SystemBootMenu", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("boot-configuration",
+                    "solaris_install.boot.boot",
+                    "SystemBootMenu", args=None, kwargs=None)
 
             # 3. Update dumpadm / Dump Configuration
-            self.engine.register_checkpoint("update-dump-adm",
-                "solaris_install.ict.update_dumpadm",
-                "UpdateDumpAdm", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("update-dump-adm",
+                    "solaris_install.ict.update_dumpadm",
+                    "UpdateDumpAdm", args=None, kwargs=None)
 
             # 4. Setup Swap in Vfstab
             self.engine.register_checkpoint("setup-swap",
@@ -881,23 +993,26 @@
                 "SetFlushContentCache", args=None, kwargs=None)
 
             # 6. Device Configuration / Create Device Namespace
-            self.engine.register_checkpoint("device-config",
-                "solaris_install.ict.device_config",
-                "DeviceConfig", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("device-config",
+                    "solaris_install.ict.device_config",
+                    "DeviceConfig", args=None, kwargs=None)
 
-            # 7. Transfer System Configuration To BE / ApplyStsConfig
-            self.engine.register_checkpoint("apply-sysconfig",
-                "solaris_install.ict.apply_sysconfig",
-                "ApplySysConfig", args=None, kwargs=None)
+            # 7. Transfer System Configuration To BE / ApplySysConfig
+            if self.options.profile is not None:
+                self.engine.register_checkpoint("apply-sysconfig",
+                    "solaris_install.ict.apply_sysconfig",
+                    "ApplySysConfig", args=None, kwargs=None)
 
             # 8. Boot Archive
-            self.engine.register_checkpoint("boot-archive",
-                "solaris_install.ict.boot_archive",
-                "BootArchive", args=None, kwargs=None)
+            if self.options.zone_pool_dataset is None:
+                self.engine.register_checkpoint("boot-archive",
+                    "solaris_install.ict.boot_archive",
+                    "BootArchive", args=None, kwargs=None)
 
             # 9. Transfer Files to New BE
             self.add_transfer_files()
-            self.engine.register_checkpoint(self.TRANSFER_FILES_CHECKPOINT,
+            self.engine.register_checkpoint(TRANSFER_FILES_CHECKPOINT,
                 "solaris_install.ict.transfer_files",
                 "TransferFiles", args=None, kwargs=None)
 
@@ -924,12 +1039,12 @@
         # insert if not found
         tf_doc_dict = None
         tf_doc_dict = self.doc.volatile.get_first_child( \
-            name=self.TRANSFER_FILES_CHECKPOINT)
+            name=TRANSFER_FILES_CHECKPOINT)
 
         if tf_doc_dict is None:
             # Initialize dictionary in DOC
             tf_dict = dict()
-            tf_doc_dict = DataObjectDict(self.TRANSFER_FILES_CHECKPOINT,
+            tf_doc_dict = DataObjectDict(TRANSFER_FILES_CHECKPOINT,
                 tf_dict)
             self.doc.volatile.insert_children(tf_doc_dict)
         else:
@@ -947,6 +1062,9 @@
             if mp is not None and mp.manifest is not None:
                 tf_dict[mp.manifest] = \
                     post_install_logs_path('derived/manifest.xml')
+        # Else transfer the XML manifest passed in
+        else:
+            tf_dict[self.manifest] = post_install_logs_path('ai.xml')
 
         # Transfer smf logs
         tf_dict['/var/svc/log/application-auto-installer:default.log'] = \
@@ -954,9 +1072,6 @@
         tf_dict['/var/svc/log/application-manifest-locator:default.log'] = \
             post_install_logs_path('application-manifest-locator:default.log')
 
-        # Transfer default manifest
-        tf_dict[system_temp_path('ai.xml')] = post_install_logs_path('ai.xml')
-
         # Transfer AI Service Discovery Log
         tf_dict[system_temp_path('ai_sd_log')] = \
             post_install_logs_path('ai_sd_log')
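
Most of the auto_install.py changes above hinge on the new -Z option: when a zone pool dataset is supplied, the zone variants of target selection and instantiation are registered, and the checkpoints that only make sense for a global zone (boot configuration, dumpadm, device config, boot archive) are skipped. A quick sketch of the extended option set using a throwaway optparse parser with invented argument values (the shipped definitions live in parse_args() above):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option("-m", dest="manifest")            # XML manifest or derived script
    parser.add_option("-c", dest="profile")             # profile file or directory
    parser.add_option("-r", dest="service_list_file")   # AI service list for zone installs
    parser.add_option("-Z", dest="zone_pool_dataset")   # install into a zone root dataset
    opts, _ = parser.parse_args(["-m", "/tmp/zone_default.xml",
                                 "-c", "/tmp/profiles",
                                 "-Z", "rpool/zones/zone1/rpool"])
    print(opts.zone_pool_dataset)                       # rpool/zones/zone1/rpool
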
--- a/usr/src/cmd/auto-install/checkpoints/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/checkpoints/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -29,8 +29,10 @@
 install:=	TARGET=	install
 
 PYMODULES=	__init__.py \
-                dmm.py \
-		target_selection.py
+		ai_configuration.py \
+		dmm.py \
+		target_selection.py \
+		target_selection_zone.py
 
 PYCMODULES=	$(PYMODULES:%.py=%.pyc)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/checkpoints/ai_configuration.py	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+""" ai_configuration.py - AI Configuration
+"""
+
+import os
+import os.path
+import shutil
+import urllib
+
+from solaris_install import ApplicationData, Popen, CalledProcessError
+from solaris_install.auto_install import TRANSFER_FILES_CHECKPOINT
+from solaris_install.configuration.configuration import Configuration
+from solaris_install.data_object import ObjectNotFoundError
+from solaris_install.data_object.data_dict import DataObjectDict
+from solaris_install.engine import InstallEngine
+from solaris_install.engine.checkpoint import AbstractCheckpoint as Checkpoint
+from solaris_install.ict import SVCCFG
+from solaris_install.target import Target
+from solaris_install.target.instantiation_zone import ALT_POOL_DATASET
+from solaris_install.target.logical import Filesystem, Zpool
+
+# Define constants
+XMLLINT = '/usr/bin/xmllint'
+
+# Checkpoint specific AI ApplicationData dictionary keys
+AI_SERVICE_LIST_FILE = "service_list_file"
+
+
+class AIConfigurationError(Exception):
+    '''Error generated when a configuration error is detected'''
+
+    def __init__(self, msg):
+        Exception.__init__(self)
+        self.msg = msg
+
+    def __str__(self):
+        return self.msg
+
+class AIConfiguration(Checkpoint):
+    """ AIConfiguration - Checkpoint to process configuration
+        specifications in the AI manifest.
+    """
+
+    def __init__(self, name):
+        super(AIConfiguration, self).__init__(name)
+
+        self.app_data = None
+        self.service_list_file = None
+        self.alt_pool_dataset = None
+        self.zone_confs = list()
+
+    def validate_zone_configuration(self, dry_run=False):
+        """ Method to download and validate all zone configurations
+            specified in the AI manifest.  This method will also query
+            the AI installation service (if provided) to download
+            AI manifest and SC profiles for the zones specified.
+        """
+
+        TMP_ZONES_DIR = os.path.join(self.app_data.work_dir, "zones")
+        TMP_ZONES_CONFIG_LIST = os.path.join(TMP_ZONES_DIR, "config_list")
+        TMP_ZONES_CONFIG_OUTPUT = os.path.join(TMP_ZONES_DIR, "config_output")
+        TMP_ZONES_INSTALL_DIR = os.path.join(TMP_ZONES_DIR, "install")
+
+        TARGET_ZONES_INSTALL_DIR = "/var/zones/install"
+
+        # Clear out the TMP_ZONES_DIR directory in case it exists.
+        shutil.rmtree(TMP_ZONES_DIR, ignore_errors=True)
+
+        # Create TMP_ZONES_DIR directory
+        os.makedirs(TMP_ZONES_DIR)
+
+        try:
+            with open(TMP_ZONES_CONFIG_LIST, 'w') as zones_config_list:
+                # Copy all 'source' entries to a local area.
+                for conf in self.zone_confs:
+                    self.logger.info("Zone name: " + conf.name)
+                    self.logger.info("   source: " + conf.source)
+
+                    # Make subdirectory to store this zone's files.  The name
+                    # of the zone is used as the name of the subdirectory.
+                    os.makedirs(os.path.join(TMP_ZONES_INSTALL_DIR, conf.name))
+
+                    # Retrieve the zone config file.
+                    try:
+                        (filename, headers) = urllib.urlretrieve(conf.source,
+                            os.path.join(TMP_ZONES_INSTALL_DIR, conf.name,
+                                         "config"))
+                    except urllib.ContentTooShortError, er:
+                        raise AIConfigurationError("Retrieval of zone config "
+                            "file (%s) failed: %s" % (conf.name, str(er)))
+
+                    # Append this zone's local config file path
+                    zones_config_list.write(filename + "\n")
+
+                    # Retrieve this zone's AI manifest and profile(s) from a
+                    # remote installation service if service_list_file is
+                    # provided.
+                    if self.service_list_file is not None:
+                        cmd = ["/usr/bin/ai_get_manifest", "-e",
+                               "-o", os.path.join(TMP_ZONES_INSTALL_DIR,
+                               conf.name, "ai_manifest.xml"),
+                               "-p", os.path.join(TMP_ZONES_INSTALL_DIR,
+                               conf.name, "profiles"),
+                               "-c", "zonename=" + conf.name,
+                               "-s", self.service_list_file]
+                        try:
+                            Popen.check_call(cmd, stdout=Popen.STORE,
+                                             stderr=Popen.STORE,
+                                             logger=self.logger)
+                        except CalledProcessError, er:
+                            raise AIConfigurationError("AI manifest query for "
+                                "zone (%s) failed: %s" % (conf.name, str(er)))
+        except IOError, er:
+            raise AIConfigurationError("IO Error during zone validation: %s" % \
+                                       str(er))
+
+        # Pass the file containing the list of zone config files to
+        # "/usr/sbin/zonecfg auto-install-report", which will parse the
+        # config files, do any necessary validation of the zone
+        # configurations, and yield an output file that lists the
+        # directories and datasets that must exist for the given
+        # zone configs to work.
+
+        cmd = ['/usr/sbin/zonecfg', 'auto-install-report',
+               '-f', TMP_ZONES_CONFIG_LIST, '-o', TMP_ZONES_CONFIG_OUTPUT]
+        try:
+            Popen.check_call(cmd, stdout=Popen.STORE, stderr=Popen.STORE,
+                             logger=self.logger)
+        except CalledProcessError, er:
+            raise AIConfigurationError("Zone configurations failed "
+                                       "to validate.")
+
+        directories = list()
+        datasets = list()
+        try:
+            with open(TMP_ZONES_CONFIG_OUTPUT, 'r') as zones_config_output:
+                for line in zones_config_output:
+                    (name, value) = line.strip().split("=", 1)
+
+                    if name == "zonepath_parent":
+                        directories.append(value)
+                    elif name == "zfs_dataset":
+                        datasets.append(value)
+                    else:
+                        raise AIConfigurationError("Failure: unknown keyword "
+                            "in %s: %s" % (TMP_ZONES_CONFIG_OUTPUT, name))
+        except IOError, er:
+            raise AIConfigurationError("Could not read zone config output "
+                                       "file (%s): %s" % \
+                                       (TMP_ZONES_CONFIG_OUTPUT, str(er)))
+
+
+        # TODO: Check that all zonepath_parent values are directories that
+        # will not exist under a filesystem dataset that is inside the BE.
+
+
+        # Ensure all zfs_dataset values are specified to be created
+        # on the installed system.  At this point, Target Selection
+        # should have already run, so we should be able to grab the
+        # DESIRED filesystems from the DOC to make sure these datasets
+        # are specified.
+        if datasets:
+            target = self.doc.get_descendants(name=Target.DESIRED,
+                class_type=Target, not_found_is_err=True)[0]
+            fs_list = target.get_descendants(class_type=Filesystem)
+
+            if fs_list:
+                for dataset in datasets:
+                    if dataset not in [fs.full_name for fs in fs_list]:
+                        raise AIConfigurationError("The following dataset is "
+                            "specified in a zone configuration but does not "
+                            "exist in the AI manifest: %s" % dataset)
+
+        # Ensure all AI manifests and SC profiles are valid.
+        for zone in os.listdir(TMP_ZONES_INSTALL_DIR):
+            zone_dir = os.path.join(TMP_ZONES_INSTALL_DIR, zone)
+
+            manifest = os.path.join(zone_dir, "ai_manifest.xml")
+            if os.path.isfile(manifest):
+                # Validate AI manifest.
+                cmd = [XMLLINT, '--valid', manifest]
+                if dry_run:
+                    self.logger.debug('Executing: %s', cmd)
+                else:
+                    Popen.check_call(cmd, stdout=Popen.STORE,
+                        stderr=Popen.STORE, logger=self.logger)
+
+            profiles_dir = os.path.join(zone_dir, "profiles")
+            if os.path.isdir(profiles_dir):
+                for profile in os.listdir(profiles_dir):
+                    profile_file = os.path.join(profiles_dir, profile)
+                    if os.path.isfile(profile_file):
+                        # Validate SC profile.
+                        cmd = [SVCCFG, 'apply', '-n', profile_file]
+                        if dry_run:
+                            self.logger.debug('Executing: %s', cmd)
+                        else:
+                            Popen.check_call(cmd, stdout=Popen.STORE, \
+                                stderr=Popen.STORE, logger=self.logger)
+
+        # Add the zone configuration directory into the dictionary in the DOC
+        # that will be processed by the transfer-ai-files checkpoint which will
+        # copy files over to the installed root.
+        tf_doc_dict = None
+        tf_doc_dict = self.doc.volatile.get_first_child( \
+            name=TRANSFER_FILES_CHECKPOINT)
+        if tf_doc_dict is None:
+            # Initialize new dictionary in DOC
+            tf_dict = dict()
+            tf_doc_dict = DataObjectDict(TRANSFER_FILES_CHECKPOINT,
+                tf_dict)
+            self.doc.volatile.insert_children(tf_doc_dict)
+        else:
+            tf_dict = tf_doc_dict.data_dict
+
+        tf_dict[TMP_ZONES_INSTALL_DIR] = TARGET_ZONES_INSTALL_DIR
+
+    def parse_doc(self):
+        """ class method for parsing the data object cache (DOC) objects
+        for use by this checkpoint
+        """
+        self.engine = InstallEngine.get_instance()
+        self.doc = self.engine.data_object_cache
+
+        # Get a reference to ApplicationData object
+        self.app_data = self.doc.persistent.get_first_child( \
+            class_type=ApplicationData)
+
+        if self.app_data:
+            # Get the installation service list file
+            self.service_list_file = \
+                self.app_data.data_dict.get(AI_SERVICE_LIST_FILE)
+
+            # See if an alternate pool dataset is set.
+            self.alt_pool_dataset = \
+                self.app_data.data_dict.get(ALT_POOL_DATASET)
+
+        if not self.service_list_file:
+            self.logger.debug("No service list provided.")
+
+        # Get all configuration components from the DOC
+        self.conf_list = self.doc.get_descendants(class_type=Configuration)
+
+    def get_progress_estimate(self):
+        """Returns an estimate of the time this checkpoint will take
+        """
+        return 2
+
+    def execute(self, dry_run=False):
+        """ Execution method
+
+            This method will process all configuration components that are
+            in the AI manifest.
+        """
+
+        self.parse_doc()
+
+        # AI currently only supports configuration of type 'zone'.
+        # Iterate over all 'zone' configuration components and process
+        # them, ignoring all other types.
+        for conf in self.conf_list:
+            if conf.type is not None and \
+                    conf.type == Configuration.TYPE_VALUE_ZONE:
+                self.zone_confs.append(conf)
+            else:
+                self.logger.debug('Unsupported configuration type "' +
+                        str(conf.type) + '".  Ignoring ...')
+
+        if self.zone_confs:
+            if not self.alt_pool_dataset:
+                self.validate_zone_configuration(dry_run=dry_run)
+            else:
+                # If alt_pool_dataset is set, we're installing a zone
+                # itself, so configurations of type 'zone' are ignored.
+                self.logger.debug('Configurations of type "' +
+                    Configuration.TYPE_VALUE_ZONE + '" not supported '
+                    'when installing a zone.  Ignoring ...')
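
A minimal standalone sketch of the two helpers the checkpoint above relies
on: parsing the name=value report written by "zonecfg auto-install-report"
(keys zonepath_parent and zfs_dataset, as handled in the code) and dry-run
validating a zone's AI manifest and SC profiles.  The sketch uses only the
Python standard library instead of the solaris_install Popen wrapper; the
svccfg path is an assumption, while the other commands, options and file
names are taken from the checkpoint above.

import os
import subprocess

XMLLINT = '/usr/bin/xmllint'
SVCCFG = '/usr/sbin/svccfg'    # assumed location of svccfg(1M)

def parse_zonecfg_report(report_path):
    """Split the auto-install-report output into zonepath parent
       directories and required ZFS datasets."""
    directories = []
    datasets = []
    with open(report_path, 'r') as report:
        for line in report:
            (name, value) = line.strip().split("=", 1)
            if name == "zonepath_parent":
                directories.append(value)
            elif name == "zfs_dataset":
                datasets.append(value)
            else:
                raise ValueError("unknown keyword in %s: %s" %
                                 (report_path, name))
    return directories, datasets

def validate_zone_files(zone_dir):
    """Validate a zone's AI manifest with xmllint and its SC profiles
       with an svccfg dry-run, mirroring the checkpoint's validation."""
    manifest = os.path.join(zone_dir, "ai_manifest.xml")
    if os.path.isfile(manifest):
        subprocess.check_call([XMLLINT, '--valid', manifest])
    profiles_dir = os.path.join(zone_dir, "profiles")
    if os.path.isdir(profiles_dir):
        for profile in os.listdir(profiles_dir):
            profile_file = os.path.join(profiles_dir, profile)
            if os.path.isfile(profile_file):
                subprocess.check_call([SVCCFG, 'apply', '-n', profile_file])
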
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/checkpoints/target_selection_zone.py	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+""" target_selection_zone.py - Select Install Target(s)
+"""
+
+import copy
+import platform
+
+from solaris_install import ApplicationData
+from solaris_install.engine import InstallEngine
+from solaris_install.target import Target
+from solaris_install.target.controller import DEFAULT_LOGICAL_NAME
+from solaris_install.target.instantiation_zone import ALT_POOL_DATASET
+from solaris_install.target.logical import Logical, BE, Filesystem
+from solaris_install.auto_install.checkpoints.target_selection import \
+     TargetSelection, SelectionError
+
+
+class TargetSelectionZone(TargetSelection):
+    """ TargetSelectionZone - Checkpoint to select install target.
+
+        This checkpoint selects the install target(s) for a zone based on
+        the zone's alternate pool dataset and the target information
+        provided in the AI Manifest.
+
+        If it's not possible to determine a selection, then a SelectionError
+        exception will be raised, causing the installation to fail.
+    """
+
+    def __init__(self, name, be_mountpoint="/a"):
+        super(TargetSelection, self).__init__(name)
+
+        # instance attributes
+        self.be_mountpoint = be_mountpoint
+        self.doc = InstallEngine.get_instance().data_object_cache
+
+        # set the zone's alternate pool dataset
+        self.selected_dataset = None
+
+        # set the platform
+        self.arch = platform.processor()
+
+    def select_targets(self, from_manifest):
+        '''Logic to select the targets for a zone.
+
+           Given the alternate pool dataset, and the targets from the
+           manifest, make the selections.
+
+           If no suitable selection can be made, then the SelectionError
+           exception will be raised.  This should only be the case if the
+           selected alternate pool dataset does not exist.
+
+           Returns a new set of Targets that can be inserted into the
+           Data Object Cache for TargetInstantiationZone to use.
+
+        '''
+
+        # The selected alternate pool dataset must be set
+        if self.selected_dataset is None:
+            raise SelectionError("No dataset selected as alternate pool "
+                                 "dataset.")
+
+        # Verify selected dataset exists
+        fs = Filesystem(self.selected_dataset)
+        if not fs.exists:
+            raise SelectionError("Dataset (%s) does not exist." % \
+                                 self.selected_dataset)
+
+        if from_manifest:
+            self.logger.debug("from_manifest =\n%s\n" % \
+                              (str(from_manifest[0])))
+        else:
+            self.logger.debug("from_manifest is empty\n")
+
+
+        # Instantiate desired target, logical, and zpool objects.
+        target = Target(Target.DESIRED)
+        logical = Logical(DEFAULT_LOGICAL_NAME)
+        logical.noswap = True
+        logical.nodump = True
+        zpool = logical.add_zpool(self.selected_dataset)
+
+        for manifest_target in from_manifest:
+            # Copy filesystem children into desired zpool
+            for fs in manifest_target.get_descendants(class_type=Filesystem):
+                zpool.insert_children(copy.deepcopy(fs))
+
+            # Copy BE children into desired zpool
+            for be in manifest_target.get_descendants(class_type=BE):
+                zpool.insert_children(copy.deepcopy(be))
+
+        # Check if we have a BE object under zpool.
+        # If not, create one.
+        be_list = zpool.get_children(class_type=BE)
+        if not be_list:
+            # Instantiate new BE object and insert it into zpool.
+            be = BE()
+            zpool.insert_children(be)
+        else:
+            # Zpool will have only one BE object.
+            be = be_list[0]
+
+        # Set BE's mountpoint to the mountpoint we need
+        # to mount it at to do the installation.
+        be.mountpoint = self.be_mountpoint
+
+        # Insert desired logical object into the desired target object.
+        target.insert_children(logical)
+
+        # Insert desired target object into the DOC.
+        self.doc.persistent.insert_children(target)
+
+
+    def parse_doc(self):
+        """ Method for locating objects in the  data object cache (DOC) for
+            use by the checkpoint.
+
+            Will return a Data Object reference for the Targets from the
+            manifest.
+        """
+
+        from_manifest = self.doc.find_path(
+            "//[@solaris_install.auto_install.ai_instance.AIInstance?2]"
+            "//[@solaris_install.target.Target?2]")
+
+        app_data = self.doc.persistent.get_first_child( \
+            class_type=ApplicationData)
+        if app_data:
+            self.selected_dataset = app_data.data_dict.get(ALT_POOL_DATASET)
+
+        return from_manifest
+
+    # Implement AbstractCheckpoint methods.
+    def get_progress_estimate(self):
+        """Returns an estimate of the time this checkpoint will take
+        """
+        return 3
+
+    def execute(self, dry_run=False):
+        """ Primary execution method used by the Checkpoint parent class
+            to select the targets during an install.
+        """
+        self.logger.debug("=== Executing Target Selection Zone Checkpoint ==")
+
+        from_manifest = self.parse_doc()
+
+        self.select_targets(from_manifest)
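
The core rule of select_targets() above: the desired zpool, named after the
zone's alternate pool dataset, receives copies of the Filesystem and BE
nodes from the manifest and must end up with exactly one BE mounted at the
install mountpoint.  A self-contained sketch of that rule, with FakeBE and
FakeZpool as hypothetical stand-ins for the real
solaris_install.target.logical objects:

import copy

class FakeBE(object):
    """Stand-in for a boot environment node."""
    def __init__(self, name="solaris", mountpoint=None):
        self.name = name
        self.mountpoint = mountpoint

class FakeZpool(object):
    """Stand-in for a zpool node holding child datasets and BEs."""
    def __init__(self, name):
        self.name = name
        self.children = []

def ensure_single_be(zpool, manifest_bes, be_mountpoint="/a"):
    """Copy any manifest BEs into the zpool, create one if none exist,
       and force the (single) BE to mount at be_mountpoint."""
    for be in manifest_bes:
        zpool.children.append(copy.deepcopy(be))
    bes = [c for c in zpool.children if isinstance(c, FakeBE)]
    if not bes:
        be = FakeBE()
        zpool.children.append(be)
    else:
        be = bes[0]    # the desired zpool carries only one BE
    be.mountpoint = be_mountpoint
    return be

if __name__ == "__main__":
    # Illustrative alternate pool dataset name for a zone install.
    pool = FakeZpool("rpool/zones/z1/rpool")
    be = ensure_single_be(pool, [])
    print("BE %s will be mounted at %s" % (be.name, be.mountpoint))
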
--- a/usr/src/cmd/auto-install/default.xml	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- CDDL HEADER START
-
- The contents of this file are subject to the terms of the
- Common Development and Distribution License (the "License").
- You may not use this file except in compliance with the License.
-
- You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- or http://www.opensolaris.org/os/licensing.
- See the License for the specific language governing permissions
- and limitations under the License.
-
- When distributing Covered Code, include this CDDL HEADER in each
- file and include the License file at usr/src/OPENSOLARIS.LICENSE.
- If applicable, add the following below this CDDL HEADER, with the
- fields enclosed by brackets "[]" replaced with your own identifying
- information: Portions Copyright [yyyy] [name of copyright owner]
-
- CDDL HEADER END
-
- Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
-
--->
-<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
-<auto_install>
-  <ai_instance name="default">
-    <software type="IPS">
-      <source>
-        <publisher name="solaris">
-          <origin name="http://pkg.oracle.com/solaris/release"/>
-        </publisher>
-      </source>
-      <!--
-	By default the latest build available, in the specified IPS
-	repository, is installed.  If another build is required, the 
-	build number has to be appended to the 'entire' package in following
-	form:
-
-	<name>pkg:/[email protected]#</name>
-      -->
-      <software_data action="install">
-        <name>pkg:/entire</name>
-        <name>pkg:/server_install</name>
-      </software_data>
-    </software>
-  </ai_instance>
-</auto_install>
--- a/usr/src/cmd/auto-install/enable_sci.xml	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-<!--
-CDDL HEADER START
-
-The contents of this file are subject to the terms of the
-Common Development and Distribution License (the "License").
-You may not use this file except in compliance with the License.
-
-You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-or http://www.opensolaris.org/os/licensing.
-See the License for the specific language governing permissions
-and limitations under the License.
-
-When distributing Covered Code, include this CDDL HEADER in each
-file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-If applicable, add the following below this CDDL HEADER, with the
-fields enclosed by brackets "[]" replaced with your own identifying
-information: Portions Copyright [yyyy] [name of copyright owner]
-
-CDDL HEADER END
-
-Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
--->
-
-<!--
-System Configuration profile which activates interactive configuration
-scenario.
-
-It configures system/config smf service to bring up System Configuration
-Interactive Tool during boot process.
--->
-
-<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
-<service_bundle type="profile" name="enable_SCI_tool">
-    <service name="system/config" version="1" type="service">
-        <instance name="default" enabled="true">
-            <property_group name="configuration">
-                <propval name="configure" value="true"/>
-                <propval name="interactive_config" value="true"/>
-            </property_group>
-        </instance>
-    </service>
-</service_bundle>
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/manifest/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,41 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+include $(SRC)/cmd/Makefile.cmd
+
+MANIFEST_FILES =	ai_manifest.xml \
+			default.xml \
+			zone_default.xml
+
+ROOTMANIFESTS= $(MANIFEST_FILES:%=$(ROOTAUTOINSTMANIFEST)/%)
+
+all:
+
+install: all .WAIT \
+	$(ROOTMANIFESTS)
+
+clean:
+
+clobber: clean
+
+include $(SRC)/cmd/Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/manifest/ai_manifest.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,275 @@
+<?xml version="1.0"?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+<!--
+===============================================================================
+DTD sample manifest for Automatic Installer input manifest specification.
+===============================================================================
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <!--
+        "auto_reboot" set to "true" may be an issue for x86 machines.
+        The boot order is not guaranteed and may cause unexpected
+        behavior. If auto_reboot is not desired, set auto_reboot="false".
+
+        The name of the manifest is obtained from (in this order):
+        1) the name from the installadm add-manifest command line "-m" option.
+        2) a name attribute in the manifest, e.g.: 
+           <ai_instance name="my_ai_manifest" auto_reboot="true">
+        3) manifest filename 
+    -->
+  <ai_instance auto_reboot="true">
+    <!--
+      =======================================================================
+      <target> - selections for AI target Device specification
+
+      Disk criteria are divided into three mutually exclusive groups:
+
+      G1 - deterministic disk criteria
+      ................................
+        * disk/iscsi parameters
+        * disk/disk_name, with name_type attribute:
+          one of ctd, volid, devpath or devid
+
+      G2 - non-deterministic disk criteria
+      ..........................
+        * disk/disk_prop: Any of dev_type, dev_vendor or
+          dev_size
+
+      G3 - keyword disk criteria
+      ...........................
+        * disk/disk_keyword: "boot_disk"
+
+      Schema ai.dtd enforces the following policy:
+
+      * criteria in group G1 are mutually exclusive - only
+        one can be specified at a time
+
+      * groups G1, G2 and G3 are mutually exclusive - i.e.
+        if criteria from G1 is specified, no criteria
+        from G2 or G3 are allowed and vice versa
+
+      * multiple criteria from G2 can be specified
+      =======================================================================
+    -->
+    <target>
+      <disk>
+        <!-- G1 -->
+        <!--
+          c#t#d# device name like c0t0d0 or 
+          MPXIO name like c0t2000002037CD9F72d0
+        -->
+        <disk_name name="c1t0d0" name_type="ctd"/>
+        <!-- volume name set for instance by means
+          of format(1M) command
+        -->
+        <!--
+        <disk_name name="ai-disk" name_type="volid"/>
+        -->
+        <!-- device id - e.g. can be obtained by means of
+          iostat(1M) -iEn
+        -->
+        <!--
+        <disk_name name="id1,cmdk@AST31000340NS=____________9QJ2LNYY" name_type="devid"/>
+        -->
+        <!-- device path under /devices directory, e.g.
+          /pci@1e,600000/pci@0/pci@9/pci@0/scsi@1/sd@0,0
+        -->
+        <!--
+        <disk_name name="/pci@0/pci@9/pci@0/scsi@1/sd@0,0" name_type="devpath"/>
+        -->
+        <!--
+          ISCSI target device
+
+        <iscsi name="c0d2E0001010F68">
+          <ip>192.168.1.34</ip>
+        </iscsi> 
+        -->
+        <!-- G2 -->
+        <!--
+        <disk_prop dev_vendor="hitachi" dev_size="20480mb"/>
+
+        or 
+
+        <disk_prop dev_vendor="hitachi"/>
+
+        or
+
+        <disk_prop dev_size="20480mb"/>
+        -->
+        <!-- G3 -->
+        <!--
+        <disk_keyword key="boot_disk"/>
+        -->
+        <!--
+          On X86 machines, Slices exist within partitions only
+        -->
+        <!--
+          Uncomment this to force AI to find an existing Solaris
+          partition.
+        -->
+        <!--
+        <partition action="use_existing_solaris2">
+          <slice name="0">
+            <size val="20480mb"/>
+          </slice>
+          <slice name="4">
+            <size val="20480mb"/>
+          </slice>
+        </partition>
+
+        or, use the following to create a Solaris partition
+        -->
+        <partition name="1" part_type="191">
+          <size start_sector="200" val="40960mb"/>
+          <slice name="0">
+            <size val="20480mb"/>
+          </slice>
+          <slice name="4">
+            <size val="20480mb"/>
+          </slice>
+        </partition>
+        <!-- Define some other partitions to create too -->
+        <partition name="2" part_type="99">
+          <size start_sector="200" val="20480mb"/>
+        </partition>
+        <partition name="4" part_type="99">
+          <size start_sector="2000" val="20480mb"/>
+        </partition>
+        <!--
+        On SPARC systems, only specify the Slice layout.
+        -->
+        <!--
+            <slice name="0">
+              <size val="20480mb"/>
+            </slice>
+            <slice name="4">
+              <size val="20480mb"/>
+            </slice>
+        -->
+      </disk>
+      <logical>
+        <zpool name="rpool" is_root="true">
+          <filesystem name="export" mountpoint="/export"/>
+          <filesystem name="export/home"/>
+          <be name="solaris"/>
+        </zpool>
+      </logical>
+    </target>
+    <software name="ips" type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the
+        specified IPS repository, is installed.
+        If another build is required, the build number has
+        to be appended to the 'entire' package in the following
+        form:
+        <name>pkg:/[email protected]#</name>
+      -->
+      <software_data>
+        <name>pkg:/entire</name>
+        <name>pkg:/server_install</name>
+      </software_data>
+    </software>
+    <add_drivers>
+      <!--
+            Driver Updates: This section is for adding driver packages to the
+            boot environment before the installation takes place.  The
+            installer can then access all devices on the system.  The
+            packages installed in the boot environment will also be installed
+            on the target.
+
+            A <search_all> entry performs a search for devices which are
+            missing their drivers.  A repository publisher and location
+            may be specified, and that repository and its database will
+            be used.  If no publisher and location is specified, the
+            configured repositories will be used.
+            (See pkg publisher command.)  If <addall> is specified as
+            "true", then drivers the database says are third-party drivers
+            will be added like all others; otherwise third-party drivers
+            will not be added.
+
+                <search_all addall="true">
+                    <source>
+                        <publisher name="solaris">
+                            <origin name="http://pkg.oracle.com/solaris/release"/>
+                        </publisher>
+                    </source>
+                </search_all>
+
+            <software> entries are user-provided specifications of packages
+            needed in order to perform the install.  types are P5I, SVR4, DU.
+            A <software_data> action of "noinstall" inhibits adding to target.
+
+            P5I: A pkg(5) P5I file, full path is in the source/publisher/origin.
+            Path may be to a local file or an http or ftp specification.
+                <software>
+                    <source>
+                        <publisher>
+                            <origin
+                                name=
+        "http://pkg.oracle.com/solaris/release/p5i/0/driver/firewire.p5i"/>
+                        </publisher>
+                    </source>
+                    <software_data type="P5I"/>
+                </software>
+
+            SVR4: An SVR4 package spec. The source/publisher/origin corresponds
+            to the directory containing the packages.  The
+            software/software_data/name refers to the package's top level
+            directory or the package's datastream file.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/package_dir"/>
+                        </publisher>
+                    </source>
+                    <software_data type="SVR4">
+                        <name>my_disk_driver.d</name>
+                    </software_data>
+                </software>
+
+            DU: An ITU (Install Time Update) or Driver Update image.
+            The source/publisher/origin refers to the path just above the 
+            image's DU directory (if expanded) or the name of the .iso image.  
+            All packages in the image will be added.
+
+                <software>
+                    <source>
+                        <publisher>
+                            <origin name="/export/duimages/mydriver.iso"/>
+                        </publisher>
+                    </source>
+                    <software_data type="DU"/>
+                </software>     
+      -->
+      <search_all/>
+    </add_drivers>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/manifest/default.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,57 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License (the "License").
+ You may not use this file except in compliance with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+<auto_install>
+  <ai_instance name="default">
+    <target>
+      <logical>
+        <zpool name="rpool" is_root="true">
+          <filesystem name="export" mountpoint="/export"/>
+          <filesystem name="export/home"/>
+          <be name="solaris"/>
+        </zpool>
+      </logical>
+    </target>
+    <software type="IPS">
+      <source>
+        <publisher name="solaris">
+          <origin name="http://pkg.oracle.com/solaris/release"/>
+        </publisher>
+      </source>
+      <!--
+        By default the latest build available, in the specified IPS
+        repository, is installed.  If another build is required, the
+        build number has to be appended to the 'entire' package in the
+        following form:
+
+        <name>pkg:/[email protected]#</name>
+      -->
+      <software_data action="install">
+        <name>pkg:/entire</name>
+        <name>pkg:/server_install</name>
+      </software_data>
+    </software>
+  </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/manifest/zone_default.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ CDDL HEADER START
+
+ The contents of this file are subject to the terms of the
+ Common Development and Distribution License (the "License").
+ You may not use this file except in compliance with the License.
+
+ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ or http://www.opensolaris.org/os/licensing.
+ See the License for the specific language governing permissions
+ and limitations under the License.
+
+ When distributing Covered Code, include this CDDL HEADER in each
+ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ If applicable, add the following below this CDDL HEADER, with the
+ fields enclosed by brackets "[]" replaced with your own identifying
+ information: Portions Copyright [yyyy] [name of copyright owner]
+
+ CDDL HEADER END
+
+ Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+-->
+<!DOCTYPE auto_install SYSTEM "file:///usr/share/install/ai.dtd">
+
+<auto_install>
+    <ai_instance name="zone_default">
+        <target>
+            <logical>
+                <zpool name="rpool">
+                    <filesystem name="export" mountpoint="/export"/>
+                    <filesystem name="export/home"/>
+                    <be name="solaris"/>
+                </zpool>
+            </logical>
+        </target>
+
+        <software type="IPS">
+            <software_data action="install">
+                <name>pkg:/group/system/solaris-small-server</name>
+            </software_data>
+        </software>
+    </ai_instance>
+</auto_install>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/profile/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,41 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+include $(SRC)/cmd/Makefile.cmd
+
+SC_PROFILE_FILES =	static_network.xml \
+			enable_sci.xml \
+			sc_sample.xml
+
+ROOTSCPROFILES= $(SC_PROFILE_FILES:%=$(ROOTAUTOINSTSCPROFILES)/%)
+
+all:
+
+install: all .WAIT \
+	$(ROOTSCPROFILES)
+
+clean:
+
+clobber: clean 
+
+include $(SRC)/cmd/Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/profile/enable_sci.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,43 @@
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+
+<!--
+System Configuration profile which activates interactive configuration
+scenario.
+
+It configures system/config smf service to bring up System Configuration
+Interactive Tool during boot process.
+-->
+
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<service_bundle type="profile" name="enable_SCI_tool">
+    <service name="system/config" version="1" type="service">
+        <instance name="default" enabled="true">
+            <property_group name="configuration">
+                <propval name="configure" value="true"/>
+                <propval name="interactive_config" value="true"/>
+            </property_group>
+        </instance>
+    </service>
+</service_bundle>
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/profile/sc_sample.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,89 @@
+<?xml version='1.0'?>
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+
+<!--
+Sample system configuration profile for use with Automated Installer
+
+Configures the following:
+* User account name 'jack', password 'jack', GID 10, UID 101, root role, bash shell
+* 'root' role with password 'solaris'
+* Keyboard mappings set to US-English
+* Network configuration is automated with Network Auto-magic
+* DNS name service client is enabled
+
+See installadm(1M) for usage of 'create-profile' subcommand.
+-->
+
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<service_bundle type="profile" name="system configuration">
+    <service name="system/config" version="1">
+      <instance name="default" enabled="true">
+        <property_group name="user_account">
+          <propval name="login" value="jack"/>
+          <propval name="password" value="9Nd/cwBcNWFZg"/>
+          <propval name="description" value="default_user"/>
+          <propval name="shell" value="/usr/bin/bash"/>
+          <propval name="gid" value='10'/>
+          <propval name="type" value="normal"/>
+          <propval name="roles" value="root"/>
+          <propval name="profiles" value="System Administrator"/>
+        </property_group>
+        <property_group name="root_account">
+            <propval name="password" value="$5$dnRfcZse$Hx4aBQ161Uvn9ZxJFKMdRiy8tCf4gMT2s2rtkFba2y4"/>
+            <propval name="type" value="role"/>
+        </property_group>
+      </instance>
+    </service>
+
+    <service version="1" name="system/identity">
+      <instance enabled="true" name="node">
+        <property_group name="config">
+           <propval name="nodename" value="solaris"/>
+        </property_group>
+      </instance>
+    </service>
+
+    <service name="system/console-login" version="1">
+      <property_group name="ttymon">
+        <propval name="terminal_type" value="sun"/>
+      </property_group>
+    </service>
+
+    <service name='system/keymap' version='1'>
+      <instance name='default' enabled='true'>
+        <property_group name='keymap'>
+          <propval name='layout' value='US-English'/>
+        </property_group>
+      </instance>
+    </service>
+
+    <service name="network/physical" version="1">
+      <instance name="nwam" enabled="true"/>
+      <instance name="default" enabled="false"/>
+    </service>
+
+    <service name='network/dns/client' version='1'>
+      <instance name='default' enabled='true'/>
+    </service>
+</service_bundle>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/cmd/auto-install/profile/static_network.xml	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,109 @@
+<!--
+CDDL HEADER START
+
+The contents of this file are subject to the terms of the
+Common Development and Distribution License (the "License").
+You may not use this file except in compliance with the License.
+
+You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+or http://www.opensolaris.org/os/licensing.
+See the License for the specific language governing permissions
+and limitations under the License.
+
+When distributing Covered Code, include this CDDL HEADER in each
+file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+If applicable, add the following below this CDDL HEADER, with the
+fields enclosed by brackets "[]" replaced with your own identifying
+information: Portions Copyright [yyyy] [name of copyright owner]
+
+CDDL HEADER END
+
+Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+-->
+
+<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
+<service_bundle type="profile" name="system configuration">
+    <service name="system/config" version="1" type="service">
+        <instance name="default" enabled="true">
+            <property_group name="user_account" type="application">
+                <propval name="login" type="astring" value="jack"/>
+                <propval name="password" type="astring" value="9Nd/cwBcNWFZg"/>
+                <propval name="description" type="astring" value="default_user"/>
+                <propval name="shell" type="astring" value="/usr/bin/bash"/>
+                <propval name="uid" type='count' value='101'/>
+                <propval name="gid" type='count' value='10'/>
+                <propval name="type" type="astring" value="normal"/>
+                <propval name="roles" type="astring" value="root"/>
+            </property_group>
+
+            <property_group name="root_account" type="application">
+                <propval name="password" type="astring" value="$5$VgppCOxA$ycFmYW4ObRRHhtsGEygDdexk5bugqgSiaSR9niNCouC"/>
+                <propval name="type" type="astring" value="role"/>
+            </property_group>
+
+            <property_group name="other_sc_params" type="application">
+                <propval name="timezone" type="astring" value="GMT"/>
+                <propval name="hostname" type="astring" value="solaris"/>
+            </property_group>
+        </instance>
+    </service>
+
+    <service name="system/console-login" version="1" type="service">
+        <property_group name="ttymon" type="application">
+            <propval name="terminal_type" type="astring" value="sun"/>
+        </property_group>
+    </service>
+
+    <service name='system/keymap' version='1' type='service'>
+        <instance name='default' enabled='true'>
+            <property_group name='keymap' type='system'>
+                <propval name='layout' type='astring' value='US-English'/>
+            </property_group>
+        </instance>
+    </service>
+
+    <service name="network/physical" version="1" type="service">
+        <instance name="nwam" enabled="false"/>
+        <instance name="default" enabled="true"/>
+    </service>
+
+    <service name='network/install' version='1' type='service'>
+        <instance name='default' enabled='true'>
+            <property_group name='install_ipv4_interface' type='application'>
+                <propval name='name' type='astring' value='net0/v4'/>
+                <propval name='address_type' type='astring' value='static'/>
+                <propval name='static_address' type='net_address_v4' value='x.x.x.x/n'/>
+                <propval name='default_route' type='net_address_v4' value='x.x.x.x'/>
+            </property_group>
+
+            <property_group name='install_ipv6_interface' type='application'>
+                <propval name='name' type='astring' value='net0/v6'/>
+                <propval name='address_type' type='astring' value='addrconf'/>
+                <propval name='stateless' type='astring' value='yes'/>
+                <propval name='stateful' type='astring' value='yes'/>
+            </property_group>
+        </instance>
+    </service>
+
+    <service name='network/dns/install' version='1' type='service'>
+        <instance name='default' enabled='true'>
+            <property_group name='install_props' type='application'>
+                <property name='nameserver' type='net_address'>
+                    <net_address_list>
+                        <value_node value='x.x.x.x'/>
+                    </net_address_list>
+                </property>
+                <property name='search' type='astring'>
+                    <astring_list>
+                        <value_node value='example.com'/>
+                    </astring_list>
+                </property>
+            </property_group>
+        </instance>
+    </service>
+
+    <service name='network/dns/client' version='1' type='service'>
+        <instance name='default' enabled='true'/>
+    </service>
+</service_bundle>
+
--- a/usr/src/cmd/auto-install/sc_sample.xml	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,89 +0,0 @@
-<?xml version='1.0'?>
-<!--
-CDDL HEADER START
-
-The contents of this file are subject to the terms of the
-Common Development and Distribution License (the "License").
-You may not use this file except in compliance with the License.
-
-You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-or http://www.opensolaris.org/os/licensing.
-See the License for the specific language governing permissions
-and limitations under the License.
-
-When distributing Covered Code, include this CDDL HEADER in each
-file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-If applicable, add the following below this CDDL HEADER, with the
-fields enclosed by brackets "[]" replaced with your own identifying
-information: Portions Copyright [yyyy] [name of copyright owner]
-
-CDDL HEADER END
-
-Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
--->
-
-<!--
-Sample system configuration profile for use with Automated Installer
-
-Configures the following:
-* User account name 'jack', password 'jack', GID 10, UID 101, root role, bash shell
-* 'root' role with password 'solaris'
-* Keyboard mappings set to US-English
-* Network configuration is automated with Network Auto-magic
-* DNS name service client is enabled
-
-See installadm(1M) for usage of 'create-profile' subcommand.
--->
-
-<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
-<service_bundle type="profile" name="system configuration">
-    <service name="system/config" version="1">
-      <instance name="default" enabled="true">
-        <property_group name="user_account">
-          <propval name="login" value="jack"/>
-          <propval name="password" value="9Nd/cwBcNWFZg"/>
-          <propval name="description" value="default_user"/>
-          <propval name="shell" value="/usr/bin/bash"/>
-          <propval name="gid" value='10'/>
-          <propval name="type" value="normal"/>
-          <propval name="roles" value="root"/>
-          <propval name="profiles" value="System Administrator"/>
-        </property_group>
-        <property_group name="root_account">
-            <propval name="password" value="$5$dnRfcZse$Hx4aBQ161Uvn9ZxJFKMdRiy8tCf4gMT2s2rtkFba2y4"/>
-            <propval name="type" value="role"/>
-        </property_group>
-      </instance>
-    </service>
-
-    <service version="1" name="system/identity">
-      <instance enabled="true" name="node">
-        <property_group name="config">
-           <propval name="nodename" value="solaris"/>
-        </property_group>
-      </instance>
-    </service>
-
-    <service name="system/console-login" version="1">
-      <property_group name="ttymon">
-        <propval name="terminal_type" value="sun"/>
-      </property_group>
-    </service>
-
-    <service name='system/keymap' version='1'>
-      <instance name='default' enabled='true'>
-        <property_group name='keymap'>
-          <propval name='layout' value='US-English'/>
-        </property_group>
-      </instance>
-    </service>
-
-    <service name="network/physical" version="1">
-      <instance name="nwam" enabled="true"/>
-      <instance name="default" enabled="false"/>
-    </service>
-
-    <service name='network/dns/client' version='1'>
-      <instance name='default' enabled='true'/>
-    </service>
-</service_bundle>
--- a/usr/src/cmd/auto-install/static_network.xml	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,109 +0,0 @@
-<!--
-CDDL HEADER START
-
-The contents of this file are subject to the terms of the
-Common Development and Distribution License (the "License").
-You may not use this file except in compliance with the License.
-
-You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-or http://www.opensolaris.org/os/licensing.
-See the License for the specific language governing permissions
-and limitations under the License.
-
-When distributing Covered Code, include this CDDL HEADER in each
-file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-If applicable, add the following below this CDDL HEADER, with the
-fields enclosed by brackets "[]" replaced with your own identifying
-information: Portions Copyright [yyyy] [name of copyright owner]
-
-CDDL HEADER END
-
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
--->
-
-<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
-<service_bundle type="profile" name="system configuration">
-    <service name="system/config" version="1" type="service">
-        <instance name="default" enabled="true">
-            <property_group name="user_account" type="application">
-                <propval name="login" type="astring" value="jack"/>
-                <propval name="password" type="astring" value="9Nd/cwBcNWFZg"/>
-                <propval name="description" type="astring" value="default_user"/>
-                <propval name="shell" type="astring" value="/usr/bin/bash"/>
-                <propval name="uid" type='count' value='101'/>
-                <propval name="gid" type='count' value='10'/>
-                <propval name="type" type="astring" value="normal"/>
-                <propval name="roles" type="astring" value="root"/>
-            </property_group>
-
-            <property_group name="root_account" type="application">
-                <propval name="password" type="astring" value="$5$VgppCOxA$ycFmYW4ObRRHhtsGEygDdexk5bugqgSiaSR9niNCouC"/>
-                <propval name="type" type="astring" value="role"/>
-            </property_group>
-
-            <property_group name="other_sc_params" type="application">
-                <propval name="timezone" type="astring" value="GMT"/>
-                <propval name="hostname" type="astring" value="solaris"/>
-            </property_group>
-        </instance>
-    </service>
-
-    <service name="system/console-login" version="1" type="service">
-        <property_group name="ttymon" type="application">
-            <propval name="terminal_type" type="astring" value="sun"/>
-        </property_group>
-    </service>
-
-    <service name='system/keymap' version='1' type='service'>
-        <instance name='default' enabled='true'>
-            <property_group name='keymap' type='system'>
-                <propval name='layout' type='astring' value='US-English'/>
-            </property_group>
-        </instance>
-    </service>
-
-    <service name="network/physical" version="1" type="service">
-        <instance name="nwam" enabled="false"/>
-        <instance name="default" enabled="true"/>
-    </service>
-
-    <service name='network/install' version='1' type='service'>
-        <instance name='default' enabled='true'>
-            <property_group name='install_ipv4_interface' type='application'>
-                <propval name='name' type='astring' value='net0/v4'/>
-                <propval name='address_type' type='astring' value='static'/>
-                <propval name='static_address' type='net_address_v4' value='x.x.x.x/n'/>
-                <propval name='default_route' type='net_address_v4' value='x.x.x.x'/>
-            </property_group>
-
-            <property_group name='install_ipv6_interface' type='application'>
-                <propval name='name' type='astring' value='net0/v6'/>
-                <propval name='address_type' type='astring' value='addrconf'/>
-                <propval name='stateless' type='astring' value='yes'/>
-                <propval name='stateful' type='astring' value='yes'/>
-            </property_group>
-        </instance>
-    </service>
-
-    <service name='network/dns/install' version='1' type='service'>
-        <instance name='default' enabled='true'>
-            <property_group name='install_props' type='application'>
-                <property name='nameserver' type='net_address'>
-                    <net_address_list>
-                        <value_node value='x.x.x.x'/>
-                    </net_address_list>
-                </property>
-                <property name='search' type='astring'>
-                    <astring_list>
-                        <value_node value='example.com'/>
-                    </astring_list>
-                </property>
-            </property_group>
-        </instance>
-    </service>
-
-    <service name='network/dns/client' version='1' type='service'>
-        <instance name='default' enabled='true'/>
-    </service>
-</service_bundle>
-
--- a/usr/src/cmd/auto-install/svc/auto-installer	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/svc/auto-installer	Tue May 31 14:21:09 2011 -0700
@@ -45,7 +45,9 @@
 
 # Profile used to enable SCI tool
 ENABLE_SCI_PROFILE=/usr/share/auto_install/sc_profiles/enable_sci.xml
-ENABLE_SCI_DEST=profile_enable_sci.xml
+
+# Installation service list file
+AI_SERVICE_LIST=/var/run/service_list
 
 ISA_INFO=`/usr/bin/uname -p`
 PRTCONF=/usr/sbin/prtconf
@@ -128,12 +130,13 @@
 # If PROFILE_DIR does not exist, or does not contain any profiles,
 # copy into it the profile which will enable SCI tool.
 if [ ! -d $PROFILE_DIR ] ; then
-    $MKDIR $PROFILE_DIR
-    $CP $ENABLE_SCI_PROFILE $PROFILE_DIR/$ENABLE_SCI_DEST
+	$MKDIR $PROFILE_DIR
+	$CP $ENABLE_SCI_PROFILE $PROFILE_DIR
 elif [ -z "$(ls -A $PROFILE_DIR)" ] ; then
-    $CP $ENABLE_SCI_PROFILE $PROFILE_DIR/$ENABLE_SCI_DEST
+	$CP $ENABLE_SCI_PROFILE $PROFILE_DIR
 fi
 
+OPTS=
 #
 # Enable the installer to be run in debug mode if requested.
 #
@@ -146,11 +149,18 @@
 
 	# enable verbose mode for logging service and ICT
 	export LS_DBG_LVL=4
-	$AI_ENGINE -v -m $AI_MANIFEST
-else
-	$AI_ENGINE -m $AI_MANIFEST
+
+	OPTS="$OPTS -v"
 fi
 
+#
+# Pass in installation service list if one exists.
+#
+if [ -f $AI_SERVICE_LIST ] ; then
+	OPTS="$OPTS -r $AI_SERVICE_LIST"
+fi
+
+$AI_ENGINE $OPTS -m $AI_MANIFEST -c $PROFILE_DIR
 ret=$?
 
 # Process exit code returned from AI engine
--- a/usr/src/cmd/auto-install/svc/manifest-locator	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/svc/manifest-locator	Tue May 31 14:21:09 2011 -0700
@@ -55,6 +55,9 @@
 # installation
 AI_DEFAULT_MANIFEST=/.cdrom/auto_install/default.xml
 
+# Directory location for profiles
+PROFILE_DIR=/system/volatile/profile
+
 # Service Discovery Engine
 AISD_ENGINE=/usr/bin/ai_sd
 # Service Choosing Engine
@@ -63,7 +66,8 @@
 AI_MANIFEST=/system/volatile/ai.xml
 # List of services which Service Discovery Engine will
 # contact for obtaining the manifest
-AI_SERVICE_LIST=/system/volatile/service_list.$$
+AI_SERVICE_LIST_TMP=/system/volatile/service_list.$$
+AI_SERVICE_LIST=/system/volatile/service_list
 # debug mode
 AI_DBGLVL=4
 # timeout for service discovery process
@@ -88,7 +92,8 @@
 	#
 	# For SPARC, parameters are stored in 
 	# <install_media_root_dir>/install.conf
-	# This file is downloaded using HTTP protocol and saved in /system/volatile.
+	# This file is downloaded using HTTP protocol and saved in
+	# /system/volatile.
 	# For X86, parameters are in defined in GRUB menu.lst
 	#
 	# TODO: Unify the implementation - bug 7789
@@ -140,7 +145,7 @@
 	print "Service discovery phase initiated" | $TEE_LOGTOCONSOLE
 	print "Service name to look up: $AI_SERVICE_NAME" | $TEE_LOGTOCONSOLE
 
-	$AISD_ENGINE -n $AI_SERVICE_NAME -o $AI_SERVICE_LIST -t $AI_TIMEOUT \
+	$AISD_ENGINE -n $AI_SERVICE_NAME -o $AI_SERVICE_LIST_TMP -t $AI_TIMEOUT \
 	    -d $AI_DBGLVL
 
 	#
@@ -161,7 +166,8 @@
 
 		print "Service $AI_SERVICE_NAME located at $AI_SERVICE_ADDRESS" \
 			"will be used" | $TEE_LOGTOCONSOLE
-		print "$AI_SERVICE_ADDRESS:$AI_SERVICE_NAME" > $AI_SERVICE_LIST
+		print "$AI_SERVICE_ADDRESS:$AI_SERVICE_NAME" > \
+			$AI_SERVICE_LIST_TMP
 	fi
 
 	print "Service discovery finished successfully" | $TEE_LOGTOCONSOLE
@@ -177,16 +183,21 @@
 		return 1
 	fi
 
-	print "Process of obtaining configuration manifest initiated" | \
+	print "Process of obtaining install manifest initiated" | \
 	    $TEE_LOGTOCONSOLE
 
-	$AISC_ENGINE -s $AI_SERVICE_LIST -o $AI_MANIFEST -d $AI_DBGLVL
+	$AISC_ENGINE -s $AI_SERVICE_LIST_TMP -o $AI_MANIFEST -p $PROFILE_DIR \
+		-d $AI_DBGLVL
 
 	if [ $? -ne 0 ] ; then
-		print "Could not obtain valid configuration manifest" | \
+		print "Could not obtain valid install manifest" | \
 		    $TEE_LOGTOCONSOLE
 		return 1
 	fi
+
+	# Save off the used copy of $AI_SERVICE_LIST_TMP
+	cp $AI_SERVICE_LIST_TMP $AI_SERVICE_LIST
+
 	$FILE $AI_MANIFEST | $GREP -q executable && $CHMOD +x $AI_MANIFEST
 	return 0
 }
@@ -329,7 +340,7 @@
 #
 elif [ "$NETBOOT" = "false" ]; then
 	setup_default_manifest
-	print "Using the default configuration manifest for installation" | \
+	print "Using the default install manifest for installation" | \
 	    $TEE_LOGTOCONSOLE
 #
 # Else no "prompt" bootarg, and we're booted from network; do
@@ -340,7 +351,7 @@
 	if [ $? -ne 0 ] ; then
 		exit $SMF_EXIT_ERR_FATAL
 	fi
-	print "Using the configuration manifest obtained via service" \
+	print "Using the install manifest obtained via service" \
 	    "discovery" | $TEE_LOGTOCONSOLE
 fi
 
--- a/usr/src/cmd/auto-install/test/test_auto_install_manifest.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/auto-install/test/test_auto_install_manifest.py	Tue May 31 14:21:09 2011 -0700
@@ -24,7 +24,7 @@
 # Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 '''
-Tests auto install to a specified XML Profile
+Tests auto install to a specified XML Manifest
 '''
 
 import os
@@ -35,7 +35,7 @@
 from solaris_install.engine.test.engine_test_utils import reset_engine
 
 
-class TestAutoInstallProfile(unittest.TestCase):
+class TestAutoInstallManifest(unittest.TestCase):
     '''Tests to auto installation succeeds with -m specified manifest '''
     AI = None
 
--- a/usr/src/cmd/distro_const/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/distro_const/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -20,7 +20,7 @@
 #
 
 #
-# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 include ../Makefile.cmd
@@ -35,7 +35,6 @@
 PROGS=		distro_const
 
 PYMODULES=	cli.py \
-		configuration.py \
 		__init__.py \
 		distro_spec.py \
 		execution_checkpoint.py
--- a/usr/src/cmd/distro_const/__init__.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/distro_const/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -26,8 +26,7 @@
 
 """init module for the distribution constructor"""
 
-__all__ = ["cli", "configuration", "distro_const", "execution_checkpoint",
-           "distro_spec"]
+__all__ = ["cli", "distro_const", "execution_checkpoint", "distro_spec"]
 
 
 import logging
@@ -37,7 +36,6 @@
 import sys
 import time
 
-import configuration
 import distro_spec
 import execution_checkpoint
 
--- a/usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/distro_const/checkpoints/pre_pkg_img_mod.py	Tue May 31 14:21:09 2011 -0700
@@ -36,12 +36,12 @@
 
 from osol_install.install_utils import dir_size, encrypt_password
 from pkg.cfgfiles import PasswordFile
+from solaris_install.configuration.configuration import Configuration
 from solaris_install.engine import InstallEngine
 from solaris_install.engine.checkpoint import AbstractCheckpoint as Checkpoint
 from solaris_install.data_object import ObjectNotFoundError
 from solaris_install.data_object.data_dict import DataObjectDict
 from solaris_install.distro_const import DC_LABEL, DC_PERS_LABEL
-from solaris_install.distro_const.configuration import Configuration
 
 # load a table of common unix cli calls
 import solaris_install.distro_const.cli as cli
--- a/usr/src/cmd/distro_const/configuration.py	Tue May 31 11:07:18 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,166 +0,0 @@
-#!/usr/bin/python
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-
-#
-# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
-#
-
-""" configuration
-
- Configuration object class for handling the <configuration> elements
- in the manifest.
-
-"""
-
-import subprocess
-import sys
-import os.path
-
-from lxml import etree
-
-from solaris_install.data_object import ParsingError
-from solaris_install.data_object.cache import DataObjectCache
-from solaris_install.data_object.simple import SimpleXmlHandlerBase
-
-_NULL = open("/dev/null", "r+")
-
-
-class Configuration(SimpleXmlHandlerBase):
-    TAG_NAME = "configuration"
-    NAME_LABEL = "name"
-    SOURCE_LABEL = "source"
-    TYPE_LABEL = "type"
-    DEST_LABEL = "dest"
-    VALIDATION_LABEL = "validation"
-    PATH_LABEL = "path"
-    ARGS_LABEL = "args"
-    ON_ERROR_LABEL = "on_error"
-
-    def __init__(self, name):
-        super(Configuration, self).__init__(name)
-
-        self.source = None
-        self.dest = None
-        self.type = None
-        self.validation = None
-
-    def to_xml(self):
-        element = etree.Element(Configuration.TAG_NAME)
-
-        element.set(Configuration.NAME_LABEL, self.name)
-        element.set(Configuration.SOURCE_LABEL, self.source)
-
-        if self.type is not None:
-            element.set(Configuration.TYPE_LABEL, self.type)
-
-        if self.dest is not None:
-            element.set(Configuration.DEST_LABEL, self.dest)
-
-        if self.validation is not None:
-            validation_element = etree.SubElement(element,
-                Configuration.VALIDATION_LABEL)
-            for (key, value) in self.validation.items():
-                validation_element.set(key, value)
-                validation_element.set(key, value)
-
-        return element
-
-    @classmethod
-    def can_handle(cls, element):
-        '''
-        Returns True if element has:
-        - the tag 'configuration'
-        - a name attribute
-        - a source attribute
-
-        Otherwise returns False
-        '''
-        if element.tag != cls.TAG_NAME:
-            return False
-
-        for entry in [cls.NAME_LABEL, cls.SOURCE_LABEL]:
-            if element.get(entry) is None:
-                return False
-
-        return True
-
-    @classmethod
-    def from_xml(cls, element):
-        validation = {}
-
-        name = element.get(cls.NAME_LABEL)
-        source = element.get(cls.SOURCE_LABEL)
-        dest = element.get(cls.DEST_LABEL)
-        type = element.get(cls.TYPE_LABEL)
-        path = None
-        args = None
-
-        if not os.path.exists(source):
-            raise ParsingError("Invalid element specified in "
-                               "the %s section " % cls.NAME_LABEL +
-                               "of the manifest.  "
-                               "source does not exist: %s" % source)
-
-        for subelement in element.iterchildren():
-            if subelement.tag == cls.VALIDATION_LABEL:
-                path = subelement.get(cls.PATH_LABEL)
-                args = subelement.get(cls.ARGS_LABEL)
-                on_error = subelement.get(cls.ON_ERROR_LABEL)
-                if path is not None:
-                    if os.path.exists(path):
-                        validation[cls.PATH_LABEL] = path
-                    else:
-                        raise ParsingError("Invalid element specified in "
-                            "the %s section of " % cls.NAME_LABEL +
-                            "the manifest. validation path does not exist: "
-                            "%s" % path)
-                if args is not None:
-                    validation[cls.ARGS_LABEL] = args
-                if on_error is not None:
-                    validation[cls.ON_ERROR_LABEL] = on_error
-
-        # validate the 'source' if a validation path was specified
-        if path is not None:
-            try:
-                if args is not None:
-                    cmd = [path, args, source]
-                else:
-                    cmd = [path, source]
-                subprocess.check_call(cmd, stdout=_NULL, stderr=_NULL)
-            except subprocess.CalledProcessError:
-                raise ParsingError("Error reading %s " % cls.NAME_LABEL +
-                    "element from the source manifest. source manifest "
-                    "specified could not be validated: %s" % source)
-
-        configuration = Configuration(name)
-        configuration.source = source
-        if dest is not None:
-            configuration.dest = dest
-        if type is not None:
-            configuration.type = type
-        configuration.validation = validation
-
-        return configuration
-
-
-# register all the classes with the DOC
-DataObjectCache.register_class(sys.modules[__name__])
--- a/usr/src/cmd/installadm/list.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/installadm/list.py	Tue May 31 14:21:09 2011 -0700
@@ -847,9 +847,9 @@
     """
     Iterates over the criteria which consists of a dictionary with
     possibly arch, min memory, max memory, min ipv4, max ipv4, min mac,
-    max mac, cpu, platform, min network and max network converting it
-    into a dictionary with arch, mem, ipv4, mac, cpu, platform, and
-    network.  Any min/max attributes are stored as a range within the
+    max mac, cpu, platform, min network, max network and zonename, converting
+    it into a dictionary with arch, mem, ipv4, mac, cpu, platform, network
+    and zonename.  Any min/max attributes are stored as a range within the
     new dictionary.
 
     Args
@@ -943,7 +943,7 @@
                 servicename1:[
                              { 'arch':arch1, 'mem':memory1, 'ipv4':ipaddress1,
                                'mac':macaddr1, 'platform':platform1,
-                               'network':network1, 'cpu':cpu1 },
+                               'network':network1, 'cpu':cpu1, 'zonename':z1 },
                              ...
                             ]
             }
--- a/usr/src/cmd/installadm/setup-service.sh	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/installadm/setup-service.sh	Tue May 31 14:21:09 2011 -0700
@@ -36,8 +36,9 @@
 
 . /usr/lib/installadm/installadm-common
 
-IMG_AI_DEFAULT_MANIFEST="/auto_install/default.xml"
-SYS_AI_DEFAULT_MANIFEST="/usr/share/auto_install/default.xml"
+IMG_AI_DEFAULT_MANIFEST="/auto_install/manifest/default.xml"
+OLD_IMG_AI_DEFAULT_MANIFEST="/auto_install/default.xml"
+SYS_AI_DEFAULT_MANIFEST="/usr/share/auto_install/manifest/default.xml"
 AI_SETUP_WS=/var/installadm/ai-webserver
 AI_WS_CONF=$AI_SETUP_WS/ai-httpd.conf
 VARAI=/var/ai
@@ -232,6 +233,10 @@
 
 	if [[ -f ${imagepath}${IMG_AI_DEFAULT_MANIFEST} ]]; then
 		default_manifest_src=${imagepath}${IMG_AI_DEFAULT_MANIFEST}
+	elif [[ -f ${imagepath}${OLD_IMG_AI_DEFAULT_MANIFEST} ]]; then
+		# Support older service images by checking for the
+		# default manifest at the previous location.
+		default_manifest_src=${imagepath}${OLD_IMG_AI_DEFAULT_MANIFEST}
 	elif [[ -f ${SYS_AI_DEFAULT_MANIFEST} ]]; then
 		print "Warning: Using default manifest <" \
 		      "${SYS_AI_DEFAULT_MANIFEST}>"
--- a/usr/src/cmd/system-config/__init__.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/cmd/system-config/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -37,9 +37,12 @@
 import signal
 import sys
 
+from solaris_install import engine
 from solaris_install.data_object import DataObject
-from solaris_install import engine
+from solaris_install.data_object.data_dict import DataObjectDict
 from solaris_install.engine import InstallEngine, RollbackError
+from solaris_install.ict.apply_sysconfig import APPLY_SYSCONFIG_DICT, \
+    APPLY_SYSCONFIG_PROFILE_KEY
 
 _ = gettext.translation("sysconfig", "/usr/share/locale",
                         fallback=True).ugettext
@@ -221,7 +224,21 @@
     eng.register_checkpoint(GENERATE_SC_PROFILE_CHKPOINT,
                             "solaris_install/manifest/writer",
                             "ManifestWriter", args=sc_args, kwargs=sc_kwargs)
-    
+
+    # Add profile location to the ApplySysconfig checkpoint's data dict.
+    # Try to find the ApplySysconfig data dict from the DOC in case it
+    # already exists.
+    as_doc_dict = None
+    as_doc_dict = eng.doc.volatile.get_first_child(name=APPLY_SYSCONFIG_DICT)
+    if as_doc_dict is None:
+        # Initialize new dictionary in DOC
+        as_dict = {APPLY_SYSCONFIG_PROFILE_KEY : sc_profile}
+        as_doc_dict = DataObjectDict(APPLY_SYSCONFIG_DICT, as_dict)
+        eng.doc.volatile.insert_children(as_doc_dict)
+    else:
+        # Add to existing dictionary in DOC
+        as_doc_dict.data_dict[APPLY_SYSCONFIG_PROFILE_KEY] = sc_profile
+
     eng.doc.persistent.insert_children([ConfigProfile()])
 
 
--- a/usr/src/lib/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -52,6 +52,7 @@
 COMSUBDIRS=	liberrsvc_pymod \
 		liberrsvc \
 		install_boot \
+		install_configuration \
 		install_doc \
 		install_engine \
 		install_ict \
--- a/usr/src/lib/Makefile.targ	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/Makefile.targ	Tue May 31 14:21:09 2011 -0700
@@ -115,6 +115,9 @@
 $(ROOTPYTHONVENDORSOLINSTALLBOOT):
 	$(INS.dir)
 
+$(ROOTPYTHONVENDORSOLINSTALLCONFIGURATION):
+	$(INS.dir)
+
 $(ROOTPYTHONVENDORSOLINSTALLDATACACHE):
 	$(INS.dir)
 
@@ -200,6 +203,9 @@
 $(ROOTPYTHONVENDORSOLINSTALLBOOT)/%: %
 	$(CP_P.file)
 
+$(ROOTPYTHONVENDORSOLINSTALLCONFIGURATION)/%: %
+	$(CP_P.file)
+
 $(ROOTPYTHONVENDORSOLINSTALLDATACACHE)/%: %
 	$(CP_P.file)
 
--- a/usr/src/lib/install_common/__init__.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_common/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -381,6 +381,7 @@
 
         self._application_name = application_name
         self._work_dir = work_dir
+        self.data_dict = dict()
 
     @property
     def application_name(self):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_configuration/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,60 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+PYMODS		= __init__.py \
+		configuration.py
+
+PYCMODS		= $(PYMODS:%.py=%.pyc)
+
+ROOTPYMODS	= $(PYMODS:%=$(ROOTPYTHONVENDORSOLINSTALLCONFIGURATION)/%)
+
+ROOTPYCMODS	= $(PYCMODS:%=$(ROOTPYTHONVENDORSOLINSTALLCONFIGURATION)/%)
+
+
+CLOBBERFILES	= $(PYCMODS) 
+CLEANFILES	= $(CLOBBERFILES)
+
+include ../Makefile.lib
+
+HDRS		= $(EXPHDRS) $(PRIVHDRS)
+
+python:
+	$(PYTHON) -m compileall -l $(@D)
+
+all:		$(HDRS) python
+
+install_h:
+
+install:	all .WAIT \
+		$(ROOTPYTHONVENDOR) \
+		$(ROOTPYTHONVENDORSOLINSTALL) \
+		$(ROOTPYTHONVENDORSOLINSTALLCONFIGURATION) \
+		$(ROOTPYMODS) $(ROOTPYCMODS)
+
+lint:		lint_SRCS
+
+FRC:
+
+include ../Makefile.targ
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_configuration/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+""" init module for configuration """
+
+from solaris_install.data_object.cache import DataObjectCache
+from solaris_install.configuration.configuration import Configuration
+
+__all__ = ["configuration"]
+
+# Register the DOC classes
+DataObjectCache.register_class(Configuration)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_configuration/configuration.py	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+""" configuration
+
+ Configuration object class for handling the <configuration> elements
+ in the manifest.
+
+"""
+
+import subprocess
+import sys
+import os.path
+import urllib
+
+from lxml import etree
+
+from solaris_install.data_object import ParsingError
+from solaris_install.data_object.cache import DataObjectCache
+from solaris_install.data_object.simple import SimpleXmlHandlerBase
+
+_NULL = open("/dev/null", "r+")
+
+
+class Configuration(SimpleXmlHandlerBase):
+    TAG_NAME = "configuration"
+    NAME_LABEL = "name"
+    SOURCE_LABEL = "source"
+    TYPE_LABEL = "type"
+    DEST_LABEL = "dest"
+    VALIDATION_LABEL = "validation"
+    PATH_LABEL = "path"
+    ARGS_LABEL = "args"
+    ON_ERROR_LABEL = "on_error"
+
+    TYPE_VALUE_NETWORK = "network"
+    TYPE_VALUE_USER = "user"
+    TYPE_VALUE_SYSCONF = "sysconf"
+    TYPE_VALUE_ZONE = "zone"
+
+    def __init__(self, name):
+        super(Configuration, self).__init__(name)
+
+        self.source = None
+        self.dest = None
+        self.type = None
+        self.validation = None
+
+    def to_xml(self):
+        element = etree.Element(Configuration.TAG_NAME)
+
+        element.set(Configuration.NAME_LABEL, self.name)
+        element.set(Configuration.SOURCE_LABEL, self.source)
+
+        if self.type is not None:
+            element.set(Configuration.TYPE_LABEL, self.type)
+
+        if self.dest is not None:
+            element.set(Configuration.DEST_LABEL, self.dest)
+
+        if self.validation is not None:
+            validation_element = etree.SubElement(element,
+                Configuration.VALIDATION_LABEL)
+            for (key, value) in self.validation.items():
+                validation_element.set(key, value)
+
+        return element
+
+    @classmethod
+    def can_handle(cls, element):
+        '''
+        Returns True if element has:
+        - the tag 'configuration'
+        - a name attribute
+        - a source attribute
+
+        Otherwise returns False
+        '''
+        if element.tag != cls.TAG_NAME:
+            return False
+
+        for entry in [cls.NAME_LABEL, cls.SOURCE_LABEL]:
+            if element.get(entry) is None:
+                return False
+
+        return True
+
+    @classmethod
+    def from_xml(cls, element):
+        validation = {}
+
+        name = element.get(cls.NAME_LABEL)
+        source = element.get(cls.SOURCE_LABEL)
+        dest = element.get(cls.DEST_LABEL)
+        type = element.get(cls.TYPE_LABEL)
+        path = None
+        args = None
+
+        # supported source formats are a local file path that starts with a
+        # leading slash, or a URI string that starts with 'http', 'file',
+        # or 'ftp'.
+        if "://" not in source and not source.startswith("file:/"):
+            # source is a file path:
+            if not os.path.exists(source):
+                raise ParsingError("Invalid element specified in "
+                                   "the %s section " % cls.NAME_LABEL +
+                                   "of the manifest.  "
+                                   "source does not exist: %s" % source)
+        else:
+            try:
+                fileobj = urllib.urlopen(source)
+            except (IOError), e:
+                raise ParsingError("Invalid element specified in "
+                                   "the %s section " % cls.NAME_LABEL +
+                                   "of the manifest.  "
+                                   "Unable to open source (%s): %s" % \
+                                   (source, e))
+
+        for subelement in element.iterchildren():
+            if subelement.tag == cls.VALIDATION_LABEL:
+                path = subelement.get(cls.PATH_LABEL)
+                args = subelement.get(cls.ARGS_LABEL)
+                on_error = subelement.get(cls.ON_ERROR_LABEL)
+                if path is not None:
+                    if os.path.exists(path):
+                        validation[cls.PATH_LABEL] = path
+                    else:
+                        raise ParsingError("Invalid element specified in "
+                            "the %s section of " % cls.NAME_LABEL +
+                            "the manifest. validation path does not exist: "
+                            "%s" % path)
+                if args is not None:
+                    validation[cls.ARGS_LABEL] = args
+                if on_error is not None:
+                    validation[cls.ON_ERROR_LABEL] = on_error
+
+        # validate the 'source' if a validation path was specified
+        if path is not None:
+            try:
+                if args is not None:
+                    cmd = [path, args, source]
+                else:
+                    cmd = [path, source]
+                subprocess.check_call(cmd, stdout=_NULL, stderr=_NULL)
+            except subprocess.CalledProcessError:
+                raise ParsingError("Error reading %s " % cls.NAME_LABEL +
+                    "element from the source manifest. source manifest "
+                    "specified could not be validated: %s" % source)
+
+        configuration = Configuration(name)
+        configuration.source = source
+        if dest is not None:
+            configuration.dest = dest
+        if type is not None:
+            configuration.type = type
+        configuration.validation = validation
+
+        return configuration
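
The relocated Configuration class now accepts either a local file path or a URI
as its source.  A minimal sketch of how a <configuration> manifest element
becomes a DOC object; the element name, type, and source path below are
hypothetical and used only for illustration:

    from lxml import etree
    from solaris_install.configuration.configuration import Configuration

    # Hypothetical element; the name, type, and source path are examples only.
    elem = etree.fromstring('<configuration name="zone1-config" type="zone" '
                            'source="/path/to/zone1-config.xml"/>')
    if Configuration.can_handle(elem):
        # from_xml() verifies that the source exists (or that the URI can be
        # opened) and runs any <validation> command before returning.
        config = Configuration.from_xml(elem)
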
--- a/usr/src/lib/install_ict/__init__.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/__init__.py	Tue May 31 14:21:09 2011 -0700
@@ -44,6 +44,7 @@
 GENERIC_XML = 'etc/svc/profile/generic.xml'
 GEN_LTD_NET_XML = 'generic_limited_net.xml'
 GLOBAL_DB = 'lib/svc/seed/global.db'
+ZONE_DB = 'lib/svc/seed/nonglobal.db'
 INETD_XML = 'inetd_generic.xml'
 INETD_SVCS_XML = 'etc/svc/profile/inetd_services.xml'
 KBD_DEFAULT = 'US-English'
@@ -52,8 +53,10 @@
 MNTTAB = 'etc/mnttab'
 NAME_SVC_XML = 'etc/svc/profile/name_service.xml'
 NS_DNS_XML = 'ns_dns.xml'
+NS_FILES_XML = 'ns_files.xml'
 PKG = '/usr/bin/pkg'
-PROFILE_DIR = '/var/run/profile'
+PLATFORM_XML = 'etc/svc/profile/platform.xml'
+PLATFORM_NONE_XML = 'platform_none.xml'
 PROFILE_DEST = 'etc/svc/profile/site'
 PROFILE_SITE = 'etc/svc/profile/site.xml'
 REPO_DB = 'etc/svc/repository.db'
--- a/usr/src/lib/install_ict/apply_sysconfig.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/apply_sysconfig.py	Tue May 31 14:21:09 2011 -0700
@@ -30,9 +30,13 @@
 
 from solaris_install import Popen
 
+# Checkpoint specific dictionary and keys
+APPLY_SYSCONFIG_DICT = "apply_sysconfig_dict"
+APPLY_SYSCONFIG_PROFILE_KEY = "profile"
+
 
 class ApplySysConfig(ICT.ICTBaseClass):
-    '''ICT checkpoint that applies the system configuration SMF profile
+    '''ICT checkpoint that applies the system configuration SMF profile(s)
        to the target during an AI installation'''
 
     def __init__(self, name):
@@ -43,6 +47,8 @@
         '''
         super(ApplySysConfig, self).__init__(name)
 
+        self.profile = None
+
     def execute(self, dry_run=False):
         '''
             The AbstractCheckpoint class requires this method
@@ -51,8 +57,10 @@
             Validates the profile against service configuration DTD
             using svccfg.
 
-            Copies the profiles in PROFILE_DIR to the profile destination
-            to the target during an AI installation.
+            Copies a profile or directory of profiles to the smf site profile
+            directory in the target destination area.  The path of the profile
+            or directory of profiles is stored in a data dictionary in the DOC
+            with a name defined by this checkpoint.
 
             Parameters:
             - the dry_run keyword paramater. The default value is False.
@@ -63,22 +71,38 @@
               On failure, errors raised are managed by the engine.
         '''
         self.logger.debug('ICT current task: Applying the system '
-                          'configuration profile')
+                          'configuration profile(s)')
 
         # parse_doc populates variables necessary to execute the checkpoint
         self.parse_doc()
 
         sc_profile_dst = os.path.join(self.target_dir, ICT.PROFILE_DEST)
 
-        # make list of files in profile input directory
-        self.logger.debug("Checking for %s", ICT.PROFILE_DIR)
-        if not dry_run:
-            if not os.access(ICT.PROFILE_DIR, os.F_OK):
-                self.logger.debug("%s does not exist", ICT.PROFILE_DIR)
-                self.logger.debug("There are no system configuration profiles "
-                                  "to apply")
-                return
+        # Get the profile specification from the specific
+        # data dictionary stored in the DOC
+        as_doc_dict = self.doc.volatile.get_first_child( \
+            name=APPLY_SYSCONFIG_DICT)
+
+        # If dictionary not set, or profile value not set in
+        # dictionary, there's no work to do.
+        if as_doc_dict is not None:
+            self.profile = as_doc_dict.data_dict.get( \
+                APPLY_SYSCONFIG_PROFILE_KEY)
 
+        if self.profile is None:
+            self.logger.debug("No profile given.")
+            return
+
+        self.logger.debug("Checking for profile %s", self.profile)
+
+        # If profile does not exist, there's no work to do
+        if not os.access(self.profile, os.F_OK):
+            self.logger.debug("Cannot access profile %s" % self.profile)
+            self.logger.debug("There are no system configuration profiles "
+                              "to apply")
+            return
+
+        # Make sure destination directory exists.
         if not dry_run:
             if not os.path.exists(sc_profile_dst):
                 os.makedirs(sc_profile_dst)
@@ -87,33 +111,39 @@
                 # chown root:sys
                 os.chown(sc_profile_dst, 0, grp.getgrnam(ICT.SYS).gr_gid)
 
-        for root, dirs, files in os.walk(ICT.PROFILE_DIR, topdown=False):
-            for name in files:
-                # only copy files matching the template 'profileNNNN.xml'
-                if not name.startswith('profile') or not name.endswith('.xml'):
-                    continue
+        # profile may be a file or directory, handle either case.
+        profile_list = list()
+        if os.path.isdir(self.profile):
+            self.logger.debug("Processing profile directory %s", self.profile)
+            for root, dirs, files in os.walk(self.profile, topdown=False):
+                for name in files:
+                    # Add name to list of profile files to process.
+                    profile_list.append(os.path.join(root, name))
+        else:
+            self.logger.debug("Processing profile file %s", self.profile)
+            profile_list.append(self.profile)
 
-                self.logger.debug("Applying profile %s", name)
-                # validate against DTD using svccfg
-                cmd = [ICT.SVCCFG, 'apply', '-n ', os.path.join(root, name)]
-                if dry_run:
-                    self.logger.debug('Executing: %s', cmd)
-                if not dry_run:
-                    os.environ[ICT.SVCCFG_DTD] = os.path.join(self.target_dir,
-                                                              ICT.SVC_BUNDLE)
-                    os.environ[ICT.SVCCFG_REPOSITORY] = os.path.join(
-                                                              self.target_dir,
-                                                              ICT.SVC_REPO)
-                    Popen.check_call(cmd, stdout=Popen.STORE, \
-                                     stderr=Popen.STORE,
-                        logger=self.logger)
+        for profile in profile_list:
+            self.logger.debug("Applying profile %s", profile)
+            # validate against DTD using svccfg
+            cmd = [ICT.SVCCFG, 'apply', '-n ', profile]
+            if dry_run:
+                self.logger.debug('Executing: %s', cmd)
+            if not dry_run:
+                os.environ[ICT.SVCCFG_DTD] = os.path.join(self.target_dir,
+                                                          ICT.SVC_BUNDLE)
+                os.environ[ICT.SVCCFG_REPOSITORY] = os.path.join(
+                                                          self.target_dir,
+                                                          ICT.SVC_REPO)
+                Popen.check_call(cmd, stdout=Popen.STORE, \
+                                 stderr=Popen.STORE, logger=self.logger)
 
-                fdst = os.path.join(sc_profile_dst, name)
+            fdst = os.path.join(sc_profile_dst, os.path.basename(profile))
 
-                self.logger.debug('Copying %s to %s', name, fdst)
-                if not dry_run:
-                    shutil.copy(os.path.join(root, name), fdst)
-                    # read-only by user (root)
-                    os.chmod(fdst, S_IRUSR)
-                    # chown root:sys
-                    os.chown(fdst, 0, grp.getgrnam(ICT.SYS).gr_gid)
+            self.logger.debug('Copying %s to %s', profile, fdst)
+            if not dry_run:
+                shutil.copy(profile, fdst)
+                # read-only by user (root)
+                os.chmod(fdst, S_IRUSR)
+                # chown root:sys
+                os.chown(fdst, 0, grp.getgrnam(ICT.SYS).gr_gid)
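
A rough sketch of the handshake the reworked checkpoint expects: the
application publishes a profile path (a single file or a directory of
profiles) under APPLY_SYSCONFIG_DICT in the volatile DOC, and ApplySysConfig
reads it back in execute().  The dictionary value below is an assumed example:

    from solaris_install.data_object.data_dict import DataObjectDict
    from solaris_install.engine import InstallEngine
    from solaris_install.ict.apply_sysconfig import APPLY_SYSCONFIG_DICT, \
        APPLY_SYSCONFIG_PROFILE_KEY

    eng = InstallEngine.get_instance()
    # Point the checkpoint at a directory of SMF profiles (path is an example).
    profile_dict = {APPLY_SYSCONFIG_PROFILE_KEY: "/system/volatile/profile"}
    eng.doc.volatile.insert_children(
        DataObjectDict(APPLY_SYSCONFIG_DICT, profile_dict))
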
--- a/usr/src/lib/install_ict/create_snapshot.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/create_snapshot.py	Tue May 31 14:21:09 2011 -0700
@@ -25,6 +25,8 @@
 #
 
 import solaris_install.ict as ICT
+from solaris_install import ApplicationData
+from solaris_install.target.instantiation_zone import ALT_POOL_DATASET
 from solaris_install.target.logical import Options
 from solaris_install.target.libbe.be import be_create_snapshot
 
@@ -76,9 +78,20 @@
 
         self.logger.debug("Creating initial snapshot. be: %s, snapshot: %s",
                           be_name, self.snapshot_name)
+
+        # See if we're operating on a nested BE by getting the alternate
+        # pool dataset.  This should be set by the application.
+        alt_pool_dataset = None
+        app_data = None
+        app_data = self.doc.persistent.get_first_child( \
+            class_type=ApplicationData)
+        if app_data:
+            alt_pool_dataset = app_data.data_dict.get(ALT_POOL_DATASET)
+
         if not dry_run:
             # Create the initial snapshot of the installed system
-            be_create_snapshot(be_name, self.snapshot_name)
+            be_create_snapshot(be_name, self.snapshot_name,
+                altpool=alt_pool_dataset)
 
     def get_progress_estimate(self):
         '''
--- a/usr/src/lib/install_ict/initialize_smf.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/initialize_smf.py	Tue May 31 14:21:09 2011 -0700
@@ -41,6 +41,39 @@
         '''
         super(InitializeSMF, self).__init__(name)
 
+        # The smf repository source and destination
+        self.source_db = None
+        self.destination_db = None
+
+        # The dictionary of smf profile links to set up
+        self.sys_profile_dict = None
+
+    def _execute(self, dry_run=False):
+        '''
+            Parameters:
+            - the dry_run keyword parameter. The default value is False.
+              If set to True, the log message describes the checkpoint tasks.
+
+            Returns:
+            - Nothing
+              On failure, errors raised are managed by the engine.
+        '''
+        self.logger.debug("Copying %s to %s", self.source_db,
+            self.destination_db)
+        if not dry_run:
+            # copy the source data base to the destination data base
+            shutil.copy2(self.source_db, self.destination_db)
+
+        self.logger.debug("ICT current task: "
+                          "Creating symlinks to system profile")
+        if not dry_run:
+            for key, value in self.sys_profile_dict.items():
+                if os.path.exists(value):
+                    os.unlink(value)
+                self.logger.debug("Creating a symlink between %s and %s",
+                                  key, value)
+                os.symlink(key, value)
+
     def execute(self, dry_run=False):
         '''
             The AbstractCheckpoint class requires this method
@@ -58,40 +91,71 @@
             - Nothing
               On failure, errors raised are managed by the engine.
         '''
-        # The smf source and destination
-        source_db = None
-        destination_db = None
-
         self.logger.debug('ICT current task: Setting up the SMF repository')
 
         # parse_doc populates variables necessary to execute the checkpoint
         self.parse_doc()
 
         # Set up the smf source and destination
-        source_db = os.path.join(self.target_dir, ICT.GLOBAL_DB)
-        destination_db = os.path.join(self.target_dir, ICT.REPO_DB)
+        self.source_db = os.path.join(self.target_dir, ICT.GLOBAL_DB)
+        self.destination_db = os.path.join(self.target_dir, ICT.REPO_DB)
 
-        sys_profile_dict = {
+        self.sys_profile_dict = {
             ICT.GEN_LTD_NET_XML:
                 os.path.join(self.target_dir, ICT.GENERIC_XML),
             ICT.NS_DNS_XML:
                 os.path.join(self.target_dir, ICT.NAME_SVC_XML),
             ICT.INETD_XML:
-                os.path.join(self.target_dir, ICT.INETD_SVCS_XML),
-            os.path.basename(ICT.SC_PROFILE):
-                os.path.join(self.target_dir, ICT.PROFILE_SITE)}
+                os.path.join(self.target_dir, ICT.INETD_SVCS_XML)}
+
+        self._execute(dry_run=dry_run)
 
-        self.logger.debug("Copying %s to %s", source_db, destination_db)
-        if not dry_run:
-            # copy the source data base to the destination data base
-            shutil.copy2(source_db, destination_db)
+
+class InitializeSMFZone(InitializeSMF):
+    '''ICT checkpoint sets up an smf repository and corrects
+       the smf system profile.
+    '''
+    def __init__(self, name):
+        '''Initializes the class
+           Parameters:
+               -name - this arg is required by the AbstractCheckpoint
+                       and is not used by the checkpoint.
+        '''
+        super(InitializeSMFZone, self).__init__(name)
+
+    def execute(self, dry_run=False):
+        '''
+            The AbstractCheckpoint class requires this method
+            in sub-classes.
+
+            Initializing SMF for a zone involves two sub-tasks:
+            - Copy /lib/svc/seed/nonglobal.db to /etc/svc/repository.db
+            - Create symlinks to the correct system profile files
 
-        self.logger.debug("ICT current task: "
-                          "Creating symlinks to system profile")
-        if not dry_run:
-            for key, value in sys_profile_dict.items():
-                if os.path.exists(value):
-                    os.unlink(value)
-                self.logger.debug("Creating a symlink between %s and %s",
-                                  key, value)
-                os.symlink(key, value)
+            Parameters:
+            - the dry_run keyword parameter. The default value is False.
+              If set to True, the log message describes the checkpoint tasks.
+
+            Returns:
+            - Nothing
+              On failure, errors raised are managed by the engine.
+        '''
+        self.logger.debug('ICT current task: Setting up the SMF repository')
+
+        # parse_doc populates variables necessary to execute the checkpoint
+        self.parse_doc()
+
+        # Set up the smf source and destination
+        self.source_db = os.path.join(self.target_dir, ICT.ZONE_DB)
+        self.destination_db = os.path.join(self.target_dir, ICT.REPO_DB)
+
+        self.sys_profile_dict = {
+            ICT.GEN_LTD_NET_XML:
+                os.path.join(self.target_dir, ICT.GENERIC_XML),
+            ICT.NS_FILES_XML:
+                os.path.join(self.target_dir, ICT.NAME_SVC_XML),
+            ICT.INETD_XML:
+                os.path.join(self.target_dir, ICT.INETD_SVCS_XML),
+            ICT.PLATFORM_NONE_XML:
+                os.path.join(self.target_dir, ICT.PLATFORM_XML)}
+
+        self._execute(dry_run=dry_run)
--- a/usr/src/lib/install_ict/ips.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/ips.py	Tue May 31 14:21:09 2011 -0700
@@ -24,6 +24,8 @@
 # Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
+import os
+
 import solaris_install.ict as ICT
 import pkg.client.api_errors as api_errors
 import pkg.client.image as image
@@ -76,3 +78,9 @@
                 img.set_property('flush-content-cache-on-success', 'False')
             except api_errors.ImageNotFoundException, err:
                 self.logger.debug("No IPS image found at install target")
+
+            # The above call will end up leaving our process's cwd
+            # in the image's root area, which will cause pain later
+            # on in trying to unmount the image.  So we manually
+            # change dir back to "/".
+            os.chdir("/")
--- a/usr/src/lib/install_ict/transfer_files.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_ict/transfer_files.py	Tue May 31 14:21:09 2011 -0700
@@ -75,13 +75,34 @@
             Returns:
             - Nothing
         '''
-        self.logger.debug("Executing: Copy %s to %s", source, dest)
+        self.logger.debug("Executing: Copy file %s to %s", source, dest)
         # Copy the file, if it exists.
         if os.access(source, os.F_OK):
             shutil.copy2(source, dest)
         else:
             self.logger.debug('%s not found -- skipping', source)
 
+    def copy_dir(self, source, dest):
+        '''
+            Class method to copy a source directory to a destination
+            only if the source directory exists.  This method uses
+            shutil.copytree() to copy the directory, so the destination
+            must not already exist.
+
+            Parameters:
+            - source : Source directory to be copied
+            - dest : destination directory to copy to
+
+            Returns:
+            - Nothing
+        '''
+        self.logger.debug("Executing: Copy dir %s to %s", source, dest)
+        # Copy the directory, if it exists.
+        if os.path.isdir(source):
+            shutil.copytree(source, dest)
+        else:
+            self.logger.debug('%s not found -- skipping', source)
+
     def execute(self, dry_run=False):
         '''
             The AbstractCheckpoint class requires this method
@@ -118,12 +139,7 @@
 
                 # Check for source dir, if so copy entire contents
                 if os.path.isdir(source):
-                    if not os.access(dest, os.F_OK):
-                        os.makedirs(dest)
-
-                    for listfile in os.listdir(source):
-                        srcfile = os.path.join(source, listfile)
-                        self.copy_file(srcfile, dest)
+                    self.copy_dir(source, dest)
                 else:
                     # Create the destination if it does not exist.
                     if not os.access(os.path.dirname(dest), os.F_OK):
--- a/usr/src/lib/install_manifest/dtd/configuration.dtd	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_manifest/dtd/configuration.dtd	Tue May 31 14:21:09 2011 -0700
@@ -29,7 +29,7 @@
 <!--
 	Default to user configuration if type is not set. 
 -->
-<!ATTLIST configuration type (network|sysconf|user) #IMPLIED>
+<!ATTLIST configuration type (network|sysconf|user|zone) #IMPLIED>
 
 <!--
 	Configuration name should match the name of the checkpoint consuming
--- a/usr/src/lib/install_target/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_target/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -1,4 +1,4 @@
-# 
+#
 # CDDL HEADER START
 #
 # The contents of this file are subject to the terms of the
@@ -49,6 +49,7 @@
 		controller.py \
 		discovery.py \
 		instantiation.py \
+		instantiation_zone.py \
 		logical.py \
 		physical.py \
 		size.py \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/usr/src/lib/install_target/instantiation_zone.py	Tue May 31 14:21:09 2011 -0700
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+""" instantiation_zone.py - zone target instantiation checkpoint.  Parses the
+Data Object Cache for the logical targets of a zone root.
+"""
+from solaris_install import ApplicationData
+from solaris_install.engine import InstallEngine
+from solaris_install.target import Target
+from solaris_install.target.instantiation import TargetInstantiation
+from solaris_install.target.logical import BE, DatasetOptions, Filesystem, Zpool
+
+ALT_POOL_DATASET = "alt_pool_dataset"
+
+
+class TargetInstantiationZone(TargetInstantiation):
+    """ class to instantiate targets
+    """
+
+    def __init__(self, name):
+        super(TargetInstantiation, self).__init__(name)
+
+        # lists for specific elements in the DOC
+        self.logical_list = list()
+
+        self.pool_dataset = None
+
+    def parse_doc(self):
+        """ class method for parsing the data object cache (DOC) objects
+        for use by this checkpoint
+        """
+
+        # doc and target nodes
+        self.doc = InstallEngine.get_instance().data_object_cache
+        self.target = self.doc.get_descendants(name=Target.DESIRED,
+                                               class_type=Target)[0]
+
+        # get the alternate "pool" dataset under which to instantiate
+        # the target
+        app_data = self.doc.persistent.get_first_child( \
+            class_type=ApplicationData)
+        if app_data:
+            self.pool_dataset = app_data.data_dict.get(ALT_POOL_DATASET)
+
+        if not self.pool_dataset:
+            raise RuntimeError("No alternate 'pool' dataset specified")
+
+        self.logical_list = self.target.get_descendants(class_type=Zpool)
+
+    def create_logicals(self):
+        """ method used to parse the logical targets and create the objects
+        with action of "create".
+        """
+
+        for zpool in self.logical_list:
+            # For a zone root, we process the BE and filesystems.
+            be_list = zpool.get_children(class_type=BE)
+            fs_list = zpool.get_children(class_type=Filesystem)
+
+            # Process filesystems.
+            be_fs_list = list()
+            shared_fs_list = list()
+            for fs in fs_list:
+                if fs.action == "create":
+                    # TODO: Parse options string and mountpoint and add
+                    # to zfs properties list here...
+
+                    if fs.in_be:
+                        # Append filesystem name to BE filesystem list
+                        be_fs_list.append(fs.name)
+                    else:
+                        # Append filesystem name to shared filesystem list
+                        shared_fs_list.append(fs.name)
+
+            # Initialize BE with the specified in_be filesystems.
+            for be in be_list:
+                be.init(self.dry_run, self.pool_dataset, nested_be=True,
+                        fs_list=be_fs_list, fs_zfs_properties=None,
+                        shared_fs_list=shared_fs_list,
+                        shared_fs_zfs_properties=None)
+
+    def execute(self, dry_run=False):
+        """ Primary execution method use by the Checkpoint parent class
+        """
+        self.logger.debug("Executing Target Instantiation Zone")
+        self.dry_run = dry_run
+
+        self.parse_doc()
+
+        # set up logical devices (BE and filesystems)
+        if self.logical_list:
+            self.create_logicals()
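
TargetInstantiationZone (like the updated CreateSnapshot checkpoint) expects
the calling application to record the zone's alternate pool dataset on its
ApplicationData node.  A sketch of that handoff, with the application name,
constructor arguments, and dataset value assumed for illustration:

    from solaris_install import ApplicationData
    from solaris_install.engine import InstallEngine
    from solaris_install.target.instantiation_zone import ALT_POOL_DATASET

    eng = InstallEngine.get_instance()
    app_data = ApplicationData("zone-install")     # constructor args assumed
    app_data.data_dict[ALT_POOL_DATASET] = "rpool/zones/z1/rpool"
    eng.doc.persistent.insert_children(app_data)
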
--- a/usr/src/lib/install_transfer/ips.py	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/install_transfer/ips.py	Tue May 31 14:21:09 2011 -0700
@@ -106,7 +106,7 @@
     __metaclass__ = abc.ABCMeta
 
     # Variables associated with the package image
-    CLIENT_API_VERSION = 57
+    CLIENT_API_VERSION = 58
     DEF_REPO_URI = "http://pkg.opensolaris.org/release"
     DEF_PROG_TRACKER = progress.CommandLineProgressTracker()
 
@@ -301,7 +301,6 @@
             self.logger.debug("Image Type: zone")
 
         not_allowed = set(["prefix", "repo_uri", "origins", "mirrors"])
-        #img_args = set(self.image_args)
         img_args = set(self.image_args.keys())
         overlap = list(not_allowed & img_args)
         if overlap:
@@ -380,15 +379,52 @@
 
         # Add specified publishers/origins/mirrors to the image.
         for idx, element in enumerate(self._add_publ):
-            self.logger.debug("Adding additional publishers")
-            if self._add_mirror[idx]:
-                repo = publisher.Repository(mirrors=self._add_mirror[idx],
-                                            origins=self._add_origin[idx])
+            # If this publisher doesn't already exist, add it.
+            if not self.api_inst.has_publisher(prefix=element):
+                self.logger.debug("Adding additional publisher %s" % \
+                                  str(element))
+                if self._add_mirror[idx]:
+                    repo = publisher.Repository(mirrors=self._add_mirror[idx],
+                                                origins=self._add_origin[idx])
+                else:
+                    repo = publisher.Repository(origins=self._add_origin[idx])
+                pub = publisher.Publisher(prefix=element, repository=repo)
+                if not self.dry_run:
+                    self.api_inst.add_publisher(pub=pub, refresh_allowed=False)
+            # Else update the existing publisher with this spec
             else:
-                repo = publisher.Repository(origins=self._add_origin[idx])
-            pub = publisher.Publisher(prefix=element, repository=repo)
-            if not self.dry_run:
-                self.api_inst.add_publisher(pub=pub, refresh_allowed=False)
+                self.logger.debug("Updating publisher information for " \
+                                  "%s" % str(element))
+                pub = self.api_inst.get_publisher(prefix=element,
+                                                  duplicate=True)
+                repository = pub.repository
+                if self._add_origin[idx]:
+                    for origin in self._add_origin[idx]:
+                        if not repository.has_origin(origin):
+                            repository.add_origin(origin)
+                if self._add_mirror[idx]:
+                    for mirror in self._add_mirror[idx]:
+                        if not repository.has_mirror(mirror):
+                            repository.add_mirror(mirror)
+                self.api_inst.update_publisher(pub=pub, refresh_allowed=False)
+
+        # Get the publisher information of what the image is set with now.
+        # Re-set publisher_list to that list, so that it can be used later
+        # to be printed out
+        pub_list = self.api_inst.get_publishers(duplicate=True)
+        pub_list_for_print = list()
+        for pub in pub_list:
+            repo = pub.repository
+            origin_uris = list()
+            mirror_uris = list()
+            if repo.origins:
+                for origin in repo.origins:
+                    origin_uris.append(origin.uri)
+            if repo.mirrors:
+                for mirror in repo.mirrors:
+                    mirror_uris.append(mirror.uri)
+            pub_list_for_print.append((pub.prefix, origin_uris, mirror_uris))
+        self.publisher_list = pub_list_for_print
 
         if self.dry_run:
             self.logger.debug("Dry Run: publishers updated")
@@ -456,6 +492,12 @@
                         self.api_inst.execute_plan()
                         self.api_inst.reset()
 
+                        # The above call will end up leaving our process's cwd
+                        # in the image's root area, which will cause pain later
+                        # in trying to unmount the image.  So we manually
+                        # change dir back to "/".
+                        os.chdir("/")
+
                         # Release stdout and stderr
                         sys.stdout = tmp_stdout
                         sys.stderr = tmp_stderr
@@ -531,7 +573,9 @@
     def set_image_args(self):
         '''Set the image args we need set because the information
            was passed in via other attributes. These include progtrack,
-           prefix, repo_uri, origins, and mirrors.
+           prefix, repo_uri, origins, and mirrors.  If we're creating a
+           zone image, we also need to set the use-system-repo property
+           in the props argument.
         '''
         self._image_args = copy.copy(self.image_args)
         self._image_args["progtrack"] = self.prog_tracker
@@ -543,6 +587,13 @@
             self._image_args["origins"] = self._origin[1:]
         if self._mirror and self._mirror is not None:
             self._image_args["mirrors"] = self._mirror
+        if self.is_zone:
+            if self._image_args.has_key("props"):
+                props_dict = self._image_args["props"]
+                props_dict["use-system-repo"] = True
+            else:
+                props_dict = {"use-system-repo": True}
+                self._image_args["props"] = props_dict
 
     def get_ips_api_inst(self):
         '''Get a handle to the api instance. If it is specified to use
@@ -594,6 +645,13 @@
                     version_id=self.CLIENT_API_VERSION, root=self.dst,
                     imgtype=self.completeness, is_zone=self.is_zone,
                     force=True, **self._image_args)
+
+                # The above call will end up leaving our process's cwd in
+                # the image's root area, which will cause pain later on
+                # in trying to unmount the image.  So we manually change
+                # dir back to "/".
+                os.chdir("/")
+
             except api_errors.VersionException, ips_err:
                 self.logger.exception("Error creating the IPS image")
                 raise ValueError("The IPS API version specified, "
@@ -696,21 +754,32 @@
         img_arg_list = dst_image.get_children(Args.ARGS_LABEL, Args)
 
         # If arguments were specified, validate that the
-        # user only specified them once.
+        # user only specified them once, and that they
+        # didn't specify arguments they're not allowed to.
         for args in img_arg_list:
-            self.image_args = args.arg_dict
             # ssl_key and ssl_cert are part of the image specification.
             # If the user has put them into the args that's an error
             # since we wouldn't know which one to use if they were
             # specified in both places.
             not_allowed = set(["ssl_key", "ssl_cert"])
-            img_args = set(self.image_args.keys())
-            overlap = list(not_allowed & img_args)
+            cur_img_args = set(args.arg_dict.keys())
+            overlap = list(not_allowed & cur_img_args)
             if overlap:
                 raise ValueError("The following components may be specified "
                                  "with the destination image of the manifest "
                                  "but are invalid as args: %s" % str(overlap))
 
+            # Check that the current set of image args being processed
+            # are not duplicates of one we've already processed.
+            image_args = set(self.image_args.keys())
+            overlap = list(image_args & cur_img_args)
+            if overlap:
+                raise ValueError("The following components are specified "
+                                 "twice in the manifest: %s" % str(overlap))
+
+            # Update the image args with the current image args being processed.
+            self.image_args.update(args.arg_dict)
+
         # Parse the transfer specific attributes.
         self._parse_transfer_node(soft_node)
 
@@ -825,17 +894,26 @@
             pub_list = src.get_children(Publisher.PUBLISHER_LABEL, Publisher,
                                         not_found_is_err=True)
 
-            # The first publisher is the preferred one.
-            pub = pub_list.pop(0)
-            self._set_publisher_info(pub, preferred=True)
+            # If we're not installing a zone image, the first publisher is
+            # treated as the preferred one (i.e. it's passed as arguments to
+            # the image_create() call); for a zone, all publishers should
+            # be processed as additional publishers since a zone image will
+            # be created with the system repository already in place.
+            if not self.is_zone:
+                pub = pub_list.pop(0)
+                self._set_publisher_info(pub, preferred=True)
             for pub in pub_list:
                 self._set_publisher_info(pub, preferred=False)
         else:
             if self.img_action != self.EXISTING:
                 # If the source isn't specified, use the defaults for create.
-                self._origin = [self.DEF_REPO_URI]
-                self.logger.debug("    Origin Info: %s", self.DEF_REPO_URI)
-                self._mirror = None
+                # For a zone image, the system repo will already be set up
+                # as the default repo, so we don't need to set up the
+                # default here if the source isn't specified.
+                if not self.is_zone:
+                    self._origin = [self.DEF_REPO_URI]
+                    self.logger.debug("    Origin Info: %s", self.DEF_REPO_URI)
+                    self._mirror = None
 
 
 class TransferIPSAttr(AbstractIPS):
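
In short, a global-zone image promotes the first manifest publisher to preferred, while a zone image treats every manifest publisher as additional because the system repository is already in place. A compact sketch of that decision (function name hypothetical, not the changeset's code):

    def split_publishers(pub_list, is_zone):
        # Returns (preferred, additional) publishers.  For a zone image
        # nothing from the manifest is promoted to preferred.
        if is_zone or not pub_list:
            return None, list(pub_list)
        return pub_list[0], list(pub_list[1:])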
--- a/usr/src/lib/libict/Makefile	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/lib/libict/Makefile	Tue May 31 14:21:09 2011 -0700
@@ -64,8 +64,8 @@
 #Install Completion Tasks test program
 ict_test:	.WAIT dynamic ict_test.o
 	${LINK.c} -R$(ROOTADMINLIB:$(ROOT)%=%) \
-		-o ict_test ict_test.o -L$(ROOTADMINLIB) \
-		-Lpics/${ARCH} -L$(ROOTUSRLIB) -lict -llogsvc -lnvpair
+		-o ict_test ict_test.o -Lpics/${ARCH} -L$(ROOTADMINLIB) \
+		-L$(ROOTUSRLIB) -lict -llogsvc -lnvpair
 
 dynamic: $(DYNLIB) .WAIT $(DYNLIBLINK)
 
--- a/usr/src/man/installadm.1m.txt	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/man/installadm.1m.txt	Tue May 31 14:21:09 2011 -0700
@@ -22,13 +22,13 @@
 
      installadm add-manifest -n <svcname>
           -f <manifest_file>  [-m <manifest_name>]
-          [-c <criteria=value|range> ... | -C <criteria.xml>]
+          [-c <criteria=value|list|range> ... | -C <criteria.xml>]
 
      installadm delete-manifest -m <manifest_name> -n <svcname>
 
      installadm set-criteria -n <svcname>
           -m <manifest_name> | -p <profile_name> ...
-          -a|-c <criteria=value|range> ... | -C <criteria.xml>
+          -a|-c <criteria=value|list|range> ... | -C <criteria.xml>
 
      installadm create-client [-b <property>=<value>,...] 
           [-t <imagepath>] -e <macaddr> -n <svcname> 
@@ -38,7 +38,7 @@
      installadm create-profile -n <svcname>  
          -f <profile_file> ... 
          [-p <profile_name>]
-         [-c <criteria=value|range> ... | 
+         [-c <criteria=value|list|range> ... | 
           -C <criteria.xml>]
 
      installadm delete-profile -n <svcname> -p <profile_name> ...
@@ -322,7 +322,7 @@
 
      installadm add-manifest -n <svcname>
      -f <manifest_file>  [-m <manifest_name>]
-     [-c <criteria=value|range> ... | -C <criteria.xml>]
+     [-c <criteria=value|list|range> ... | -C <criteria.xml>]
 
           Associates manifests with a specific install 
           service, thus making the manifests available on 
@@ -348,7 +348,7 @@
      -m   <manifest_name> 
           Optional: Specifies the name of the AI manifest.
     
-     -c   <-c <criteria=value|range> ...> 
+     -c   <-c <criteria=value|list|range> ...> 
           Optional: Specifies criteria to be associated with the
           added non-default manifest. When publishing a default
           manifest, criteria must not be specified. When 
@@ -380,7 +380,7 @@
     
      installadm create-profile -n <svcname>  
            -f <profile_file> ... -p <profile_name>
-          [-c <criteria=value|range> ... | -C <criteria.xml>]
+          [-c <criteria=value|list|range> ... | -C <criteria.xml>]
 
          Associates profiles with a specific install service. 
          Criteria can optionally be associated with a profile by 
@@ -399,7 +399,7 @@
          option is invalid and the names of the profiles
          are derived from their filenames.
 
-         -c   <criteria=value|range> ... 
+         -c   <criteria=value|list|range> ... 
               Specifies criteria to be associated with the
               profile(s).
 
@@ -437,7 +437,7 @@
 
      installadm set-criteria  -n <svcname> 
          [-m <manifest_name>] [-p <profile_name> ...]
-         {-a|-c <criteria=value|range> ... } |
+         {-a|-c <criteria=value|list|range> ... } |
          { -C <criteria.xml> }
 
          Updates criteria of an already published manifest,
@@ -446,16 +446,17 @@
          must be specified with one of the mutually exclusive
          options, -a, -c, or -C.
 
-         Valid criteria are described under add-manifest
+         Valid criteria are described under the add-manifest
          subcommand.
 
-         -a   <criteria=value|range> 
+         -a   <criteria=value|list|range> 
               Specifies criteria to be appended to the existing 
-              criteria for the manifest or profile. If the criteria 
-              specified already exists, the value/range of that 
-              criteria is replaced by the specified value/range.
+              criteria for the manifest or profile. If the
+              criteria specified already exists, the
+              value/list/range of that criteria is replaced by
+              the specified value/list/range.
 
-         -c   <criteria=value|range>  
+         -c   <criteria=value|list|range>  
               Specifies criteria to replace all existing 
               criteria for the manifest or profile.
 
@@ -566,11 +567,12 @@
 CRITERIA
 
      Manifests and profiles can be used to configure AI clients
-     differently according to certain characteristics, or criteria.
-     Only one manifest may be associated with a client,
-     whereas any number of profiles may be associated.
+     differently according to certain characteristics, or
+     criteria.  Only one manifest may be associated with a
+     client, whereas any number of profiles may be associated.
 
-     The criteria values are determined by the AI client during startup.
+     The criteria values are determined by the AI client during
+     startup.
 
      The following AI client system criteria may be specified for
      either manifests and profiles unless otherwise noted:
@@ -585,17 +587,24 @@
      mem      - memory size in MiB per prtconf(1M)
      network  - IP version 4 network number
      platform - platform name per uname(1), option '-i'
+     zonename - name of a zone per zones(5)
 
-     ipv4, mac, mem, network specifications may be expressed as ranged
-     values separated by a hyphen, '-'. To signify no limit to one
-     end of a ranged specification, use 'unbounded'. See examples below.
+     ipv4, mac, mem, network specifications may be expressed as
+     ranged values separated by a hyphen, '-'. To signify no
+     limit to one end of a ranged specification, use 'unbounded'.
+
+     Any criterion that cannot be expressed as a ranged value
+     can be given as a list of values separated by white space.
+
+     See examples below.
 
 CRITERIA FILES
 
      A criteria XML file allows you to specify criteria for a
      manifest or profile by passing the file to the add-manifest, 
      create-profile, or set-criteria commands. Criteria can be
-     specified as a value or a range, by using the following tags.
+     specified as a value, a list of values, or a range, by using
+     the following tags.
 
      For a criterion with a specific value:
 
@@ -608,6 +617,24 @@
      where XXXX is the name of the criterion (e.g. MAC, IPV4,
      MEM, or ARCH) and yyyy is the value of the criterion.
 
+     For a criterion with a list of values:
+
+     <ai_criteria_manifest>
+         <ai_criteria name=XXXX>
+             <value>
+                 aaaa
+                 bbbb
+                 cccc
+                 dddd
+             </value>
+         </ai_criteria>
+     </ai_criteria_manifest>
+     
+     where XXXX is the name of the criterion (e.g. hostname,
+     zonename, or any other criterion that does not take a
+     range) and aaaa, bbbb, cccc, dddd are the values of
+     the criterion.
+
      For a criterion with a range:
 
      <ai_criteria_manifest>
@@ -906,14 +933,32 @@
 .in -9
 .sp
 
-     Example 11: Use the following sample command to append to
+     Example 11: Use the following sample command to add the
+     profile in /export/profile4 to svc1 with a hostname
+     criterion matching 'myhost1', 'host3', or 'host6':
+
+         # installadm create-profile -f /export/profile4 -n svc1
+           -p profile4 -c hostname="myhost1 host3 host6"
+
+         # installadm list -p -n svc1
+.sp
+.in +9
+.nf
+Profile          Criteria
+-------          --------
+profile4         hostname = myhost1 host3 host6
+.fi
+.in -9
+.sp
+
+     Example 12: Use the following sample command to append to
      the criteria of manifest2 of svc1, a criterion of 4096MB
      memory or greater:
 
          # installadm set-criteria -m manifest2 -n svc1
            -a MEM="4096-unbounded"
      
-     Example 12: Use the following sample command to replace the
+     Example 13: Use the following sample command to replace the
      criteria of manifest2 of svc1 with the criteria specified by 
      the file, /tmp/criteria.xml:
 
@@ -923,14 +968,14 @@
      See the CRITERIA FILES section for more information on the
      contents of the criteria xml file.
 
-     Example 13:  Use the following sample command to validate
+     Example 14:  Use the following sample command to validate
      the profiles stored in the file myprofdir/myprofile.xml 
      and herprofdir/herprofile.xml during their development:
 
          # installadm validate -P myprofdir/myprofile.xml \
             -P herprofdir/herprofile.xml -n svc1
 
-     Example 14:  Use the following sample command to export 
+     Example 15:  Use the following sample command to export 
      the profile, myprofile.xml in service, svc1:
 
          # installadm export -p myprofile.xml -n svc1
@@ -957,4 +1002,4 @@
      http://docs.sun.com/
 
 
-		Last Changed February 07, 2011
+		Last Changed May 19, 2011
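
The list-valued criteria documented above are simply whitespace-separated values, so matching a client attribute against one amounts to a membership test; a rough, non-normative illustration (not the installer's actual code):

    def matches_list_criterion(client_value, criterion_value):
        # A list criterion such as "myhost1 host3 host6" matches when the
        # client's value equals any one of its whitespace-separated items.
        return client_value in criterion_value.split()

    # matches_list_criterion("host3", "myhost1 host3 host6")  -> True
    # matches_list_criterion("host9", "myhost1 host3 host6")  -> False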
--- a/usr/src/pkg/manifests/install-distribution-constructor.mf	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/pkg/manifests/install-distribution-constructor.mf	Tue May 31 14:21:09 2011 -0700
@@ -92,8 +92,6 @@
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/__init__.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/cli.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/cli.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/configuration.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/configuration.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/distro_spec.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/distro_spec.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/distro_const/execution_checkpoint.py mode=0444
--- a/usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/pkg/manifests/system-install-auto-install-auto-install-common.mf	Tue May 31 14:21:09 2011 -0700
@@ -29,17 +29,50 @@
 set name=info.classification value="org.opensolaris.category.2008:System/Administration and Configuration"
 set name=variant.arch value=$(ARCH)
 set name=variant.opensolaris.zone value=global value=nonglobal
+user gcos-field="AI User" group=nobody uid=60003 ftpuser=false username="aiuser"
 dir path=usr group=sys
+dir path=usr/bin
 dir path=usr/lib
 dir path=usr/lib/python2.6
 dir path=usr/lib/python2.6/vendor-packages
+dir path=usr/lib/python2.6/vendor-packages/osol_install
+dir path=usr/lib/python2.6/vendor-packages/osol_install/auto_install
 dir path=usr/lib/python2.6/vendor-packages/solaris_install
 dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints
 dir path=usr/share group=sys
 dir path=usr/share/auto_install group=sys
 dir path=usr/share/auto_install/sc_profiles group=sys
+file path=etc/user_attr.d/system%2Finstall%2Fauto-install group=sys
+file path=usr/bin/ai_get_manifest mode=0555
+file path=usr/bin/aimanifest mode=0555
+file path=usr/bin/auto-install mode=0555
 file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/__init__.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/__init__.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/ai_configuration.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/ai_configuration.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection_zone.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection_zone.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/mim.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/mim.pyc mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/process_dtd.py mode=0444
+file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/process_dtd.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/__init__.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/auto_install/__init__.pyc mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/_libaiscf.so mode=0444
@@ -50,10 +83,13 @@
 file path=usr/lib/python2.6/vendor-packages/osol_install/libaimdns.so mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/libaiscf.py mode=0444
 file path=usr/lib/python2.6/vendor-packages/osol_install/netif.so mode=0444
-file path=usr/share/auto_install/ai_manifest.xml mode=0444 group=sys
-file path=usr/share/auto_install/default.xml mode=0444 group=sys
+file path=usr/share/auto_install/manifest/ai_manifest.xml mode=0444 group=sys
+file path=usr/share/auto_install/manifest/default.xml mode=0444 group=sys
+file path=usr/share/auto_install/manifest/zone_default.xml mode=0444 group=sys
 file path=usr/share/auto_install/sc_profiles/enable_sci.xml mode=0444 group=sys
 file path=usr/share/auto_install/sc_profiles/static_network.xml mode=0444 group=sys
 file path=usr/share/auto_install/sc_profiles/sc_sample.xml mode=0444 group=sys
 file path=usr/share/auto_install/version mode=0444 group=sys
+link path=usr/share/auto_install/default.xml target=manifest/default.xml
+link path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_get_manifest.py target=../../../../../bin/ai_get_manifest
 license cr_Sun license=cr_Sun
--- a/usr/src/pkg/manifests/system-install-auto-install.mf	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/pkg/manifests/system-install-auto-install.mf	Tue May 31 14:21:09 2011 -0700
@@ -28,7 +28,6 @@
 set name=info.classification value="org.opensolaris.category.2008:System/Administration and Configuration"
 set name=variant.arch value=$(ARCH)
 set name=variant.opensolaris.zone value=global
-user gcos-field="AI User" group=nobody uid=60003 ftpuser=false username="aiuser"
 dir path=etc group=sys
 dir path=etc/user_attr.d group=sys
 dir path=lib
@@ -41,45 +40,11 @@
 dir path=usr/sbin
 dir path=usr/lib
 dir path=usr/lib/install
-dir path=usr/lib/python2.6
-dir path=usr/lib/python2.6/vendor-packages
-dir path=usr/lib/python2.6/vendor-packages/solaris_install
-dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install
-dir path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints
-dir path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input
-dir path=usr/share group=sys
-dir path=usr/share/auto_install group=sys
-file path=etc/user_attr.d/system%2Finstall%2Fauto-install group=sys
 file path=lib/svc/method/auto-installer mode=0555
 file path=lib/svc/method/manifest-locator mode=0555
-file path=usr/bin/ai_get_manifest mode=0555
 file path=usr/bin/ai_sd mode=0555
-file path=usr/bin/aimanifest mode=0555
-file path=usr/bin/auto-install mode=0555
 file path=usr/lib/install/sc_conv.ksh mode=0555
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_parse_manifest.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_instance.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/auto_install.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/utmpx.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/__init__.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/dmm.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/checkpoints/target_selection.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/__init__.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/mim.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/mim.pyc mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/process_dtd.py mode=0444
-file path=usr/lib/python2.6/vendor-packages/solaris_install/manifest_input/process_dtd.pyc mode=0444
 file path=usr/sbin/get_manifest mode=0555
 file path=lib/svc/manifest/application/auto-installer.xml mode=0444 group=sys
 file path=lib/svc/manifest/application/manifest-locator.xml mode=0444 group=sys
 license cr_Sun license=cr_Sun
-link path=usr/lib/python2.6/vendor-packages/solaris_install/auto_install/ai_get_manifest.py target=../../../../../bin/ai_get_manifest
-
--- a/usr/src/pkg/manifests/system-library-install.mf	Tue May 31 11:07:18 2011 -0700
+++ b/usr/src/pkg/manifests/system-library-install.mf	Tue May 31 14:21:09 2011 -0700
@@ -31,11 +31,12 @@
 dir path=usr group=sys
 dir path=usr/lib
 dir path=usr/lib/python2.6
+dir path=usr/lib/python2.6/vendor-packages
 dir path=usr/lib/python2.6/vendor-packages/bootmgmt
-dir path=usr/lib/python2.6/vendor-packages
 dir path=usr/lib/python2.6/vendor-packages/osol_install
 dir path=usr/lib/python2.6/vendor-packages/solaris_install
 dir path=usr/lib/python2.6/vendor-packages/solaris_install/boot
+dir path=usr/lib/python2.6/vendor-packages/solaris_install/configuration
 dir path=usr/lib/python2.6/vendor-packages/solaris_install/data_object
 dir path=usr/lib/python2.6/vendor-packages/solaris_install/engine
 dir path=usr/lib/python2.6/vendor-packages/solaris_install/ict
@@ -71,6 +72,10 @@
 file path=usr/lib/python2.6/vendor-packages/solaris_install/boot/boot.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/boot/boot_spec.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/boot/boot_spec.pyc
+file path=usr/lib/python2.6/vendor-packages/solaris_install/configuration/__init__.py
+file path=usr/lib/python2.6/vendor-packages/solaris_install/configuration/__init__.pyc
+file path=usr/lib/python2.6/vendor-packages/solaris_install/configuration/configuration.py
+file path=usr/lib/python2.6/vendor-packages/solaris_install/configuration/configuration.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/data_object/__init__.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/data_object/__init__.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/data_object/cache.py
@@ -119,6 +124,8 @@
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/discovery.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/instantiation.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/instantiation.pyc
+file path=usr/lib/python2.6/vendor-packages/solaris_install/target/instantiation_zone.py
+file path=usr/lib/python2.6/vendor-packages/solaris_install/target/instantiation_zone.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/libadm/__init__.py
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/libadm/__init__.pyc
 file path=usr/lib/python2.6/vendor-packages/solaris_install/target/libadm/cfunc.py