usr/src/cmd/js2ai/modules/conv.py
7180314 usr/src/cmd/js2ai/modules/conv.py had typo packge
7057701 js2ai results does not show up under link for install_unit_tests

#!/usr/bin/python2.6
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
#
"""Conversion routines used to Solaris 10 convert rules and profile files to
the xml format used by the Solaris installer

"""

import gettext
import itertools
import os
import os.path
import re
import sys

import pkg.client.api as api
import pkg.client.api_errors as apx
import pkg.client.progress as progress

from solaris_install.js2ai import common
from solaris_install.js2ai.common import _
from solaris_install.js2ai.common import fetch_xpath_node
from solaris_install.js2ai.common import generate_error
from solaris_install.js2ai.common import LOG_KEY_FILE, LOG_KEY_LINE_NUM
from solaris_install.js2ai.common import LVL_CONVERSION, LVL_PROCESS
from solaris_install.js2ai.common import LVL_UNSUPPORTED, LVL_WARNING
from solaris_install.js2ai.common import RULES_FILENAME
from solaris_install.js2ai.default_xml import DEFAULT_XML_EMPTY
from lxml import etree
from StringIO import StringIO

from solaris_install import PKG5_API_VERSION


# These validation patterns were taken directly from the jumpstart
# check script

# Disk Patterns
# SPARC: cwtxdysz or cxdysz (c0t0d0s0 or c0d0s0)
# x86: cwtxdy or cxdy (c0t0d0 or c0d0)

# DISK1_PATTERN covers cxtydz (c0t0d0)
DISK1_PATTERN = re.compile("c[0-9][0-9]*t[0-9][0-9]*.*d[0-9][0-9]*$")
# DISK2_PATTERN covers cxdy (c0d0)
DISK2_PATTERN = re.compile("c[0-9][0-9]*.*d[0-9][0-9]*$")

# Slice Patterns: cwtxdysz or cxdysz (c0t0d0s0 or c0d0s0)
SLICE1_PATTERN = re.compile("(c[0-9][0-9]*t[0-9][0-9]*.*d[0-9][0-9]*)s[0-7]$")
SLICE2_PATTERN = re.compile("(c[0-9][0-9]*.*d[0-9][0-9]*)s[0-7]$")

NUM_PATTERN = re.compile("[0-9][0-9]*$")
SIZE_PATTERN = re.compile("([0-9][0-9]*)([gm]?)$")

FILESYS_ARG_PATTERN = re.compile("..*:/..*")
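
# For illustration only: DISK1_PATTERN matches names such as "c0t0d0" or the
# MPXIO/WWN form "c0t5000CCA0150F7D6Cd0", DISK2_PATTERN matches "c0d0", the
# SLICE patterns additionally require a trailing slice such as "s0", and
# SIZE_PATTERN accepts plain megabyte counts ("4096") or sizes with a unit
# ("512m", "20g")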

DEFAULT_POOL_NAME = "rpool"
DEFAULT_VDEV_NAME = "rpool_vdev"
VDEV_SUFFIX = "_vdev"

DEFAULT_SWAP_POOL_NAME = "swap_pool"

# The text form when adding a localization facet
# in a manifest
# <software name="ips" type="IPS">
#      <destination>
#        <image>
#          <facet set="false">facet.locale.*</facet>
#          ....
#
FACET_LOCALE_FORM = "facet.locale.%s"
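# For example, FACET_LOCALE_FORM % "de" yields "facet.locale.de"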

SOFTWARE_INSTALL = "install"
SOFTWARE_UNINSTALL = "uninstall"

REDUNDANCY_NONE = "none"
REDUNDANCY_MIRROR = "mirror"

DEVICE_ANY = "any"
SIZE_ALL = "all"
SIZE_AUTO = "auto"
SIZE_DELETE = "delete"
SIZE_EXISTING = "existing"
SIZE_FREE = "free"
SIZE_MAXFREE = "maxfree"

PARTITIONING_DEFAULT = "default"
PARTITIONING_EXPLICIT = "explicit"

# Prefix for keyword operations like fdisk that specify the <device> as
# rootdisk.
#
# fdisk rootdisk <type> <size>
#
PREFIX_ROOTDISK = "rootdisk"


# Prefix for keyword operations like filesys that specify the <slice> as
# a rootdisk slice.
#
# filesys rootdisk.s1 10000 swap
#
PREFIX_ROOTDISK_DOT = "rootdisk."

FILESYS_DEFAULT_MOUNT_POINT = "unnamed"

# Follow the same naming scheme used for mirror pools by the old jumpstart
# scripts.  When not specified, mirror names start with the letter "d"
# followed by a number between 0 and 127
DEFAULT_MIRROR_POOL_NAME = "d"


class XMLRuleData(object):
    """This object holds all the data read in from the rules file.  This data
    is converted into an xml document which then can be manipluated as needed.

    """

    def __init__(self, rule_dict, report):
        """Initialize the object

        Arguments:
        rule_dict - a dictionary containing the key/value pairs read
                in from the rules file
        report - the error report

        """
        self._root = None
        self._report = report
        self.rule_dict = rule_dict
        self._extra_log_params = {LOG_KEY_FILE: RULES_FILENAME,
                                  LOG_KEY_LINE_NUM: 0}
        self.__process_rule()

    def __gen_err(self, lvl, message):
        """Log the specified error message at the specified level and
           increment the error count associated with that log level in
           the conversion report by 1

        """
        generate_error(lvl, self._report, message, self._extra_log_params)

    @property
    def report(self):
        """Conversion report associated with the object"""
        return self._report

    @property
    def root(self):
        """The xml root element"""
        return self._root

    def __unsupported_keyword(self, keyword, values):
        """Generate an unsupported keyword error message"""
        self.__gen_err(LVL_UNSUPPORTED,
                       _("unsupported keyword: %(key)s") % {"key": keyword})

    def __unsupported_negation(self):
        """Generate an unsupported negation error message"""
        self.__gen_err(LVL_UNSUPPORTED,
                       _("negation '!' not supported in manifests"))

    def __invalid_syntax(self, keyword):
        """Generate an invalid syntax error message"""
        self.__gen_err(LVL_PROCESS,
                       _("invalid syntax for keyword '%(key)s' "
                         "specified") % {"key": keyword})

    def __convert_arch(self, keyword, values):
        """Converts arch keyword and value from a line in the
        rules file into the xml format for outputting into the
        criteria file.

        """
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        if values[0] in ["sun4c", "sun4d", "sun4m"]:
            self.__gen_err(LVL_UNSUPPORTED,
                           _("Solaris 11 does not support the specified "
                             "arch '%(arch)s'") % {"arch": values[0]})
            return
        if values[0] == "sun4u":
            self.__gen_err(LVL_WARNING,
                           _("only a limited set of sun4u hardware is "
                             "supported by Solaris 11.  Consult the Solaris "
                             "11 documentation to ensure that the hardware "
                             "you wish to install on is supported"))
        self.__convert_common(keyword, values)

    def __convert_common(self, keyword, values):
        """Converts the specified keyword and value from a line in the
        rules file into the xml format for outputting into the
        criteria file.

        """
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        criteria_name = etree.SubElement(self._root,
                                         common.ELEMENT_AI_CRITERIA)
        try:
            criteria_name.set(common.ATTRIBUTE_NAME,
                              self.rule_keywd_conv_dict[keyword])
        except KeyError:
            self.__unsupported_keyword(keyword, values)
            # since we've already added an element to the tree we need to
            # cleanup that element due to the exception.
            self._root.remove(criteria_name)
            return
        crit_value = etree.SubElement(criteria_name, common.ELEMENT_VALUE)
        crit_value.text = values[0]

    def __convert_memsize(self, keyword, values):
        """Converts memsize value from the form 'value1-value2' to
        'value1 value2' and outputs the range node to the criteria file
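
        For example, 'memsize 2048-4096' is emitted as a <range> of
        '2048 4096', while 'memsize 4096' is emitted as a single <value>.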

        """
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        criteria_name = etree.SubElement(self._root,
                                         common.ELEMENT_AI_CRITERIA)
        criteria_name.set(common.ATTRIBUTE_NAME, "mem")
        if "-" in values[0]:
            crit_range = etree.SubElement(criteria_name, common.ELEMENT_RANGE)
            crit_range.text = values[0].replace("-", " ")
        else:
            crit_value = etree.SubElement(criteria_name, common.ELEMENT_VALUE)
            crit_value.text = values[0]

    def __convert_network(self, keyword, values):
        """Converts the network keyword and value from a line in the
        rules file into the xml format for outputting into the
        criteria file.
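
        For example, 'network 192.168.2.0' produces an ipv4 <range> of
        '192.168.2.0 192.168.2.255'.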

        """
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        criteria_name = etree.SubElement(self._root,
                                         common.ELEMENT_AI_CRITERIA)
        criteria_name.set(common.ATTRIBUTE_NAME, "ipv4")
        try:
            addr_a, addr_b, addr_c, _remainder = values[0].split(".", 3)
        except ValueError:
            self.__invalid_syntax(keyword)
            # since we've already added an element to the tree we need to
            # cleanup that element due to the exception.
            self._root.remove(criteria_name)
            return

        crit_range = etree.SubElement(criteria_name, common.ELEMENT_RANGE)
        net_range = ("%s %s.%s.%s.255") % (values[0], addr_a, addr_b, addr_c)
        crit_range.text = net_range

    def __process_rule(self):
        """Process the rules dictionary entries and convert to an xml doc"""
        if self.rule_dict is None:
            # There's nothing to convert.  This is a valid condition
            # for example if the file couldn't be read
            self._report.conversion_errors = 0
            self._report.unsupported_items = 0
            self._root = None
            return

        if self._root is not None:
            return

        self._root = etree.Element(common.ELEMENT_AI_CRITERIA_MANIFEST)

        key_dict = self.rule_dict.key_values_dict
        for key_values in key_dict.iterkeys():
            keyword = key_dict[key_values].key
            values = key_dict[key_values].values
            line_num = key_dict[key_values].line_num
            if line_num is None or values is None or keyword is None:
                raise ValueError
            self._extra_log_params[LOG_KEY_LINE_NUM] = line_num
            if "!" in keyword:
                self.__unsupported_negation()
                continue
            try:
                function_to_call = self.rule_conversion_dict[keyword]
            except KeyError:
                self.__unsupported_keyword(keyword, values)
            else:
                function_to_call(self, keyword, values)
        children = list(self._root)
        if len(children) == 0:
            self._root = None

    rule_conversion_dict = {
        "any": __unsupported_keyword,
        "arch": __convert_arch,
        "disksize": __unsupported_keyword,
        "domainname": __unsupported_keyword,
        "hostaddress": __convert_common,
        "hostname": __unsupported_keyword,
        "installed": __unsupported_keyword,
        "karch": __convert_arch,
        "memsize": __convert_memsize,
        "model": __convert_common,
        "network": __convert_network,
        "osname": __unsupported_keyword,
        "probe": __unsupported_keyword,
        "totaldisk": __unsupported_keyword,
        }

    rule_keywd_conv_dict = {
        "arch": "cpu",
        "hostaddress": "ipv4",
        "karch": "arch",
        "model": "platform",
        }
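
    # For illustration only (element names are taken from common): a rules
    # file line such as "model SUNW,Sun-Fire-V240" is converted by
    # __convert_common into
    #
    #   <ai_criteria name="platform">
    #     <value>SUNW,Sun-Fire-V240</value>
    #   </ai_criteria>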


class XMLProfileData(object):
    """This object takes the profile data and converts it to an xml document"""

    def __init__(self, name, prof_dict, report, default_xml, local):
        """Initialize the object

        Arguments:
        name - the name of the profile
        prof_dict - a dictionary containing the key/value pairs read
                in from the profile
        report - the error report
        default_xml - the XMLDefaultData object containing the xml tree
                hierarchy that the prof_dict data will be merged into
        local - boolean flag indicating whether package name lookups are
                local only
        """
        self.profile_name = name
        self._report = report
        if default_xml is None:
            default_tree = etree.parse(StringIO(DEFAULT_XML_EMPTY))
        else:
            default_tree = common.tree_copy(default_xml.tree)
        self._tree = default_tree
        self._root = self._tree.getroot()

        xpath = "/auto_install/ai_instance"
        self._ai_instance = fetch_xpath_node(self._tree, xpath)
        if self._ai_instance is None:
            tree = etree.parse(StringIO(DEFAULT_XML_EMPTY))
            sys.stderr.write(etree.tostring(self._tree, pretty_print=True))
            expected_layout = etree.tostring(tree, pretty_print=True)
            raise ValueError(_("<ai_instance> node not found. "
                               "%(filename)s does not conform to the expected "
                               "layout of:\n\n%(layout)s") %
                               {"filename": default_xml.name, \
                                "layout": expected_layout})

        self._extra_log_params = {LOG_KEY_FILE: self.profile_name,
                                  LOG_KEY_LINE_NUM: 0}
        self._target = None
        self._image_node = None
        self._local = local
        self.inst_type = "ips"
        self.prof_dict = prof_dict
        self._partitioning = None
        self._usedisk = list()
        self._boot_device = None
        self._root_device = None
        #
        # _rootdisk will be used to hold the Jumpstart rootdisk keyword.
        # We won't be able to determine this automatically but we can follow
        # the initial priority rules as outlined in the
        # How JumpStart Determines a System's Root Disk (Initial Installation)
        #
        # 1. If the root_device keyword is specified in the profile, the
        #    JumpStart program sets rootdisk to the root device.
        # 2. If rootdisk is not set and the boot_device keyword is specified
        #    in the profile, the JumpStart program sets rootdisk to the boot
        #    device.
        # 3. If rootdisk is not set and a filesys cwtxdysz size / entry is
        #    specified in the profile, the JumpStart program sets rootdisk to
        #    the disk that is specified in the entry.
        #
        self._rootdisk = None
        self._rootdisk_set_by_keyword = None
        self._rootdisk_size = SIZE_ALL
        self._root_pool = None
        self._root_pool_create_via_keyword = None
        self._root_pool_name = DEFAULT_POOL_NAME
        self._arch = common.ARCH_GENERIC
        self._ai_instance.set(common.ATTRIBUTE_NAME, self.profile_name)
        self.__process_profile()

    def __gen_err(self, lvl, message):
        """Log the specified error message at the specified level and
           increment the error count associated with that log level in
           the conversion report by 1

        """
        generate_error(lvl, self._report, message, self._extra_log_params)

    def __device_name_conversion(self, device):
        """Takes a device and if that device is a slice specified device
        removes the slice portion of the device and returns the new device
        name.  If the device passed in is not a slice device it returns the
        original device passed in
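
        For example, 'c0t0d0s0' is converted to 'c0t0d0', while 'c0t0d0'
        is returned unchanged.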

        """
        match_pattern = SLICE1_PATTERN.match(device)
        if match_pattern:
            device = match_pattern.group(1)
        else:
            match_pattern = SLICE2_PATTERN.match(device)
            if match_pattern:
                device = match_pattern.group(1)
        return device

    def __duplicate_keyword(self, keyword):
        """Log a duplicate keyword error and add a process error to report"""
        self.__gen_err(LVL_PROCESS,
                       _("invalid entry, duplicate keyword encountered: "
                         "%(key)s") % {"key": keyword})

    def __is_valid_device_name(self, device):
        """ Validate the disk name based on the regexp used by the check script

         The disk name is either cXtXdX or cXdX
         For world wide name(MPXIO), disk is of the form
         cXtX[combination of alpha numeric characters]dX
         c[0-9][0-9]*t[0-9][0-9]*.*d[0-9][0-9]*$
         c[0-9][0-9]*.*d[0-9][0-9]*$

         Returns: True if valid, False otherwise

         """
        match_pattern = DISK1_PATTERN.match(device)
        if not match_pattern:
            match_pattern = DISK2_PATTERN.match(device)
            if not match_pattern:
                return False
        return True

    def __is_valid_device(self, device):
        """ Validate the disk name based on the regexp used by the check script

        A valid device is a string that is either a) a
        string that starts with /dev/dsk/ and ends with a
        valid slice name, or b) a valid slice name.
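
        For example, '/dev/dsk/c0t0d0s0' and 'c0d0s3' are both accepted.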

        Returns: True if valid, False otherwise

        """
        if device.startswith("/dev/dsk"):
            device = os.path.basename(device)
        return self.__is_valid_slice(device)

    def __is_valid_slice(self, slice_name):
        """Validate the slice name based on the regexp used by the check script

        The slice name is either cXtXdXsX or cXdXsX
        For world wide name (MPXIO), disk is of the form
        cXtX[combination of alpha numeric characters]dXsX

        Returns: True if valid, False otherwise

        """
        if not SLICE1_PATTERN.match(slice_name):
            if not SLICE2_PATTERN.match(slice_name):
                return False
        return True

    def __is_valid_mirror(self, keyword, devices, size,
                          root_conflict_check):
        """Check the devices that will make up the mirror to ensure there
        are no conflicts

        Arguments:
        keyword - the keyword being processed
        devices - list of devices
        size - the size to assign to the mirror
        root_conflict_check - perform check to see if devices conflict with
                root_disk setting.  This should only be done if creating
                a root pool mirror
        """
        if DEVICE_ANY in devices:
            self.__gen_err(LVL_UNSUPPORTED,
                           _("use of the device entry 'any' is not "
                             "supported when a mirrored %(keyword)s is "
                             "specified") % {"keyword": keyword})
            return False
        unique = set(devices)
        if len(unique) != len(devices):
            self.__gen_err(LVL_CONVERSION,
                           _("invalid syntax: mirror devices are not unique"))
            return False
        if size == SIZE_ALL:
            # When the size is all (entire disk) the underlining disk
            # associated with each slice must be unique
            unique = []
            for disk_slice in devices:
                disk, _slice_num = disk_slice.split("s")
                if disk in unique:
                    self.__gen_err(LVL_CONVERSION,
                                   _("invalid syntax: duplicate device "
                                     "%(disk)s found, underlying devices "
                                     "for a mirror must be different when "
                                     "a size of 'all' is specified.") %
                                     {"disk": disk})
                    return False
                unique.append(disk)
        if root_conflict_check:
            for device in devices:
                # Check for conflict.
                if self.__rootdisk_slice_conflict_check(keyword, device):
                    # We've got a condition like
                    #
                    # root_device cxtxdxs1
                    # pool newpool auto auto auto mirror cxtxdxs0 cxtxdxs1
                    #
                    # A warning message has already been output
                    break
        return True

    @property
    def conversion_report(self):
        """Return the converstion report associated with this object"""
        return self._report

    @property
    def architecture(self):
        """Return the architecture for this profile.  A value of NONE
        indicates the architecture is unknown.  If known a value of
        common.ARCH_X86 or common.ARCH_SPARC will be returned"""
        return self._arch

    def __change_arch(self, arch):
        """Change the architecture setting that this profile is
           being generated for.  Check for the one possible conflict
           condition and update error report appropriately.  Returns True
           if change represents no conflict, False otherwise

        """
        if self._arch == common.ARCH_GENERIC:
            self._arch = arch
        elif arch == self._arch:
            # Already set
            return True
        else:
            # Error: we've got a profile that is mixing x86 and sparc syntax.
            # There's only one way this can happen.
            self.__gen_err(LVL_CONVERSION,
                           _("architecuture conflict detected. fdisk is an "
                             "x86 only keyword operation. This conflicts "
                             "with 'boot_device %(dev)s' which was "
                             "specified using the SPARC device syntax "
                             "instead of the x86 device syntax of cwtxdy "
                             "or cxdy") % {"dev": self._boot_device})
            return False
        return True

    def __invalid_syntax(self, keyword):
        """Generate invalid keyword error"""
        self.__gen_err(LVL_PROCESS,
                       _("invalid syntax for keyword '%(key)s' specified") % \
                         {"key": keyword})

    def __unsupported_keyword(self, keyword, values):
        """Generate unsupported keyword error"""
        self.__gen_err(LVL_UNSUPPORTED,
                       _("unsupported keyword: %(key)s") % {"key": keyword})

    def __unsupported_value(self, keyword, value):
        """Generate unsupported value error"""
        self.__gen_err(LVL_UNSUPPORTED,
                        _("unsupported value for '%(key)s' specified: "
                          "%(val)s ") % {"val": value, "key": keyword})

    def __unsupported_syntax(self, keyword, msg):
        """Generate unsupported syntax error"""
        self.__gen_err(LVL_UNSUPPORTED,
                       _("unsupported syntax for '%(key)s' specified: "
                         "%(msg)s") % {"key": keyword, "msg": msg})

    def __root_pool_exists(self, keyword):
        """Check whether root pool has been created and generate error if the
        root pool already exists.  If exists adds a conversion error to report.
        Return True if exists, False otherwise

        """
        if self._root_pool is not None:
            # If the root pool already exists we reject the entry
            self.__gen_err(LVL_CONVERSION,
                           _("the ZFS root pool was already created using "
                             "the '%(created_by)s' keyword, ignoring "
                             "'%(keyword)s' definition") % \
                             {"keyword": keyword,
                              "created_by":
                              self._root_pool_create_via_keyword})
            return True
        return False

    def __create_logical(self, noswap, nodump):
        """Create the <logical noswap=$noswap nodump=$nodump> node"""
        logical = etree.SubElement(self._target, common.ELEMENT_LOGICAL)
        logical.set(common.ATTRIBUTE_NOSWAP, noswap)
        logical.set(common.ATTRIBUTE_NODUMP, nodump)
        return logical

    def __create_root_pool(self, created_by_keyword,
                           pool_name=DEFAULT_POOL_NAME,
                           noswap="false", nodump="true"):
        """Tests to see if a root pool currently exists.  If it exists a
        the existing root pool is returned. If no root pool exists the
        pool will be created with the specified pool name.

        Arguments:
        created_by_keyword - the keyword to associate with the creation of
                   the root pool.  This will be used in the error generation
                   for root pool already exists messages
        pool_name - the name to assign to the root pool
        noswap - noswap for logical component zpool resides in.
                 Expected value is "true"/"false"
        nodump - nodump for logical component zpool resides in.
                 Expected value is "true"/"false"

        """
        if self._root_pool is not None:
            return self._root_pool

        if self._target is None:
            self._target = etree.Element(common.ELEMENT_TARGET)
            self._ai_instance.insert(0, self._target)
        logical = self._target.find(common.ELEMENT_LOGICAL)
        if logical is None:
            logical = self.__create_logical(noswap, nodump)
        self._root_pool = self.__create_zfs_pool(pool_name)
        self._root_pool.set(common.ATTRIBUTE_IS_ROOT, "true")
        self._root_pool_create_via_keyword = created_by_keyword
        return self._root_pool

    def __create_vdev(self, parent, redundancy="none", name=DEFAULT_VDEV_NAME):
        """Tests to see if a vdev with the specified name currently exists.
        If it exists the existing vdev is returned. If no vdev exists the
        vdev will be created with the specified name and redundancy.

        Arguments:
        parent - the parent node of the vdev to create
        name - the name of the vdev
        redundancy - the vdev redundancy

        """
        xpath = "./vdev[@name='%s']" % name
        vdev = fetch_xpath_node(parent, xpath)
        if vdev is None:
            vdev = etree.SubElement(parent, common.ELEMENT_VDEV)
        vdev.set(common.ATTRIBUTE_NAME, name)
        vdev.set(common.ATTRIBUTE_REDUNDANCY, redundancy)
        return vdev

    def __create_zvol(self, parent, name, use, zvol_size):
        """Creates a zvol with the specifed name, size, and use.

        Arguments:
        parent - the parent node to create the vzol under
        name - the name of the vpool to create
        use - the use to specify for the zvol
        vpool_size - the size for the zvol.  Size should include measurement
                like mb, gb, etc
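
        For example (element and attribute names here follow the usual
        definitions in common), a swap zvol named "swap" with a zvol_size
        of "2048mb" results in roughly:

            <zvol name="swap" use="swap">
                <size val="2048mb"/>
            </zvol>

        and the enclosing <logical> node has noswap set to "false".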

        """
        vpool = etree.SubElement(parent, common.ELEMENT_ZVOL)
        vpool.set(common.ATTRIBUTE_NAME, name)
        vpool.set(common.ATTRIBUTE_USE, use)
        size = etree.SubElement(vpool, common.ELEMENT_SIZE)
        size.set(common.ATTRIBUTE_VAL, zvol_size)

        # Depending on the use specified, adjust the parent's parent node's
        # (<logical> node) attributes to indicate that a swap or dump
        # volume is available
        logical = parent.getparent()
        if logical is not None:
            if use == "swap":
                logical.set(common.ATTRIBUTE_NOSWAP, "false")
            elif use == "dump":
                logical.set(common.ATTRIBUTE_NODUMP, "false")

    def __create_zfs_pool(self, pool_name):
        """Create the <zpool> xml structure"""

        logical = self._target.find(common.ELEMENT_LOGICAL)
        if logical is None:
            logical = self.__create_logical(noswap="false", nodump="true")

        zpool = etree.SubElement(logical, common.ELEMENT_ZPOOL)
        zpool.set(common.ATTRIBUTE_NAME, pool_name)
        return zpool

    def __create_disk_node(self, disk_name, whole_disk):
        """Create the <disk><disk_name></disk> structure used to represent a
           disk in the system

           <disk whole_disk="true">
               <disk_name name="${disk_name}"/>
           </disk>

        """
        disk = etree.Element(common.ELEMENT_DISK)
        self._target.insert(0, disk)
        diskname_node = etree.SubElement(disk, common.ELEMENT_DISK_NAME)
        diskname_node.set(common.ATTRIBUTE_NAME, disk_name)
        diskname_node.set(common.ATTRIBUTE_NAME_TYPE, "ctd")
        if whole_disk:
            disk.set(common.ATTRIBUTE_WHOLE_DISK, "true")
        return disk

    def __add_device(self, device, size=None,
                     in_pool=DEFAULT_POOL_NAME,
                     in_vdev=DEFAULT_VDEV_NAME, is_swap=None):
        """Added device to the target xml hierachy

           Arguments:
           device - the device/slice to add.  "any" may be specified
           size - #size, all, or none
           in_pool - the root pool to associate the device with, if any.
           in_vdev - the vdev the device is placed in
           is_swap - Indicates the device is a non-ZFS swap device.
                     May not be mixed with in_pool or in_vdev

        """
        if device == DEVICE_ANY:
            # We don't generate any structure for 'any'.  This tells the AI
            # to automatically discover the root disk to use
            return
        if size == SIZE_ALL:
            delete_existing = True
        else:
            delete_existing = False
        try:
            disk_name, slice_num = device.split("s")
        except ValueError:
            disk_name = device
            # For Solaris 11 we default to s0 if no slice is specified
            slice_num = "0"
            delete_existing = True

        # Check to make sure that the device we are adding isn't in the
        # usedisk list.  If it is remove it so we don't try to use it later
        if self._usedisk.count(disk_name):
            self._usedisk.remove(disk_name)
            if self._partitioning == PARTITIONING_DEFAULT:
                delete_existing = True

        disk_node = self.__fetch_disk_node(disk_name)
        if disk_node is None:
            if self._arch == common.ARCH_GENERIC:
                # The architecture of the manifest represented by the xml tree
                # associated with this object is currently set as GENERIC.
                # The Jumpstart profile operation (via keyword) now being
                # processed cannot be performed in a generic fashion.  As such
                # when completed it will be necessary to generate 2 different
                # manifests.  One for SPARC and one for x86.  We accomplish
                # this by setting the _arch flag to None and then internally
                # generating the manifest as an x86 tree. The None value for
                # architecture returned via conv.arch() tells the caller
                # (in this case, __init__.py convert_profile()) that a call
                # to fetch both trees (x86, SPARC) via fetch_tree(arch)
                # will be necessary
                if not self.__change_arch(None):
                    return

            disk = self.__create_disk_node(disk_name, delete_existing)
            if self._arch == common.ARCH_SPARC:
                slice_parent_node = disk
            else:
                slice_parent_node = self.__add_partition(disk)
        else:
            if self._arch in [None, common.ARCH_X86]:
                # We return the partition
                xpath = "./partition[@action='create'][@part_type='191']"
                slice_parent_node = fetch_xpath_node(disk_node, xpath)
            else:
                slice_parent_node = disk_node
            if not self.__is_valid_to_add_slice(device, size):
                return
        self.__add_slice(slice_parent_node, slice_num, "create", size,
                         in_pool, in_vdev, is_swap)

    def __slice_exists(self, slice_node_parent,
                            device, slice_num):
        """Checks whether the specified slice can be added to structure
        If the slice already exists and error will be outputed

        """
        xpath = "./slice[@name='%s'][@action='create']" % slice_num
        slice_node = fetch_xpath_node(slice_node_parent, xpath)
        if slice_node is not None:
            # Slice already exists
            self.__gen_err(LVL_CONVERSION,
                           _("%(device)ss%(slice)s already exists") %
                             {"device": device, "slice": slice_num})
            return True
        return False

    def __is_valid_to_add_slice(self, device, size):
        """Perform some basic checks to prevent an invalid manifest from
        being generated.

        """
        # if the device doesn't contain slice information
        # return false
        if 's' not in device:
            return False
        disk_name, slice_num = device.split("s")
        diskname_node = self.__fetch_diskname_node(disk_name)
        if diskname_node is None:
            return True

        if self._arch is None or self._arch == common.ARCH_X86:
            # the partition node is only present on x86
            slice_node_parent = \
                diskname_node.getparent().find(common.ELEMENT_PARTITION)
            if slice_node_parent is None:
                return True
        else:
            slice_node_parent = diskname_node

        if self.__slice_exists(slice_node_parent,
                               disk_name, slice_num):
            return False

        # If we are adding a slice to a disk with an existing slice,
        # make sure the slice size specification is compatible with the
        # existing slice definition.  Although there may be multiple
        # slices, the 1st slice will give us all the data we need

        xpath = "./slice[@action='create']"
        slice_node = fetch_xpath_node(slice_node_parent, xpath)
        if slice_node is not None:
            size_node = slice_node.find(common.ELEMENT_SIZE)
            if size_node is None:
                if size not in [SIZE_AUTO, SIZE_ALL, None]:
                    self.__gen_err(LVL_CONVERSION,
                                   _("can not create %(device)ss%(slice1)s. "
                                     "Conflicts with %(device)ss%(slice2)s "
                                     "that was created earlier via keyword "
                                     "'%(rp_kw)s' without a specified "
                                     "numeric size.") %
                                     {"device": disk_name,
                                      "slice1": slice_num,
                                      "slice2":
                                      slice_node.get(common.ATTRIBUTE_NAME),
                                      "rp_kw":
                                      self._root_pool_create_via_keyword})
                    return False
            elif size in [SIZE_AUTO, SIZE_ALL, None]:
                self.__gen_err(LVL_CONVERSION,
                               _("can not create %(device)ss%(slice1)s"
                                 " with a size of '%(size)s'. "
                                 "Conflicts with %(device)ss%(slice2)s "
                                 "that was created earlier via keyword "
                                 "'%(rp_kw)s' with a size of "
                                 "%(rp_size)s.") %
                                 {"device": disk_name,
                                  "slice1": slice_num,
                                  "slice2":
                                  slice_node.get(common.ATTRIBUTE_NAME),
                                  "size": size,
                                  "rp_kw": self._root_pool_create_via_keyword,
                                  "rp_size":
                                  size_node.get(common.ATTRIBUTE_VAL)})
                return False
        return True

    def __add_slice(self, parent, slice_num, action, size=None, in_pool=None,
                    in_vdev=None, is_swap=None):
        """Add the <slice> node with the specified attributes as a child
        of parent
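
        For example (attribute names here follow the usual definitions in
        common), __add_slice(parent, "0", "create", size="20480mb",
        in_pool="rpool", in_vdev="rpool_vdev") yields roughly:

            <slice name="0" action="create" force="true" in_zpool="rpool"
                   in_vdev="rpool_vdev">
                <size val="20480mb"/>
            </slice>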

        """
        # We can't mix in_pool or in_vdev with is_swap.  is_swap is only
        # for non-ZFS swap
        if (in_vdev or in_pool) and is_swap:
            raise ValueError(_("is_swap can not be mixed with in_vdev or "
                               "in_pool"))
        slice_node = etree.SubElement(parent, common.ELEMENT_SLICE)
        slice_node.set(common.ATTRIBUTE_NAME, slice_num)
        slice_node.set(common.ATTRIBUTE_ACTION, action)
        slice_node.set(common.ATTRIBUTE_FORCE, "true")
        if in_pool is not None:
            slice_node.set(common.ATTRIBUTE_IN_ZPOOL, in_pool)
        if in_vdev is not None:
            slice_node.set(common.ATTRIBUTE_IN_VDEV, in_vdev)
        if is_swap is not None:
            slice_node.set(common.ATTRIBUTE_IS_SWAP, is_swap)
        if size not in [None, SIZE_AUTO, SIZE_ALL]:
            size_node = etree.SubElement(slice_node,
                                         common.ELEMENT_SIZE)
            size_node.set(common.ATTRIBUTE_VAL, size)

    def __add_partition(self, parent, name="1", part_type="191",
                        action="create"):
        """Add <partitition> node as a child of parent"""
        partition_node = parent.find(common.ELEMENT_PARTITION)
        if partition_node is None:
            partition_node = etree.SubElement(parent, common.ELEMENT_PARTITION)
            partition_node.set(common.ATTRIBUTE_ACTION, action)
            partition_node.set(common.ATTRIBUTE_NAME, name)
            partition_node.set(common.ATTRIBUTE_PART_TYPE, part_type)
        return partition_node

    def __fetch_solaris_software_node(self):
        """Fetch the software publisher node instance in the xml tree"""
        xpath = "./software[@type='IPS']/source/publisher[@name='solaris']"
        publisher = fetch_xpath_node(self._ai_instance, xpath)
        if publisher is not None:
            # We found the proper node
            # Return handle to software node
            return publisher.getparent().getparent()

        # Check to see if there is a software node without a publisher.
        # If no publisher is specified it will default to solaris

        for software in self._ai_instance.findall(common.ELEMENT_SOFTWARE):
            if not software.find(common.ELEMENT_SOURCE):
                # We found our match
                return software

        # No match found create it
        software = etree.SubElement(self._ai_instance,
                                    common.ELEMENT_SOFTWARE)
        software.set(common.ATTRIBUTE_TYPE, "IPS")
        return software

    def __add_software_data(self, package, action):
        """Create a <name>$package</name> node and add it to the existing
        <software_data> node for the specified action.  If the node does not
        exist, create it and add it as a child of the specified parent node.

        Arguments:
        package - the name of the package element to add as a
            <name>$package</name> child node of <software_data>
        action - install or uninstall the package

        """
        orig_pwd = os.getcwd()
        prog_tracker = progress.CommandLineProgressTracker()
        api_inst = api.ImageInterface("/", PKG5_API_VERSION,
                                      prog_tracker, False, "js2ai")
        pkg_query = ":legacy:legacy_pkg:" + package
        query = [api.Query(pkg_query, False, True)]
        gettext.install("pkg", "/usr/share/locale")
        search_remote = api_inst.remote_search(query, servers=None,
                                               prune_versions=True)
        search_local = api_inst.local_search(query)
        pkg_name = None
        # Remote search is the default since this will often have a more
        # complete package catalog than that on an installed system.
        if not self._local:
            try:
                pkg_name = self.__do_pkg_search(search_remote)
            except Exception:
                # setting local so we'll retry with the local search
                self._local = True
                pkg_name = None
            if pkg_name is not None:
                package = pkg_name

        if self._local and not pkg_name:
            try:
                pkg_name = self.__do_pkg_search(search_local)
            except Exception, msg:
                self.__gen_err(LVL_CONVERSION,
                               _("package name translation failed for "
                                 "'%(package)s': %(message)s") % \
                                 {"package": package, \
                                  "message": msg})
            if pkg_name is not None:
                package = pkg_name

        # Because the pkg api call can change the working directory we need
        # to set it back to its original directory.
        os.chdir(orig_pwd)

        software = self.__fetch_solaris_software_node()
        if pkg_name not in ["SUNWcs", "SUNWcsd"]:
            package = "pkg:/" + package
        xpath = "./software_data[@action='%s']"
        software_uninstall = fetch_xpath_node(software,
                                              xpath % SOFTWARE_UNINSTALL)
        software_install = fetch_xpath_node(software,
                                            xpath % SOFTWARE_INSTALL)

        # If we are doing an install and the package is listed in the
        # uninstall list we want to remove it.  Vice versa if we are
        # doing an uninstall
        if action == SOFTWARE_INSTALL:
            search = software_uninstall
        else:
            search = software_install

        if search is not None:
            match_node = None
            for child in search:
                if child.text == package:
                    # Found an entry
                    match_node = child
                    break
            if match_node is not None:
                # Remove the entry we found
                search.remove(match_node)

        if action == SOFTWARE_INSTALL:
            software_data = software_install
        else:
            software_data = software_uninstall

        if software_data is None:
            software_data = etree.SubElement(software,
                                             common.ELEMENT_SOFTWARE_DATA,
                                             action=action)

        name = etree.SubElement(software_data, common.ELEMENT_NAME)
        name.text = package

    def __do_pkg_search(self, search):
        """Grab the new package name returned from the ipkg search"""
        pkg_name = None
        search_values = itertools.chain(search)
        if search_values is not None:
            try:
                for raw_value in search_values:
                    _query_num, _pub, \
                        (_value, _return_type, pkg_info) = raw_value
                    pfmri = pkg_info[0]
                    pkg_name = pfmri.get_name()
                    # We ignore SUNWcs and SUNWcsd since these are system
                    # packages that can show up in the query due to
                    # dependencies.
                    if pkg_name in ["SUNWcs", "SUNWcsd"]:
                        continue
                    if pkg_name is not None:
                        self.inst_type = "ips"
                        return pkg_name
            except apx.SlowSearchUsed, msg:
                self.__gen_err(LVL_WARNING,
                               _("package name lookup returned error: "
                                 "%(message)s") % {"message": msg})
        return pkg_name

    def __rootdisk_slice_conflict_check(self, keyword, disk_slice):
        """Checks the specified slice to see if it conflicts with the
        root_device or boot_device settings that may have been specified
        by the profile.  Returns True if a conflict is found, False otherwise

        """
        if self._rootdisk is None:
            return False
        if self._root_device is not None:
            # Both root_device and our slice have the same format so
            # a direct comparison should be performed
            cmp_device = disk_slice
        else:
            if self._rootdisk_set_by_keyword == "fdisk":
                # the fdisk keyword never represents a conflict;
                # it may be overridden by any keyword that causes rootdisk
                # to be set
                return False

            # root_device wasn't set but boot_device was so our comparison
            # has to be made at the disk level instead of the slice level
            cmp_device = self.__device_name_conversion(disk_slice)
        if cmp_device != self._rootdisk:
            self.__gen_err(LVL_CONVERSION,
                           _("conflicting ZFS root pool definition: "
                             "%(keyword)s definition will be used instead of "
                             "'%(rd_kw)s %(rootdisk)s'") % \
                             {"keyword": keyword,
                              "rd_kw": self._rootdisk_set_by_keyword,
                              "rootdisk": str(self._rootdisk)})
            return True
        return False

    def __rootdisk_conversion(self, device, prefix):
        """Checks the 'device' for the presence of 'prefix'  if found
        and the root disk has been determined the 'prefix' will be
        replaced with the name of the root disk

        """
        # Jumpstart profiles may have commands in the format
        #
        # filesys rootdisk.s0 size mount_point
        # fdisk solaris rootdisk all
        #
        # This routine simply looks for that pattern in the device
        # and substitutes it for the rootdisk if we've determined
        # what that rootdisk is
        #
        if device is not None and device.startswith(prefix):
            # We can only support the rootdisk keyword if
            # root_device, boot_device, pool, or filesys /
            # has been specified in the profile.
            # If filesys / is used then it must precede rootdisk usage
            # in order for this substitution to succeed
            if self._rootdisk is None:
                self.__gen_err(LVL_CONVERSION,
                               _("unable to convert '%(device)s'.  Replace"
                                 "'%(prefix)s' with actual device name") % \
                                 {"device": device,
                                  "prefix": prefix})
                return None
            # The root device specification has a slice associated with it
            # we need to strip this off before we substitute "rootdisk."
            # with it
            disk = self.__device_name_conversion(self._rootdisk)
            device = device.replace(prefix, disk, 1)
        return device

    def __convert_fdisk_entry(self, keyword, values):
        """Processes the fdisk keyword/values from the profile

        """

        # fdisk <diskname> <type> <size>
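        #
        # For example: "fdisk c0t0d0 solaris all" (illustrative line)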
        if len(values) != 3:
            self.__invalid_syntax(keyword)
            return

        # check <diskname> syntax
        # valid values:         rootdisk
        #                       all
        #                       cx[ty]dz
        disk_name = values[0].lower()
        if disk_name == SIZE_ALL:
            # AI doesn't provide the ability to initialize
            # all disks discovered on a system.  We therefore have to
            # mark this as unsupported
            self.__unsupported_value(_("<diskname>"), disk_name)
            return

        disk_name = self.__rootdisk_conversion(disk_name,
                                               PREFIX_ROOTDISK)
        if disk_name is None:
            return

        if not self.__is_valid_device_name(disk_name):
            self.__gen_err(LVL_CONVERSION,
                           _("invalid device specified: %(device)s") % \
                             {"device": values[0]})
            return

        # check <type> syntax
        # valid values:         solaris
        #                       dosprimary
        #                       x86boot
        #                       ###
        fdisk_type = values[1]
        if fdisk_type != "solaris":
            self.__unsupported_value(_("<type>"), fdisk_type)
            return

        # check <size> syntax for non-x86boot partitions
        # valid values:         all
        #                       delete
        #                       maxfree
        #                       ###
        size = values[2].lower()
        if size in [SIZE_MAXFREE, SIZE_DELETE, "0"]:
            # maxfree - An fdisk partition is created in the largest
            #           contiguous free space on the specified disk. If an
            #           fdisk partition of the specified type already exists
            #           on the disk, the existing fdisk partition is used.
            #           A new fdisk partition is not created on the disk.
            #           There's no support, mark it as unsupported
            # delete -  All fdisk partitions of the specified type are deleted
            #           on the specified disk.  Although we can technically
            #           support the delete keyword we are going to mark this
            #           as unsupported because the disk handling is so
            #           different in this go around and we are only supporting
            #           a single root pool creation
            self.__unsupported_value(_("<size>"), size)
            return
        if size != SIZE_ALL:
            size = self.__size_conversion(keyword, size)
            if size is None:
                return

        if self._root_pool is None and \
          self._partitioning == PARTITIONING_DEFAULT and \
          self._rootdisk is None:
            # Only set rootdisk to the value held by fdisk if
            # "partitioning = default".  A value of explicit implies
            # the user is going to specify the layout
            self._rootdisk = disk_name
            if size != SIZE_ALL:
                self._rootdisk_size = size
            self._rootdisk_set_by_keyword = "fdisk"

        # Go ahead and create entry for partition
        disk_node = self.__fetch_disk_node(disk_name)
        if disk_node is None:
            disk_node = self.__create_disk_node(disk_name, size == SIZE_ALL)
        else:
            if size == SIZE_ALL:
                disk_node.set(common.ATTRIBUTE_WHOLE_DISK, "true")
        part_node = self.__add_partition(disk_node)
        size_node = part_node.find(common.ELEMENT_SIZE)
        if size != SIZE_ALL:
            if size_node is None:
                size_node = etree.Element(common.ELEMENT_SIZE)
                part_node.insert(0, size_node)
                size_node.set(common.ATTRIBUTE_VAL, size)
            else:
                spec_size = size_node.get(common.ATTRIBUTE_VAL)
                if size != spec_size:
                    self.__gen_err(LVL_CONVERSION,
                                   _("conflicting fdisk <size> specified: "
                                     "size was previously defined as "
                                     "%(size)s. Ignoring entry") % \
                                     {"size": spec_size})

    def __is_valid_filesys_mount(self, mount):
        """Check whether this is a valid supported mount point.  Return True
           if supported. Otherwise return False

        """
        if mount not in ["/", "swap"]:
            # We reject everything except for '/' and swap
            self.__gen_err(LVL_UNSUPPORTED,
                           _("unsupported mount point of '%(mount)s' "
                             "specified, mount points other than '/' and "
                             "'swap' are not supported") % {"mount": mount})
            return False
        return True

    def __swap_force_root_pool_creation(self):
        """Force the creation of root pool since it doesn't exist.  Return
           True if pool can be created.  False otherwise.

        """
        if self._root_pool is None:
            # We've got a filesys swap entry but we don't have a root
            # pool created yet.  Do we have enough data to create one?
            if self._rootdisk is None:
                # No we don't have enough information to create a root pool
                self.__gen_err(LVL_UNSUPPORTED,
                               _("swap mount is only supported when preceded "
                                 "by a entry that causes the root pool "
                                 "to be created. For example root_device, "
                                 "boot_device, pool, or filesys with a mount "
                                 "point of '/'"))
                return False
            if self._rootdisk == DEVICE_ANY:
                self._rootdisk = self.__pop_usedisk_entry(DEVICE_ANY)
            zpool = self.__create_root_pool(self._rootdisk_set_by_keyword)
            self.__create_vdev(zpool)
            self.__add_device(self._rootdisk,
                              self._rootdisk_size, self._root_pool_name)
            return True

    def __device_conversion(self, device, allow_any_value,
                            use_rootdisk):
        """Perform conversion on the device specified if necessary.  Return
           None if an error condition occurs (device invalid, device could
           not be converted)

           Arguments:
           device - the device being converted
           allow_any_value - boolean to indicate whether to allow of "any"
                    to be returned
           use_rootdisk - indicates whether to translate "any" using rootdisk
                    if rootdisk is set

        """
        if device == DEVICE_ANY:
            if use_rootdisk:
                device = self.__fetch_rootdisk_slice()
                if device is None:
                    device = DEVICE_ANY
            else:
                device = self.__pop_usedisk__slice_entry(DEVICE_ANY)
            if device == DEVICE_ANY:
                if allow_any_value:
                    return device
                else:
                    self.__gen_err(LVL_CONVERSION,
                                   _("unable to convert 'any' device to "
                                     "physical device. Replace 'any' with "
                                     "actual device name"))
                    return None
        else:
            device = self.__rootdisk_conversion(device,
                                                PREFIX_ROOTDISK_DOT)
            if device is None:
                return None

        if not self.__is_valid_slice(device):
            self.__gen_err(LVL_CONVERSION,
                           _("invalid slice specified: %(device)s ") % \
                             {"device": device})
            return None
        return device

    def __size_conversion(self, keyword, size):
        """Perform the necessary conversion for fileys size.  Returns None
           and generates error if not a valid numeric size.
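
           For example, a Jumpstart size of '4096' becomes '4096mb' and
           '20g' becomes '20gb'.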

        """
        match_pattern = SIZE_PATTERN.match(size)
        if match_pattern:
            if match_pattern.group(2) == "":
                # No unit specified.  The size is assumed to be in MB
                size += "mb"
            else:
                # Jumpstart uses m and g not mb and gb like installer wants
                size += "b"
        else:
            self.__gen_err(LVL_CONVERSION,
                           _("invalid size '%(size)s' specified for "
                             "%(key)s") % {"key": keyword, "size": size})
            size = None
        return size

    def __convert_filesys_mirror_entry(self, keyword, values):
        """Perform conversion of filesys mirror entry"""
        length = len(values)
        if length < 4:
            self.__invalid_syntax(keyword)
            return

        mirror_name = values[0]
        if length > 4:
            mount = values[4].lower()
        else:
            #
            # From the Solaris 10 jumpstart documentation
            # If file_system is not specified, unnamed is set by default
            # use this as our default value
            mount = FILESYS_DEFAULT_MOUNT_POINT
        pool_name = DEFAULT_POOL_NAME
        if mount == "swap":
            pool_name += "_swap"
        if mirror_name != "mirror":
            try:
                _mirror, pool_name = values[0].split(":")
            except ValueError:
                self.__invalid_syntax(keyword)
                return
        device1 = values[1]
        device2 = values[2]
        size = values[3].lower()

        if length >= 6:
            self.__gen_err(LVL_CONVERSION,
                           _("ignoring optional filesys parameters: "
                             "%(params)s") % {"params": values[5:]})

        if not self.__is_valid_filesys_mount(mount):
            return

        device1 = self.__device_conversion(device=device1,
                                           allow_any_value=False,
                                           use_rootdisk=False)
        if device1 is None:
            return

        device2 = self.__device_conversion(device=device2,
                                           allow_any_value=False,
                                           use_rootdisk=False)
        if device2 is None:
            return

        if size in [SIZE_FREE, SIZE_EXISTING, SIZE_ALL, SIZE_AUTO]:
            self.__unsupported_syntax(keyword,
                                      _("sizes other than a number are not "
                                        "supported for filesys mirror swap"))
            return None

        size = self.__size_conversion(keyword, size)
        if size is None:
            return

        if mount == "/" and self.__root_pool_exists(keyword):
            return

        if not self.__is_valid_mirror(keyword,
                                      [device1, device2], size, mount == "/"):
            return

        if mount == "swap":
            self.__create_filesys_mirrored_swap(pool_name,
                                                device1, device2, size)
            return

        if self._rootdisk is not None:
            # Unset the rootdisk value since we can't translate a mirror pool
            # rootdisk into a single device for rootdisk.s0 substitutions
            self._rootdisk = None
            self._rootdisk_set_by_keyword = None

        vdev_name = pool_name + VDEV_SUFFIX
        self.__add_device(device=device1, size=size,
                          in_pool=pool_name,
                          in_vdev=vdev_name, is_swap=None)
        self.__add_device(device=device2, size=size,
                          in_pool=pool_name,
                          in_vdev=vdev_name, is_swap=None)
        zpool = self.__create_root_pool(keyword, pool_name)
        self.__create_vdev(zpool, REDUNDANCY_MIRROR, vdev_name)

    def __convert_filesys_entry(self, keyword, values):
        """Converts the filesys keyword/values from the profile into
        the new xml format

        """
        if self._partitioning == "existing":
            self.__unsupported_syntax(keyword,
                                      _("filesys keyword not supported "
                                        "when partition_type is set to "
                                        "'existing'"))
            return

        #
        # Use the regex pattern '..*:/..*' to determine whether the first
        # argument names a remote file system.  If it matches, this is a
        # remote file system; otherwise we have a local file system
        if FILESYS_ARG_PATTERN.match(values[0]):
            # Mounting of remote file sys
            #
            # filesys server:path server_address mount_pt_name mount_options
            #
            # Remote file systems are not currently supported; reject the
            # entire entry
            self.__unsupported_syntax(keyword,
                                      _("remote file systems are not "
                                        "supported"))
            return

        # We've got a local file system or a mirror (RAID-1) setup
        #
        # filesys slice size file_system optional_parameters
        # filesys mirror[:name] slice [slice] size file_system opt_parameters
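        #
        # e.g. (illustrative): "filesys c0t0d0s0 10240 /" adds a 10240mb
        # slice to the ZFS root pool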

        # Invalidate anything with too many or too few args
        length = len(values)
        if length < 2:
            self.__invalid_syntax(keyword)
            return

        if values[0].startswith("mirror"):
            self.__convert_filesys_mirror_entry(keyword, values)
            return

        device = values[0]
        size = values[1].lower()
        if length >= 3:
            mount = values[2].lower()
        else:
            # From the Solaris 10 jumpstart documentation:
            # If file_system is not specified, unnamed is set by default.
            # Use this as our default value.
            mount = FILESYS_DEFAULT_MOUNT_POINT
        if length >= 4:
            self.__gen_err(LVL_CONVERSION,
                           _("ignoring optional filesys parameters: "
                             "%(params)s") % {"params": values[3:]})
        if not self.__is_valid_filesys_mount(mount):
            return

        device = self.__device_conversion(device=device,
                                          allow_any_value=(size == SIZE_ALL),
                                          use_rootdisk=(mount == "/"))
        if device is None:
            return

        if size in [SIZE_FREE, SIZE_EXISTING, SIZE_AUTO]:
            self.__unsupported_syntax(keyword,
                                      _("sizes other than a number or all are "
                                        "not supported"))
            return None
        if size != SIZE_ALL:
            size = self.__size_conversion(keyword, size)
            if size is None:
                return

        if mount == "swap":
            self.__create_filesys_swap(device, size)
            return

        if self.__root_pool_exists(keyword):
            return

        if self._rootdisk is not None:
            # We got a condition like
            #
            # root_device cxtxdxsx
            # filesys cxtxdxsx 20 /
            #
            # or
            #
            # boot_device cxtxdx
            # filesys cxtxdxsx 20 /
            #
            # Check for conflicts
            #
            if not self.__rootdisk_slice_conflict_check(keyword, device):
                # Clear rootdisk here so the block below updates it with
                # this device.  If the user used root_device this will
                # refine the ZFS root pool so it agrees with that setting
                # too.  This would be equivalent to us processing the
                # root_device line and adding the slice entry to the pool
                self._rootdisk = None

        if self._rootdisk is None:
            # How JumpStart Determines a System's Root Disk
            # 3. If rootdisk is not set and a filesys cwtxdysz size / entry
            #    is specified in the profile, the JumpStart program sets
            #    rootdisk to the disk that is specified in the entry.
            self._root_device = device
            self._rootdisk = self._root_device
            self._rootdisk_set_by_keyword = keyword

        zpool = self.__create_root_pool(keyword, DEFAULT_POOL_NAME)
        self.__create_vdev(zpool)
        self.__add_device(device, size, self._root_pool_name)

    def __create_filesys_swap(self, device, size):
        """Create a non ZFS SWAP device"""
        if self._rootdisk == DEVICE_ANY:
            self.__gen_err(LVL_CONVERSION,
                           _("unable to to use specified swap for device "
                             "since the device 'any' was used to create "
                             "the root pool via keyword %(rd_kw)s. Replace "
                             "'any' with actual device name") % \
                             {"rd_kw": self._rootdisk_set_by_keyword})
            return None

        # <disk>
        #   <disk_name name="c6t1d0" name_type="ctd"/>
        #   <slice action="create" name="0" is_swap="true" >
        #       <size value="35000mb" />
        #   </slice>
        # </disk>
        self.__add_device(device=device, size=size,
                          in_pool=None, in_vdev=None, is_swap="true")

    def __create_filesys_mirrored_swap(self, pool_name,
                                       device1, device2, size):
        """Create a mirror swap for devices specified by user"""

        # Check device2 up front since add_device will do this same
        # check for device1, and we want to fail if either device is
        # not valid
        if not self.__is_valid_to_add_slice(device2, size):
            return
        if size == SIZE_ALL:
            self.__gen_err(LVL_CONVERSION,
                           _("filesys mirror with a mount of swap and all is "
                             "not supported. Change size to actual swap size "
                             "desired"))
            return
        vdev_name = pool_name + VDEV_SUFFIX
        self.__add_device(device=device1, size=size,
                          in_pool=pool_name,
                          in_vdev=vdev_name, is_swap=None)
        self.__add_device(device=device2, size=size,
                          in_pool=pool_name,
                          in_vdev=vdev_name, is_swap=None)
        zpool = self.__create_zfs_pool(pool_name)
        self.__create_vdev(zpool, REDUNDANCY_MIRROR, vdev_name)
        if size not in [SIZE_ALL, SIZE_AUTO]:
            self.__create_zvol(parent=zpool, name="swap", use="swap",
                               zvol_size=size)

    def __convert_install_type_entry(self, keyword, values):
        """Converts the install_type keyword/values from the profile into
        the new xml format

        """
        # We've checked the install type up front so there's nothing to
        # do here
        pass

    def __convert_locale_entry(self, keyword, values):
        """Converts the install_type keyword/values from the profile into
        the new xml format

        """
        # Convert
        #   locale locale_name
        #
        # to
        #
        # <software name="ips" type="IPS">
        #  <destination>
        #   <image>
        #     <facet set="false">facet.locale.*</facet>
        #     <facet set="true">facet.locale.${locale_name}</facet>
        #     <facet set="true">facet.locale.de_DE</facet>
        #     <facet set="true">facet.locale.en</facet>
        #     <facet set="true">facet.locale.en_US</facet>
        #     <facet set="true">facet.locale.es</facet>
        #   </image>
        #  </destination>
        #  ...
        # </software>

        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        if self._image_node is None:
            # We haven't done a locale setting operation yet.
            #
            # Find the locale settings and delete all the child nodes if
            # they exist, otherwise create the initial structure needed
            #
            software = self.__fetch_solaris_software_node()
            dest = software.find(common.ELEMENT_DESTINATION)
            if dest is None:
                dest = etree.Element(common.ELEMENT_DESTINATION)
                software.insert(0, dest)
            else:
                image = dest.find(common.ELEMENT_IMAGE)
                if image is not None:
                    dest.remove(image)
            self._image_node = etree.SubElement(dest, common.ELEMENT_IMAGE)

            # <facet set="false">facet.locale.*</facet>
            facet = etree.SubElement(self._image_node, common.ELEMENT_FACET)
            facet.set(common.ATTRIBUTE_SET, "false")
            facet.text = FACET_LOCALE_FORM % "*"

        # <facet set="true">facet.locale.${locale}</facet>
        facet = etree.SubElement(self._image_node, common.ELEMENT_FACET)
        facet.set(common.ATTRIBUTE_SET, "true")
        facet.text = FACET_LOCALE_FORM % values[0]

    def __convert_package_entry(self, keyword, values):
        """Converts the package keyword/values from the profile into
        the new xml format

        """
        # Input:        package <package_name> <add|delete> <arg1> <arg2> ...
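        #
        # e.g. (illustrative): "package SUNWzsh add" marks SUNWzsh for
        # install, "package SUNWzsh delete" marks it for uninstall.  Any
        # additional location arguments cause the entry to be rejected.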

        package = values[0]
        if len(values) == 2:
            action = values[1].lower()
        elif len(values) < 2:
            action = "add"
        else:
            # If the remote (nfs or http) or local (local_device or local_file)
            # directory option is used we can't support this entry. Log it
            # and return.
            self.__unsupported_syntax(keyword,
                _("package install from specified locations is not supported "
                "for SVR4 packages"))
            return
        if (action != "add" and action != "delete"):
            self.__invalid_syntax(keyword)
            return
        if action == "add":
            self.__add_software_data(package,
                                     SOFTWARE_INSTALL)
        elif action == "delete":
            self.__add_software_data(package,
                                     SOFTWARE_UNINSTALL)

    def __convert_pool_entry(self, keyword, values):
        """Converts the pool keyword/values from the profile into
        the new xml format

        """
        # pool <pool name> <pool size> <swap size> <dump size>
        #                                       <slice> | mirror [<slice>]+
        #
        # Input:        ${1}    - pool name
        #               ${2}    - pool size (num, existing, auto)
        #               ${3}    - swap size (num, auto)
        #               ${4}    - dump size (num, auto)
        #               ${5}    - (mirror, <slice>, rootdisk.s??, any)
        #                .      - (<slice>, rootdisk.s??, any)
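        #
        # e.g. (illustrative):
        #   pool newpool auto auto auto c0t0d0s0
        #   pool newpool auto 4g 4g mirror c0t0d0s0 c0t1d0s0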
        length = len(values)
        if length < 5:
            self.__invalid_syntax(keyword)
            return

        pool_name = values[0]
        # Pool name must be between 1 and 30 characters
        name_length = len(pool_name)
        if name_length == 0 or name_length > 30:
            self.__gen_err(LVL_CONVERSION,
                           _("invalid pool name of '%(pool)s': must be at "
                             "least 1 character and no more than 30 "
                             "characters in length") % {"pool": pool_name})
            return
        # Update the name that we are using for the root pool
        self._root_pool_name = pool_name

        pool_size = values[1].lower()
        if pool_size == SIZE_EXISTING:
            self.__unsupported_syntax(keyword,
                                      _("pool sizes other than a number or "
                                        "auto are not supported"))
            return None
        elif pool_size != SIZE_AUTO:
            pool_size = self.__size_conversion("<pool_size>", pool_size)
            if pool_size is None:
                return

        swap_size = values[2].lower()
        noswap = "false"
        if swap_size == "0":
            swap_size = None
            noswap = "true"
        elif swap_size != SIZE_AUTO:
            swap_size = self.__size_conversion("<swap_size>", swap_size)
            if swap_size is None:
                return

        dump_size = values[3].lower()
        nodump = "false"
        if dump_size == "0":
            dump_size = None
            nodump = "true"
        elif dump_size != SIZE_AUTO:
            dump_size = self.__size_conversion("<dump_size>", dump_size)
            if dump_size is None:
                return

        if values[4] == "mirror":
            # Mirror must have at least 2 slices
            mirror = True
            if length < 6:
                self.__invalid_syntax(keyword)
                return
            redundancy = "mirror"
            devices = values[5:]
        else:
            mirror = False

            # Non mirror can only have 1 slice specified
            if length > 5:
                self.__invalid_syntax(keyword)
                return
            redundancy = "none"
            devices = values[4:]

        updated_list = list()
        for device in devices:
            allow_any = not mirror and pool_size == SIZE_AUTO
            update_device = \
                self.__device_conversion(device=device,
                                         allow_any_value=allow_any,
                                         use_rootdisk=(not mirror))
            if update_device is None:
                return
            updated_list.append(update_device)
        devices = updated_list

        # Check for any conflicts with root_device and boot_device
        # pool will always override these settings
        if mirror:
            if not self.__is_valid_mirror(keyword, devices, pool_size, True):
                return
        else:
            # Check for conditions like
            #
            # root_device cxtxdxsx
            # pool p_abc auto auto auto cxtxdxsx
            #
            # or
            #
            # boot_device cxtxdx
            # pool p_abc auto auto auto cxtxdxsx
            #
            # A warning message has already been output
            self.__rootdisk_slice_conflict_check(keyword, devices[0])

        zpool = self.__create_root_pool(keyword, self._root_pool_name,
                                        noswap, nodump)
        self.__create_vdev(zpool, redundancy)
        for device in devices:
            self.__add_device(device, pool_size, self._root_pool_name)

        if swap_size is not None and swap_size != SIZE_AUTO:
            self.__create_zvol(parent=zpool, name="swap", use="swap",
                                zvol_size=swap_size)
        if dump_size is not None and dump_size != SIZE_AUTO:
            self.__create_zvol(parent=zpool, name="dump", use="dump",
                                zvol_size=dump_size)
        if not mirror and self._rootdisk is None:
            # Based on how we process things the pool keyword is where
            # we don't follow the basic steps of "How JumpStart Determines
            # a System's Root Disk (Initial Installation)"
            #
            # Since rootdisk has not been set via root_device or boot_device
            # we now want to use the device specified by pool as our
            # root_device.
            self._root_device = devices[0]
            self._rootdisk = self._root_device
            self._rootdisk_set_by_keyword = keyword

    def __convert_system_type_entry(self, keyword, values):
        """Processes the system_type entry in profile and flags any
        value other than standalone as unsupported value

        """
        length = len(values)
        if length > 1 or length == 0:
            self.__invalid_syntax(keyword)
            return
        if values[0] != "standalone":
            self.__unsupported_value(keyword, values[0])

    def __fetch_disk_node(self, disk_name):
        """Returns the <disk> node for the disk 'disk_name'"""
        diskname_node = self.__fetch_diskname_node(disk_name)
        if diskname_node is None:
            return None
        return diskname_node.getparent()

    def __fetch_diskname_node(self, disk_name):
        """Returns the diskname node with the disk_name of 'disk_name'"""
        xpath = "./disk/disk_name[@name='%s']"
        return fetch_xpath_node(self._target, xpath % disk_name)

    def __fetch_pool(self, pool_name):
        """Fetch the ZFS pool with the specified name if it exists"""
        xpath = "./logical/zpool[@name='%s']" % pool_name
        return fetch_xpath_node(self._target, xpath)

    def __fetch_root_pool(self):
        """Fetch the ZFS root pool"""
        xpath = "./logical/zpool[@is_root='true']"
        return fetch_xpath_node(self._target, xpath)

    def __fetch_rootdisk(self):
        """Fetch the rootdisk device value"""
        if self._rootdisk is not None:
            if self._rootdisk == DEVICE_ANY:
                self._rootdisk = self.__pop_usedisk_entry(DEVICE_ANY)
        return self._rootdisk

    def __fetch_rootdisk_slice(self):
        """Fetch the rootdisk device value.  Convert it to slice form if not
           currently in the slice form.

        """
        slice_name = self.__fetch_rootdisk()
        if slice_name not in [None, DEVICE_ANY]:
            # rootdisk may or may not be in the slice form already
            if not self.__is_valid_slice(slice_name):
                return slice_name + "s0"
        return slice_name

    def __fetch_slice_insertion_point(self, disk_name):
        """Return the insertion point for adding slices to the specified
           disk.  On SPARC this is <disk>; on x86 this is <partition>.
           Returns None if the disk doesn't exist.

        """
        disk_node = self.__fetch_disk_node(disk_name)
        if disk_node is None:
            return None
        if self._arch in [None, common.ARCH_X86]:
            # We return the partition
            xpath = "./partition[@action='create'][@type='191']"
            return fetch_xpath_node(disk_node, xpath)
        return disk_node

    def __pop_usedisk_entry(self, default_value=None):
        """Return the 1st device off the usedisk stack if one exists.
           Otherwise return default_value

        """
        if len(self._usedisk):
            return self._usedisk.pop(0)
        return default_value

    def __pop_usedisk__slice_entry(self, default_value=None):
        """Return the 1st device off the usedisk stack in slice form
           if one exists. Otherwise return default_value

        """
        if len(self._usedisk):
            return self._usedisk.pop(0) + "s0"
        return default_value

    def __store_boot_device_entry(self, keyword, values):
        """Converts the boot device keyword/values from the profile into the
        new xml format

        """

        # The supported syntax in Solaris 10 was
        #
        # boot_device <device> <eeprom>
        # valid values:
        #       <device>:
        #                       c#[t#]d#s# - SPARC
        #                       c#[t#]d#   - X86
        #                       existing
        #                       any
        #       <eeprom>:
        #                       preserve
        #                       update
        #
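        # e.g. (illustrative): "boot_device c0t0d0s0 preserve" implies a
        # SPARC profile, while "boot_device c0t0d0 update" implies x86
        #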
        if self._boot_device is not None:
            self.__duplicate_keyword(keyword)
            return
        length = len(values)
        if length > 2 or length == 0:
            self.__invalid_syntax(keyword)
            return

        device = values[0].lower()
        if device == "existing":
            self.__unsupported_value(_("<device>"), device)
            return

        if device != DEVICE_ANY:
            if not self.__is_valid_device_name(device) and \
               not self.__is_valid_slice(device):
                self.__gen_err(LVL_CONVERSION,
                               _("invalid device specified: %(device)s") % \
                                 {"device": device})
                return

            # The device specified is valid.  Set the architecture for the
            # device based on the device value specified. If a valid slice is
            # specified for boot device the architecture is SPARC.
            # Otherwise it's x86
            if self.__is_valid_slice(device):
                if not self.__change_arch(common.ARCH_SPARC):
                    return
            else:
                if not self.__change_arch(common.ARCH_X86):
                    return

        if length == 2:
            eeprom = values[1].lower()
            if eeprom == "preserve":
                # No action since this says keep it as is
                pass
            elif eeprom == "update":
                # Technically only the preserve value is supported
                # on x86 but since we are ignoring the update value
                # this message is sufficient
                self.__gen_err(LVL_UNSUPPORTED,
                               _("ignoring eeprom 'update' setting, as "
                                 "this action is not supported"))
            else:
                self.__invalid_syntax(keyword)
                return
        if self._root_device is not None:
            # If we follow how jumpstart determines which disk to use for
            # the root disk the root_device keyword has a higher priority
            # than the boot_device keyword.  If the devices specified
            # are different then flag it as a conflict, but use root_device
            # definition.
            #
            # root_device c1t0d0s1
            # boot_device c0t0d0 update
            cmp_device = self.__device_name_conversion(self._root_device)
            if cmp_device != device:
                self.__gen_err(LVL_CONVERSION,
                               _("conflicting definition: rootdisk previously "
                                 "defined as '%(root_device)s' via keyword "
                                 "'%(rd_kw)s', ignoring entry") % \
                                 {"root_device": self._root_device,
                                  "rd_kw": self._rootdisk_set_by_keyword})
                return
        else:
            self._rootdisk = device
        self._rootdisk_set_by_keyword = keyword
        self._boot_device = device

    def __store_root_device_entry(self, keyword, values):
        """Set the profile root device value that we'll use if root device
        is specified by the user

        """
        # root_device <slice>
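        # e.g. (illustrative): root_device c1t0d0s1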
        if self._root_device is not None:
            self.__duplicate_keyword(keyword)
            return
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        if not self.__is_valid_slice(values[0]):
            self.__gen_err(LVL_CONVERSION,
                           _("invalid device specified: %(device)s") % \
                             {"device": values[0]})
            return

        self._root_device = values[0]
        if self._boot_device is not None:
            # Do we have a conflict?
            cmp_device = self.__device_name_conversion(self._boot_device)
            if self._root_device != cmp_device:
                self.__gen_err(LVL_CONVERSION,
                               _("translation conflict between devices "
                                 "specified for boot_device and root_device, "
                                 "using root_device define of "
                                 "'%(root_device)s', ignoring define of "
                                 "boot_device of '%(boot_device)s'") % \
                                 {"root_device": self._root_device,
                                  "boot_device": self._boot_device})
        self._rootdisk = self._root_device
        self._rootdisk_set_by_keyword = keyword

    def __store_partitioning_entry(self, keyword, values):
        """Set the profile partitioning value that we'll use if fdisk all
        or usedisk is specified by the user later

        """
        # partitioning <type> where type is default, existing or explicit
        if self._partitioning is not None:
            self.__duplicate_keyword(keyword)
            return
        if len(values) != 1:
            self.__invalid_syntax(keyword)
            return
        self._partitioning = values[0].lower()
        if self._partitioning not in [PARTITIONING_DEFAULT,
                                      PARTITIONING_EXPLICIT]:
            self.__gen_err(LVL_UNSUPPORTED,
                           _("unsupported profile, partitioning must be "
                             "'default' or 'explicit'"))
            self._report.conversion_errors = None
            self._tree = None
            raise ValueError

    def __store_usedisk_entry(self, keyword, values):
        """Store the usedisk devices specified by the user.  We'll use these
        to create the root pool if partitioning default is specified.

        """
        #
        # usedisk <device> [<device>]
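        # e.g. (illustrative): usedisk c0t0d0 c0t1d0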
        if len(values) == 0 or len(values) > 2:
            self.__invalid_syntax(keyword)
            return
        for value in values:
            if self.__is_valid_device_name(value):
                self._usedisk.append(value)
            else:
                self.__gen_err(LVL_CONVERSION,
                               _("invalid device specified: %(device)s") % \
                                 {"device": value})
                return

    def __is_valid_install_type(self, keyword, values):
        """The only profiles that are supported are install profiles
        The jumpstart scripts require it as the first keyword in the
        file.  If the install_type is not initial_install reject
        the entire profile

        """
        if keyword != "install_type":
            self.__gen_err(LVL_PROCESS,
                           _("invalid profile, first specified keyword must "
                             "be install_type, got '%(keyword)s'") % \
                             {"keyword": keyword})
            self._report.conversion_errors = None
            self._report.unsupported_items = None
            self._tree = None
            return False
        install_type = values[0].lower()
        if install_type in ["upgrade",
                            "flash_install", "flash_upgrade"]:
            self.__unsupported_value(keyword, values[0])
            self._report.conversion_errors = None
            self._tree = None
            return False
        if install_type != "initial_install":
            self.__invalid_syntax(keyword)
            self._report.conversion_errors = None
            self._report.unsupported_items = None
            self._tree = None
            return False
        return True

    @property
    def tree(self):
        """Returns the xml tree associated with this object"""
        return self._tree

    def fetch_tree(self, arch):
        """Convert the current tree to the specified architecute

        Supported architecutres are:
            common.ARCH_GENERIC
            common.ARCH_SPARC
            common.ARCH_X86

        Conversion not support:
            SPARC to X86

        """
        if arch not in [common.ARCH_GENERIC, common.ARCH_X86,
                         common.ARCH_SPARC]:
            # Programming error
            raise ValueError(_("unsupported architecture specified"))
        if arch == self._arch or self._arch == common.ARCH_GENERIC:
            # Tree is in the proper format for the architecture requested
            return self._tree
        if arch == common.ARCH_X86:
            if self._arch in [None, common.ARCH_X86]:
                # Tree is in the proper format for the architecture requested
                return self._tree
        if arch == common.ARCH_SPARC:
            if self._arch in [None, common.ARCH_X86]:
                # Tree is not in the proper format.  A conversion is necessary
                return self.__fetch_sparc_from_x86_tree(self._tree)

        # Programming error
        raise ValueError(_("Conversion from architecute %(req_arch) to "
                           "%(cur_arch)s is not supported") %
                               {"req_arch": arch,
                                "cur_arch": self._arch})

    def __fetch_sparc_from_x86_tree(self, tree):
        """Converts a x86 manifest xml tree to a sparc manifest xml tree"""
        clone = common.tree_copy(tree)
        # The only difference between an x86 based xml tree and a sparc
        # tree is currently the <partition> node.  Look for the
        # <partition> node and remove it, if it exists.
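        #
        # Simplified illustration of the transformation performed below:
        #   <disk><partition ...><slice .../></partition></disk>
        #     becomes
        #   <disk><slice .../></disk>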
        xpath = "/auto_install/ai_instance/target"
        target = fetch_xpath_node(clone, xpath)
        for disk in target.findall(common.ELEMENT_DISK):
            # To convert a x86 manifest profile to a sparc we're simply
            # going to take the children slices of the partition and move
            # them up as a child of <disk> and then delete the <partition>
            # node
            partition = disk.find(common.ELEMENT_PARTITION)
            if partition is not None:
                for slice_node in partition.findall(common.ELEMENT_SLICE):
                    partition.remove(slice_node)
                    disk.append(slice_node)
                disk.remove(partition)
        return clone

    def __fetch_keys(self):
        """Fetch the keys that we need to process from the profile dictionary

        """
        if self.prof_dict is None:
            keys = {}
        else:
            # Sort the keys based on the line #
            keys = sorted(self.prof_dict.keys())
        if len(keys) == 0:
            # There's nothing to convert.  This is a valid condition if
            # the file couldn't have been read, for example
            self._report.conversion_errors = None
            self._report.unsupported_items = None
            return None
        return keys

    def __find_xml_entry_points(self):
        """Find and set the global xml entry points"""
        self._target = self._ai_instance.find(common.ELEMENT_TARGET)
        if self._target is not None:
            # Delete the target entry from the default manifest
            self._ai_instance.remove(self._target)

        # Create <target> and insert immediately after
        # <ai_instance> node
        self._target = etree.Element(common.ELEMENT_TARGET)
        self._ai_instance.insert(0, self._target)

    profile_conversion_dict = {
        "boot_device": None,
        "bootenv": __unsupported_keyword,
        "client_arch": __unsupported_keyword,
        "client_swap": __unsupported_keyword,
        "cluster": __unsupported_keyword,
        "dontuse": __unsupported_keyword,
        "fdisk": __convert_fdisk_entry,
        "filesys": __convert_filesys_entry,
        "geo": __unsupported_keyword,
        "install_type": __convert_install_type_entry,
        "locale": __convert_locale_entry,
        "num_clients": __unsupported_keyword,
        "package": __convert_package_entry,
        "partitioning": __store_partitioning_entry,
        "pool": __convert_pool_entry,
        "root_device": None,
        "system_type": __convert_system_type_entry,
        "usedisk": None
        }

    def __process_profile(self):
        """Process the profile by taking all keyword/values pairs and
        generating the associated xml for the key value pairs

        """

        keys = self.__fetch_keys()
        if keys is None:
            self._tree = None
            return

        check_for_install_type = True
        pool_obj = None
        line_num = 0
        #
        # The keywords for the profile are processed in 4 different phases.
        #
        # Phase  Actions Performed
        # -----  -------------------------------------------------------------
        #   1    o Check to ensure that "install_type" is the first keyword
        #          in the profile.
        #        o Check the entire profile for "root_device" and "boot_device"
        #          keywords for the generation of the "rootdisk".  This
        #          duplicates the first 2 steps that Jumpstart does when it
        #          determines what disk to use for the System's Root Disk
        #        o Check for the keyword "partitioning" and store for later use
        #        o Check for keyword "usedisk" and store for later use
        #   2    o Process the pool keyword.  If used, this is the closest
        #          parallel to how the new installer works, so we give it
        #          the highest priority in what we use to generate the ZFS
        #          root pool
        #   3    o If partition value is "default" create ZFS root pool
        #   4    o Process the remaining keywords in the profile
        #
        for key in keys:
            key_value_obj = self.prof_dict[key]
            if key_value_obj is None:
                raise KeyError

            keyword = key_value_obj.key
            values = key_value_obj.values
            line_num = key_value_obj.line_num

            if line_num is None or values is None or keyword is None:
                raise KeyError(_("Got None value, line_num=%(lineno)s "
                               "values=%(values)s keyword=%(keywords)s") %
                               {"lineno": str(line_num),
                                "values": str(values),
                                "keyword": str(keyword)})

            self._extra_log_params[LOG_KEY_LINE_NUM] = line_num
            if check_for_install_type:
                # The 1st keyword in the profile must be install_type,
                # if it's not we reject the profile
                if not self.__is_valid_install_type(keyword, values):
                    self._tree = None
                    return
                del self.prof_dict[key]
                check_for_install_type = False
            #
            # Scan all the keywords for root_device and boot_device
            # These keywords are special since they allow us to emulate
            # the initial 2 stages in the process of
            #       How JumpStart Determines a System's Root Disk
            #
            # 1. If the root_device keyword is specified in the profile, the
            #    JumpStart program sets rootdisk to the root device.
            # 2. If rootdisk is not set and the boot_device keyword is
            #    specified in the profile, the JumpStart program sets rootdisk
            #    to the boot device.
            if keyword == "root_device":
                self.__store_root_device_entry(keyword, values)
                del self.prof_dict[key]
            elif keyword == "boot_device":
                self.__store_boot_device_entry(keyword, values)
                del self.prof_dict[key]
            elif keyword == "pool":
                del self.prof_dict[key]
                if pool_obj is not None:
                    self.__duplicate_keyword(keyword)
                else:
                    pool_obj = key_value_obj
            elif keyword == "partitioning":
                try:
                    self.__store_partitioning_entry(keyword, values)
                    del self.prof_dict[key]
                except ValueError:
                    return
            elif keyword == "usedisk":
                self.__store_usedisk_entry(keyword, values)
                del self.prof_dict[key]
            elif keyword == "fdisk":
                if not self.__change_arch(common.ARCH_X86):
                    return

        self.__find_xml_entry_points()

        # Next create the zfs pool if the pool keyword was encountered
        # With the new installer we only support creating a single zfs
        # root pool.  The pool keyword takes precedence over any other
        # settings the user may have made
        if pool_obj is not None:
            self._extra_log_params[LOG_KEY_LINE_NUM] = pool_obj.line_num
            self.__convert_pool_entry(pool_obj.key, pool_obj.values)

        # Now process the remaining keys
        keys = sorted(self.prof_dict.keys())
        for key in keys:
            key_value_obj = self.prof_dict[key]
            keyword = key_value_obj.key.lower()
            values = key_value_obj.values
            self._extra_log_params[LOG_KEY_LINE_NUM] = key_value_obj.line_num
            try:
                function_to_call = self.profile_conversion_dict[keyword]
            except KeyError:
                self.__unsupported_keyword(keyword, values)
            else:
                if function_to_call is not None:
                    function_to_call(self, keyword, values)

        # If the root pool is not created attempt to create it now
        if self._root_pool is None:
            if self._rootdisk is not None:
                # rootdisk is set create the pool using the specified rootdisk
                zpool = self.__create_root_pool(self._rootdisk_set_by_keyword)
                self.__create_vdev(zpool)
                self.__add_device(self._rootdisk,
                                  self._rootdisk_size, self._root_pool_name)

            elif self._partitioning is not None and \
                 self._partitioning == PARTITIONING_DEFAULT:
                # Root pool doesn't exist.  User specified partitioning
                # default.  Go ahead and create it now
                zpool = self.__create_root_pool("partitioning")
                self.__create_vdev(zpool)
                if len(self._usedisk) == 0:
                    # No usedisk entries were specified.  Add device
                    # based on "any"
                    self.__add_device(DEVICE_ANY)

        if self._root_pool is not None:
            # Add any additional disks that the user may have told us to
            # use to the root pool as long as it's not a mirrored pool
            xpath = "./vdev[@redundancy='mirror']"
            vdev_mirror = fetch_xpath_node(self._root_pool, xpath)
            if vdev_mirror is None:
                for device in self._usedisk:
                    self.__add_device(device, None, self._root_pool_name)

        # Since we auto create the <target> node by default we want
        # to make sure the node has children before we finish up.
        # If there are no children then we want to delete the node
        if len(self._target) == 0:
            # No children.  Delete target node
            self._ai_instance.remove(self._target)

        # Check to determine if we have any children nodes
        # If we don't and we have errors then clear the xml
        # tree so we don't have an empty tree hierarchy that
        # results in a <auto_install/>
        # Conversely though if all that's in the file is
        # initial_install and we have no errors then we should
        # create a file even if it's just <auto_install/>
        # That technically won't have any meaning to the new jumpstart
        # engine though
        if len(self._ai_instance) == 0 and self._report.has_errors():
            self._tree = None
        elif self._root_pool is None:
            if line_num is None:
                line_num = 1
            else:
                line_num += 1
            self._extra_log_params[LOG_KEY_LINE_NUM] = line_num
            # Create the root pool, but tell AI to choose the disk
            zpool = self.__create_root_pool("default")