16148 need linked image support for zones, phase 1
authorEdward Pilatowicz <edward.pilatowicz@oracle.com>
Sat, 07 May 2011 00:25:10 -0700
changeset 2339 aa5954c06b9d
parent 2338 63a4d56416c6
child 2340 194a87d123d4
16148 need linked image support for zones, phase 1 16568 zoneadm install can create out of sync zones if entire has been removed
doc/client_api_versions.txt
doc/linked-images.txt
src/Makefile
src/brand/Makefile
src/brand/attach
src/brand/common.ksh
src/brand/developerenv.ksh
src/brand/fmri_compare.py
src/brand/image_install
src/brand/p2v
src/brand/pkgcreatezone
src/checkforupdates.py
src/client.py
src/gui/modules/installupdate.py
src/gui/modules/misc_non_gui.py
src/gui/modules/versioninfo.py
src/man/pkg.5.txt
src/modules/actions/__init__.py
src/modules/actions/attribute.py
src/modules/actions/depend.py
src/modules/actions/driver.py
src/modules/actions/file.py
src/modules/actions/generic.py
src/modules/actions/license.py
src/modules/actions/signature.py
src/modules/altroot.py
src/modules/client/api.py
src/modules/client/api_errors.py
src/modules/client/debugvalues.py
src/modules/client/image.py
src/modules/client/imageconfig.py
src/modules/client/imageplan.py
src/modules/client/linkedimage/__init__.py
src/modules/client/linkedimage/common.py
src/modules/client/linkedimage/system.py
src/modules/client/linkedimage/zone.py
src/modules/client/pkg_solver.py
src/modules/client/pkgdefs.py
src/modules/client/pkgplan.py
src/modules/client/progress.py
src/modules/lint/engine.py
src/modules/manifest.py
src/modules/misc.py
src/modules/pkgsubprocess.py
src/modules/syscallat.c
src/pkg/manifests/developer%2Fopensolaris%2Fpkg5.p5m
src/pkg/manifests/package%2Fpkg.p5m
src/pkg/manifests/system%2Fzones%2Fbrand%2Fipkg.p5m
src/pkg/pkglint_whitelist.txt
src/pkgdep.py
src/setup.py
src/sysrepo.py
src/tests/api/t_altroot.py
src/tests/api/t_api.py
src/tests/api/t_api_info.py
src/tests/api/t_api_list.py
src/tests/api/t_api_search.py
src/tests/api/t_linked_image.py
src/tests/api/t_p5p.py
src/tests/api/t_pkg_api_install.py
src/tests/cli/t_change_variant.py
src/tests/cli/t_https.py
src/tests/cli/t_lock.py
src/tests/cli/t_pkg_R_option.py
src/tests/cli/t_pkg_image_create.py
src/tests/cli/t_pkg_install.py
src/tests/cli/t_pkg_intent.py
src/tests/cli/t_pkg_linked.py
src/tests/cli/t_pkg_publisher.py
src/tests/cli/t_pkg_sysrepo.py
src/tests/cli/t_pkg_temp_sources.py
src/tests/cli/t_pkgrepo.py
src/tests/cli/t_pkgsend.py
src/tests/cli/t_pkgsign.py
src/tests/cli/t_sysrepo.py
src/tests/pkg5unittest.py
src/tests/pylintrc
src/util/distro-import/importer.py
--- a/doc/client_api_versions.txt	Fri May 06 17:24:48 2011 -0700
+++ b/doc/client_api_versions.txt	Sat May 07 00:25:10 2011 -0700
@@ -1,3 +1,44 @@
+Version 59:
+Compatible with clients using versions 57-59.
+
+    pkg.client.api.ImageInterface has changed as follows:
+        * introduced new linked image interfaces:
+                attach_linked_child()
+                audit_linked()
+                audit_linked_children()
+                audit_linked_rvdict2rv()
+                detach_linked_children()
+                detach_linked_rvdict2rv()
+                gen_plan_attach()
+                gen_plan_detach()
+                gen_plan_sync()
+                get_linked_name()
+                get_linked_props()
+                ischild()
+                list_linked()
+                parse_linked_name()
+                parse_linked_name_list()
+                sync_linked_children()
+                sync_linked_rvdict2rv()
+        * introduced new non-linked image interfaces:
+                gen_plan_change_varcets()
+                gen_plan_install()
+                gen_plan_revert()
+                gen_plan_uninstall()
+                gen_plan_update()
+                solaris_image()
+        * deprecated:
+                plan_change_varcets()
+                plan_install()
+                plan_revert()
+                plan_uninstall()
+                plan_update()
+                plan_update_all()
+
+    pkg.client.api_errors has changed as follows:
+        * introduced LinkedImageException
+        * updated PlanCreationException
+
 Version 58:
 Compatible with clients using version 57:
 
@@ -15,15 +56,15 @@
 Version 57:
 Incompatible with clients using versions 0-56:
      pkg.client.api.ImageInterface has changed as follows:
-        * get_preferred_publisher has been replaced with 
+        * get_preferred_publisher has been replaced with
           get_highest_ranked_publisher
 
         * set_pub_search_after has been removed
 
         * set_pub_search_before has been removed
 
-        * search_after, search_before, and search_first have been added as 
-	  options to add_publisher and update_publisher
+        * search_after, search_before, and search_first have been added as
+          options to add_publisher and update_publisher
 
         * The write_syspub_0 function has been added.
 
@@ -38,7 +79,7 @@
         * a proxy attribute has been added and may be set during initialization
 
      pkg.client.publisher.Publisher has changed as follows:
-        * The repositories attribute which contained a list of Repository 
+        * The repositories attribute which contained a list of Repository
           objects has been replaced by the repository attribute which contains a
           single Repository object.
 
@@ -50,7 +91,7 @@
         * The add_repository function has been replaced with the set_repository
           function.
 
-        * The has_configuration function has been added which attempts to 
+        * The has_configuration function has been added which attempts to
           determine whether a publisher has been configured by a user.
 
         * The remove_repository function has had its arguments removed.
@@ -71,12 +112,12 @@
 
      pkg.client.publisher.Publisher has changed as follows:
 
-	* The following functions have been removed:
+        * The following functions have been removed:
           get_intermediate_certs
-       	  verify_ca_certs
+          verify_ca_certs
 
         * verify_chain has had the mandatory parameter cur_pathlen added which
-	  indicates how many certificates exist between cert and the leaf cert.
+          indicates how many certificates exist between cert and the leaf cert.
 
         * approve_ca_cert has had the 'manual' parameter removed as only the
           user can approve a certificate now.
@@ -153,6 +194,7 @@
 
 Version 51:
 Compatible with clients using versions 46-50:
+
     pkg.client.api.PlanDescription has changed as follows:
         * get_solver_errors() was added to allow retrieval of extended
           error information for plan_*() functions.  See 'pydoc
@@ -168,7 +210,9 @@
 
 Version 49:
 Compatible with clients using versions 46-48:
+
     pkg.client.api.ImageInterface has changed as follows:
+
         * plan_install and plan_update take an optional
           argument to reject packages in solution.
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/linked-images.txt	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,1265 @@
+.. This document is formatted using reStructuredText, which is a Markup
+   Syntax and Parser Component of Docutils for Python.  An html version
+   of this document can be generated using the following command:
+     rst2html.py doc/linked-images.txt > doc/linked-images.html
+
+============================
+IPS pkg(5) zones integration
+============================
+
+:Author: Edward Pilatowicz
+:Version: 0.6
+
+.. sectnum::
+.. contents::
+
+Introduction
+============
+
+To allow for support of pkg(5) within zones and the automatic management
+and upgrading of zones during global zone pkg(5) operations, this
+proposal describes enhancements to pkg(5) to support "linked images".
+
+The pkg(5) linked images functionality proposed herein is intended to
+be generic, with zones being one type of linked images that can be
+supported. In addition to supporting zones linked images we also propose
+supporting another "system" type of linked images. The details of how
+these linked image types differ will be explained in more detail below.
+
+Another goal of the pkg(5) linked image support is to make all the
+linked image functionality visible to other pkg(5) subsystems common,
+and to isolate code dealing with the different linked image types within
+well defined modules contained within the linked image subsystem. IE,
+while other pkg(5) subsystems may need to be aware of linked images,
+they should not have to worry about specific linked image types.
+(zones, system, etc.)
+
+Linked images will have properties associated with them. The set of
+available properties may vary across different linked image types.  The
+storage location for these properties values (IE, linked image metadata)
+may be plugin specific. For the "system" plugin, property data
+configuration will live within a /var/pkg configuration file.  For the
+"zones" linked image plugin, property configuration will be derived from
+the zones subsystem (some properties will have implicit values, others
+may be derived from zonecfg(1m) parameters, etc.)
+
+
+Zones and linked images
+=======================
+
+The design for pkg(5) linked images as proposed herein is primarily
+being driven by the need to support zones. Hence, before jumping into
+the specifics of linked images support it is worth discussing how zones
+users are expected to interact with linked images, and also the
+requirements that zones have upon the design of linked images.
+
+
+Zone users and linked images
+----------------------------
+
+Ideally, zone users should be unaware of all linked image functionality
+proposed herein.  They should never need to run any of the proposed
+pkg(1) linked image commands.  Linked images will do their work
+automatically under the hood when users do operations like:
+
+- Run pkg(1) in the global zone.  In this case pkg(1) operations will
+  automatically keep non-global zones in sync with the global zone.
+
+- Run pkg(1) in a non-global zone.  In this case pkg(1) will prevent
+  operations that would allow a non-global zone to go out of sync with
+  the global zone.
+
+- Run zoneadm(1m) install/uninstall/detach/attach/etc.  In this case the
+  zone brand callbacks will utilize the pkg(5) linked image
+  functionality to manage zones as linked images.
+
+
+Zones requirements
+------------------
+
+Zones is an OS-level virtualization technology that allows us to run
+multiple userland execution environments on a single kernel. Each of
+these userland execution environments is a separate virtual machine,
+with its own filesystem, processes, network stack, etc. The default
+execution environment (IE, the one you get when you install Solaris or
+OpenSolaris) is called the global zone.  Subsequent execution
+environments are called non-global zones and can be created and managed
+via zonecfg(1M) and zoneadm(1M).
+
+All the zones on a system share the same kernel, and the kernel is
+managed by the global zone. Non-global zones can not supply their own
+kernel modules. Hence, any software within a zone which spans the
+user/kernel boundary must be kept in sync with the kernel installed in
+the global zone. This puts constraints on what software can be installed
+within zones. The basic requirements here can be summed up as:
+
+- Software installed in a non-global zone that depends on specific
+  kernel functionality must be kept in sync with the kernel software
+  installed within the global zone.  examples:
+	libzfs and drv/zfs (system/file-system/zfs)
+	libdladm (system/library) and drv/dld (system/kernel)
+
+- Software installed in a non-global zone that communicates to the global
+  zone via private interfaces must be kept in sync with the kernel
+  software installed within the global zone.  examples:
+	zones proxy (system/zones)
+	libdladm (system/library) and dladm (system/network)
+
+- Software that depends on specific kernel functionality can only be
+  installed in a non-global zone if the corresponding kernel
+  functionality is installed within the global zone.
+
+Since non-global zones are separate virtual machines with their own
+filesystems and software, these machines (and their software contents)
+may not be trusted by the global zone. Hence any software management
+operations being done on non-global zone should not be able to affect
+the global zone. Effectively this means that all actions performed on a
+zone cannot safely be done from the global zone environment. This means
+that software management operations initiated from a global zone will
+either have to "enter" the zone (if it's running) to perform their
+operation, or they must take special precautions when accessing zone
+images to ensure that all filesystem operations are performed safely.
+The basic requirements here can be summed up as:
+
+- Software management operations will need to span multiple processes
+  operating in different zones.
+
+- Since zones are untrusted execution environments, global zone pkg(5)
+  operations should not read data from non-global zone.  IE, any data
+  flow required to support linked images must be from a global zone to a
+  non-global zone, and never in the reverse direction.  Also, write
+  accesses to a non-global zone from the global zone should be kept to
+  an absolute minimum, and any such accesses must be provably safe via
+  code inspection.
+
+Lastly, since non-global zones are separate virtual machines, they will
+normally not have access to the contents of the global zone. Yet the
+software that can be run in non-global zones is constrained by the
+software installed in the global zone. Hence:
+
+- All the constraints required to perform software management operations
+  within a non-global zone must be persistently available within that
+  zones image/filesystem.
+
+
+Zones non-requirements
+----------------------
+
+While developing linked images, existing Solaris 10 zones users have
+asked how linked images will enable them to continue to do certain zones
+administrative operations that they are familiar with.  Unfortunately,
+some of these operations were possible mainly as side effects of the
+zones and SVR4 packaging implementations on Solaris 10.  Since pkg(5) is
+significantly changing the way zones are used and managed, some of these
+legacy operations will no longer be possible and users will need to
+adopt new ways of managing their zones.  So here are some of the
+requests from Solaris 10 users that we're not initially addressing with
+linked images.
+
+
+**The ability to install a patch on all zones on a system.**
+
+Patching of systems with pkg(5) will be substantially different from the
+patching of Solaris 10 systems.  This change in the administrative model
+is being driven by pkg(5) and is largely orthogonal to zones.  With
+pkg(5), patching of systems will be done via pkg(1) update, where
+repositories are populated with the latest versions of packages that
+all systems should be running, and when systems are image-updated they
+will automatically install the latest versions of software available
+within the repositories they are using.  Hence, if an administrator
+wants to install updated packages onto a system with zones, they should
+push the updated packages into their repositories and update their
+zones.
+
+
+**The ability to install a package on all zones on a system.**
+
+Previously in Solaris 10, the package content of zones closely tracked
+the package content of the global zone.  In most cases installing a
+package in the global zone would result in that package being installed
+in all zones.  With pkg(5) the contents of zones are much more decoupled
+from the contents of global zones.  We will not be providing a way to
+install packages across multiple zones.  In general, systems should only
+contain the software that they need for their operation.  Hence, this
+software should be installed at zone installation time.  Or if added
+later, it needs to be added directly into the zone image (via a pkg(1)
+install run within that zone image).
+
+We may subsequently create new mechanisms to help with operations like
+the ones above.  Such mechanisms might take the form of recursive
+operation support, a simple image content policy mechanism, or some
+larger system life cycle management mechanism that defines the package
+software content of systems for their entire deployment (instead of just
+at install time).
+
+
+Possible linked image types
+===========================
+
+So in addition to supporting zones linked images, the design herein has
+also considered other future possible types of linked images. So before
+going into the details of the linked image design it's worthwhile to
+first describe some possible linked image types to understand when and
+where they might be used.
+
+
+**Zones linked images**
+
+  Support for zones linked images is included in this proposal. Users
+  will not be able to directly create or manage zones linked images.
+  Instead the system will automatically manage zones linked images when
+  zones are used. Zones linked images should provide us with a
+  means to do the following:
+
+  - Allow for the creation of non-global zone images where the contents
+    of those images is constrained by the software installed in the
+    global zone.
+
+  - Allow for the updating of software within non-global zones while
+    taking into account the constraints placed upon the zone by the
+    software installed in the global zone.
+
+  - Allow pkg(5) operations initiated from (and operating on) the global
+    zone to update non-global zone images as required to keep them in
+    sync with software being updated in the global zone.
+
+  - Allow for pkg(5) operations initiated directly upon non-global zone
+    image to take into account the constraints placed upon them by the
+    software installed in the global zone.
+
+  - Allow for the auditing of non-global zones to verify that their
+    software contents are in sync with the software installed in the
+    global zone.
+
+
+**System linked images**
+
+  Support for system linked images is included in this proposal. These
+  types of linked images will be very similar to zone linked images. All
+  the features listed above that will be available for zones linked
+  images will also be available for system linked images. But unlike
+  zone linked images, these images can be created via new pkg(1)
+  interfaces.
+
+  Support for system linked images is included in this proposal because
+  it is anticipated that system linked images will be used internally
+  within Solaris.  Also, having a "system" linked image type will
+  greatly facilitate the testing of linked image functionality, the
+  large majority of which is common to all linked image types.
+
+
+**User linked images**
+
+  Support for user linked images is NOT included in this proposal. These
+  type of images would be managed very differently from zones or system
+  linked images. User linked images could provide us with the following
+  functionality:
+
+  - Allow for arbitrary users to create user linked images, where the
+    contents of that image are in sync with the software installed in
+    another image.
+
+  - Allow for a user to update the software within a user linked image
+    while staying in sync with the software installed in another image.
+
+  - Allow for the auditing of a user linked image to verify that their
+    software contents are in sync with the software installed in another
+    image.
+
+  So here's an example of how a user linked image might work. Say a user
+  named Ed wants to run a copy of SpamAssassin on a system named
+  jurassic, but jurassic doesn't have SpamAssassin installed. Ed could
+  then create a "user" linked image that is linked to the jurassic
+  system image. Then he could install SpamAssassin into that image.
+  Pkg(5) would install a version of SpamAssassin that is compatible with
+  the software contents already installed on jurassic. (In the process
+  of doing this pkg(5) would also install into the user image any
+  dependencies required by SpamAssassin that were missing from
+  jurassic.) Ed could then run this version of SpamAssassin without
+  having to worry about his software being out of sync with the system.
+  The system administrator would have no knowledge of user images that
+  might be created on a system, hence, if the administrator updates the
+  software on a system then any user images may now be out of sync. In
+  this case, Ed would be able to perform an audit of all his user linked
+  images to determine if they are in sync with their specified policy
+  and the contents of the system they are being used on. If they were
+  out of sync (due to the system being updated) he could then initiate a
+  sync operation to update them and bring them back in sync.
+
+
+**Diskless client linked images**
+
+  Support for diskless client linked images is NOT included in this
+  proposal. These types of images would probably not be managed
+  directly, but would probably be managed indirectly by diskless client
+  management tools. One possible deployment model for diskless clients
+  would be to create a parent image which represents the standard
+  diskless client configuration deployment, and then to create linked
+  images all parented to that one parent image. These linked images
+  would actually be the diskless client root filesystems. Subsequent
+  software management operations could be performed on the parent image,
+  with changes automatically propagated to all the client images
+  automatically based on the content policy of the linked images.
+
+
+**Distributed linked images**
+
+  Support for Distributed linked images is NOT included in this
+  proposal.  But if pkg(5) functionality was accessible over the network
+  via rad interfaces, it should be possible to create linked image
+  relationships that span multiple machines.  This could be used in a
+  manner similar to diskless clients where a master deployment image is
+  created on one machine, and this master image is then linked to
+  machines which deploy the image.  Subsequently, updates to the master
+  image would automatically be propagated to deployment machines, based
+  on the content management policy being used.
+
+
+Out of scope
+============
+
+There are many components which will probably be required to allow
+pkg(5) and zones to work seamlessly together. This proposal does not
+address all of them. Specifically, the following features are not being
+delivered by this project.
+
+
+**User, diskless, and distributed linked images.**
+
+  While this project will provide basic support for creating system and
+  zone linked images, it will not provide support for any other types of
+  linked images, even though other types of linked images may be
+  discussed within this document.
+
+
+**Offline zone install.** [1]_
+
+  This proposal does nothing to address the current requirement that an
+  active network connection to a valid package repository be available
+  during most pkg(5) operations. If anything, this proposal will
+  increase the number of operations that may require such a connection
+  to be available.
+
+
+**The system repository.** [2]_
+
+  When managing zone linked images, it's critical that certain packages
+  be kept in sync between global and non-global zones. This means that
+  zones must have access to any publishers that contain packages that
+  must be kept in sync between the images, regardless of the zones
+  publisher configuration. Some additional complications to this problem
+  are that for zones which are not running, we may not be able to
+  instantiate their network configuration, so we may not be able to talk
+  to any of their configured network based publishers.  The planned
+  solution to address these problems is to create "system repository".
+  The system repository would allow a linked image to access any
+  publishers configured within the parent image that contain packages
+  which needed to be kept in sync with the child image.
+
+  Since delivery of the system repository is out of scope for this
+  project, initially zones linked image support will rely on zones
+  having a publisher configuration which provides access to all the
+  packages required to keep the zone in sync.
+
+
+**Zones image locking.** [3]_
+
+  When performing pkg(5) operations in a global zone that affect the
+  contents of the global zone, we need a locking mechanism to prevent
+  concurrent operations from being done within a non-global zone. We
+  also need a locking mechanism that allows for the reverse. Ie, if a
+  zone is in the middle of a pkg(5) operation we don't want to initiate
+  an operation from the global zone which might also require updating
+  that zone. Other scenarios that we will probably also need to protect
+  against include a zone changing state while we're performing a pkg(5)
+  operation on that zone. For example, if we're installing a package in
+  a global zone which results in us also installing a package in a
+  non-global zone, we need to prevent that non-global zone from
+  rebooting while we're installing the package.  Another example is that
+  if we're installing or removing a package from a global zone, we will
+  probably want to prevent a concurrent install of a non-global zone
+  since that could result in the freshly installed zone being out of
+  sync with the global zone. This proposal does not address any of the
+  possible race conditions wrt pkg(5) and zone linked image operations.
+
+
+**Pkg(5)/beadm(1M) image filesystem management.** [4]_
+
+  Currently, beadm(1M) allows for versioning (via snapshotting and
+  cloning) of multiple filesystems in a global zone image, assuming all
+  those filesystems are zfs based and are all children of the current
+  root filesystem. beadm(1M) treats any other filesystems in the current
+  BE as shared and lofs mounts them into any other alternate BE. This
+  means that if any pkg(5) software is installed into these "shared"
+  filesystems that software will become out of sync wrt some BE. Pkg(5)
+  and beadm(1M) do not perform any check to ensure that all the software
+  being installed is not being installed into "shared" filesystems. This
+  same problem also affects zones. This proposal does not address this
+  issue in any way and assumes that eventually pkg(5) or beadm(1M) will
+  provide more flexible and robust functionality to manage images that
+  span multiple filesystems.
+
+
+**Beadm(1M) support for linked images.**
+
+  Currently, beadm(1M) can be run within the global zone to manage
+  global zone boot environments and their associated zones. But
+  beadm(1M) does not support running within a non-global zone and
+  creating snapshots of zone boot environments from within a zone.
+  Beadm(1M) support within a zone is a requirement for supporting
+  pkg(5) image-update within a zone.  (Since image-update only operates
+  on alternate boot environments.) It is also the case that other pkg(5)
+  operations may refuse to operate on the currently running image and
+  may only be run on cloned and offline boot environments. Since
+  beadm(1M) can not be run within a zone, we can't create cloned boot
+  environments within a zone, so none of these operations will be
+  supported.
+
+  Additionally, beadm(1M) is currently aware of zones images, but
+  eventually, beadm(1m) should probably become linked image aware, since
+  all linked images should be snapshotted, cloned, and upgraded in sync.
+  Enhancing beadm(1M) will be required to support non-zone types of
+  linked images that live outside the current boot environment. Once
+  this project delivers initial pkg(5) linked image interfaces it will
+  be possible to update beadm(1M) to consume these interfaces so that it
+  can be aware of all linked images on the system, instead of just zone
+  images. These beadm(1M) enhancements are out of the scope of this
+  proposal.
+
+
+pkg(5) linked image overview
+============================
+
+pkg(5) linked images will always have a parent and child
+relationship. Parent images may have multiple children, but each child
+image may only have one parent. It's possible for there to be multiple
+levels of linked images (i.e., nested linked image), for example, you
+could have a system linked image, which is a child of a zone linked
+image, which is the child of a global zone image.
+
+
+pkg(5) linked image name
+------------------------
+
+All pkg(5) linked images are uniquely identified by a name. A fully
+qualified linked image name is <linked image type>:<linked image name>.
+The linked image name must begin with an alphanumeric character and can
+contain alphanumeric characters, underscores (_), hyphens (-), and dots
+(.).  Additional restrictions on the <linked image name> format may be
+defined by the linked image plugin handling that type of linked image.
+
+
+pkg(5) linked image attach mode
+-------------------------------
+
+As previously mentioned, all linked images will have a parent/child
+relationship.  But there are two distinct ways that this parent/child
+link can be established, which in turn determines what operations are
+possible on each image and how the linked image relationship is managed.
+
+First, a parent may link/attach a child image to itself, in which case,
+the parent image will be authoritative for the linked image
+relationship.  This means that:
+
+- The parent image is aware of the child image.
+
+- The child image will know that it is a linked image, but it will
+  not know the location of it's parent image.
+
+- Linked image properties can only be modified from the parent image and
+  are treated as read-only within the child image.
+
+- Packaging operations on the parent image which require updating child
+  images to keep them in sync will attempt to do so automatically.
+
+Second, an image may make itself into a child by linking/attaching
+itself to a parent.  In this case the parent has no awareness of the
+child image and the child image is authoritative for and must manage the
+linked image relationship.  This means that:
+
+- The parent image is unaware of the child image.
+
+- The child image will know that it is a linked image, and it will
+  know the location of the parent image.
+
+- Linked image properties only exist within (and therefore must be
+  modified from within) the child image.
+
+- Packaging operations on the parent image will not directly affect the
+  child image.  It is the responsibility of the child image to make sure
+  that it remains in sync with its parent.
+
+In the former image linking mode, the parent must "push" constraint (and
+property) information to child images.  While in the latter mode the
+child will "pull" constraint information from the parent.
+
+Zones linked images will exclusively use the push linking mode.
+System linked images will support both the push and pull linking modes.
+
+
+pkg(5) linked image properties
+------------------------------
+
+As mentioned earlier, each child linked image will have properties
+associated with it.
+
+In the case of both push and pull based child images, linked image
+property information will always be stored within a private file and
+directory in the pkg(5) image metadata directory.  Currently either
+/.org.opensolaris,pkg/linked/ or /var/pkg/linked/.
+
+In the case of push based parent images, the property information for
+child images is accessible through a plugin for managing each different
+type of linked image.  This allows each linked image type to manage the
+storage of linked image properties.  In the case of system linked
+images, child linked image properties will be stored within the image
+configuration.  In the case of Zones linked images, linked image
+properties may either be fixed or derived from different zonecfg(1m)
+options.
+
+Initially the following properties will be supported:
+
+**li-name**
+
+  This is a linked image name (as described above).
+
+  This property is common to all linked image types, both push and
+  pull based.
+
+  Notably, this property always refers to a child linked image and
+  never a parent image.  This is because the linked image name encodes
+  what type of linked image the child is.
+
+**li-model**
+
+  This indicates the attach mode of the linked image child (as
+  described above), either push or pull.
+
+  This property is common to all linked image types, both push and
+  pull based.
+
+**li-path**
+
+  This is the filesystem path by which one linked image is accessible
+  from another.
+
+  In the case of a parent image with a push based child, this property
+  will point to the child image.  From within the push based child,
+  this property will not be present.  In the case of pull based
+  children, this property will point to the parent linked image.
+
+**li-recurse**
+
+  This property specifies if recursive linked image operations should
+  continue to recurse within a linked child.
+
+  This property only exists for push based children of a parent image.
+
+  By default, for zones this property will be set to false.  This means
+  that if we do a recursive pkg(5) operation in the global zone that
+  affects a non-global zone, we will update that non-global zone, but we
+  will ignore any children of that non-global zone.  So if for example,
+  a non-global zone administrator has created a push based child of the
+  non-global zone image, global zone pkg operations will not recurse
+  into that child image.
+
+
+pkg(5) linked image interfaces
+==============================
+
+
+pkg(1) linked image interfaces introduction
+-------------------------------------------
+
+Linked image support has an impact on the behavior and interfaces of
+many pkg(5) operations.  Hence, before jumping into all the new linked
+image interfaces it makes sense to discuss some of the common linked
+image behaviors and options to pkg(1) cli interfaces.
+
+pkg(5) operation recursion
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  Any pkg(1) subcommand which installs or removes packages from a linked
+  image may attempt to recurse into linked child images to attempt to
+  keep those child images in sync with the parent.  There are two things
+  to note about this recursive behavior.
+
+  First, the initial recursion into child images by a pkg(5) operation
+  on a parent image has no relation to the li-recurse linked image
+  property.  This property is only used when dealing with multiple
+  levels of recursion.  If this property is false for a child image, a
+  recursive operation will still descend into that child, but it will
+  not continue to recursively descend into children of that child.
+
+  Second, it's important to realize that recursion doesn't imply that
+  the operation being performed on the parent will also be performed on
+  all its children.  Recursion into children is only done to keep child
+  images in sync.  To clarify this point it's worth explicitly
+  describing how this impacts common pkg(5) operations.
+
+    **pkg(1) install** - When an install recurses into child linked images,
+    it does so only to keep those images in sync.  Hence, if a user
+    installs a new package in a parent image, the requested package will
+    not automatically be installed into child images.  Although if a
+    user "upgrades" a package in the parent image by installing a newer
+    version of that package, and that package also happens to be kept in
+    sync between the parent and child images, then the recursive sync
+    done as part of the install operation will result in the newer
+    package being installed in the child image.
+
+    **pkg(1) update** - This is handled similarly to a pkg(1) install.
+    When we recurse into the child we do not do a pkg(1) update.  We
+    will only update the minimum number of packages required to keep the
+    child in sync with the planned contents of the parent image.
+
+    **pkg(1) uninstall** - When an uninstall recurses into a child, it will
+    not uninstall the specified packages within that child.  Hence if a
+    random un-synced package is removed from a parent image it will not
+    be touched in a child image.  But if the user tries to remove
+    a synced package which is installed within a child image, the
+    removal will fail.  The user will have to remove the package from
+    child images before they can remove it from the parent image.
+
+
+pkg(1) linked image common cli options: ignoring children
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  As mentioned above, any pkg(1) subcommand which installs or removes
+  packages from a linked image may attempt to recurse into linked child
+  images to attempt to keep those child images in sync with the parent.
+  Therefore, these pkg(1) commands will all get new options to allow
+  them to ignore linked child images.  These pkg(1) commands include::
+
+    attach-linked [-I | -i <li-name>]
+    change-facet [-I | -i <li-name>]
+    change-variant [-I | -i <li-name>]
+    image-update [-I | -i <li-name>]
+    install [-I | -i <li-name>]
+    set-property-linked [-I | -i <li-name>]
+    sync-linked [-I | -i <li-name>]
+    uninstall [-I | -i <li-name>]
+
+  Note that the list of commands above does not include pkg(1) commands
+  like verify and fix, since those commands will never
+  result in packages being installed or removed from an image.
+  Unintuitively, set-property-linked is included in the list above since
+  it may be used to change linked image properties which may result in
+  packaging content changes in that image, which in turn may need to
+  be propagated to linked children.
+
+  When performing one of the above operations, the operation is first
+  planned in the parent image, and if there are required packaging
+  changes then we will recurse to each child image and create a plan to
+  keep each child in sync with the planned changes in the parent.  If a
+  plan which keeps a child image in sync cannot be created then the
+  operation will fail before any image is modified.  In this case, if
+  the administrator wants to retry the requested operation they will
+  need to do one of the following before that operation will succeed:
+
+  - Detach the child image preventing the operation.
+
+  - Modify the package contents of the child image that is preventing
+    the operation such that the operation can succeed.
+
+  - Pass the -i <li-name> option to the requested pkg(1) command,
+    thereby telling it to ignore the specified child image that is
+    preventing the operation.
+
+  - Use the -I option to the requested pkg(1) command, thereby telling
+    it to ignore all child images.
+
+  Notably, in certain cases it's possible for a push based child linked
+  image to exist but not be accessible from the parent. An example of
+  this would be when a non-root user runs a pkg(1) command, all zone
+  linked image paths will not be accessible.  If pkg(1) is attempting to
+  do any operation which may recurse into child images, all children
+  must be accessible and if any child is not accessible the operation
+  will fail and no updates will be made to any image.
+
+
+pkg(1) linked image common cli options: selecting children
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  Some of the proposed linked image pkg(1) commands support common
+  arguments that allow the caller to specify which linked image they
+  should operate on.  These new commands and options are::
+
+    property-linked [-l <li-name>]
+    set-property-linked [-l <li-name>]
+
+    audit-linked [-a|-l <li-name>]
+    detach-linked [-a|-l <li-name>]
+    sync-linked [-a|-l <li-name>]
+
+  If one of the above commands is run without any arguments, then the
+  command will be performed on the current image with the assumption
+  that the current image is a linked child image.  If the current image
+  is not a child image an error will be generated.
+
+  If the "-l <li-name>" option is specified to one of the commands
+  above, then it's assumed that the current image has a child by that
+  name and the requested operation is run for that child.
+
+  If the "-a" option is specified to one of the commands above, then the
+  command is run for all the children of the current image.
+
+
+pkg(1) linked image common cli options: syncing parent metadata
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  If a child image is linked to a parent in pull mode, then certain
+  pkg(1) subcommands will always attempt to update linked image metadata
+  from their parent.  This update will fail if the parent image is not
+  accessible.  Hence, a new --no-parent-sync option is available to
+  skip this parent metadata sync.  The pkg(1) commands which support
+  this option are:
+
+    attach [--no-parent-sync]
+    audit [--no-parent-sync]
+    change-facet [--no-parent-sync]
+    change-variant [--no-parent-sync]
+    image-update [--no-parent-sync]
+    install [--no-parent-sync]
+    list [--no-parent-sync]
+    sync [--no-parent-sync]
+
+
+pkg(1) linked image cli interfaces
+----------------------------------
+
+pkg(1) list-linked
+~~~~~~~~~~~~~~~~~~
+
+  **pkg list-linked [-H]**
+
+  List all known child images associated with the current image. This
+  lists the name and path for each child image.
+
+  Here's an example of this command when run by a non-root user on a
+  system with zones::
+
+    $ pkg list-linked
+    NAME            RELATIONSHIP    PATH
+    -               self            /
+    zone:dhcp       child           /export/zones/dhcp/root
+    zone:z1         child           /export/zones/z1/root
+    zone:z3         child           /export/zones/z3/root
+    system:child    child           /child
+
+
+pkg(1) property-linked
+~~~~~~~~~~~~~~~~~~~~~~
+
+  **pkg property-linked [-H] [-l <li-name>] [propname ...]**
+
+  List all property values associated with a linked image.  If no linked
+  image is specified then the current image is assumed to be a child
+  linked image and its properties are listed.
+
+  Here's an example of this command when run on an image that is not
+  linked::
+
+    $ pkg -R /tmp/a property-linked
+    $
+
+  Here's an example of this command when run on an image that has
+  children but is not itself a child::
+
+    $ pkg property-linked
+    PROPERTY        VALUE
+    li-altroot      /
+    li-path         /
+
+  Here's an example of this command when run by a non-root user on a
+  system with zones::
+
+    $ pkg property-linked -l zone:dhcp
+    PROPERTY        VALUE
+    li-altroot      /
+    li-model        push
+    li-name         zone:dhcp
+    li-path         /export/zones/dhcp/root
+
+  Here's an example of this command when run by a root user directly on
+  a zone/child image::
+
+    # pkg -R /export/zones/dhcp/root property-linked
+    PROPERTY        VALUE
+    li-altroot      /export/zones/dhcp/root
+    li-model        push
+    li-name         zone:dhcp
+    li-path         /export/zones/dhcp/root/
+
+
+pkg(1) audit-linked
+~~~~~~~~~~~~~~~~~~~
+
+  **pkg audit-linked [--no-parent-sync] [-a|-l <name>]**
+
+  Audit the package contents of a linked image to see if it's in sync
+  with the contents of its parent image and its content policy.
+
+  Here's an example of this command when run on an image that has no
+  parent::
+
+    $ pkg audit-linked
+    pkg: Linked image exception(s):
+    Current image is not a linked child: /
+
+  Here's an example of this command run by root on a system with zones::
+
+    # pkg audit-linked -a
+    NAME            STATUS
+    zone:dhcp       diverged
+    zone:z1         diverged
+    zone:z3         diverged
+    system:child    synced
+
+
+pkg(1) sync-linked
+~~~~~~~~~~~~~~~~~~
+
+  | **pkg sync-linked [-a|-l <name>]**
+  |       **[--no-parent-sync] [--no-pkg-updates] [--linked-md-only]**
+  |       **[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]**
+
+  Sync will attempt to bring an image into sync with its policy.
+
+  A sync operation may upgrade packages or install new packages into an
+  image while bringing the image into sync.  If the user wants to
+  prevent a sync from installing new packages or updating existing
+  packages, they can specify the --no-pkg-updates option.  If the image
+  cannot be synced without installing or updating packages, then this
+  option will cause the sync operation to fail.
+
+  If the caller doesn't want to make any packaging updates to the child
+  image then they can specify the --linked-md-only option.  (This option
+  implies the --no-pkg-updates option.) When this option is specified
+  the package contents of an image will not be modified, and the only
+  result of the operation is that the parent content data stored within
+  the child image will be updated.
+
+  Since a sync operation may modify the packaging contents of an image
+  it is very similar to a pkg(1) install operation, therefore the sync
+  command also must support many of the same options as the pkg(1)
+  install command.  Those common options are::
+
+    [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+
+  Here's an example of this command when run on an image that has no
+  parent::
+
+    $ pkg sync-linked
+    pkg: sync-linked failed (linked image exception(s)):
+    Current image is not a linked child: /
+
+  Here's an example of this command run by root on a system with zones::
+
+    # pkg sync-linked -l zone:dhcp -v
+    Solver: [ Variables: 2356 Clauses: 60384 Iterations: 2 State: Succeeded]
+    ...
+    Maintained incorporations: None
+
+    Package version changes:
+    ...
+    pkg://opensolaris.org/[email protected],5.11-0.125:20091014T044127Z -> pkg://opensolaris.org/[email protected],5.11-0.139:20100511T142142Z
+    ...
+
+
+pkg(1) set-property-linked
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  | **pkg set-property-linked [-l <name>]**
+  |       **[--no-parent-sync] [--no-pkg-updates] [--linked-md-only]**
+  |       **[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]**
+  |       **<propname> <propvalue>**
+
+  This command will attempt to update the specified linked image
+  property.
+
+  Certain linked image properties may be read-only, and which properties
+  are read-only may vary between different types of linked images.  When
+  dealing with push based child images, all linked image properties are
+  treated as read-only within the child.
+
+  Since a set-property-linked operation can change a linked image's
+  content policy, this command may need to sync a child image with its
+  parent.  Hence, the set-property-linked command also supports many of
+  the same options as the pkg(1) sync-linked command.  Those common
+  options are::
+
+    [--no-parent-sync] [--no-pkg-updates] [--linked-md-only]
+    [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+
+
+pkg(1) attach-linked
+~~~~~~~~~~~~~~~~~~~~
+
+  | **pkg attach-linked (-c|-p)**
+  |       **[--no-pkg-updates] [--linked-md-only]**
+  |       **[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]**
+  |       **[--prop-linked <propname>=<propvalue> ...]**
+  |        **<li-name> <dir>**
+
+  The attach-linked command is used to establish a linked image
+  relationship.  This command may not be supported for all linked image
+  types.  (For example, zone linked images cannot be attached via this
+  command.)
+
+  If a parent image wants to link a child image to itself, the -c option
+  should be specified.  This creates a child with a push mode of
+  operation.  If a child image wants to attach to a parent image, the -p
+  option should be specified.  This creates a child with a pull mode of
+  operation.
+
+  When linking images the user may specify optional linked image
+  property values.  Not all properties are settable via these values.
+  (The allowable properties may change in the future and may also be
+  linked image type plugin specific.)
+
+  Normally when linking images together, a sync of the child image is
+  automatically done.  If the child image can not be synced the attach
+  will fail.  Since an attach operation tries to sync a child image with
+  its parent, the attach-linked command must also support many of the
+  same options as the pkg(1) sync-linked command.  Those common options
+  are::
+
+    [--no-pkg-updates] [--linked-md-only]
+    [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+
+  Currently, linked image relationships cannot be created at image
+  creation time.  A linked image relationship can only be established
+  between two existing images.  The reason for this is mainly cli
+  complexity.  Specifically, supporting this would require that pkg(1)
+  image-create accept all the same command options as the pkg(1)
+  attach-linked command.
+
+  Here's an example of this command attaching a push child image::
+
+    # pkg attach-linked -v -c system:child /child
+               Create boot environment:    No
+                  Rebuild boot archive:    No
+    Services:
+      None
+
+
+pkg(1) detach-linked
+~~~~~~~~~~~~~~~~~~~~
+
+  **pkg detach-linked [-a|-l <li-name>]**
+
+  The detach-linked command will end a linked image relationship.  This
+  command may not be supported for all linked image types.  (For
+  example, zone linked images cannot be detached via this command.)
+
+  Here's an example of this command when run on an image that is not
+  linked::
+
+    # pkg detach-linked
+    pkg: detach-linked failed (linked image exception(s)):
+    Current image is not a linked child: /
+
+  Here's an example of this command when run directly on a child that
+  linked to a parent via a push mode relationship::
+
+    # pkg -R /child detach-linked
+    pkg: detach-linked failed (linked image exception(s)):
+    Parent linked to child, can not detach child: /child
+
+  Here's an example of this command detaching a child image::
+
+    # pkg detach-linked -l system:b -v
+               Create boot environment:    No
+                  Rebuild boot archive:    No
+    Services:
+      None
+
+
+
+pkg(5) linked image manifest metadata
+-------------------------------------
+
+When dealing with zones, package publishers need a way to specify which
+packages must be kept in sync between zone images.  In Solaris 10 this
+was done via a SVR4 packaging attribute (SUNW_PKG_ALLZONES).  With
+pkg(5) we will create a new manifest depend action for this
+purpose::
+
+  depend type=parent
+
+If a package contains this dependency and it is being installed into an
+image which is not a linked child then this dependency is ignored.  If a
+package containing this dependency is being installed into a child
+image, then it's required that the same package be installed within the
+parent image.  This dependency has an implicit fmri value which is equal
+to the package fmri which contains the dependency.  Also, when matching
+fmris in the parent image, the fmri version matching algorithm employed
+is the same as that used for satisfying "incorporate" dependencies.
+(Ie, an exact version match is required, not an equal or greater version
+match.)
+
+So packages that must be kept in sync between the global and non-global
+zone should have the following dependency action in their manifest::
+
+  depend type=parent variant.opensolaris.zone=nonglobal
+
+The dependency above will need to be set for any package which delivers
+software that may operate across zone boundaries on a system.  This
+would include:
+
+- Any software which delivers a kernel component of any kind.
+
+- Any software which delivers interfaces which may span a zone boundary.
+  Some examples would include:
+
+  - pkg(5) - Since zones are different linked images, pkg(5) by
+    definition must manage images that span zones.
+
+  - libdladm - When this library is used inside a zone it will
+    communicate (via private interfaces) to the dlmgmtd daemon inside
+    the global zone.
+
+Initially, all of ON and pkg(5) will set this property for all packages
+they deliver.  In time, the number of packages delivered from the ON and
+pkg(5) gates with this attribute set will be reduced.
+
+This property will also be a public interface since anyone delivering
+software via pkg(5) may fall into one of the categories above.  Examples
+of third parties applications which fall into this category would
+include things like VirtualBox, VxVM/VxFS, ClearCase, etc.
+
+
+pkg(5) linked image api.py interfaces
+-------------------------------------
+
+There will need to be changes made to the pkg(5) api.py interfaces to
+support linked images.
+
+The new pkg(5) api.py interfaces will attempt to minimize the amount of
+change required for existing pkg(5) api.py clients which do not wish to
+provide support for managing linked images directly.  (This should
+include every api.py client except for the pkg(1) cli).  Aside from
+tweaks to existing api.py interfaces, the most significant impact to
+existing api.py consumers will be a new set of linked image related
+exceptions and errors that may be generated by api.py interface calls.
+
+
+
+pkg(5) linked image internals
+=============================
+
+
+pkg(5) linked image child operations
+------------------------------------
+
+When operating on child linked images, pkg(1) will access those images
+in one of two ways.
+
+1) A parent image may access a child image directly to write linked
+image property and parent content information into the child image.
+
+2) For all other operations, the pkg(1) linked image code will spawn a
+new pkg(1) cli process to manipulate the child image.  For system images
+this will be a normal pkg(1) process started with the -R option.  Zones
+linked images will initially operate in the same way as system images.
+
+
+pkg(5) linked image operation staging
+-------------------------------------
+
+Currently pkg(1) has multiple stages of operation when manipulating
+images. The stages most relevant to linked image operations are:
+
+1) Package install/uninstall/upgrade planning
+2) Package action planning
+3) Package content downloading
+4) Action execution
+
+When dealing with linked images we want to be able to perform most of
+the operations above in lock step across multiple images.  We want to be
+able to do planning for all child images (and potentially children of
+children) before beginning any of the later stages of execution (this
+will allow us to report problems early).  Before beginning any
+operation which modifies an image we also want to make sure that we've
+downloaded any required content for updating any linked images.
+
+Also, depending on how many packages are installed within a pkg(5)
+image, stages 1 and 2 above can be very memory intensive, and when
+planning operations across many images we want to be careful not to run
+a system out of memory.
+
+Hence, this project will create a mechanism where using pkg(5) we can
+execute one of the specific stages above, save the results to disk, and
+then resume our operation to perform a later stage, only using
+previously saved results.  This will be done by adding the following
+private options to pkg(1):
+
+  | **pkg [--runid=<N> --stage=(pubcheck|plan|prepare|execute)] <command> ...**
+
+The --stage option will specify which specific operation the pkg(1)
+command should perform.  If --stage=pubcheck is specified, the pkg(1)
+command will verify that the current image's publisher configuration is
+a superset of the parent image's publisher configuration.  If
+--stage=plan
+is specified, the pkg(1) command will plan what packages to
+install/uninstall/upgrade, write that information to disk, and exit.  If
+--stage=prepare is specified, pkg(1) will read a previously created
+install/uninstall/upgrade plan from disk (an error will be generated if
+there is no existing package plan on disk), and then download any
+required contents.  Lastly, --stage=execute will cause pkg(1) to read
+existing package and action plans and execute them.
+
+When writing package plans to disk they are stored in json format.
+
+It's possible that we may have multiple non-image-modifying operations
+in progress on a single image at once.  (For example, using the -n
+option to pkg(1) commands.)  Hence, we introduce a --runid option that
+allows the caller to specify a number (N) to uniquely identify the saved
+package and action plans.  The --runid option is required when using
+the --stage option.
+
+These new options are private and users should never specify them.  They
+will be used internally by the linked image code when invoking pkg(1)
+commands to operate on child images.
+
+
+zones(5) and pkg(5) integration
+===============================
+
+This project will update the zones smf service script and the zones ipkg
+brand callback to utilize the new linked image pkg(1) cli and api.py
+interfaces.
+
+zones smf service changes
+-------------------------
+
+During system boot the zones smf service (svc:/system/zones) will be
+enhanced such that it does a linked-audit of all installed ipkg branded
+zones on the system.  If any zones fail the linked audit, the zones
+service will enter the maintenance state.  If a zone has the zonecfg(1m)
+autoboot property set to true, but fails a linked audit, that zone will
+not be booted.
+
+ipkg brand callback changes
+---------------------------
+
+The zones ipkg brand boot callback will be updated to audit zone images
+before attempting to boot them.  If a zone image fails to audit then it
+will also fail to boot.
+
+The zones install callback will be modified such that it uses linked
+functionality when creating and populating zones images.
+
+The zones attach and detach callbacks will be modified so that they
+automatically use the linked image attach and detach functionality.  By
+default, a zoneadm attach will use the pkg(1) attach-linked
+--no-pkg-updates option, unless the zoneadm(1m) attach -u option is
+specified (thereby allowing package updates).
+
+Zoneadm(1m) move will not require any new callbacks or updates since
+zones linked image metadata (like the zone path) will not be cached by
+the linked image subsystem.  Instead, internally, the zones linked image
+plugin will obtain all zones linked image metadata directly from the
+zones subsystem.
+
+
+
+Related Bugs
+============
+
+The linked image support proposed above is covered by the following
+bugs:
+
+- | 16148 need linked image support for zones, phase 1
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=16148
+
+- | 6969956 need pkg metadata indicating pkgs which must be synced between zones
+  | http://monaco.sfbay.sun.com/detail.jsf?cr=6969956
+  | 7633 need pkg metadata indicating pkgs which must be synced between zones
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=7633
+
+The following bugs are all prerequisites for most of the future linked
+images and zones follow on work described above:
+
+- | 16149 need system repository to facilitate linked image support
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=16149
+
+- | 6964121 tmpfs rename should update ->v_path
+  | http://monaco.sfbay.sun.com/detail.jsf?cr=6964121
+
+The future improvements to linked images and zones which are required
+for fully integrated zones support are covered by:
+
+- | 16150 need linked image support for zones, phase 2
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=16150
+
+- | 6978239 pkg(5) needs a zones state locking mechanism
+  | http://monaco.sfbay.sun.com/detail.jsf?cr=6978239
+
+Other bugs which are related to pkg(5) and zones(5) which are not being
+addressed by this work include:
+
+- | 1947 Offline zone creation is impossible
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=1947
+
+- | 13986 incorporation/metapackage needed for zone install
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=13986
+
+- | 15343 pkg needs to be able to lock BEs
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=15343
+
+- | 15342 libbe (and beadm) need BE write lock support
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=15342
+
+- | 16258 libbe support for zones
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=16258
+
+- | 16838 pkg should take into account libbe's ideas about shared and nonshared filesystems
+  | https://defect.opensolaris.org/bz/show_bug.cgi?id=16838
+
+- | 6988152 beadm/libbe needs support for zone/linked dataset management
+  | http://monaco.sfbay.sun.com/detail.jsf?cr=6988152
+
+- | 6998842 Zones Proxy for the pkg(5) System Repository
+  | http://monaco.sfbay.sun.com/detail.jsf?cr=6998842
+
+
+PTL Entries
+===========
+
+- | 8362 Pkg support for Zones phase 1
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8362
+
+- | 8724 Pkg support for Zones phase 2
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8724
+
+- | 8725 pkg(5) System Repository
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8725
+
+- | 8726 Zones Proxy for the pkg(5) System Repository
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8726
+
+- | 8727 Updater Branded Zones
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8727
+
+- | 8728 Zone State Locking
+  | http://sac.sfbay/projectlog/ptl/dashboard.php?UniqueID=8728
+
+
+References
+==========
+
+.. [1] | 1947 Offline zone creation is impossible
+   | https://defect.opensolaris.org/bz/show_bug.cgi?id=1947
+
+.. [2] | 16149 need system repository to facilitate linked image support
+   | https://defect.opensolaris.org/bz/show_bug.cgi?id=16149
+
+.. [3] | 15343 pkg needs to be able to lock BEs
+   | https://defect.opensolaris.org/bz/show_bug.cgi?id=15343
+   | 15342 libbe (and beadm) need BE write lock support
+   | https://defect.opensolaris.org/bz/show_bug.cgi?id=15342
+   | 6978239 pkg(5) needs a zones state locking mechanism
+   | http://monaco.sfbay.sun.com/detail.jsf?cr=6978239
+
+.. [4] | 16838 pkg should be aware of image and filesystem boundaries
+   | https://defect.opensolaris.org/bz/show_bug.cgi?id=16838
--- a/src/Makefile	Fri May 06 17:24:48 2011 -0700
+++ b/src/Makefile	Sat May 07 00:25:10 2011 -0700
@@ -30,6 +30,9 @@
 packages := TARGET = install
 clean := TARGET = clean
 clobber := TARGET = clobber
+lint := TARGET = lint
+clint := TARGET = clint
+pylint := TARGET = pylint
 test := TARGET = test
 test-verbose := TARGET = test-verbose
 test-generate := TARGET = test-generate
@@ -48,8 +51,22 @@
 	$(PYTHON) setup.py clobber
 	@cd pkg; pwd; make clobber
 
+#
+# run pylint as part of the install target.
+# it's the best way to ensure things stay pylint clean.
+#
 install: $(SUBDIRS)
 	$(PYTHON) setup.py install
+	$(PYTHON) setup.py pylint
+
+lint:
+	$(PYTHON) setup.py lint
+
+clint:
+	$(PYTHON) setup.py clint
+
+pylint:
+	$(PYTHON) setup.py pylint
 
 #
 # This rule propagates the current make target through all of the
@@ -70,7 +87,7 @@
 test-verbose:
 	$(PYTHON) setup.py test -v
 
-test-generate: 
+test-generate:
 	$(PYTHON) setup.py test -g
 
 test-leaks:
--- a/src/brand/Makefile	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/Makefile	Sat May 07 00:25:10 2011 -0700
@@ -46,53 +46,56 @@
 	$(ROOTBRAND) \
 	$(ROOTBRANDPKG)
 
-ROOTFILES = \
-	$(ROOTETCBRAND)/pkgrm.conf \
-	$(ROOTETCBRAND)/smf_disable.conf \
-	$(ROOTETCZONES)/SUNWipkg.xml \
+ROOTBINS = \
 	$(ROOTBRANDPKG)/attach \
 	$(ROOTBRANDPKG)/boot \
 	$(ROOTBRANDPKG)/clone \
-	$(ROOTBRANDPKG)/common.ksh \
 	$(ROOTBRANDPKG)/detach \
-	$(ROOTBRANDPKG)/fmri_compare \
 	$(ROOTBRANDPKG)/halt \
 	$(ROOTBRANDPKG)/image_install \
 	$(ROOTBRANDPKG)/p2v \
 	$(ROOTBRANDPKG)/pkgcreatezone \
-	$(ROOTBRANDPKG)/pkgrm.lst \
 	$(ROOTBRANDPKG)/poststate \
 	$(ROOTBRANDPKG)/prestate \
-	$(ROOTBRANDPKG)/smf_disable.lst \
 	$(ROOTBRANDPKG)/support \
 	$(ROOTBRANDPKG)/sysboot \
 	$(ROOTBRANDPKG)/uninstall
 
+ROOTFILES = \
+	$(ROOTBRANDPKG)/common.ksh \
+	$(ROOTBRANDPKG)/developerenv.ksh \
+	$(ROOTBRANDPKG)/pkgrm.lst \
+	$(ROOTBRANDPKG)/smf_disable.lst \
+	$(ROOTETCBRAND)/pkgrm.conf \
+	$(ROOTETCBRAND)/smf_disable.conf \
+	$(ROOTETCZONES)/SUNWipkg.xml \
+
 BIN = \
 	support \
-	fmri_compare
+
+FILEMODE = 0444
+$(ROOTBINS) := FILEMODE = 755
+$(ROOTETCBRAND)/% := FILEMODE = 644
+
 
 support := LDLIBS = -lzonecfg
 
 all := TARGET = all
 
+
 support: support.c
 	$(LINK.c) -o $@ [email protected] $(LDLIBS)
 
-fmri_compare: fmri_compare.py
-	cp [email protected] $@
-	chmod 755 $@
-
-all: fmri_compare
+all:
 
 clean:
 	rm -f $(BIN)
 
 clobber: clean
-	rm -f $(ROOTFILES)
+	rm -f $(ROOTFILES) $(ROOTBINS)
 	rm -fr $(ROOTBRAND)
 
-install: $(ROOTFILES)
+install: $(ROOTFILES) $(ROOTBINS)
 
 
 $(ROOT) $(ROOTETC) $(ROOTETCBRAND) $(ROOTETCZONES) $(ROOTUSRLIB) \
@@ -100,10 +103,10 @@
 	mkdir -p $@
 
 $(ROOTETCBRAND)/%: $(ROOTETCBRAND) %
-	rm -f $@; $(INSTALL) -f $(ROOTETCBRAND) -m 0644 $<
+	rm -f $@; $(INSTALL) -f $(ROOTETCBRAND) -m $(FILEMODE) $<
 
 $(ROOTETCZONES)/%: $(ROOTETCZONES) %
-	rm -f $@; $(INSTALL) -f $(ROOTETCZONES) -m 0444 $<
+	rm -f $@; $(INSTALL) -f $(ROOTETCZONES) -m $(FILEMODE) $<
 
 $(ROOTBRANDPKG)/%: $(ROOTBRANDPKG) %
-	rm -f $@; $(INSTALL) -f $(ROOTBRANDPKG) -m 0444 $<
+	rm -f $@; $(INSTALL) -f $(ROOTBRANDPKG) -m $(FILEMODE) $<
--- a/src/brand/attach	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/attach	Sat May 07 00:25:10 2011 -0700
@@ -26,16 +26,20 @@
 
 . /usr/lib/brand/ipkg/common.ksh
 
+# Allows developer to override some things like PATH and PYTHONPATH
+. /usr/lib/brand/ipkg/developerenv.ksh
+
 m_attach_log=$(gettext "Log File: %s")
 m_zfs=$(gettext "A ZFS file system was created for the zone.")
-m_usage=$(gettext  "attach [-a archive] [-d dataset] [-n] [-r zfs-recv] [-u]\n\tThe -a archive option specifies a tar file or cpio archive.\n\tThe -d dataset option specifies an existing dataset.\n\tThe -r zfs-recv option receives the output of a 'zfs send' command\n\tof an existing zone root dataset.\n\tThe -u option indicates that the software should be updated to match\n\tthe current host.")
+m_usage=$(gettext  "attach [-a archive] [-d dataset] [-r zfs-recv] [-v] [-u]\n\tThe -a archive option specifies a tar file or cpio archive.\n\tThe -d dataset option specifies an existing dataset.\n\tThe -r zfs-recv option receives the output of a 'zfs send' command\n\tof an existing zone root dataset.\n\tThe -u option indicates that the software should be updated to match\n\tthe current host.\n\tThe -v option enables verbose output.")
 m_attach_root=$(gettext "               Attach Path: %s")
 m_attach_ds=$(gettext   "        Attach ZFS Dataset: %s")
 m_gzinc=$(gettext       "       Global zone version: %s")
 m_zinc=$(gettext        "   Non-Global zone version: %s")
 m_need_update=$(gettext "                Evaluation: Packages in zone %s are out of sync with the global zone. To proceed, retry with the -u flag.")
 m_cache=$(gettext       "                     Cache: Using %s.")
-m_updating=$(gettext    "  Updating non-global zone: Output follows")
+m_publisher=$(gettext   "  Updating non-global zone: Propagating publisher %s.")
+m_image_link=$(gettext  "  Updating non-global zone: Linking to image %s.")
 m_sync_done=$(gettext   "  Updating non-global zone: Zone updated.")
 m_complete=$(gettext    "                    Result: Attach Succeeded.")
 m_failed=$(gettext      "                    Result: Attach Failed.")
@@ -50,6 +54,7 @@
 f_sanity_variant=$(gettext "  Sanity Check: FAILED, couldn't determine %s from image.")
 f_sanity_global=$(gettext  "  Sanity Check: FAILED, appears to be a global zone (%s=%s).")
 f_update=$(gettext "Could not update attaching zone")
+f_sysrepo=$(gettext "Could not install system-repository")
 f_ds_config=$(gettext  "Failed to configure dataset %s: could not set %s.")
 f_no_active_ds_mounted=$(gettext  "Failed to locate any dataset mounted at %s.  Attach requires a mounted dataset.")
 f_nonsticky=$(gettext "Could not set legacy publisher to non-sticky")
@@ -60,6 +65,7 @@
 # Clean up on interrupt
 trap_cleanup() {
 	typeset msg=$(gettext "Installation cancelled due to interrupt.")
+	trap - INT
 
 	log "$msg"
 
@@ -71,7 +77,15 @@
 
 # If the attach failed then clean up the ZFS datasets we created.
 trap_exit() {
-	if [[ $EXIT_CODE == $ZONE_SUBPROC_OK ]]; then
+	trap - INT
+	if [[ -n $EXIT_NOEXECUTE ]]; then
+		# dryrun mode, nothing to do here; exit with whatever
+		# EXIT_CODE is set to.
+		;
+	elif [[ $EXIT_CODE == $ZONE_SUBPROC_USAGE ]]; then
+		# Usage message printed, nothing to do here.
+		;
+	elif [[ $EXIT_CODE == $ZONE_SUBPROC_OK ]]; then
 		# unmount the zoneroot if labeled brand
 		is_brand_labeled
 		(( $? == 1 )) && ( umount $ZONEROOT || \
@@ -111,7 +125,7 @@
 trap trap_cleanup INT
 trap trap_exit EXIT
 
-PKG="/usr/bin/pkg"
+PKG=pkg
 KEYDIR=/var/pkg/ssl
 
 # If we weren't passed at least two arguments, exit now.
@@ -147,22 +161,9 @@
 typeset gz_incorporations=""
 
 #
-# $1 is an empty string to be populated with a list of incorporation
-# fmris.
-#
-gather_incorporations() {
-	typeset -n incorporations=$1
-	typeset p=
-
-	for p in \
-	    $(LC_ALL=C $PKG search -Hl -o pkg.name \
-	    ':pkg.depend.install-hold:core-os*');do
-		incorporations="$incorporations $(get_pkg_fmri $p)"
-	done
-}
-
+verbose=""
 # Other brand attach options are invalid for this brand.
-while getopts "a:d:nr:u" opt; do
+while getopts "a:d:n:r:uv" opt; do
 	case $opt in
 		a)
 			if [[ -n "$inst_type" ]]; then
@@ -178,7 +179,10 @@
 		 	inst_type="directory"
 			install_media="$OPTARG"
 			;;
-		n)	noexecute=1 ;;
+		n)	noexecute=1
+	       		EXIT_NOEXECUTE=1
+			dryrun_mfst="$OPTARG"
+			;;
 		r)
 			if [[ -n "$inst_type" ]]; then
 				fatal "$incompat_options" "$m_usage"
@@ -187,6 +191,8 @@
 			install_media="$OPTARG"
 			;;
 		u)	allow_update=1 ;;
+		v)	verbose="-v"
+			OPT_V=1 ;;	# used for vlog()
 		?)	fail_usage "" ;;
 		*)	fail_usage "";;
 	esac
@@ -201,14 +207,16 @@
 
 if [ $noexecute -eq 1 ]; then
 	#
-	# The zone doesn't have to exist when the -n option is used, so do
+	# the zone doesn't have to exist when the -n option is used, so do
 	# this work early.
 	#
 
-	# XXX There is no sw validation for IPS right now, so just pretend
+	# LIXXX There is no sw validation for IPS right now, so just pretend
 	# everything will be ok.
+
+	# Set exit code for the trap handler.
 	EXIT_CODE=$ZONE_SUBPROC_OK
-	exit $ZONE_SUBPROC_OK
+	exit $EXIT_CODE
 fi
 
 enable_zones_services
@@ -246,54 +254,21 @@
 
 
 #
-# Try to find the "entire" incorporation's FMRI in the gz.
-#
-gz_entire_fmri=$(get_entire_incorp)
-
-#
-# If entire isn't installed, create an array of global zone core-os
-# incorporations.
-#
-if [[ -z $gz_entire_fmri ]]; then
-	gather_incorporations gz_incorporations
-fi
-
-#
 # We're done with the global zone: switch images to the non-global
 # zone.
 #
 PKG_IMAGE="$ZONEROOT"
 
 #
-# Try to find the "entire" incorporation's FMRI in the ngz.
-#
-ngz_entire_fmri=$(get_entire_incorp)
-
-[[ -n $gz_entire_fmri ]] && log "$m_gzinc" "$gz_entire_fmri"
-[[ -n $ngz_entire_fmri ]] && log "$m_zinc" "$ngz_entire_fmri"
-
-#
-# Create the list of incorporations we wish to install/update in the
-# ngz.
-#
-typeset -n incorp_list
-if [[ -n $gz_entire_fmri ]]; then
-    incorp_list=gz_entire_fmri
-else
-    incorp_list=gz_incorporations
-fi
-
-#
 # If there is a cache, use it.
 #
 if [[ -f /var/pkg/pkg5.image && -d /var/pkg/publisher ]]; then
-	PKG_CACHEROOT=/var/pkg/publisher
+	# respect PKG_CACHEROOT if the caller has it set.
+	[ -z "$PKG_CACHEROOT" ] && PKG_CACHEROOT=/var/pkg/publisher
 	export PKG_CACHEROOT
 	log "$m_cache" "$PKG_CACHEROOT"
 fi
 
-log "$m_updating"
-
 #
 # pkg update-format doesn't allow a dry run or provide any other way to
 # see if an update is needed.
@@ -316,53 +291,27 @@
 LC_ALL=C $PKG refresh --full
 
 #
-# Bring the ngz entire incorporation into sync with the gz as follows:
-# - First compare the existence of entire in both global and non-global
-#   zone and update the non-global zone accordingly.
-# - Then, if updates aren't allowed check if we can attach because no
-#   updates are required. If we can, then we are finished.
-# - Finally, we know we can do updates and they are required, so update
-#   all the non-global zone incorporations using the list we gathered
-#   from the global zone earlier.
+# Reset the pkg image back to the global zone so that we link the
+# zoneroot to that image.
 #
-if [[ -z $gz_entire_fmri && -n $ngz_entire_fmri ]]; then
-	if [[ $allow_update == 1 ]]; then
-		LC_ALL=C $PKG uninstall entire || pkg_err_check "$f_update"
-	else
-		log "\n$m_need_update" "$ZONENAME"
-		EXIT_CODE=$ZONE_SUBPROC_NOTCOMPLETE
-		exit $EXIT_CODE
-    fi
+PKG_IMAGE=$GZ_IMAGE
+
+log "$m_image_link" $GZ_IMAGE
+extra_flags=""
+if [[ $allow_update == 0 ]]; then
+       extra_flags="--no-pkg-updates"
 fi
 
-if [[ $allow_update == 0 ]]; then
-	LC_ALL=C $PKG install --accept --no-refresh -n $incorp_list
-	if [[ $? == 4 ]]; then
-		log "\n$m_complete"
-		EXIT_CODE=$ZONE_SUBPROC_OK
-		exit $EXIT_CODE
-	else
-		log "\n$m_need_update" "$ZONENAME"
-		EXIT_CODE=$ZONE_SUBPROC_NOTCOMPLETE
-		exit $EXIT_CODE
-	fi
-fi
+LC_ALL=C $PKG attach-linked $verbose -f --allow-relink --accept \
+    $extra_flags \
+    -c zone:$ZONENAME $ZONEROOT || pkg_err_check "$f_update"
 
-#
-# If the NGZ doesn't have entire, but the GZ does, then we have to install
-# entire twice. First time we don't specify a version and let constraining
-# incorporations determine the version. Second time, we try to install the
-# same version as we have in the GZ.
-#
-if [[ -n $gz_entire_fmri && -z $ngz_entire_fmri ]]; then
-	LC_ALL=C $PKG install --accept --no-refresh entire  || \
-	    pkg_err_check "$f_update"
-fi
+# make sure the sysrepo is installed:
+PKG_IMAGE=$ZONEROOT
+LC_ALL=C $PKG install $verbose pkg:///package/pkg/system-repository || \
+    pkg_err_check "$f_sysrepo"
 
-LC_ALL=C $PKG install --accept --no-refresh $incorp_list  || \
-    pkg_err_check "$f_update"
-
-log "\n$m_sync_done"
+dlog "\n$m_sync_done"
 log "$m_complete"
 
 EXIT_CODE=$ZONE_SUBPROC_OK
--- a/src/brand/common.ksh	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/common.ksh	Sat May 07 00:25:10 2011 -0700
@@ -41,7 +41,7 @@
 f_incompat_options=$(gettext "cannot specify both %s and %s options")
 f_sanity_detail=$(gettext  "Missing %s at %s")
 f_sanity_sparse=$(gettext  "Is this a sparse zone image?  The image must be whole-root.")
-sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like an OpenSolaris system.")
+sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like a Solaris system.")
 sanity_fail=$(gettext   "  Sanity Check: FAILED (see log for details).")
 sanity_fail_vers=$(gettext  "  Sanity Check: the Solaris image (release %s) is not an OpenSolaris image and cannot be installed in this type of branded zone.")
 install_fail=$(gettext  "        Result: *** Installation FAILED ***")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/brand/developerenv.ksh	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,39 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+#
+# This file is present as a hook to allow developers to insert
+# environment variables-- in particular, to allow the use of an
+# alternate set of IPS or other bits-- into the brand hooks.
+#
+# End users should not modify this file.
+#
+
+# export PKGPROTO=/path/to/proto_area
+# mach=$(uname -p)
+# export PATH=$PKGPROTO/root_$mach/usr/bin:$PATH
+# export PYTHONPATH=$PKGPROTO/root_$mach/usr/lib/python2.6/vendor-packages/
+# unset mach
+
--- a/src/brand/fmri_compare.py	Fri May 06 17:24:48 2011 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-#!/usr/bin/python2.6
-#
-# CDDL HEADER START
-#
-# The contents of this file are subject to the terms of the
-# Common Development and Distribution License (the "License").
-# You may not use this file except in compliance with the License.
-#
-# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
-# or http://www.opensolaris.org/os/licensing.
-# See the License for the specific language governing permissions
-# and limitations under the License.
-#
-# When distributing Covered Code, include this CDDL HEADER in each
-# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
-# If applicable, add the following below this CDDL HEADER, with the
-# fields enclosed by brackets "[]" replaced with your own identifying
-# information: Portions Copyright [yyyy] [name of copyright owner]
-#
-# CDDL HEADER END
-#
-#
-# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
-#
-
-import pkg.fmri
-import sys
-
-def usage():
-        print >> sys.stderr, "usage: %s <fmri1> <fmri2>" % sys.argv[0]
-        sys.exit(2)
-
-if len(sys.argv) != 3:
-        usage()
-
-try:
-        x = pkg.fmri.PkgFmri(sys.argv[1])
-        y = pkg.fmri.PkgFmri(sys.argv[2])
-except pkg.fmri.FmriError, e:
-        print >> sys.stderr, "error: %s" % str(e)
-        sys.exit(1)
-
-if not x.is_same_pkg(y):
-        print >> sys.stderr, \
-            "error: can only compare two versions of the same package."
-        sys.exit(1)
-
-if x < y:
-        print "<"
-elif x > y:
-        print ">"
-elif x == y:
-        print "="
-else:
-        print >> sys.stderr, "panic"
-        sys.exit(1)
-
-sys.exit(0)
--- a/src/brand/image_install	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/image_install	Sat May 07 00:25:10 2011 -0700
@@ -35,6 +35,9 @@
 
 . /usr/lib/brand/ipkg/common.ksh
 
+# Allows developer to override some things like PATH and PYTHONPATH
+. /usr/lib/brand/ipkg/developerenv.ksh
+
 m_usage=$(gettext "\n        install {-a archive|-d path} {-p|-u} [-s|-v]")
 install_log=$(gettext   "    Log File: %s")
 
--- a/src/brand/p2v	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/p2v	Sat May 07 00:25:10 2011 -0700
@@ -19,8 +19,9 @@
 #
 # CDDL HEADER END
 #
-# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
+
+#
+# Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 # NOTE: this script runs in the global zone and touches the non-global
@@ -38,6 +39,10 @@
 
 . /usr/lib/brand/ipkg/common.ksh
 
+# Allows developer to override some things like PATH and PYTHONPATH
+. /usr/lib/brand/ipkg/developerenv.ksh
+
+PKG=pkg
 LOGFILE=
 EXIT_CODE=1
 
@@ -266,7 +271,7 @@
 	for i in $(egrep -hv "^#" /usr/lib/brand/ipkg/pkgrm.lst \
 	    /etc/brand/ipkg/pkgrm.conf)
 	do
-		pkg info $i >/dev/null 2>&1
+		$PKG info $i >/dev/null 2>&1
 		if (( $? != 0 )); then
 			continue
 		fi
@@ -475,8 +480,8 @@
 
 # Change the pkging variant from global zone to non-global zone.
 log "$v_change_var"
-pkg -R $ZONEROOT change-variant variant.opensolaris.zone=nonglobal || \
-    fatal "$e_change_var"
+$PKG -R $ZONEROOT change-variant variant.opensolaris.zone=nonglobal || \
+    pkg_err_check "$e_change_var"
 # Set the property which tells the image to use the system publisher.
 pkg -R $ZONEROOT set-property use-system-repo true
 if [[ $? != 0 ]]; then
--- a/src/brand/pkgcreatezone	Fri May 06 17:24:48 2011 -0700
+++ b/src/brand/pkgcreatezone	Sat May 07 00:25:10 2011 -0700
@@ -35,9 +35,13 @@
 
 . /usr/lib/brand/ipkg/common.ksh
 
+# Allows developers to override some things like PATH and PYTHONPATH
+. /usr/lib/brand/ipkg/developerenv.ksh
+
 f_a_obs=$(gettext "-a publisher=uri option is obsolete.")
 f_pkg5_missing=$(gettext "pkg(5) does not seem to be present on this system.\n")
 f_img=$(gettext "failed to create image\n")
+f_imglink=$(gettext "failed to link image to global zone\n")
 f_pkg=$(gettext "failed to install package\n")
 f_interrupted=$(gettext "Installation cancelled due to interrupt.\n")
 f_bad_publisher=$(gettext "Syntax error in publisher information.")
@@ -68,13 +72,7 @@
 TEXTDOMAIN="SUNW_OST_OSCMD"
 export TEXTDOMAIN
 
-PKG=/usr/bin/pkg
-export PKG
-
-#
-# Just in case.  This should probably be removed later.
-#
-[[ ! -x $PKG ]] && fail_incomplete "$f_pkg5_missing"
+PKG=pkg
 
 unset install_archive
 unset source_dir
@@ -187,16 +185,33 @@
     --set-property use-system-repo=true \
     $ZONEROOT || fail_incomplete "$f_img"
 
+# Link this image to the parent image.
+printf "$m_image_link\n" $GZ_IMAGE
+LC_ALL=C $PKG attach-linked -q -f --no-refresh --no-index --linked-md-only \
+    -c zone:$ZONENAME $ZONEROOT || fail_incomplete "$f_imglink"
+
 # Change the value of PKG_IMAGE so that future PKG operation will work
 # on the newly created zone rather than the global zone
-
 PKG_IMAGE="$ZONEROOT"
 export PKG_IMAGE
 
+if [[ -f /var/pkg/pkg5.image && -d /var/pkg/publisher ]]; then
+	# respect PKG_CACHEROOT if the caller has it set.
+	[ -z "$PKG_CACHEROOT" ] && PKG_CACHEROOT=/var/pkg/publisher
+	export PKG_CACHEROOT
+	printf "$m_cache\n" $PKG_CACHEROOT
+fi
+
 printf "$m_core\n"
 pkglist=""
+
+#
+# 'entire' is essentially optional-- if you don't have it in your global
+# zone, you are probably an OS developer, and therefore you probably don't
+# want it in your non-global zone.  We follow the preference we find in
+# the global zone.
 if [[ -n $entire_fmri ]]; then
-	pkglist="$pkglist $entire_fmri"
+	pkglist="$pkglist pkg:///entire"
 fi
 
 pkglist="$pkglist
--- a/src/checkforupdates.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/checkforupdates.py	Sat May 07 00:25:10 2011 -0700
@@ -19,7 +19,7 @@
 #
 # CDDL HEADER END
 #
-# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 import getopt
@@ -121,11 +121,23 @@
                         self.__send_return(enumerations.UPDATES_UNDETERMINED)
                         return
                 try:
-                        plan_ret = \
-                            self.api_obj.plan_update_all(
-                            refresh_catalogs = True,
-                            noexecute = True, force = True)
-                        stuff_to_do = plan_ret[0]
+                        #
+                        # Since this program is intended to primarily be a
+                        # helper for the gui components, and since the gui
+                        # components are currently unaware of child images,
+                        # we'll limit the available update check we're about
+                        # to do to just the parent image.  If we didn't do
+                        # this we could end up in a situation where the parent
+                        # has no available updates, but a child image does,
+                        # and then the gui (which is unaware of children)
+                        # would show that no updates are available to the
+                        # parent.
+                        #
+                        for pd in self.api_obj.gen_plan_update(
+                            refresh_catalogs=True, noexecute=True,
+                            force=True, li_ignore=[]):
+                                continue
+                        stuff_to_do = not self.api_obj.planned_nothingtodo()
                 except api_errors.CatalogRefreshException, cre:
                         crerr = nongui_misc.get_catalogrefresh_exception_msg(cre)
                         if debug:
--- a/src/client.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/client.py	Sat May 07 00:25:10 2011 -0700
@@ -67,6 +67,7 @@
         import pkg.client.bootenv as bootenv
         import pkg.client.history as history
         import pkg.client.progress as progress
+        import pkg.client.linkedimage as li
         import pkg.client.publisher as publisher
         import pkg.fmri as fmri
         import pkg.misc as misc
@@ -81,29 +82,19 @@
             RESULT_FAILED_LOCKED, RESULT_FAILED_STORAGE,
             RESULT_FAILED_TRANSPORT, RESULT_FAILED_UNKNOWN,
             RESULT_FAILED_OUTOFMEMORY)
+        from pkg.client.pkgdefs import *
         from pkg.misc import EmptyI, msg, PipeError
 except KeyboardInterrupt:
         import sys
         sys.exit(1)
 
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 PKG_CLIENT_NAME = "pkg"
 
 JUST_UNKNOWN = 0
 JUST_LEFT = -1
 JUST_RIGHT = 1
 
-# pkg exit codes
-EXIT_OK      = 0
-EXIT_OOPS    = 1
-EXIT_BADOPT  = 2
-EXIT_PARTIAL = 3
-EXIT_NOP     = 4
-EXIT_NOTLIVE = 5
-EXIT_LICENSE = 6
-EXIT_LOCKED  = 7
-
-
 logger = global_settings.logger
 
 valid_special_attrs = ["action.hash", "action.key", "action.name", "action.raw"]
@@ -142,7 +133,7 @@
         # program name on all platforms.
         logger.error(ws + pkg_cmd + text_nows)
 
-def usage(usage_error=None, cmd=None, retcode=2, full=False):
+def usage(usage_error=None, cmd=None, retcode=EXIT_BADOPT, full=False):
         """Emit a usage message and optionally prefix it with a more
             specific error message.  Causes program to exit. """
 
@@ -174,13 +165,48 @@
         basic_usage["refresh"] = _("[--full] [publisher ...]")
         basic_usage["version"] = ""
 
-        advanced_cmds = ["info", "contents", "search", "", "verify", "fix",
-            "revert", "", "variant", "change-variant", "", "facet",
-            "change-facet", "", "avoid", "unavoid", "", "property",
-            "set-property", "add-property-value", "remove-property-value",
-            "unset-property", "", "publisher", "set-publisher",
-            "unset-publisher", "", "history", "purge-history", "",
-            "rebuild-index", "update-format", "image-create"]
+        advanced_cmds = [
+            "info",
+            "contents",
+            "search",
+            "",
+            "verify",
+            "fix",
+            "revert",
+            "",
+            "variant",
+            "change-variant",
+            "",
+            "facet",
+            "change-facet",
+            "",
+            "avoid",
+            "unavoid",
+            "",
+            "property",
+            "set-property",
+            "add-property-value",
+            "remove-property-value",
+            "unset-property",
+            "",
+            "publisher",
+            "set-publisher",
+            "unset-publisher",
+            "",
+            "history",
+            "purge-history",
+            "",
+            "rebuild-index",
+            "update-format",
+            "image-create",
+            "",
+            "attach-linked",
+            "detach-linked",
+            "list-linked",
+            "audit-linked",
+            "sync-linked",
+            "property-linked",
+        ]
 
         adv_usage["info"] = \
             _("[-lr] [-g path_or_uri ...] [--license] [pkg_fmri_pattern ...]")
@@ -250,6 +276,26 @@
         adv_usage["rebuild-index"] = ""
         adv_usage["update-format"] = ""
 
+        adv_usage["list-linked"] = _("-H")
+        adv_usage["attach-linked"] = _(
+            "[-fnvq] [--accept] [--licenses] [--no-index] [--no-refresh]\n"
+            "            [--no-pkg-updates] [--linked-md-only]\n"
+            "            [--allow-relink]\n"
+            "            [--prop-linked <propname>=<propvalue> ...]\n"
+            "            (-c|-p) <li-name> <dir>")
+        adv_usage["detach-linked"] = _(
+            "[-fnvq] [-a|-l <li-name>] [--linked-md-only]")
+        adv_usage["property-linked"] = _("[-H] [-l <li-name>] [propname ...]")
+        adv_usage["audit-linked"] = _("[-a|-l <li-name>]")
+        adv_usage["sync-linked"] = _(
+            "[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]\n"
+            "            [--no-parent-sync] [--no-pkg-updates]\n"
+            "            [--linked-md-only] [-a|-l <name>]")
+        adv_usage["set-property-linked"] = _(
+            "[-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]\n"
+            "            [--no-parent-sync] [--no-pkg-updates]\n"
+            "            [--linked-md-only] <propname>=<propvalue> ...")
+
         def print_cmds(cmd_list, cmd_dic):
                 for cmd in cmd_list:
                         if cmd is "":
@@ -301,12 +347,12 @@
         PKG_IMAGE"""))
         sys.exit(retcode)
 
-def get_fmri_args(api_inst, args, cmd=None):
+def get_fmri_args(api_inst, pargs, cmd=None):
         """ Convenience routine to check that input args are valid fmris. """
 
         res = []
         errors = []
-        for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(args):
+        for pat, err, pfmri, matcher in api_inst.parse_fmri_patterns(pargs):
                 if not err:
                         res.append((pat, err, pfmri, matcher))
                         continue
@@ -323,77 +369,26 @@
                 error("\n".join(str(e) for e in errors), cmd=cmd)
         return len(errors) == 0, res
 
-def list_inventory(api_inst, args):
+def list_inventory(op, api_inst, pargs,
+    li_parent_sync, list_all, list_installed_newest, list_newest,
+    list_upgradable, omit_headers, origins, refresh_catalogs, summary,
+    verbose):
         """List packages."""
 
-        opts, pargs = getopt.getopt(args, "Hafg:nsuv", ["no-refresh"])
-
-        display_headers = True
-        origins = set()
-        refresh_catalogs = True
-        pkg_list = api.ImageInterface.LIST_INSTALLED
-        summary = False
-        verbose = 0
+        api_inst.progresstracker = get_tracker(quiet=omit_headers)
+
         variants = False
-
-        ltypes = set()
-        for opt, arg in opts:
-                if opt == "-H":
-                        display_headers = False
-                elif opt == "-a":
-                        ltypes.add(opt)
-                elif opt == "-f":
-                        ltypes.add(opt)
-                        variants = True
-                elif opt == "-g":
-                        origins.add(misc.parse_uri(arg, cwd=orig_cwd))
-                elif opt == "-n":
-                        ltypes.add(opt)
-                elif opt == "-s":
-                        summary = True
-                elif opt == "-u":
-                        ltypes.add(opt)
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "--no-refresh":
-                        refresh_catalogs = False
-
-        allowed = [
-            ("-a", ("-f", "-s", "-v")),
-            ("-u", ("-s", "-v")),
-            ("-n", ("-s", "-v")),
-        ]
-
-        if origins and "-n" not in ltypes:
-                # Use of -g implies -a unless -n is provided.
-                ltypes.add("-a")
-
-        if "-f" in ltypes and "-a" not in ltypes:
-                usage(_("-f may only be used in combination with -a"),
-                    cmd="list")
-
-        if "-f" in ltypes:
+        pkg_list = api.ImageInterface.LIST_INSTALLED
+        if list_all:
+                variants = True
                 pkg_list = api.ImageInterface.LIST_ALL
-        elif "-a" in ltypes:
+        elif list_installed_newest:
                 pkg_list = api.ImageInterface.LIST_INSTALLED_NEWEST
-        elif "-n" in ltypes:
+        elif list_newest:
                 pkg_list = api.ImageInterface.LIST_NEWEST
-        elif "-u" in ltypes:
+        elif list_upgradable:
                 pkg_list = api.ImageInterface.LIST_UPGRADABLE
 
-        for ltype, permitted in allowed:
-                if ltype in ltypes:
-                        ltypes.discard(ltype)
-                        diff = ltypes.difference(permitted)
-                        if not diff:
-                                # Only allowed options used.
-                                continue
-                        usage(_("%(opts)s may not be used with %(opt)s") % {
-                            "opts": ", ".join(diff), "opt": ltype })
-
-        if summary and verbose:
-                usage(_("-s and -v may not be combined"), cmd="list")
-
         if verbose:
                 fmt_str = "%-76s %s"
         elif summary:
@@ -401,17 +396,15 @@
         else:
                 fmt_str = "%-55s %-20s %s"
 
-        api_inst.progresstracker = get_tracker(quiet=not display_headers)
-
         # Each pattern in pats can be a partial or full FMRI, so
         # extract the individual components.  These patterns are
         # transformed here so that partial failure can be detected
         # when more than one pattern is provided.
-        rval, res = get_fmri_args(api_inst, pargs, cmd="list")
+        rval, res = get_fmri_args(api_inst, pargs, cmd=op)
         if not rval:
                 return EXIT_OOPS
 
-        api_inst.log_operation_start("list")
+        api_inst.log_operation_start(op)
         if pkg_list != api_inst.LIST_INSTALLED and refresh_catalogs:
                 # If the user requested packages other than those
                 # installed, ensure that a refresh is performed if
@@ -459,7 +452,7 @@
                     raise_unmatched=True, repos=origins, variants=variants)
                 for pt, summ, cats, states in res:
                         found = True
-                        if display_headers:
+                        if not omit_headers:
                                 if verbose:
                                         msg(fmt_str %
                                             ("FMRI", "IFO"))
@@ -471,7 +464,7 @@
                                         msg(fmt_str %
                                             ("NAME (PUBLISHER)",
                                             "VERSION", "IFO"))
-                                display_headers = False
+                                omit_headers = True
 
                         status = ""
                         for sentry in state_map:
@@ -536,7 +529,7 @@
         except (api_errors.InvalidPackageErrors,
             api_errors.ActionExecutionError,
             api_errors.PermissionsException), e:
-                error(e, cmd="list")
+                error(e, cmd=op)
                 return EXIT_OOPS
         except api_errors.InventoryException, e:
                 if e.illegal:
@@ -554,20 +547,20 @@
                 if pkg_list == api.ImageInterface.LIST_ALL or \
                     pkg_list == api.ImageInterface.LIST_NEWEST:
                         error(_("no packages matching '%s' known") % \
-                            ", ".join(e.notfound), cmd="list")
+                            ", ".join(e.notfound), cmd=op)
                 elif pkg_list == api.ImageInterface.LIST_INSTALLED_NEWEST:
                         error(_("no packages matching '%s' allowed by "
                             "installed incorporations or image variants that "
                             "are known or installed") % \
-                            ", ".join(e.notfound), cmd="list")
+                            ", ".join(e.notfound), cmd=op)
                         logger.error("Use -af to allow all versions.")
                 elif pkg_list == api.ImageInterface.LIST_UPGRADABLE:
                         error(_("no packages matching '%s' are installed "
                             "and have newer versions available") % \
-                            ", ".join(e.notfound), cmd="list")
+                            ", ".join(e.notfound), cmd=op)
                 else:
                         error(_("no packages matching '%s' installed") % \
-                            ", ".join(e.notfound), cmd="list")
+                            ", ".join(e.notfound), cmd=op)
 
                 if found and e.notfound:
                         # Only some patterns matched.
@@ -576,15 +569,17 @@
                 api_inst.log_operation_end(result=history.RESULT_NOTHING_TO_DO)
                 return EXIT_OOPS
 
-def get_tracker(quiet=False):
+def get_tracker(quiet=False, verbose=0):
         if quiet:
                 progresstracker = progress.QuietProgressTracker()
         else:
                 try:
                         progresstracker = \
-                            progress.FancyUNIXProgressTracker()
+                            progress.FancyUNIXProgressTracker(
+                                quiet=quiet, verbose=verbose)
                 except progress.ProgressTrackerException:
-                        progresstracker = progress.CommandLineProgressTracker()
+                        progresstracker = progress.CommandLineProgressTracker(
+                            quiet=quiet, verbose=verbose)
         return progresstracker
 
 def fix_image(api_inst, args):
@@ -764,7 +759,7 @@
         any_errors = False
         processed = False
         notfound = EmptyI
-        progresstracker = get_tracker(quiet)
+        progresstracker = get_tracker(quiet, verbose)
         try:
                 res = api_inst.get_pkg_list(api.ImageInterface.LIST_INSTALLED,
                     patterns=pargs, raise_unmatched=True, return_fmris=True)
@@ -870,7 +865,7 @@
 display_plan_options = ["basic", "fmris", "variants/facets", "services",
     "actions", "boot-archive"]
 
-def display_plan(api_inst, verbose):
+def __display_plan(api_inst, verbose):
         """Helper function to display plan to the desired degree.
         Verbose can either be a numerical value, or a list of
         items to display"""
@@ -977,32 +972,62 @@
                 for a in plan.get_actions():
                         logger.info("  %s" % a)
 
-def display_plan_licenses(api_inst, show_all=False):
+def display_plan_licenses(api_inst, show_all=False, show_req=True):
         """Helper function to display licenses for the current plan.
 
         'show_all' is an optional boolean value indicating whether all licenses
         should be displayed or only those that have must-display=true."""
 
         plan = api_inst.describe()
-
         for pfmri, src, dest, accepted, displayed in plan.get_licenses():
                 if not show_all and not dest.must_display:
                         continue
-                elif not show_all and dest.must_display and displayed:
+
+                if not show_all and dest.must_display and displayed:
                         # License already displayed, so doesn't need to be
                         # displayed again.
                         continue
 
                 lic = dest.license
-                logger.info("-" * 60)
-                logger.info(_("Package: %s") % pfmri)
-                logger.info(_("License: %s\n") % lic)
-                logger.info(dest.get_text())
-                logger.info("\n")
+                if show_req:
+                        logger.info("-" * 60)
+                        logger.info(_("Package: %s") % pfmri)
+                        logger.info(_("License: %s\n") % lic)
+                        logger.info(dest.get_text())
+                        logger.info("\n")
 
                 # Mark license as having been displayed.
                 api_inst.set_plan_license_status(pfmri, lic, displayed=True)
 
+def display_plan(api_inst, noexecute, op, quiet, show_licenses,
+    stage, verbose):
+
+        plan = api_inst.describe()
+        if not plan:
+                return
+
+        if stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN]:
+                # we should have displayed licenses earlier so mark all
+                # licenses as having been displayed.
+                display_plan_licenses(api_inst, show_req=False)
+                return
+
+        if api_inst.planned_nothingtodo(li_ignore_all=True):
+                # nothing todo
+                if op == PKG_OP_UPDATE:
+                        s = _("No updates available for this image.")
+                else:
+                        s = _("No updates necessary for this image.")
+                if api_inst.ischild():
+                        s += " (%s)" % api_inst.get_linked_name()
+                msg(s)
+                return
+
+        display_plan_licenses(api_inst, show_all=show_licenses)
+
+        if not quiet:
+                __display_plan(api_inst, verbose)
+
 def __api_prepare(operation, api_inst, accept=False):
         # Exceptions which happen here are printed in the above level, with
         # or without some extra decoration done here.
@@ -1064,6 +1089,10 @@
                 # be printed on the same line as the spinner.
                 error("\n" + str(e))
                 rval = EXIT_OOPS
+        except (api_errors.LinkedImageException), e:
+                error(_("%s failed (linked image exception(s)):\n%s") %
+                      (operation, str(e)))
+                rval = e.lix_exitrv
         except api_errors.ImageUpdateOnLiveImageException:
                 error(_("%s cannot be done on live image") % operation)
                 rval = EXIT_NOTLIVE
@@ -1129,7 +1158,7 @@
 
         return rval
 
-def __api_alloc(imgdir, exact_match, pkg_image_used, quiet):
+def __api_alloc(imgdir, exact_match, pkg_image_used, quiet, runid=-1):
         progresstracker = get_tracker(quiet)
 
         def qv(val):
@@ -1142,7 +1171,7 @@
         try:
                 return api.ImageInterface(imgdir, CLIENT_API_VERSION,
                     progresstracker, None, PKG_CLIENT_NAME,
-                    exact_match=exact_match)
+                    exact_match=exact_match, runid=runid)
         except api_errors.ImageLocationAmbiguous, e:
                 # This should only be raised if exact_match is False.
                 assert exact_match is False
@@ -1181,7 +1210,7 @@
                 format_update_error(e)
                 return
 
-def __api_plan_exception(op, api_inst, noexecute, verbose):
+def __api_plan_exception(op, noexecute, verbose, api_inst):
         e_type, e, e_traceback = sys.exc_info()
 
         if e_type == api_errors.ImageNotFoundException:
@@ -1191,6 +1220,10 @@
                 error("\n" + _("%s failed (inventory exception):\n%s") % (op,
                     e))
                 return EXIT_OOPS
+        if isinstance(e, api_errors.LinkedImageException):
+                error(_("%s failed (linked image exception(s)):\n%s") %
+                      (op, str(e)))
+                return e.lix_exitrv
         if e_type == api_errors.IpkgOutOfDateException:
                 msg(_("""\
 WARNING: pkg(5) appears to be out of date, and should be updated before
@@ -1211,10 +1244,14 @@
                 if noexecute:
                         return EXIT_OK
                 return EXIT_OOPS
+        if e_type == api_errors.ConflictingActionErrors:
+                error("\n" + str(e), cmd=op)
+                if verbose:
+                        __display_plan(api_inst, verbose)
+                return EXIT_OOPS
         if e_type in (api_errors.InvalidPlanError,
             api_errors.ReadOnlyFileSystemException,
             api_errors.ActionExecutionError,
-            api_errors.ConflictingActionErrors,
             api_errors.InvalidPackageErrors):
                 error("\n" + str(e), cmd=op)
                 return EXIT_OOPS
@@ -1267,46 +1304,570 @@
         raise
         # NOTREACHED
 
-def change_variant(api_inst, args):
+def __api_op(_op, _api_inst, _accept=False, _li_ignore=None, _noexecute=False,
+    _origins=None, _quiet=False, _review_release_notes=False,
+    _show_licenses=False, _stage=API_STAGE_DEFAULT, _verbose=0, **kwargs):
+        """Do something that involves the api.
+
+        Arguments prefixed with '_' are primarily used within this
+        function.  All other arguments must be specified via keyword
+        assignment and will be passed directly on to the api
+        interfaces being invoked."""
+
+        # massage arguments
+        if type(_li_ignore) == list:
+                # parse any linked image names specified on the command line
+                _li_ignore = _api_inst.parse_linked_name_list(_li_ignore)
+
+        # All the api interface functions that we invoke have some
+        # common arguments.  Set those up now.
+        kwargs["accept"] = _accept
+        kwargs["li_ignore"] = _li_ignore
+        kwargs["noexecute"] = _noexecute
+        if _origins != None:
+                kwargs["repos"] = _origins
+
+        # display plan debugging information
+        if _verbose > 2:
+                DebugValues.set_value("plan", "True")
+
+        # plan the requested operation
+        stuff_to_do = None
+
+        if _op == PKG_OP_ATTACH:
+                api_plan_func = _api_inst.gen_plan_attach
+        elif _op in [PKG_OP_CHANGE_FACET, PKG_OP_CHANGE_VARIANT]:
+                api_plan_func = _api_inst.gen_plan_change_varcets
+        elif _op == PKG_OP_DETACH:
+                api_plan_func = _api_inst.gen_plan_detach
+        elif _op == PKG_OP_INSTALL:
+                api_plan_func = _api_inst.gen_plan_install
+        elif _op == PKG_OP_SYNC:
+                api_plan_func = _api_inst.gen_plan_sync
+        elif _op == PKG_OP_UNINSTALL:
+                api_plan_func = _api_inst.gen_plan_uninstall
+        elif _op == PKG_OP_UPDATE:
+                api_plan_func = _api_inst.gen_plan_update
+        else:
+                raise RuntimeError("__api_op() invalid op: %s" % _op)
+
+        first_plan = True
+        plan_displayed = False
+        try:
+                for pd in api_plan_func(**kwargs):
+                        if not first_plan:
+                                #
+                                # we don't display anything for child images
+                                # since they currently do their own display
+                                # work.
+                                #
+                                continue
+
+                        # the first plan description is always for ourself.
+                        first_plan = False
+                        display_plan(_api_inst, _noexecute, _op, _quiet,
+                            _show_licenses, _stage, _verbose)
+                        plan_displayed = True
+        except:
+                rv = __api_plan_exception(_op, _noexecute, _verbose, _api_inst)
+                if rv != EXIT_OK:
+                        return rv
+
+        if not plan_displayed:
+                display_plan(_api_inst, _noexecute, _op, _quiet,
+                    _show_licenses, _stage, _verbose)
+
+        stuff_to_do = not _api_inst.planned_nothingtodo()
+        if not stuff_to_do:
+                return EXIT_NOP
+
+        if _noexecute or _stage in [API_STAGE_PUBCHECK, API_STAGE_PLAN]:
+                return EXIT_OK
+
+        # Exceptions which happen here are printed in the above level,
+        # with or without some extra decoration done here.
+        ret_code = __api_prepare(_op, _api_inst, accept=_accept)
+        if ret_code != EXIT_OK:
+                return ret_code
+
+        if _stage == API_STAGE_PREPARE:
+                return EXIT_OK
+
+        ret_code = __api_execute_plan(_op, _api_inst)
+        if _review_release_notes and ret_code == 0 and \
+            _stage == API_STAGE_DEFAULT and _api_inst.solaris_image():
+                msg("\n" + "-" * 75)
+                msg(_("NOTE: Please review release notes posted at:\n" ))
+                msg(misc.get_release_notes_url())
+                msg("-" * 75 + "\n")
+
+        return ret_code
+
+def opts_err_opt1_req_opt2(opt1, opt2, op):
+        msg = _("%(opt1)s may only be used in combination with %(opt2)s") % \
+            {"opt1": opt1, "opt2": opt2}
+        usage(msg, cmd=op)
+
+def opts_err_incompat(opt1, opt2, op):
+        msg = _("the %(opt1)s and %(opt2)s options may not be combined") % \
+            {"opt1": opt1, "opt2": opt2}
+        usage(msg, cmd=op)
+
+def opts_err_repeated(opt1, op):
+        msg = _("option '%s' repeated") % (opt1)
+        usage(msg, cmd=op)
+
+def opts_table_cb_beopts(op, api_inst, opts, opts_new):
+
+        # synthesize require_new_be and deny_new_be into new_be
+        del opts_new["require_new_be"]
+        del opts_new["deny_new_be"]
+        opts_new["new_be"] = None
+
+        if opts["require_new_be"] and opts["deny_new_be"]:
+                opts_err_incompat("--require-new-be", "--deny-new-be", op)
+
+        # create a new key called "new_be" in the options array
+        if opts["require_new_be"]:
+                opts_new["new_be"] = True
+        if opts["deny_new_be"]:
+                opts_new["new_be"] = False
+
+def opts_table_cb_li_ignore(op, api_inst, opts, opts_new):
+
+        # synthesize li_ignore_all and li_ignore_list into li_ignore
+        del opts_new["li_ignore_all"]
+        del opts_new["li_ignore_list"]
+        opts_new["li_ignore"] = None
+
+        # check if there's nothing to ignore
+        if not opts["li_ignore_all"] and not opts["li_ignore_list"]:
+                return
+
+        if opts["li_ignore_all"]:
+
+                # can't ignore all and specific images
+                if opts["li_ignore_list"]:
+                        opts_err_incompat("-I", "-i", op)
+
+                # can't ignore all and target anything.
+                if "li_target_all" in opts and opts["li_target_all"]:
+                        opts_err_incompat("-I", "-a", op)
+                if "li_target_list" in opts and opts["li_target_list"]:
+                        opts_err_incompat("-I", "-l", op)
+                if "li_name" in opts and opts["li_name"]:
+                        opts_err_incompat("-I", "-l", op)
+
+                opts_new["li_ignore"] = []
+                return
+
+        assert opts["li_ignore_list"]
+
+        # it doesn't make sense to specify images to ignore if the
+        # user is already specifying images to operate on.
+        if "li_target_all" in opts and opts["li_target_all"]:
+                opts_err_incompat("-i", "-a", op)
+        if "li_target_list" in opts and opts["li_target_list"]:
+                opts_err_incompat("-i", "-l", op)
+        if "li_name" in opts and opts["li_name"]:
+                opts_err_incompat("-i", "-l", op)
+
+        li_ignore = []
+        for li_name in opts["li_ignore_list"]:
+                # check for repeats
+                if li_name in li_ignore:
+                        opts_err_repeated("-i %s" % (li_name), op)
+                # add to ignore list
+                li_ignore.append(li_name)
+
+        opts_new["li_ignore"] = li_ignore
+
+def opts_table_cb_li_no_psync(op, api_inst, opts, opts_new):
+        # if a target child linked image was specified, the no-parent-sync
+        # option doesn't make sense since we know that both the parent and
+        # child image are accessible
+
+        if "li_target_all" not in opts:
+                # we don't accept linked image target options
+                assert "li_target_list" not in opts
+                return
+
+        if opts["li_target_all"] and not opts["li_parent_sync"]:
+                opts_err_incompat("-a", "--no-parent-sync", op)
+        if opts["li_target_list"] and not opts["li_parent_sync"]:
+                opts_err_incompat("-l", "--no-parent-sync", op)
+
+def opts_table_cb_li_props(op, api_inst, opts, opts_new):
+        """convert linked image prop list into a dictionary"""
+
+        opts_new["li_props"] = __parse_linked_props(opts["li_props"], op)
+
+def opts_table_cb_li_target(op, api_inst, opts, opts_new):
+        # figure out which option the user specified
+        if opts["li_target_all"] and opts["li_target_list"]:
+                opts_err_incompat("-a", "-l", op)
+        elif opts["li_target_all"]:
+                arg1 = "-a"
+        elif opts["li_target_list"]:
+                arg1 = "-l"
+        else:
+                return
+
+        if "be_activate" in opts and not opts["be_activate"]:
+                opts_err_incompat(arg1, "--no-be-activate", op)
+        if "be_name" in opts and opts["be_name"]:
+                opts_err_incompat(arg1, "--be-name", op)
+        if "deny_new_be" in opts and opts["deny_new_be"]:
+                opts_err_incompat(arg1, "--deny-new-be", op)
+        if "require_new_be" in opts and opts["require_new_be"]:
+                opts_err_incompat(arg1, "--require-new-be", op)
+        if "reject_pats" in opts and opts["reject_pats"]:
+                opts_err_incompat(arg1, "--require", op)
+        if "origins" in opts and opts["origins"]:
+                opts_err_incompat(arg1, "-g", op)
+
+        # validate linked image name
+        li_target_list = []
+        for li_name in opts["li_target_list"]:
+                # check for repeats
+                if li_name in li_target_list:
+                        opts_err_repeated("-l %s" % (li_name), op)
+                # add to target list
+                li_target_list.append(li_name)
+
+        opts_new["li_target_list"] = li_target_list
+
+def opts_table_cb_li_target1(op, api_inst, opts, opts_new):
+        # figure out which option the user specified
+        if opts["li_name"]:
+                arg1 = "-l"
+        else:
+                return
+
+        if "be_activate" in opts and not opts["be_activate"]:
+                opts_err_incompat(arg1, "--no-be-activate", op)
+        if "be_name" in opts and opts["be_name"]:
+                opts_err_incompat(arg1, "--be-name", op)
+        if "deny_new_be" in opts and opts["deny_new_be"]:
+                opts_err_incompat(arg1, "--deny-new-be", op)
+        if "require_new_be" in opts and opts["require_new_be"]:
+                opts_err_incompat(arg1, "--require-new-be", op)
+        if "reject_pats" in opts and opts["reject_pats"]:
+                opts_err_incompat(arg1, "--require", op)
+        if "origins" in opts and opts["origins"]:
+                opts_err_incompat(arg1, "-g", op)
+
+def opts_table_cb_no_headers_vs_quiet(op, api_inst, opts, opts_new):
+        # check if we accept the -q option
+        if "quiet" not in opts:
+                return
+
+        # -q implies -H
+        if opts["quiet"]:
+                opts_new["omit_headers"] = True
+
+def opts_table_cb_nqv(op, api_inst, opts, opts_new):
+        if opts["verbose"] and opts["quiet"]:
+                opts_err_incompat("-v", "-q", op)
+
+def opts_table_cb_origins(op, api_inst, opts, opts_new):
+        origins = set()
+        for o in opts["origins"]:
+                origins.add(misc.parse_uri(o, cwd=orig_cwd))
+        opts_new["origins"] = origins
+
+def opts_table_cb_stage(op, api_inst, opts, opts_new):
+        if opts["stage"] == None:
+                opts_new["stage"] = API_STAGE_DEFAULT
+                return
+
+        if opts_new["stage"] not in api_stage_values:
+                usage(_("invalid operation stage: '%s'") % opts["stage"],
+                    cmd=op)
+
+def opts_cb_li_attach(op, api_inst, opts, opts_new):
+        if opts["attach_parent"] and opts["attach_child"]:
+                opts_err_incompat("-c", "-p", op)
+
+        if not opts["attach_parent"] and not opts["attach_child"]:
+                usage(_("either -c or -p must be specified"), cmd=op)
+
+        if opts["attach_child"]:
+                # if we're attaching a new child then that doesn't affect
+                # any other children, so ignoring them doesn't make sense.
+                if opts["li_ignore_all"]:
+                        opts_err_incompat("-c", "-I", op)
+                if opts["li_ignore_list"]:
+                        opts_err_incompat("-c", "-i", op)
+
+def opts_table_cb_md_only(op, api_inst, opts, opts_new):
+        # if the user didn't specify linked-md-only we're done
+        if not opts["li_md_only"]:
+                return
+
+        # li_md_only implies no li_pkg_updates
+        if "li_pkg_updates" in opts:
+                opts_new["li_pkg_updates"] = False
+
+        #
+        # if li_md_only is true that means we're not updating any packages
+        # within the current image so there are a ton of options that no
+        # longer apply to the current operation, and hence are incompatible
+        # with li_md_only.
+        #
+        arg1 = "--linked-md-only"
+        if "be_name" in opts and opts["be_name"]:
+                opts_err_incompat(arg1, "--be-name", op)
+        if "deny_new_be" in opts and opts["deny_new_be"]:
+                opts_err_incompat(arg1, "--deny-new-be", op)
+        if "require_new_be" in opts and opts["require_new_be"]:
+                opts_err_incompat(arg1, "--require-new-be", op)
+        if "li_parent_sync" in opts and not opts["li_parent_sync"]:
+                opts_err_incompat(arg1, "--no-parent-sync", op)
+
+def opts_cb_list(op, api_inst, opts, opts_new):
+        if opts_new["origins"] and not opts_new["list_newest"]:
+                # Use of -g implies -a unless -n is provided.
+                opts_new["list_installed_newest"] = True
+
+        if opts_new["list_all"] and not opts_new["list_installed_newest"]:
+                opts_err_opt1_req_opt2("-f", "-a", op)
+
+        if opts_new["list_installed_newest"] and opts_new["list_newest"]:
+                opts_err_incompat("-a", "-n", op)
+
+        if opts_new["list_installed_newest"] and opts_new["list_upgradable"]:
+                opts_err_incompat("-a", "-u", op)
+
+        if opts_new["summary"] and opts_new["verbose"]:
+                opts_err_incompat("-s", "-v", op)
+
+#
+# options common to multiple pkg(1) subcommands.  The format for specifying
+# options is a list which can contain:
+#
+# - Function pointers which define callbacks that are invoked after all
+#   options (aside from extra pargs) have been parsed.  These callbacks can
#   verify the contents and combinations of different options.
+#
+# - Tuples formatted as:
+#       (s, l, k, v)
+#   where the values are:
+#       s: a short option, ex: -f
+#       l: a long option, ex: --foo
+#       k: the key value for the options dictionary
+#       v: the default value. valid values are: True/False, None, [], 0
+#
+opts_table_beopts = [
+    opts_table_cb_beopts,
+    ("",  "be-name=",        "be_name",              None),
+    ("",  "deny-new-be",     "deny_new_be",          False),
+    ("",  "no-be-activate",  "be_activate",          True),
+    ("",  "require-new-be",  "require_new_be",       False),
+]
+
+opts_table_force = [
+    ("f", "",                "force",                False),
+]
+
+opts_table_li_ignore = [
+    opts_table_cb_li_ignore,
+    ("I", "",                "li_ignore_all",        False),
+    ("i", "",                "li_ignore_list",       []),
+]
+
+opts_table_li_md_only = [
+    opts_table_cb_md_only,
+    ("",  "linked-md-only",    "li_md_only",         False),
+]
+
+opts_table_li_no_pkg_updates = [
+    ("",  "no-pkg-updates",  "li_pkg_updates",       True),
+]
+
+opts_table_li_no_psync = [
+    opts_table_cb_li_no_psync,
+    ("",  "no-parent-sync",  "li_parent_sync",       True),
+]
+
+opts_table_li_props = [
+    opts_table_cb_li_props,
+    ("", "prop-linked",      "li_props",             []),
+]
+
+opts_table_li_target = [
+    opts_table_cb_li_target,
+    ("a", "",                "li_target_all",        False),
+    ("l", "",                "li_target_list",       []),
+]
+
+opts_table_li_target1 = [
+    opts_table_cb_li_target1,
+    ("l", "",                "li_name",              None),
+]
+
+opts_table_licenses = [
+    ("",  "accept",          "accept",               False),
+    ("",  "licenses",        "show_licenses",        False),
+]
+
+opts_table_no_headers = [
+    opts_table_cb_no_headers_vs_quiet,
+    ("H", "",                "omit_headers",         False),
+]
+
+opts_table_no_index = [
+    ("",  "no-index",        "update_index",         True),
+]
+
+opts_table_no_refresh = [
+    ("",  "no-refresh",      "refresh_catalogs",     True),
+]
+
+opts_table_reject = [
+    ("", "reject=",          "reject_pats",          []),
+]
+
+opts_table_verbose = [
+    ("v", "",                "verbose",              0),
+]
+
+opts_table_quiet = [
+    ("q", "",                "quiet",                False),
+]
+
+opts_table_nqv = \
+    opts_table_quiet + \
+    opts_table_verbose + \
+    [
+    opts_table_cb_nqv,
+    ("n", "",                "noexecute",            False),
+]
+
+opts_table_origins = [
+    opts_table_cb_origins,
+    ("g", "",                "origins",              []),
+]
+
+opts_table_stage = [
+    opts_table_cb_stage,
+    ("",  "stage",           "stage",                None),
+]
+
+#
+# Options for pkg(1) subcommands.  Built by combining the option tables above,
+# with some optional subcommand unique options defined below.
+#
+opts_install = \
+    opts_table_beopts + \
+    opts_table_li_ignore + \
+    opts_table_li_no_psync + \
+    opts_table_licenses + \
+    opts_table_reject + \
+    opts_table_no_index + \
+    opts_table_no_refresh + \
+    opts_table_nqv + \
+    opts_table_origins + \
+    []
+
+# "update" cmd inherits all "install" cmd options
+opts_update = \
+    opts_install + \
+    opts_table_force + \
+    opts_table_stage + \
+    []
+
+# "attach-linked" cmd inherits all "install" cmd options
+opts_attach_linked = \
+    opts_install + \
+    opts_table_force + \
+    opts_table_li_md_only + \
+    opts_table_li_no_pkg_updates + \
+    opts_table_li_props + \
+    [
+    opts_cb_li_attach,
+    ("",  "allow-relink",   "allow_relink",         False),
+    ("c", "",               "attach_child",         False),
+    ("p", "",               "attach_parent",        False),
+]
+
+# "set-property-linked" cmd inherits all "install" cmd options
+opts_set_property_linked = \
+    opts_install + \
+    opts_table_li_md_only + \
+    opts_table_li_no_pkg_updates + \
+    opts_table_li_target1 + \
+    []
+
+# "sync-linked" cmd inherits all "install" cmd options
+opts_sync_linked = \
+    opts_install + \
+    opts_table_li_md_only + \
+    opts_table_li_no_pkg_updates + \
+    opts_table_li_target + \
+    opts_table_stage + \
+    []
+
+opts_uninstall = \
+    opts_table_beopts + \
+    opts_table_li_ignore + \
+    opts_table_no_index + \
+    opts_table_nqv + \
+    opts_table_stage + \
+    [
+    ("r", "",               "recursive_removal",    False)
+]
+
+opts_audit_linked = \
+    opts_table_li_no_psync + \
+    opts_table_li_target + \
+    opts_table_no_headers + \
+    opts_table_quiet + \
+    []
+
+opts_detach_linked = \
+    opts_table_force + \
+    opts_table_li_target + \
+    opts_table_nqv + \
+    []
+
+opts_list_linked = \
+    opts_table_li_ignore + \
+    opts_table_no_headers + \
+    []
+
+opts_list_property_linked = \
+    opts_table_li_target1 + \
+    opts_table_no_headers + \
+    []
+
+opts_list_inventory = \
+    opts_table_li_no_psync + \
+    opts_table_no_refresh + \
+    opts_table_no_headers + \
+    opts_table_origins + \
+    opts_table_verbose + \
+    [
+    opts_cb_list,
+    ("a", "",               "list_installed_newest", False),
+    ("f", "",               "list_all",              False),
+    ("n", "",               "list_newest",           False),
+    ("s", "",               "summary",               False),
+    ("u", "",               "list_upgradable",       False),
+]
+
+def change_variant(op, api_inst, pargs,
+    accept, be_activate, be_name, li_ignore, li_parent_sync, new_be,
+    noexecute, origins, quiet, refresh_catalogs, reject_pats,
+    show_licenses, update_index, verbose):
         """Attempt to change a variant associated with an image, updating
         the image contents as necessary."""
 
-        op = "change-variant"
-        opts, pargs = getopt.getopt(args, "g:nvq", ["accept", "be-name=",
-            "deny-new-be", "licenses", "no-be-activate", "require-new-be"])
-
-        accept = quiet = noexecute = show_licenses = False
-        origins = set()
-        verbose = 0
-        be_activate = True
-        be_name = None
-        new_be = None
-        for opt, arg in opts:
-                if opt == "-g":
-                        origins.add(misc.parse_uri(arg, cwd=orig_cwd))
-                elif opt == "-n":
-                        noexecute = True
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "-q":
-                        quiet = True
-                elif opt == "--accept":
-                        accept = True
-                elif opt == "--be-name":
-                        be_name = arg
-                elif opt == "--licenses":
-                        show_licenses = True
-                elif opt == "--deny-new-be":
-                        new_be = False
-                elif opt == "--no-be-activate":
-                        be_activate = False
-                elif opt == "--require-new-be":
-                        new_be = True
-
-        if verbose and quiet:
-                usage(_("%s: -v and -q may not be combined") % op)
-        if verbose > 2:
-                DebugValues.set_value("plan", "True")
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op)
+        if not xrval:
+                return EXIT_OOPS
 
         if not pargs:
                 usage(_("%s: no variants specified") % op)
@@ -1329,79 +1890,26 @@
                             (op, name))
                 variants[name] = value
 
-        stuff_to_do = None
-        try:
-                stuff_to_do = api_inst.plan_change_varcets(variants,
-                    facets=None, noexecute=noexecute, be_name=be_name,
-                    new_be=new_be, repos=origins, be_activate=be_activate)
-        except:
-                ret_code = __api_plan_exception(op, api_inst, noexecute,
-                    verbose)
-                if ret_code != EXIT_OK:
-                        return ret_code
-
-        if not stuff_to_do:
-                if verbose:
-                        display_plan(api_inst, verbose)
-                msg(_("No updates necessary for this image."))
-                return EXIT_NOP
-
-        display_plan_licenses(api_inst, show_all=show_licenses)
-        if not quiet:
-                display_plan(api_inst, verbose)
-        if noexecute:
-                return EXIT_OK
-
-        # Exceptions which happen here are printed in the above level, with
-        # or without some extra decoration done here.
-        ret_code = __api_prepare("change-variant", api_inst, accept=accept)
-        if ret_code != EXIT_OK:
-                return ret_code
-
-        ret_code = __api_execute_plan("change-variant", api_inst)
-
-        return ret_code
-
-def change_facet(api_inst, args):
+        return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore,
+            _noexecute=noexecute, _origins=origins, _quiet=quiet,
+            _show_licenses=show_licenses, _verbose=verbose,
+            be_activate=be_activate, be_name=be_name,
+            li_parent_sync=li_parent_sync, new_be=new_be,
+            refresh_catalogs=refresh_catalogs, reject_list=reject_pats,
+            update_index=update_index, variants=variants)
+
+def change_facet(op, api_inst, pargs,
+    accept, be_activate, be_name, li_ignore, li_parent_sync, new_be,
+    noexecute, origins, quiet, refresh_catalogs, reject_pats,
+    show_licenses, update_index, verbose):
         """Attempt to change the facets as specified, updating
         image as necessary"""
 
-        op = "change-facet"
-        opts, pargs = getopt.getopt(args, "g:nvq", ["accept", "be-name=",
-            "deny-new-be", "licenses", "no-be-activate", "require-new-be"])
-
-        accept = quiet = noexecute = show_licenses = False
-        origins = set()
-        verbose = 0
-        be_activate = True
-        be_name = None
-        new_be = None
-        for opt, arg in opts:
-                if opt == "-g":
-                        origins.add(misc.parse_uri(arg, cwd=orig_cwd))
-                elif opt == "-n":
-                        noexecute = True
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "-q":
-                        quiet = True
-                elif opt == "--accept":
-                        accept = True
-                elif opt == "--be-name":
-                        be_name = arg
-                elif opt == "--licenses":
-                        show_licenses = True
-                elif opt == "--deny-new-be":
-                        new_be = False
-                elif opt == "--no-be-activate":
-                        be_activate = False
-                elif opt == "--require-new-be":
-                        new_be = True
-
-        if verbose and quiet:
-                usage(_("%s: -v and -q may not be combined") % op)
-        if verbose > 2:
-                DebugValues.set_value("plan", "True")
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op)
+        if not xrval:
+                return EXIT_OOPS
 
         if not pargs:
                 usage(_("%s: no facets specified") % op)
@@ -1437,97 +1945,27 @@
                 else:
                         facets[name] = v
 
-        api_inst.progresstracker = get_tracker(quiet)
-
-        stuff_to_do = None
-        try:
-                stuff_to_do = api_inst.plan_change_varcets(variants=None,
-                    facets=facets, noexecute=noexecute, be_name=be_name,
-                    new_be=new_be, repos=origins, be_activate=be_activate)
-        except:
-                ret_code = __api_plan_exception(op, api_inst, noexecute,
-                    verbose)
-                if ret_code != EXIT_OK:
-                        return ret_code
-
-        if not stuff_to_do:
-                if verbose:
-                        display_plan(api_inst, verbose)
-                msg(_("Facet change has no effect on image"))
-                return EXIT_NOP
-
-        display_plan_licenses(api_inst, show_all=show_licenses)
-        if not quiet:
-                display_plan(api_inst, verbose)
-        if noexecute:
-                return EXIT_OK
-
-        # Exceptions which happen here are printed in the above level, with
-        # or without some extra decoration done here.
-        ret_code = __api_prepare(op, api_inst, accept=accept)
-        if ret_code != EXIT_OK:
-                return ret_code
-
-        ret_code = __api_execute_plan(op, api_inst)
-
-        return ret_code
-
-def install(api_inst, args):
+        return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore,
+            _noexecute=noexecute, _origins=origins, _quiet=quiet,
+            _show_licenses=show_licenses, _verbose=verbose,
+            be_activate=be_activate, be_name=be_name,
+            li_parent_sync=li_parent_sync, new_be=new_be, facets=facets,
+            refresh_catalogs=refresh_catalogs, reject_list=reject_pats,
+            update_index=update_index)
+
+def install(op, api_inst, pargs,
+    accept, be_activate, be_name, li_ignore, li_parent_sync, new_be,
+    noexecute, origins, quiet, refresh_catalogs, reject_pats,
+    show_licenses, update_index, verbose):
+
         """Attempt to take package specified to INSTALLED state.  The operands
         are interpreted as glob patterns."""
 
-        op = "install"
-        opts, pargs = getopt.getopt(args, "g:nvq", ["accept", "licenses",
-            "no-be-activate", "no-refresh", "no-index", "deny-new-be",
-            "require-new-be", "be-name=", "reject="])
-
-        accept = quiet = noexecute = show_licenses = False
-        verbose = 0
-        origins = set()
-        refresh_catalogs = update_index = True
-        new_be = None
-        be_activate = True
-        be_name = None
-        reject_pats = []
-
-        for opt, arg in opts:
-                if opt == "-g":
-                        origins.add(misc.parse_uri(arg, cwd=orig_cwd))
-                elif opt == "-n":
-                        noexecute = True
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "-q":
-                        quiet = True
-                elif opt == "--accept":
-                        accept = True
-                elif opt == "--licenses":
-                        show_licenses = True
-                elif opt == "--no-be-activate":
-                        be_activate = False
-                elif opt == "--no-refresh":
-                        refresh_catalogs = False
-                elif opt == "--no-index":
-                        update_index = False
-                elif opt == "--deny-new-be":
-                        new_be = False
-                elif opt == "--require-new-be":
-                        new_be = True
-                elif opt == "--be-name":
-                        be_name = arg
-                elif opt == "--reject":
-                        reject_pats.append(arg)
+        api_inst.progresstracker = get_tracker(quiet, verbose)
 
         if not pargs:
                 usage(_("at least one package name required"), cmd=op)
 
-        if verbose and quiet:
-                usage(_("-v and -q may not be combined"), cmd=op)
-        if verbose > 2:
-                DebugValues.set_value("plan", "True")
-
-        api_inst.progresstracker = get_tracker(quiet)
-
         rval, res = get_fmri_args(api_inst, pargs, cmd=op)
         if not rval:
                 return EXIT_OOPS
@@ -1536,171 +1974,45 @@
         if not xrval:
                 return EXIT_OOPS
 
-        stuff_to_do = None
-        try:
-                stuff_to_do = api_inst.plan_install(pargs,
-                    refresh_catalogs, noexecute, update_index=update_index,
-                    be_name=be_name, new_be=new_be, reject_list=reject_pats,
-                    repos=origins, be_activate=be_activate)
-        except Exception, e:
-                ret_code = __api_plan_exception(op, api_inst, noexecute,
-                    verbose)
-
-                if isinstance(e, api_errors.ConflictingActionErrors) and verbose:
-                        display_plan(api_inst, verbose)
-
-                if ret_code != EXIT_OK:
-                        return ret_code
-
-        if not stuff_to_do:
-                if verbose:
-                        display_plan(api_inst, verbose)
-                msg(_("No updates necessary for this image."))
-                return EXIT_NOP
-
-        display_plan_licenses(api_inst, show_all=show_licenses)
-        if not quiet:
-                display_plan(api_inst, verbose)
-        if noexecute:
-                return EXIT_OK
-
-        # Exceptions which happen here are printed in the above level, with
-        # or without some extra decoration done here.
-        ret_code = __api_prepare(op, api_inst, accept=accept)
-        if ret_code != EXIT_OK:
-                return ret_code
-
-        ret_code = __api_execute_plan(op, api_inst)
-
-        return ret_code
-
-def uninstall(api_inst, args):
+        return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore,
+            _noexecute=noexecute, _origins=origins, _quiet=quiet,
+            _show_licenses=show_licenses, _verbose=verbose,
+            be_activate=be_activate, be_name=be_name,
+            li_parent_sync=li_parent_sync, new_be=new_be, pkgs_inst=pargs,
+            refresh_catalogs=refresh_catalogs, reject_list=reject_pats,
+            update_index=update_index)
+
+def uninstall(op, api_inst, pargs,
+    be_activate, be_name, new_be, li_ignore, update_index, noexecute,
+    quiet, verbose, stage, recursive_removal):
         """Attempt to take package specified to DELETED state."""
 
-        op = "uninstall"
-        opts, pargs = getopt.getopt(args, "nrvq", ["no-be-activate", "no-index",
-            "deny-new-be", "require-new-be", "be-name="])
-
-        quiet = noexecute = recursive_removal = False
-        verbose = 0
-        be_activate = update_index = True
-        be_name = None
-        new_be = None
-
-        for opt, arg in opts:
-                if opt == "-n":
-                        noexecute = True
-                elif opt == "-r":
-                        recursive_removal = True
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "-q":
-                        quiet = True
-                elif opt == "--no-be-activate":
-                        be_activate = False
-                elif opt == "--no-index":
-                        update_index = False
-                elif opt == "--deny-new-be":
-                        new_be = False
-                elif opt == "--require-new-be":
-                        new_be = True
-                elif opt == "--be-name":
-                        be_name = arg
+        api_inst.progresstracker = get_tracker(quiet, verbose)
 
         if not pargs:
                 usage(_("at least one package name required"), cmd=op)
 
         if verbose and quiet:
                 usage(_("-v and -q may not be combined"), cmd=op)
-        if verbose > 2:
-                DebugValues.set_value("plan", "True")
-
-        api_inst.progresstracker = get_tracker(quiet)
 
         rval, res = get_fmri_args(api_inst, pargs, cmd=op)
         if not rval:
                 return EXIT_OOPS
 
-        try:
-                if not api_inst.plan_uninstall(pargs, recursive_removal,
-                    noexecute, update_index=update_index, be_name=be_name,
-                    new_be=new_be, be_activate=be_activate):
-                        assert 0
-        except Exception, e:
-                ret_code = __api_plan_exception(op, api_inst, noexecute,
-                    verbose)
-                if isinstance(e, api_errors.ConflictingActionErrors) and verbose:
-                        display_plan(api_inst, verbose)
-                if ret_code != EXIT_OK:
-                        return ret_code
-
-        if not quiet:
-                display_plan(api_inst, verbose)
-        if noexecute:
-                return EXIT_OK
-
-        # Exceptions which happen here are printed in the above level, with
-        # or without some extra decoration done here.
-        ret_code = __api_prepare(op, api_inst)
-        if ret_code != EXIT_OK:
-                return ret_code
-
-        return __api_execute_plan(op, api_inst)
-
-def update(api_inst, args):
-        """Attempt to take specified installed packages to a different version,
-        or all installed packages to latest version if none are specified.
-        The operands are interpreted as glob patterns."""
-
-        op = "update"
-        opts, pargs = getopt.getopt(args, "fg:nvq", ["accept", "be-name=",
-            "reject=", "licenses", "no-be-activate", "no-refresh", "no-index",
-            "deny-new-be", "require-new-be"])
-
-        accept = force = quiet = noexecute = show_licenses = False
-        verbose = 0
-        be_activate = refresh_catalogs = update_index = True
-        be_name = None
-        new_be = None
-        origins = set()
-        reject_pats = []
-
-        for opt, arg in opts:
-                if opt == "-f":
-                        force = True
-                elif opt == "-g":
-                        origins.add(misc.parse_uri(arg, cwd=orig_cwd))
-                elif opt == "-n":
-                        noexecute = True
-                elif opt == "-v":
-                        verbose = verbose + 1
-                elif opt == "-q":
-                        quiet = True
-                elif opt == "--accept":
-                        accept = True
-                elif opt == "--be-name":
-                        be_name = arg
-                elif opt == "--licenses":
-                        show_licenses = True
-                elif opt == "--no-be-activate":
-                        be_activate = False
-                elif opt == "--no-refresh":
-                        refresh_catalogs = False
-                elif opt == "--no-index":
-                        update_index = False
-                elif opt == "--deny-new-be":
-                        new_be = False
-                elif opt == "--reject":
-                        reject_pats.append(arg)
-                elif opt == "--require-new-be":
-                        new_be = True
-
-        if verbose and quiet:
-                usage(_("-v and -q may not be combined"), cmd=op)
-        if verbose > 2:
-                DebugValues.set_value("plan", "True")
-
-        api_inst.progresstracker = get_tracker(quiet)
+        return __api_op(op, api_inst, _li_ignore=li_ignore,
+            _noexecute=noexecute, _quiet=quiet, _stage=stage,
+            _verbose=verbose, be_activate=be_activate, be_name=be_name,
+            new_be=new_be, pkgs_to_uninstall=pargs,
+            recursive_removal=recursive_removal, update_index=update_index)
+
+def update(op, api_inst, pargs,
+    accept, be_activate, be_name, force, li_ignore, li_parent_sync,
+    new_be, noexecute, origins, quiet, refresh_catalogs, reject_pats,
+    show_licenses, stage, update_index, verbose):
+        """Attempt to take all installed packages specified to latest
+        version."""
+
+        api_inst.progresstracker = get_tracker(quiet, verbose)
 
         rval, res = get_fmri_args(api_inst, pargs, cmd=op)
         if not rval:
@@ -1710,65 +2022,29 @@
         if not xrval:
                 return EXIT_OOPS
 
-        stuff_to_do = opensolaris_image = None
-        try:
-                if res and "*" not in pargs and "*@*" not in pargs:
-                        # If there are specific installed packages to update,
-                        # then take only those packages to the latest version
-                        # allowed by the patterns specified.  (The versions
-                        # specified can be older than what is installed.)
-                        stuff_to_do = api_inst.plan_update(pargs,
-                            refresh_catalogs=refresh_catalogs,
-                            noexecute=noexecute, be_name=be_name, new_be=new_be,
-                            update_index=update_index, reject_list=reject_pats,
-                            repos=origins, be_activate=be_activate)
-                else:
-                        # If no packages were specified, or '*' was one of
-                        # the patterns provided, attempt to update all
-                        # installed packages.
-                        stuff_to_do, opensolaris_image = \
-                            api_inst.plan_update_all(
-                                refresh_catalogs=refresh_catalogs,
-                                noexecute=noexecute, be_name=be_name,
-                                new_be=new_be, force=force,
-                                update_index=update_index,
-                                reject_list=reject_pats,
-                                repos=origins, be_activate=be_activate)
-        except Exception, e:
-                ret_code = __api_plan_exception(op, api_inst, noexecute,
-                    verbose)
-
-                if isinstance(e, api_errors.ConflictingActionErrors) and verbose:
-                        display_plan(api_inst, verbose)
-
-                if ret_code != EXIT_OK:
-                        return ret_code
-
-        if not stuff_to_do:
-                if verbose:
-                        display_plan(api_inst, verbose)
-                msg(_("No updates available for this image."))
-                return EXIT_NOP
-
-        display_plan_licenses(api_inst, show_all=show_licenses)
-        if not quiet:
-                display_plan(api_inst, verbose)
-        if noexecute:
-                return EXIT_OK
-
-        ret_code = __api_prepare(op, api_inst, accept=accept)
-        if ret_code != EXIT_OK:
-                return ret_code
-
-        ret_code = __api_execute_plan(op, api_inst)
-
-        if ret_code == 0 and opensolaris_image:
-                msg("\n" + "-" * 75)
-                msg(_("NOTE: Please review release notes posted at:\n" ))
-                msg(misc.get_release_notes_url())
-                msg("-" * 75 + "\n")
-
-        return ret_code
+        api_inst.set_stage(stage)
+
+        if res and "*" not in pargs and "*@*" not in pargs:
+                # If there are specific installed packages to update,
+                # then take only those packages to the latest version
+                # allowed by the patterns specified.  (The versions
+                # specified can be older than what is installed.)
+                pkgs_update = pargs
+                review_release_notes = False
+        else:
+                # If no packages were specified, or '*' was one of the
+                # patterns provided, attempt to update all installed packages.
+                pkgs_update = None
+                review_release_notes = True
+
+        return __api_op(op, api_inst, _accept=accept, _li_ignore=li_ignore,
+            _noexecute=noexecute, _origins=origins, _quiet=quiet,
+            _review_release_notes=review_release_notes,
+            _show_licenses=show_licenses, _stage=stage, _verbose=verbose,
+            be_activate=be_activate, be_name=be_name, force=force,
+            li_parent_sync=li_parent_sync, new_be=new_be,
+            pkgs_update=pkgs_update, refresh_catalogs=refresh_catalogs,
+            reject_list=reject_pats, update_index=update_index)
 
 def revert(api_inst, args):
         """Attempt to revert files to their original state, either
@@ -1807,13 +2083,15 @@
         if verbose > 2:
                 DebugValues.set_value("plan", "True")
 
-        api_inst.progresstracker = get_tracker(quiet)
+        api_inst.progresstracker = get_tracker(quiet, verbose)
 
         stuff_to_do = None
         try:
-                stuff_to_do = api_inst.plan_revert(pargs, tagged=tagged,
+                for pd in api_inst.gen_plan_revert(pargs, tagged=tagged,
                     noexecute=noexecute, be_name=be_name, new_be=new_be,
-                    be_activate=be_activate)
+                    be_activate=be_activate):
+                        continue
+                stuff_to_do = not api_inst.planned_nothingtodo()
         except:
                 ret_code = __api_plan_exception(op, api_inst, noexecute,
                     verbose)
@@ -1822,12 +2100,12 @@
 
         if not stuff_to_do:
                 if verbose:
-                        display_plan(api_inst, verbose)
+                        __display_plan(api_inst, verbose)
                 msg(_("No files need to be reverted."))
                 return EXIT_NOP
 
         if not quiet:
-                display_plan(api_inst, verbose)
+                __display_plan(api_inst, verbose)
 
         if noexecute:
                 return EXIT_OK
@@ -4021,6 +4299,298 @@
 
         return EXIT_OK
 
+def list_linked(op, api_inst, pargs,
+    li_ignore, omit_headers):
+        """pkg list-linked [-H]
+
+        List all the linked images known to the current image."""
+
+        api_inst.progresstracker = get_tracker(quiet=omit_headers)
+
+        if li_ignore and type(li_ignore) == list:
+                li_ignore = api_inst.parse_linked_name_list(li_ignore)
+
+        li_list = api_inst.list_linked(li_ignore)
+        if len(li_list) == 0:
+                return EXIT_OK
+
+        fmt = ""
+        li_header = [_("NAME"), _("RELATIONSHIP"), _("PATH")]
+        for col in range(0, len(li_header)):
+                width = max([len(row[col]) for row in li_list])
+                width = max(width, len(li_header[col]))
+                if (fmt != ''):
+                        fmt += "\t"
+                fmt += "%%-%ss" % width
+
+        if not omit_headers:
+                msg(fmt % tuple(li_header))
+        for row in li_list:
+                msg(fmt % tuple(row))
+        return EXIT_OK
+
+def __parse_linked_props(args, op):
+        """Parse linked image property options that were specified on the
+        command line into a dictionary.  Make sure duplicate properties were
+        not specified."""
+
+        linked_props = dict()
+        for pv in args:
+                try:
+                        p, v = pv.split("=", 1)
+                except ValueError:
+                        usage(_("linked image property arguments must be of "
+                            "the form '<name>=<value>'."), cmd=op)
+
+                if p not in li.prop_values:
+                        usage(_("invalid linked image property: '%s'.") % p,
+                            cmd=op)
+
+                if p in linked_props:
+                        usage(_("linked image property specified multiple "
+                            "times: '%s'.") % p, cmd=op)
+
+                linked_props[p] = v
+
+        return linked_props
+
+def list_property_linked(op, api_inst, pargs,
+    li_name, omit_headers):
+        """pkg property-linked [-H] [-l <li-name>] [propname ...]
+
+        List the linked image properties associated with a child or parent
+        image."""
+
+        api_inst.progresstracker = get_tracker(quiet=omit_headers)
+
+        lin=None
+        if li_name:
+                lin = api_inst.parse_linked_name(li_name)
+        props = api_inst.get_linked_props(lin=lin)
+
+        for p in pargs:
+                if p not in props.keys():
+                        error(_("%(op)s: no such property: %(p)s") %
+                            {"op": op, "p": p})
+                        return EXIT_OOPS
+
+        if len(props) == 0:
+                return EXIT_OK
+
+        if not pargs:
+                pargs = props.keys()
+
+        width = max(max([len(p) for p in pargs if props[p]]), 8)
+        fmt = "%%-%ss\t%%s" % width
+        if not omit_headers:
+                msg(fmt % ("PROPERTY", "VALUE"))
+        for p in sorted(pargs):
+                if not props[p]:
+                        continue
+                msg(fmt % (p, props[p]))
+
+        return EXIT_OK
+
+def set_property_linked(op, api_inst, pargs,
+    accept, be_activate, be_name, li_ignore, li_md_only, li_name,
+    li_parent_sync, li_pkg_updates, new_be, noexecute, origins, quiet,
+    refresh_catalogs, reject_pats, show_licenses, update_index, verbose):
+        """pkg set-property-linked
+            [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+            [--no-parent-sync] [--no-pkg-updates]
+            [--linked-md-only] <propname>=<propvalue> ...
+
+        Change the specified linked image properties.  This may result in
+        updating the package contents of a child image."""
+
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        # make sure we're a child image
+        if li_name:
+                lin = api_inst.parse_linked_name(li_name)
+        else:
+                lin = api_inst.get_linked_name()
+
+        xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op)
+        if not xrval:
+                return EXIT_OOPS
+
+        LIXXX
+
+def audit_linked(op, api_inst, pargs,
+    li_parent_sync, li_target_all, li_target_list, omit_headers, quiet):
+        """pkg audit-linked [-a|-l <li-name>]
+
+        Audit one or more child images to see if they are in sync
+        with their parent image."""
+
+        api_inst.progresstracker = get_tracker(quiet=omit_headers)
+
+        li_target_list = api_inst.parse_linked_name_list(li_target_list)
+
+        # audit the requested child image(s)
+        if not li_target_all and not li_target_list:
+                # audit the current image
+                rvdict = api_inst.audit_linked(li_parent_sync=li_parent_sync)
+        else:
+                # audit the requested child image(s)
+                rvdict = api_inst.audit_linked_children(li_target_list)
+                if not rvdict:
+                        # may not have had any children
+                        return EXIT_OK
+
+        # display audit return values
+        width = max(max([len(k) for k in rvdict.keys()]), 8)
+        fmt = "%%-%ss\t%%s" % width
+        if not omit_headers:
+                msg(fmt % ("NAME", "STATUS"))
+
+        if not quiet:
+                for k, (rv, err) in rvdict.items():
+                        if rv == EXIT_OK:
+                                msg(fmt % (k, _("synced")))
+                        elif rv == EXIT_DIVERGED:
+                                msg(fmt % (k, _("diverged")))
+
+        rv, err = api_inst.audit_linked_rvdict2rv(rvdict)
+        if err:
+                error(err, cmd=op)
+        return rv
+
+def sync_linked(op, api_inst, pargs,
+    accept, be_activate, be_name, li_ignore, li_parent_sync, new_be,
+    noexecute, origins, quiet, refresh_catalogs, reject_pats,
+    show_licenses, update_index, verbose, li_md_only, li_pkg_updates,
+    li_target_all, li_target_list, stage):
+
+        """pkg sync-linked [-a|-l <li-name>]
+            [-nvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+            [--no-parent-sync] [--no-pkg-updates]
+            [--linked-md-only] [-a|-l <name>]
+
+        Sync one or more child images with their parent image."""
+
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op)
+        if not xrval:
+                return EXIT_OOPS
+
+        api_inst.set_stage(stage)
+
+        li_target_list = api_inst.parse_linked_name_list(li_target_list)
+
+        if not li_target_all and not li_target_list:
+                # sync the current image
+                return __api_op(op, api_inst, _accept=accept,
+                    _li_ignore=li_ignore, _noexecute=noexecute,
+                    _origins=origins, _quiet=quiet,
+                    _show_licenses=show_licenses, _stage=stage,
+                    _verbose=verbose, be_activate=be_activate,
+                    be_name=be_name, li_md_only=li_md_only,
+                    li_parent_sync=li_parent_sync,
+                    li_pkg_updates=li_pkg_updates, new_be=new_be,
+                    refresh_catalogs=refresh_catalogs,
+                    reject_list=reject_pats,
+                    update_index=update_index)
+
+        # sync the requested child image(s)
+        rvdict = api_inst.sync_linked_children(li_target_list,
+            noexecute=noexecute, accept=accept, show_licenses=show_licenses,
+            refresh_catalogs=refresh_catalogs, update_index=update_index,
+            li_pkg_updates=li_pkg_updates, li_md_only=li_md_only)
+
+        rv, err = api_inst.sync_linked_rvdict2rv(rvdict)
+        if err:
+                error(err, cmd=op)
+        return rv
+
+def attach_linked(op, api_inst, pargs,
+    accept, allow_relink, attach_child, attach_parent, be_activate,
+    be_name, force, li_ignore, li_md_only, li_parent_sync, li_pkg_updates,
+    li_props, new_be, noexecute, origins, quiet, refresh_catalogs,
+    reject_pats, show_licenses, update_index, verbose):
+        """pkg attach-linked
+            [-fnvq] [--accept] [--licenses] [--no-index] [--no-refresh]
+            [--no-pkg-updates] [--linked-md-only]
+            [--allow-relink]
+            [--prop-linked <propname>=<propvalue> ...]
+            (-c|-p) <li-name> <dir>
+
+        Attach a child linked image.  The child could be this image attaching
+        itself to a parent, or another image being attached as a child with
+        this image being the parent."""
+
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        for k, v in li_props:
+                if k in [li.PROP_PATH, li.PROP_NAME, li.PROP_MODEL]:
+                        usage(_("cannot specify linked image property: '%s'") %
+                            k, cmd=op)
+
+        if len(pargs) < 2:
+                usage(_("a linked image name and path must be specified"),
+                    cmd=op)
+
+        li_name = pargs[0]
+        li_path = pargs[1]
+
+        # parse the specified name
+        lin = api_inst.parse_linked_name(li_name, allow_unknown=True)
+
+        xrval, xres = get_fmri_args(api_inst, reject_pats, cmd=op)
+        if not xrval:
+                return EXIT_OOPS
+
+        if attach_parent:
+                # attach the current image to a parent
+                return __api_op(op, api_inst, _accept=accept,
+                    _li_ignore=li_ignore, _noexecute=noexecute,
+                    _origins=origins, _quiet=quiet,
+                    _show_licenses=show_licenses,
+                    _verbose=verbose, allow_relink=allow_relink,
+                    be_activate=be_activate, be_name=be_name, force=force,
+                    li_md_only=li_md_only, li_path=li_path,
+                    li_pkg_updates=li_pkg_updates, li_props=li_props,
+                    lin=lin, new_be=new_be, refresh_catalogs=refresh_catalogs,
+                    reject_list=reject_pats, update_index=update_index)
+
+        # attach the requested child image
+        (rv, err) = api_inst.attach_linked_child(lin, li_path, li_props,
+            accept=accept, allow_relink=allow_relink, force=force,
+            li_md_only=li_md_only, li_pkg_updates=li_pkg_updates,
+            noexecute=noexecute, refresh_catalogs=refresh_catalogs,
+            show_licenses=show_licenses, update_index=update_index)
+
+        if err:
+                error(err, cmd=op)
+        return rv
+
+def detach_linked(op, api_inst, pargs,
+    force, li_target_all, li_target_list, noexecute, quiet, verbose):
+        """pkg detach-linked
+            [-fnvq] [-a|-l <li-name>] [--linked-md-only]
+
+        Detach one or more child linked images."""
+
+        api_inst.progresstracker = get_tracker(quiet, verbose)
+
+        li_target_list = api_inst.parse_linked_name_list(li_target_list)
+
+        if not li_target_all and not li_target_list:
+                # detach the current image
+                return __api_op(op, api_inst, _noexecute=noexecute,
+                    _quiet=quiet, _verbose=verbose, force=force)
+
+        # detach the requested child image(s)
+        rvdict = api_inst.detach_linked_children(li_target_list, force=force,
+            noexecute=noexecute)
+
+        rv, err = api_inst.detach_linked_rvdict2rv(rvdict)
+        if err:
+                error(err, cmd=op)
+        return rv
+
 def image_create(args):
         """Create an image of the requested kind, at the given path.  Load
         catalog for initial publisher for convenience.
@@ -4626,6 +5196,13 @@
         logger.info(_("Image format already current."))
         return EXIT_NOP
 
+def print_version(pargs):
+        if pargs:
+                usage(_("version: command does not take operands ('%s')") %
+                    " ".join(pargs), cmd="version")
+        msg(pkg.VERSION)
+        return EXIT_OK
+
 # To allow exception handler access to the image.
 img = None
 orig_cwd = None
@@ -4648,10 +5225,11 @@
 
         try:
                 opts, pargs = getopt.getopt(sys.argv[1:], "R:D:?",
-                    ["debug=", "help"])
+                    ["debug=", "help", "runid="])
         except getopt.GetoptError, e:
                 usage(_("illegal global option -- %s") % e.opt)
 
+        runid = os.getpid()
         show_usage = False
         for opt, arg in opts:
                 if opt == "-D" or opt == "--debug":
@@ -4668,47 +5246,59 @@
                         DebugValues.set_value(key, value)
                 elif opt == "-R":
                         mydir = arg
+                elif opt == "--runid":
+                        runid = arg
                 elif opt in ("--help", "-?"):
                         show_usage = True
 
         # placeholders in this lookup table for image-create, help and version
         # which don't have dedicated methods
         cmds = {
-            "add-property-value" : property_add_value,
-            "authority"        : publisher_list,
-            "avoid"            : avoid,
-            "change-facet"     : change_facet,
-            "change-variant"   : change_variant,
-            "contents"         : list_contents,
-            "facet"            : facet_list,
-            "fix"              : fix_image,
-            "help"             : None,
-            "history"          : history_list,
-            "image-create"     : None,
-            "info"             : info,
-            "install"          : install,
-            "list"             : list_inventory,
-            "property"         : property_list,
-            "publisher"        : publisher_list,
-            "purge-history"    : history_purge,
-            "rebuild-index"    : rebuild_index,
-            "refresh"          : publisher_refresh,
-            "remove-property-value" : property_remove_value,
-            "revert"           : revert,
-            "search"           : search,
-            "set-authority"    : publisher_set,
-            "set-property"     : property_set,
-            "set-publisher"    : publisher_set,
-            "unavoid"          : unavoid,
-            "uninstall"        : uninstall,
-            "unset-authority"  : publisher_unset,
-            "unset-property"   : property_unset,
-            "unset-publisher"  : publisher_unset,
-            "update"           : update,
-            "update-format"    : update_format,
-            "variant"          : variant_list,
-            "verify"           : verify_image,
-            "version"          : None
+            "add-property-value"    : (property_add_value, None),
+            "attach-linked"         : (attach_linked, opts_attach_linked, 2),
+            "avoid"                 : (avoid, None),
+            "audit-linked"          : (audit_linked, opts_audit_linked),
+            "authority"             : (publisher_list, None),
+            "change-facet"          : (change_facet, opts_install, -1),
+            "change-variant"        : (change_variant, opts_install, -1),
+            "contents"              : (list_contents, None),
+            "detach-linked"         : (detach_linked, opts_detach_linked),
+            "facet"                 : (facet_list, None),
+            "fix"                   : (fix_image, None),
+            "help"                  : (None, None),
+            "history"               : (history_list, None),
+            "image-create"          : (None, None),
+            "info"                  : (info, None),
+            "install"               : (install, opts_install, -1),
+            "list"                  : (list_inventory, opts_list_inventory, -1),
+            "list-linked"           : (list_linked, opts_list_linked),
+            "property"              : (property_list, None),
+            "property-linked"       : (list_property_linked,
+                                          opts_list_property_linked, -1),
+            "publisher"             : (publisher_list, None),
+            "purge-history"         : (history_purge, None),
+            "rebuild-index"         : (rebuild_index, None),
+            "refresh"               : (publisher_refresh, None),
+            "remove-property-value" : (property_remove_value, None),
+            "revert"                : (revert, None),
+            "search"                : (search, None),
+            "set-authority"         : (publisher_set, None),
+            "set-property"          : (property_set, None),
+            "set-property-linked"   : (set_property_linked,
+                                          opts_set_property_linked, -1),
+            "unavoid"               : (unavoid, None),
+            "set-publisher"         : (publisher_set, None),
+            "sync-linked"           : (sync_linked, opts_sync_linked),
+            "uninstall"             : (uninstall, opts_uninstall, -1),
+            "unset-authority"       : (publisher_unset, None),
+            "unset-property"        : (property_unset, None),
+            # ("update-format" is registered below, after "update")
+            "unset-publisher"       : (publisher_unset, None),
+            "update"                : (update, opts_update, -1),
+            "update-format"         : (update_format, None),
+            "variant"               : (variant_list, None),
+            "verify"                : (verify_image, None),
+            "version"               : (None, None),
         }
 
         subcommand = None
@@ -4739,6 +5329,11 @@
                 usage(retcode=0, full=True)
         if not subcommand:
                 usage(_("no subcommand specified"))
+        if runid:
+                try:
+                        runid = int(runid)
+                except ValueError:
+                        usage(_("runid must be an integer"))
 
         for opt in ["--help", "-?"]:
                 if opt in pargs:
@@ -4751,24 +5346,20 @@
         # code the value here, at least for now.
         socket.setdefaulttimeout(30) # in secs
 
-        if subcommand == "image-create":
+        cmds_no_image = {
+                "version"        : print_version,
+                "image-create"   : image_create,
+        }
+        func = cmds_no_image.get(subcommand, None)
+        if func:
                 if "mydir" in locals():
                         usage(_("-R not allowed for %s subcommand") %
                               subcommand, cmd=subcommand)
                 try:
-                        ret = image_create(pargs)
+                        ret = func(pargs)
                 except getopt.GetoptError, e:
                         usage(_("illegal option -- %s") % e.opt, cmd=subcommand)
                 return ret
-        elif subcommand == "version":
-                if "mydir" in locals():
-                        usage(_("-R not allowed for %s subcommand") %
-                              subcommand, cmd=subcommand)
-                if pargs:
-                        usage(_("version: command does not take operands "
-                            "('%s')") % " ".join(pargs), cmd=subcommand)
-                msg(pkg.VERSION)
-                return EXIT_OK
 
         provided_image_dir = True
         pkg_image_used = False
@@ -4785,15 +5376,27 @@
                 return EXIT_OOPS
 
         # Get ImageInterface and image object.
-        api_inst = __api_alloc(mydir, provided_image_dir, pkg_image_used, False)
+        api_inst = __api_alloc(mydir, provided_image_dir, pkg_image_used, False,
+            runid=runid)
         if api_inst is None:
                 return EXIT_OOPS
         img = api_inst.img
 
         # Find subcommand and execute operation.
-        func = cmds.get(subcommand, None)
+        pargs_limit = 0
+        func = cmds[subcommand][0]
+        opts_cmd = cmds[subcommand][1]
+        if len(cmds[subcommand]) > 2:
+                pargs_limit = cmds[subcommand][2]
         try:
-                return func(api_inst, pargs)
+                if opts_cmd is None:
+                        return func(api_inst, pargs)
+
+                opts, pargs = misc.opts_parse(subcommand, api_inst, pargs,
+                    opts_cmd, pargs_limit, usage)
+                return func(op=subcommand, api_inst=api_inst,
+                    pargs=pargs, **opts)
+
         except getopt.GetoptError, e:
                 usage(_("illegal option -- %s") % e.opt, cmd=subcommand)
 
@@ -4834,6 +5437,10 @@
                 # We don't want to display any messages here to prevent
                 # possible further broken pipe (EPIPE) errors.
                 __ret = EXIT_OOPS
+        except api_errors.LinkedImageException, __e:
+                error(_("Linked image exception(s):\n%s") %
+                      str(__e))
+                __ret = __e.lix_exitrv
         except api_errors.CertificateError, __e:
                 if img:
                         img.history.abort(RESULT_FAILED_CONFIGURATION)
--- a/src/gui/modules/installupdate.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/gui/modules/installupdate.py	Sat May 07 00:25:10 2011 -0700
@@ -642,7 +642,10 @@
         def __ipkg_ipkgui_uptodate(self):
                 if self.ipkg_ipkgui_list == None:
                         return True
-                upgrade_needed = self.api_o.plan_install(self.ipkg_ipkgui_list)
+                for pd in self.api_o.gen_plan_install(self.ipkg_ipkgui_list):
+                        continue
+                upgrade_needed = not self.api_o.planned_nothingtodo(
+                    li_ignore_all=True)
                 return not upgrade_needed
 
         def __proceed_with_stages(self, continue_operation = False):
@@ -668,14 +671,14 @@
                                             _("Ensuring %s is up to date...") %
                                             self.parent_name,
                                             bounce_progress=True)
-                                        opensolaris_image = True
+                                        solaris_image = True
                                         ips_uptodate = True
                                         notfound = self.__installed_fmris_from_args(
                                             [gui_misc.package_name["SUNWipkg"],
                                             gui_misc.package_name["SUNWcs"]])
                                         if notfound:
-                                                opensolaris_image = False
-                                        if opensolaris_image:
+                                                solaris_image = False
+                                        if solaris_image:
                                                 ips_uptodate = \
                                                     self.__ipkg_ipkgui_uptodate()
                                         if not ips_uptodate:
@@ -1340,28 +1343,30 @@
 
         def __plan_stage(self):
                 '''Function which plans the image'''
-                stuff_to_do = False
                 if self.action == enumerations.INSTALL_UPDATE:
-                        stuff_to_do = self.api_o.plan_install(
-                            self.list_of_packages, refresh_catalogs = False)
+                        for pd in self.api_o.gen_plan_install(
+                            self.list_of_packages, refresh_catalogs=False):
+                                continue
                 elif self.action == enumerations.REMOVE:
-                        plan_uninstall = self.api_o.plan_uninstall
-                        stuff_to_do = \
-                            plan_uninstall(self.list_of_packages, False, False)
+                        for pd in self.api_o.gen_plan_uninstall(
+                            self.list_of_packages,
+                            recursive_removal=False, noexecute=False):
+                                continue
                 elif self.action == enumerations.IMAGE_UPDATE:
                         # we are passing force, since we already checked if the
-                        # packages are up to date.
-                        stuff_to_do, opensolaris_image = \
-                            self.api_o.plan_update_all(
-                            refresh_catalogs = False,
-                            noexecute = False, force = True,
-                            be_name = None, new_be = None) # Create BE if required
-                        self.pylint_stub = opensolaris_image
-                elif self.action == enumerations.UPDATE_FACETS:
-                        stuff_to_do = self.api_o.plan_change_varcets(
-                                variants = None, facets = self.facets,
-                                noexecute = False, be_name = None, new_be = None)
-                return stuff_to_do
+                        # packages are up to date.  Create BE if required
+                        for pd in self.api_o.gen_plan_update(
+                            refresh_catalogs=False, noexecute=False,
+                            force=True, be_name=None, new_be=None):
+                                continue
+                        self.pylint_stub = self.api_o.solaris_image()
+                else:
+                        assert self.action == enumerations.UPDATE_FACETS
+                        for pd in self.api_o.gen_plan_change_varcets(
+                            variants=None, facets=self.facets,
+                            noexecute=False, be_name=None, new_be=None):
+                                continue
+                return not self.api_o.planned_nothingtodo(li_ignore_all=True)
 
         def __operations_done(self, alternate_done_txt = None):
                 self.__reset_window_title()
--- a/src/gui/modules/misc_non_gui.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/gui/modules/misc_non_gui.py	Sat May 07 00:25:10 2011 -0700
@@ -40,7 +40,7 @@
 
 # The current version of the Client API the PM, UM and
 # WebInstall GUIs have been tested against and are known to work with.
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 LOG_DIR = "/var/tmp"
 LOG_ERROR_EXT = "_error.log"
 LOG_INFO_EXT = "_info.log"
--- a/src/gui/modules/versioninfo.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/gui/modules/versioninfo.py	Sat May 07 00:25:10 2011 -0700
@@ -85,9 +85,10 @@
                         install_update_list = []
                         stuff_to_do = False
                         install_update_list.append(pkg_stem)
-                        stuff_to_do = api_o.plan_install(
-                                    install_update_list,
-                                    refresh_catalogs = False)
+                        for pd in api_o.gen_plan_install(install_update_list,
+                            refresh_catalogs=False):
+                                continue
+                        stuff_to_do = not api_o.planned_nothingtodo()
                         if stuff_to_do:
                                 plan_desc = api_o.describe()
                                 if plan_desc == None:
--- a/src/man/pkg.5.txt	Fri May 06 17:24:48 2011 -0700
+++ b/src/man/pkg.5.txt	Sat May 07 00:25:10 2011 -0700
@@ -273,6 +273,13 @@
            silently satisfy the group dependency.  See the avoid subcommand
            in pkg(1).
 
+           If the value is 'parent', then the dependency is ignored if
+           the image is not a child image.  If the image is a child
+           image then it's required that the dependency be present in
+           the parent image.  The package version matching for a
+           'parent' dependency is the same as that used for 'incorporate'
+           dependencies.
+
      fmri  The FMRI representing the depended-upon package.  It must not
            include the publisher.  In the case of require-any dependencies,
            there may be multiple values.  This is the dependency action's
--- a/src/modules/actions/__init__.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/__init__.py	Sat May 07 00:25:10 2011 -0700
@@ -208,11 +208,12 @@
 
         action = types[atype](data=data, **attr_dict)
 
-        ka = action.key_attr
-        if ka is not None and (ka not in action.attrs or
-            action.attrs[ka] is None):
-                raise InvalidActionError(string, _("required attribute '%s' "
-                    "was not provided.") % ka)
+        if not action.key_attr_opt:
+                ka = action.key_attr
+                if ka is not None and (ka not in action.attrs or
+                    action.attrs[ka] is None):
+                        raise InvalidActionError(string,
+                            _("required attribute '%s' was not provided.") % ka)
 
         if ahash:
                 action.hash = ahash
--- a/src/modules/actions/attribute.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/attribute.py	Sat May 07 00:25:10 2011 -0700
@@ -21,8 +21,7 @@
 #
 
 #
-# Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 """module describing a package attribute
@@ -57,6 +56,31 @@
                         raise pkg.actions.InvalidActionError(str(self),
                             'Missing "name" or "value" attribute')
 
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provide to translate this object's contents into a
+                dictionary for pickling"""
+
+                pstate = generic.Action.__getstate__(self)
+                state = {}
+                for name in AttributeAction.__slots__:
+                        if not hasattr(self, name):
+                                continue
+                        state[name] = getattr(self, name)
+                return (state, pstate)
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                (state, pstate) = state
+                generic.Action.__setstate__(self, pstate)
+                for name in state:
+                        setattr(self, name, state[name])
+
         def generate_indices(self):
                 """Generates the indices needed by the search dictionary.  See
                 generic.py for a more detailed explanation."""
--- a/src/modules/actions/depend.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/depend.py	Sat May 07 00:25:10 2011 -0700
@@ -32,11 +32,26 @@
 """
 
 import generic
-import pkg.fmri as fmri
+import pkg.fmri
 import pkg.version
 
-known_types = ("optional", "require", "exclude", "incorporate", 
-    "conditional", "require-any", "origin", "group")
+known_types = (
+    "conditional",
+    "exclude",
+    "group",
+    "incorporate",
+    "optional",
+    "origin",
+    "parent",
+    "require",
+    "require-any")
+
+#
+# this is a special package name that when present in an fmri defines a
+# dependency on the current package in which the dependency is present.
+# this is useful with the "parent" dependency type.
+#
+DEPEND_SELF = "feature/package/dependency/self"
 
 class DependencyAction(generic.Action):
         """Class representing a dependency packaging object.  The fmri attribute
@@ -47,7 +62,7 @@
         other words, if installed, other packages must be at least at specified
         version level.
 
-        require -  dependency on minimum version of other package is needed 
+        require - dependency on minimum version of other package is needed
         for correct function of this package.
 
         conditional - dependency on minimum version of specified package
@@ -60,10 +75,14 @@
         in order to install this package; if root-image=true, dependency is
         on version installed in / rather than image being modified.
 
-        incorporate - optional dependency on precise version of other package; 
+        parent - dependency on same version of this package being present in
+        the parent image.  if the current image is not a child then this
+        dependency is ignored.
+
+        incorporate - optional dependency on precise version of other package;
         non-specified portion of version is free to float.
 
-        exclude - package may not be installed together with named version 
+        exclude - package may not be installed together with named version
         or higher - reverse logic of require.
 
         group - a version of package is required unless stem is in image
@@ -87,8 +106,8 @@
 
                 if len(self.attrlist("fmri")) > 1 and \
                     self.attrs["type"] != "require-any":
-                        raise pkg.actions.InvalidActionError(str(self), 
-                            _("Multiple fmris specifed for %s dependency type") % 
+                        raise pkg.actions.InvalidActionError(str(self),
+                            _("Multiple fmris specified for %s dependency type") %
                             self.attrs["type"])
 
                 if self.attrs["type"] not in known_types:
@@ -133,7 +152,7 @@
                 #
                 fmri_string = self.attrs["fmri"]
                 if not isinstance(fmri_string, basestring):
-                        return 
+                        return
                 #
                 # First, try to eliminate fmris that don't need cleaning since
                 # this process is relatively expensive (when considering tens
@@ -202,6 +221,47 @@
                 #           (fmri_string, cleanfmri)
                 self.attrs["fmri"] = cleanfmri
 
+        def __check_parent_installed(self, image, fmri):
+
+                if not image.linked.ischild():
+                        # if we're not a linked child then ignore "parent"
+                        # dependencies.
+                        return []
+
+                # create a dictionary of packages installed in the parent
+                ppkgs_dict = dict([
+                    (i.pkg_name, i)
+                    for i in image.linked.parent_fmris()
+                ])
+
+                errors = []
+                if fmri.pkg_name not in ppkgs_dict:
+                        errors.append(_("Package is not installed in "
+                            "parent image %s") % fmri.pkg_name)
+                        return errors
+
+                pf = ppkgs_dict[fmri.pkg_name]
+                if fmri.publisher and fmri.publisher != pf.publisher:
+                        # package is from a different publisher
+                        errors.append(_("Package in parent is from a "
+                            "different publisher: %s") % pf)
+                        return errors
+
+                if pf.version == fmri.version or pf.version.is_successor(
+                    fmri.version, pkg.version.CONSTRAINT_AUTO):
+                        # parent dependency is satisfied
+                        return []
+
+                if pf.version.is_successor(fmri.version,
+                    pkg.version.CONSTRAINT_NONE):
+                        errors.append(_("Parent image has a newer "
+                            "version of package %s") % pf)
+                else:
+                        errors.append(_("Parent image has an older "
+                            "version of package %s") % pf)
+
+                return errors
+
         def __check_installed(self, image, installed_version, min_fmri,
             max_fmri, required, ctype):
                 errors = []
@@ -231,12 +291,12 @@
                     image.get_pkg_state(installed_version):
                         errors.append(
                             _("%s dependency on an obsolete package (%s);"
-                            "this package must be uninstalled manually") % 
-                            (ctype, installed_version))                                  
+                            "this package must be uninstalled manually") %
+                            (ctype, installed_version))
                         return errors
                 return errors
 
-        def verify(self, image, **args):
+        def verify(self, image, pfmri, **args):
                 """Returns a tuple of lists of the form (errors, warnings,
                 info).  The error list will be empty if the action has been
                 correctly installed in the given image."""
@@ -245,6 +305,10 @@
                 warnings = []
                 info = []
 
+                # the fmri for the package containing this action should
+                # include a publisher
+                assert pfmri.publisher
+
                 # XXX Exclude and range between min and max not yet handled
                 def __min_version():
                         return pkg.version.Version("0",
@@ -257,13 +321,23 @@
                             _("Unknown type (%s) in depend action") % ctype)
                         return errors, warnings, info
 
-                pfmris = [
-                    fmri.PkgFmri(f, image.attrs["Build-Release"]) 
-                    for f in self.attrlist("fmri")
-                ]
+                # get a list of fmris and do fmri token substitution
+                pfmris = []
+                for i in self.attrlist("fmri"):
+                        f = pkg.fmri.PkgFmri(i, image.attrs["Build-Release"])
+                        if f.pkg_name == DEPEND_SELF:
+                                f = pfmri
+                        pfmris.append(f)
+
+                if ctype == "parent":
+                        # handle "parent" dependencies here
+                        assert len(pfmris) == 1
+                        errors.extend(self.__check_parent_installed(image,
+                            pfmris[0]))
+                        return errors, warnings, info
 
                 installed_versions = [
-                    image.get_version_installed(f) 
+                    image.get_version_installed(f)
                     for f in pfmris
                 ]
 
@@ -287,7 +361,7 @@
                         min_fmri = pfmri.copy()
                         min_fmri.version = __min_version()
                 elif ctype == "conditional":
-                        cfmri = fmri.PkgFmri(self.attrs["predicate"],
+                        cfmri = pkg.fmri.PkgFmri(self.attrs["predicate"],
                             image.attrs["Build-Release"])
                         installed_cversion = image.get_version_installed(cfmri)
                         if installed_cversion is not None and \
@@ -324,7 +398,6 @@
                 # operation, not final state
 
                 return errors, warnings, info
-            
 
         def generate_indices(self):
                 """Generates the indices needed by the search dictionary.  See
--- a/src/modules/actions/driver.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/driver.py	Sat May 07 00:25:10 2011 -0700
@@ -21,8 +21,7 @@
 #
 
 #
-# Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 """module describing a driver packaging object.
@@ -56,22 +55,14 @@
                 generic.Action.__init__(self, data, **attrs)
 
                 if not self.__class__.usr_sbin:
-                        usr_sbin = DebugValues.get("driver-cmd-dir",
-                            "/usr/sbin") + "/"
-                        self.__class__.usr_sbin = usr_sbin
-                        self.__class__.add_drv = usr_sbin + "add_drv"
-                        self.__class__.rem_drv = usr_sbin + "rem_drv"
-                        self.__class__.update_drv = usr_sbin + "update_drv"
+                        self.__usr_sbin_init()
 
                 #
                 # Clean up clone_perms.  This attribute may been specified either as:
-                # 
-                #  <minorname> <mode> <owner> <group>
-                #
+                #     <minorname> <mode> <owner> <group>
                 # or
+                #     <mode> <owner> <group>
                 #
-                #  <mode> <owner> <group>
-                # 
                 # In the latter case, the <minorname> is assumed to be
                 # the same as the driver name.  Correct any such instances
                 # here so that there is only one form, so that we can cleanly
@@ -94,6 +85,38 @@
                 else:
                         self.attrs["clone_perms"] = new_cloneperms
 
+        def __usr_sbin_init(self):
+                """Initialize paths to device management commands that we will
+                execute when handling package driver actions"""
+
+                usr_sbin = DebugValues.get("driver-cmd-dir", "/usr/sbin") + "/"
+                self.__class__.usr_sbin = usr_sbin
+                self.__class__.add_drv = usr_sbin + "add_drv"
+                self.__class__.rem_drv = usr_sbin + "rem_drv"
+                self.__class__.update_drv = usr_sbin + "update_drv"
+
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate this object's contents into a
+                dictionary for pickling"""
+
+                pstate = generic.Action.__getstate__(self)
+                return (None, pstate)
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                (state, pstate) = state
+                assert state == None
+                generic.Action.__setstate__(self, pstate)
+
+                if not self.__class__.usr_sbin:
+                        self.__usr_sbin_init()
+
         @staticmethod
         def __call(args, fmt, fmtargs):
                 proc = subprocess.Popen(args, stdout = subprocess.PIPE,
--- a/src/modules/actions/file.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/file.py	Sat May 07 00:25:10 2011 -0700
@@ -66,6 +66,31 @@
                 self.hash = "NOHASH"
                 self.replace_required = False
 
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate this object's contents into a
+                dictionary for pickling"""
+
+                pstate = generic.Action.__getstate__(self)
+                state = {}
+                for name in FileAction.__slots__:
+                        if not hasattr(self, name):
+                                continue
+                        state[name] = getattr(self, name)
+                return (state, pstate)
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                (state, pstate) = state
+                generic.Action.__setstate__(self, pstate)
+                for name in state:
+                        setattr(self, name, state[name])
+
         # this check is only needed on Windows
         if portable.ostype == "windows":
                 def preinstall(self, pkgplan, orig):
--- a/src/modules/actions/generic.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/generic.py	Sat May 07 00:25:10 2011 -0700
@@ -97,6 +97,8 @@
         # key_attr would be the driver name.  When 'key_attr' is None, it means
         # that all attributes of the action are distinguishing.
         key_attr = None
+        # 'key_attr_opt' indicates if the 'key_attr' attribute is optional.
+        key_attr_opt = False
         # 'globally_identical' is True if all actions representing a single
         # object on a system must be identical.
         globally_identical = False
@@ -153,6 +155,28 @@
                 self.__class__.unknown = \
                     self.orderdict[pkg.actions.types["unknown"]]
 
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate this object's contents into a
+                dictionary for pickling"""
+
+                state = {}
+                for name in Action.__slots__:
+                        if not hasattr(self, name):
+                                continue
+                        state[name] = getattr(self, name)
+                return state
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                for name in state:
+                        setattr(self, name, state[name])
+
         def __init__(self, data=None, **attrs):
                 """Action constructor.
 
@@ -489,6 +513,8 @@
 
                 if self.key_attr is None:
                         return str(self)
+                if self.key_attr_opt and self.key_attr not in self.attrs:
+                        return str(self)
                 return "%s: %s" % \
                     (self.name, self.attrs.get(self.key_attr, "???"))
 
--- a/src/modules/actions/license.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/license.py	Sat May 07 00:25:10 2011 -0700
@@ -60,6 +60,31 @@
                 generic.Action.__init__(self, data, **attrs)
                 self.hash = "NOHASH"
 
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate this object's contents into a
+                dictionary for pickling"""
+
+                pstate = generic.Action.__getstate__(self)
+                state = {}
+                for name in LicenseAction.__slots__:
+                        if not hasattr(self, name):
+                                continue
+                        state[name] = getattr(self, name)
+                return (state, pstate)
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                (state, pstate) = state
+                generic.Action.__setstate__(self, pstate)
+                for name in state:
+                        setattr(self, name, state[name])
+
         def preinstall(self, pkgplan, orig):
                 # Set attrs["path"] so filelist can handle this action;
                 # the path must be relative to the root of the image.
--- a/src/modules/actions/signature.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/actions/signature.py	Sat May 07 00:25:10 2011 -0700
@@ -434,3 +434,27 @@
                         raise apx.AlmostIdentical(hsh,
                             self.attrs["algorithm"], self.attrs["version"])
                 return False
+        def __getstate__(self):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provided to translate this object's contents into a
+                dictionary for pickling"""
+
+                pstate = generic.Action.__getstate__(self)
+                state = {}
+                for name in SignatureAction.__slots__:
+                        if not hasattr(self, name):
+                                continue
+                        state[name] = getattr(self, name)
+                return (state, pstate)
+
+        def __setstate__(self, state):
+                """This object doesn't have a default __dict__, instead it
+                stores its contents via __slots__.  Hence, this routine must
+                be provide to translate a pickled dictionary copy of this
+                object's contents into a real in-memory object."""
+
+                (state, pstate) = state
+                generic.Action.__setstate__(self, pstate)
+                for name in state:
+                        setattr(self, name, state[name])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/altroot.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+Generic interfaces for manipulating files in an alternate root.  These
+routines guarantee that if you perform operations on a specified path, and
+that path is contained within "root", then those operations will not affect
+any files living outside of "root".  These routines mainly protect us when
+accessing paths which could contain symbolic links which might otherwise
+redirect us to an unexpected file system object.
+"""
+
+# standard python classes
+import errno
+import os
+import stat
+
+# pkg classes
+#
+# pkg.syscallat is only needed until we have a newer version of python which
+# has native support for all the *at(2) system calls we use below.
+import pkg.syscallat as sat
+
+# ---------------------------------------------------------------------------
+# Misc Functions
+#
+def __path_abs_to_relative(path):
+        """Strip the leading '/' from a path using os.path.split()."""
+
+        path_new = None
+        while True:
+                (path, tail) = os.path.split(path)
+                if not tail:
+                        break
+                if path_new:
+                        path_new = os.path.join(tail, path_new)
+                else:
+                        path_new = tail
+        return path_new
+
+def __fd_to_path(fd):
+        """Given a file descriptor return the path to that file descriptor.
+
+        The readlink() call below can return ENOENT due to 6964121.  Normally
+        images live on zfs filesystems so this isn't a problem, but the pkg(5)
+        test suite runs all its tests on tmpfs filesystems, so this function
+        always fails in that case.  Either 6964121 should be fixed before fcs,
+        or we should come up with a different safe way to open files in an
+        alternate root."""
+
+        path = "/proc/%d/path/%d" % (os.getpid(), fd)
+        return os.readlink(path)
+
+# ---------------------------------------------------------------------------
+# Functions for accessing files in an alternate image
+#
+def ar_open(root, path, flags,
+    mode=None, create=False, truncate=False):
+        """A function similar to os.open() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        'root' is a directory that path must reside in.
+
+        'path' is a path that is interpreted relative to 'root'.  i.e., 'root'
+        is prepended to path.  'path' can not contain any symbolic links that
+        would cause an access to be redirected outside of 'root'.  If this
+        happens we'll raise an OSError exception with errno set to EREMOTE
+
+        'mode' optional permissions mask used if we create 'path'
+
+        'create' optional flag indicating if we should create 'path'
+
+        'truncate' optional flag indicating if we should truncate 'path' after
+        opening it."""
+
+        # all paths must be absolute
+        assert os.path.isabs(root)
+
+        # only allow read/write flags
+        assert (flags & ~(os.O_WRONLY|os.O_RDONLY)) == 0
+
+        # we can't truncate a file unless we open it for writing
+        assert not truncate or (flags & os.O_WRONLY)
+
+        # if create is true the user must supply a mode mask
+        assert not create or mode != None
+
+        # we're going to update root and path so prepare an error
+        # message with the existing values now.
+        eremote = _("Path outside alternate root: root=%s, path=%s") % \
+            (root, path)
+
+        # make target into a relative path
+        if os.path.isabs(path):
+                path = __path_abs_to_relative(path)
+
+        # now open the alternate root and get its path
+        # done to eliminate any links/mounts/etc in the path
+        root_fd = os.open(root, os.O_RDONLY)
+        try:
+                root = __fd_to_path(root_fd)
+        except OSError, e:
+                # W0511 XXX / FIXME Comments; pylint: disable-msg=W0511
+                # XXX: __fd_to_path() can return ENOENT due to 6964121
+                # pylint: enable-msg=W0511
+                if e.errno != errno.ENOENT:
+                        os.close(root_fd)
+                        raise e
+        os.close(root_fd)
+
+        # now open the target file, get its path, and make sure it
+        # lives in the alternate root
+        path_fd = None
+        try:
+                path_tmp = os.path.join(root, path)
+                path_fd = os.open(path_tmp, flags)
+        except OSError, e:
+                if e.errno != errno.ENOENT or not create:
+                        raise e
+
+        assert path_fd or create
+        if not path_fd:
+                # the file doesn't exist so we should try to create it.
+                # we'll do this by first opening the directory which
+                # will contain the file and then using openat within
+                # that directory.
+                path_dir = os.path.dirname(path)
+                path_file = os.path.basename(path)
+                try:
+                        path_dir_fd = \
+                            ar_open(root, path_dir, os.O_RDONLY)
+                except OSError, e:
+                        if e.errno != errno.EREMOTE:
+                                raise e
+                        raise OSError(errno.EREMOTE, eremote)
+
+                # we opened the directory, now create the file
+                try:
+                        path_fd = sat.openat(path_dir_fd, path_file,
+                            flags|os.O_CREAT|os.O_EXCL, mode)
+                except OSError, e:
+                        os.close(path_dir_fd)
+                        raise e
+
+                # we created the file
+                assert path_fd
+                os.close(path_dir_fd)
+
+        # verify that the file we opened lives in the alternate root
+        try:
+                path = __fd_to_path(path_fd)
+        except OSError, e:
+                # W0511 XXX / FIXME Comments; pylint: disable-msg=W0511
+                # XXX: __fd_to_path() can return ENOENT due to 6964121
+                # pylint: enable-msg=W0511
+                if e.errno != errno.ENOENT:
+                        os.close(path_fd)
+                        raise e
+                path = os.path.join(root, path)
+
+        if not path.startswith(root):
+                os.close(path_fd)
+                raise OSError(errno.EREMOTE, eremote)
+
+        if truncate:
+                # the user wanted us to truncate the file
+                try:
+                        os.ftruncate(path_fd, 0)
+                except OSError, e:
+                        os.close(path_fd)
+                        raise e
+
+        return path_fd
+
+def ar_unlink(root, path, noent_ok=False):
+        """A function similar to os.unlink() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        'noent_ok' optional flag indicating if it's ok for 'path' to be
+        missing.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        # all paths must be absolute
+        assert os.path.isabs(root)
+
+        # make target into a relative path
+        if os.path.isabs(path):
+                path = __path_abs_to_relative(path)
+
+        path_dir = os.path.dirname(path)
+        path_file = os.path.basename(path)
+
+        try:
+                path_dir_fd = ar_open(root, path_dir, os.O_RDONLY)
+        except OSError, e:
+                if noent_ok and e.errno == errno.ENOENT:
+                        return
+                raise e
+
+        try:
+                sat.unlinkat(path_dir_fd, path_file, 0)
+        except OSError, e:
+                os.close(path_dir_fd)
+                if noent_ok and e.errno == errno.ENOENT:
+                        return
+                raise e
+
+        os.close(path_dir_fd)
+        return
+
+def ar_rename(root, src, dst):
+        """A function similar to os.rename() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        'src' and 'dst' are paths that are interpreted relative to 'root'.
+        i.e., 'root' is prepended to both.  'src' and 'dst' can not contain
+        any symbolic links that would cause an access to be redirected outside
+        of 'root'.  If this happens we'll raise an OSError exception with
+        errno set to EREMOTE
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        # all paths must be absolute
+        assert os.path.isabs(root)
+
+        # make target into a relative path
+        if os.path.isabs(src):
+                src = __path_abs_to_relative(src)
+        if os.path.isabs(dst):
+                dst = __path_abs_to_relative(dst)
+
+        src_dir = os.path.dirname(src)
+        src_file = os.path.basename(src)
+        dst_dir = os.path.dirname(dst)
+        dst_file = os.path.basename(dst)
+
+        src_dir_fd = ar_open(root, src_dir, os.O_RDONLY)
+        try:
+                dst_dir_fd = ar_open(root, dst_dir, os.O_RDONLY)
+        except OSError, e:
+                os.close(src_dir_fd)
+                raise e
+
+        try:
+                sat.renameat(src_dir_fd, src_file, dst_dir_fd, dst_file)
+        except OSError, e:
+                os.close(src_dir_fd)
+                os.close(dst_dir_fd)
+                raise e
+
+        os.close(src_dir_fd)
+        os.close(dst_dir_fd)
+        return
+
+def ar_mkdir(root, path, mode):
+        """A function similar to os.mkdir() that ensures that the path we're
+        opening resides within a specified directory subtree.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        # all paths must be absolute
+        assert os.path.isabs(root)
+
+        # make target into a relative path
+        if os.path.isabs(path):
+                path = __path_abs_to_relative(path)
+
+        path_dir = os.path.dirname(path)
+        path_file = os.path.basename(path)
+
+        path_dir_fd = ar_open(root, path_dir, os.O_RDONLY)
+        try:
+                sat.mkdirat(path_dir_fd, path_file, mode)
+        except OSError, e:
+                os.close(path_dir_fd)
+                raise e
+
+        os.close(path_dir_fd)
+        return
+
+def ar_stat(root, path):
+        """A function similar to os.stat() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        try:
+                fd = ar_open(root, path, os.O_RDONLY)
+        except OSError, e:
+                raise e
+        si = os.fstat(fd)
+        os.close(fd)
+        return si
+
+def ar_isdir(root, path):
+        """A function similar to os.path.isdir() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        try:
+                si = ar_stat(root, path)
+        except OSError, e:
+                if e.errno == errno.ENOENT:
+                        return False
+                raise e
+
+        if stat.S_ISDIR(si.st_mode):
+                return True
+        return False
+
+def ar_exists(root, path):
+        """A function similar to os.path.exists() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        try:
+                fd = ar_open(root, path, os.O_RDONLY)
+        except OSError, e:
+                if e.errno == errno.ENOENT:
+                        return False
+                raise e
+        os.close(fd)
+        return True
+
+def ar_diff(root, path1, path2):
+        """A function similar to filecmp.cmp() that ensures that the path
+        we're accessing resides within a specified directory subtree.
+
+        For all other parameters, refer to the 'ar_open' function
+        for an explanation of their usage and effects."""
+
+        fd1 = fd2 = None
+
+        diff = False
+        try:
+                fd1 = ar_open(root, path1, os.O_RDONLY)
+                fd2 = ar_open(root, path2, os.O_RDONLY)
+
+                while True:
+                        b1 = os.read(fd1, 1024)
+                        b2 = os.read(fd2, 1024)
+                        if len(b1) == 0 and len(b2) == 0:
+                                # we're done
+                                break
+                        if len(b1) != len(b2) or b1 != b2:
+                                diff = True
+                                break
+        except OSError, e:
+                if fd1:
+                        os.close(fd1)
+                if fd2:
+                        os.close(fd2)
+                raise e
+
+        os.close(fd1)
+        os.close(fd2)
+        return diff
+
+def ar_img_prefix(root):
+        """A function that attempts to determine if a user or root pkg(5)
+        managed image can be found at 'root'.  If 'root' does point to a
+        pkg(5) image, then we return the relative path to the image metadata
+        directory."""
+
+        import pkg.client.image as image
+
+        user_img = False
+        root_img = False
+
+        if ar_isdir(root, image.img_user_prefix):
+                user_img = True
+
+        if ar_isdir(root, image.img_root_prefix):
+                root_img = True
+
+        if user_img and root_img:
+                #
+                # why would an image have two pkg metadata directories.
+                # is this image corrupt?
+                #
+                return None
+        if user_img:
+                return image.img_user_prefix
+        if root_img:
+                return image.img_root_prefix
+        return None
--- a/src/modules/client/api.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/api.py	Sat May 07 00:25:10 2011 -0700
@@ -67,11 +67,12 @@
 
 from pkg.api_common import (PackageInfo, LicenseInfo, PackageCategory,
     _get_pkg_cat_data)
+from pkg.client import global_settings
 from pkg.client.debugvalues import DebugValues
-from pkg.client import global_settings
+from pkg.client.pkgdefs import *
 from pkg.smf import NonzeroExitException
 
-CURRENT_API_VERSION = 58
+CURRENT_API_VERSION = 59
 CURRENT_P5I_VERSION = 1
 
 # Image type constants.
@@ -188,8 +189,8 @@
 class ImageInterface(object):
         """This class presents an interface to images that clients may use.
         There is a specific order of methods which must be used to install
-        or uninstall packages, or update an image. First, plan_install,
-        plan_uninstall, plan_update_all or plan_change_variant must be
+        or uninstall packages, or update an image. First, gen_plan_install,
+        gen_plan_uninstall, gen_plan_update or gen_plan_change_varcets must be
         called.  After that method completes successfully, describe may be
         called, and prepare must be called. Finally, execute_plan may be
         called to implement the previous created plan. The other methods
@@ -213,16 +214,45 @@
         MATCH_GLOB = 2
 
         # Private constants used for tracking which type of plan was made.
-        __INSTALL   = 1
-        __UNINSTALL = 2
-        __UPDATE    = 3
-        __VARCET    = 4
-        __REVERT    = 5
-        __valid_plan_types = (1, 2, 3, 4, 5)
+        __INSTALL     = "install"
+        __REVERT      = "revert"
+        __SYNC        = "sync"
+        __UNINSTALL   = "uninstall"
+        __UPDATE      = "update"
+        __VARCET      = "varcet"
+        __plan_values = frozenset([
+            __INSTALL,
+            __REVERT,
+            __SYNC,
+            __UNINSTALL,
+            __UPDATE,
+            __VARCET,
+        ])
+
+        __api_op_2_plan = {
+            API_OP_ATTACH:         __SYNC,
+            API_OP_CHANGE_FACET:   __VARCET,
+            API_OP_CHANGE_VARIANT: __VARCET,
+            API_OP_DETACH:         __UNINSTALL,
+            API_OP_INSTALL:        __INSTALL,
+            API_OP_REVERT:         __REVERT,
+            API_OP_SYNC:           __SYNC,
+            API_OP_UNINSTALL:      __UNINSTALL,
+            API_OP_UPDATE:         __UPDATE,
+        }
+
+        __stage_2_ip_mode = {
+            API_STAGE_DEFAULT:  ip.IP_MODE_DEFAULT,
+            API_STAGE_PUBCHECK: ip.IP_MODE_SAVE,
+            API_STAGE_PLAN:     ip.IP_MODE_SAVE,
+            API_STAGE_PREPARE:  ip.IP_MODE_LOAD,
+            API_STAGE_EXECUTE:  ip.IP_MODE_LOAD,
+        }
 
 
         def __init__(self, img_path, version_id, progresstracker,
-            cancel_state_callable, pkg_client_name, exact_match=True):
+            cancel_state_callable, pkg_client_name, exact_match=True,
+            cmdpath=None, runid=-1):
                 """Constructs an ImageInterface object.
 
                 'img_path' is the absolute path to an existing image or to a
@@ -252,24 +282,67 @@
                 other platforms, a value of False will allow any image location.
                 """
 
-                compatible_versions = set([57, CURRENT_API_VERSION])
+                compatible_versions = set([57, 58, CURRENT_API_VERSION])
 
                 if version_id not in compatible_versions:
                         raise apx.VersionException(CURRENT_API_VERSION,
                             version_id)
 
+                if sys.path[0].startswith("/dev/fd/"):
+                        #
+                        # Normally when the kernel forks off an interpreted
+                        # program, it executes the interpreter with the first
+                        # argument being the path to the interpreted program
+                        # we're executing.  But in the case of suid scripts
+                        # this presents a security problem because that path
+                        # could be updated after exec but before the
+                        # interpreter opens reads the program.  To avoid this
+                        # race, for suid script the kernel replaces the name
+                        # of the interpreted program with /dev/fd/###, and
+                        # opens the interpreted program such that it can be
+                        # read from the specified file descriptor device node.
+                        # So if we detect that path[0] (which should be the
+                        # interpreted program name) is a /dev/fd/ path, that
+                        # means we're being run as an suid script, which we
+                        # don't really want to support.  (Since this breaks
+                        # our subsequent code that attempts to determine the
+                        # name of the executable we are running as.)
+                        #
+                        raise apx.SuidUnsupportedError()
+
                 # The image's History object will use client_name from
                 # global_settings, but if the program forgot to set it,
                 # we'll go ahead and do so here.
                 if global_settings.client_name is None:
                         global_settings.client_name = pkg_client_name
 
+                if runid < 0:
+                        runid = os.getpid()
+                self.__runid = runid
+
+                if cmdpath == None:
+                        cmdpath = misc.api_cmdpath()
+                self.cmdpath = cmdpath
+
+                # prevent brokeness in the test suite
+                if self.cmdpath and \
+                    "PKG_NO_RUNPY_CMDPATH" in os.environ and \
+                    self.cmdpath.endswith(os.sep + "run.py"):
+                        raise RuntimeError, """
+An ImageInterface object was allocated from within ipkg test suite and
+cmdpath was not explicitly overridden.  Please make sure to set
+explicitly set cmdpath when allocating an ImageInterface object, or
+override cmdpath when allocating an Image object by setting PKG_CMDPATH
+in the environment or by setting simulate_cmdpath in DebugValues."""
+
                 if isinstance(img_path, basestring):
                         # Store this for reset().
                         self._img_path = img_path
                         self._img = image.Image(img_path,
                             progtrack=progresstracker,
-                            user_provided_dir=exact_match)
+                            user_provided_dir=exact_match,
+                            cmdpath=self.cmdpath,
+                            runid=runid)
 
                         # Store final image path.
                         self._img_path = self._img.get_root()
@@ -283,9 +356,17 @@
                         raise TypeError(_("Unknown img_path type."))
 
                 self.__progresstracker = progresstracker
+                lin = None
+                if self._img.linked.ischild():
+                        lin = self._img.linked.child_name
+                self.__progresstracker.set_linked_name(lin)
+
                 self.__cancel_state_callable = cancel_state_callable
+                self.__stage = API_STAGE_DEFAULT
                 self.__plan_type = None
+                self.__api_op = None
                 self.__plan_desc = None
+                self.__planned_children = False
                 self.__prepared = False
                 self.__executed = False
                 self.__be_activate = True
@@ -380,6 +461,14 @@
                 bootenv.BootEnv.check_be_name(be_name)
                 return True
 
+        def set_stage(self, stage):
+                """Tell the api which stage of execution we're in.  This is
+                used when executing in child images during recursive linked
+                operations."""
+
+                assert stage in api_stage_values
+                self.__stage = stage
+
         def __cert_verify(self, log_op_end=None):
                 """Verify validity of certificates.  Any apx.ExpiringCertificate
                 exceptions are caught here, a message is displayed, and
@@ -509,11 +598,11 @@
                         if not unavoid:
                                 self._img.avoid_pkgs(fmri_strings,
                                     progtrack=self.__progresstracker,
-                                    check_cancelation=self.__check_cancelation)
+                                    check_cancel=self.__check_cancel)
                         else:
                                 self._img.unavoid_pkgs(fmri_strings,
                                     progtrack=self.__progresstracker,
-                                    check_cancelation=self.__check_cancelation)
+                                    check_cancel=self.__check_cancel)
                 finally:
                         self._activity_lock.release()
                 return True
@@ -523,21 +612,12 @@
                 dependencies on this) """
                 return [a for a in self._img.get_avoid_dict().iteritems()]
 
-        def __plan_common_exception(self, log_op_end=None):
+        def __plan_common_exception(self, log_op_end_all=False):
                 """Deal with exceptions that can occur while planning an
                 operation.  Any exceptions generated here are passed
                 onto the calling context.  By default all exceptions
                 will result in a call to self.log_operation_end() before
-                they are passed onto the calling context.  Optionally,
-                the caller can specify the exceptions that should result
-                in a call to self.log_operation_end() by setting
-                log_op_end."""
-
-                if log_op_end == None:
-                        log_op_end = []
-
-                # we always explicity handle apx.PlanCreationException
-                assert apx.PlanCreationException not in log_op_end
+                they are passed onto the calling context."""
 
                 exc_type, exc_value, exc_traceback = sys.exc_info()
 
@@ -548,7 +628,11 @@
                 elif exc_type == apx.ConflictingActionErrors:
                         self.log_operation_end(error=str(exc_value),
                             result=history.RESULT_CONFLICTING_ACTIONS)
-                elif not log_op_end or exc_type in log_op_end:
+                elif exc_type in [
+                    apx.IpkgOutOfDateException,
+                    fmri.IllegalFmri]:
+                        self.log_operation_end(error=exc_value)
+                elif log_op_end_all:
                         self.log_operation_end(error=exc_value)
 
                 if exc_type != apx.ImageLockedError:
@@ -581,199 +665,9 @@
                 self._activity_lock.release()
                 raise
 
-        def plan_install(self, pkg_list, refresh_catalogs=True,
-            noexecute=False, update_index=True, be_name=None,
-            reject_list=misc.EmptyI, new_be=False, repos=None,
-            be_activate=True):
-                """Constructs a plan to install the packages provided in
-                pkg_list.  Once an operation has been planned, it may be
-                executed by first calling prepare(), and then execute_plan().
-                After execution of a plan, or to abandon a plan, reset() should
-                be called.
-
-                'pkg_list' is a list of packages to install.
-
-                'refresh_catalogs' controls whether the catalogs will
-                automatically be refreshed.
-
-                'noexecute' determines whether the resulting plan can be
-                executed and whether history will be recorded after
-                planning is finished.
-
-                'update_index' determines whether client search indexes
-                will be updated after operation completion during plan
-                execution.
-
-                'be_name' is a string to use as the name of any new boot
-                environment created during the operation.
-
-                'reject_list' is a list of patterns not to be permitted
-                in solution; installed packages matching these patterns
-                are removed.
-
-                'new_be' indicates whether a new boot environment should be
-                created during the operation.  If True, a new boot environment
-                will be created.  If False, and a new boot environment is
-                needed, an ImageUpdateOnLiveImageException will be raised.
-                If None, a new boot environment will be created only if needed.
-
-                'repos' is a list of URI strings or RepositoryURI objects that
-                represent the locations of additional sources of package data to
-                use during the planned operation.  All API functions called
-                while a plan is still active will use this package data.
-
-                'be_activate' is an optional boolean indicating whether any
-                new boot environment created for the operation should be set
-                as the active one on next boot if the operation is successful.
-
-                This function returns a boolean indicating whether there is
-                anything to do."""
-
-                self.__plan_common_start("install", noexecute, new_be, be_name,
-                    be_activate)
-                try:
-                        if refresh_catalogs:
-                                self.__refresh_publishers()
-
-                        self.__set_img_alt_sources(repos)
-                        self._img.make_install_plan(pkg_list,
-                            self.__progresstracker,
-                            self.__check_cancelation, noexecute,
-                            reject_list=reject_list)
-
-                        assert self._img.imageplan
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__INSTALL
-
-                        self.__set_new_be()
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-                        if self._img.imageplan.nothingtodo() or noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-
-                        self._img.imageplan.update_index = update_index
-                except:
-                        self.__plan_common_exception(log_op_end=[
-                            apx.CanceledException, fmri.IllegalFmri,
-                            Exception])
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res
-
-        def plan_uninstall(self, pkg_list, recursive_removal, noexecute=False,
-            update_index=True, be_name=None, new_be=False, be_activate=True):
-                """Constructs a plan to remove the packages provided in
-                pkg_list.  Once an operation has been planned, it may be
-                executed by first calling prepare(), and then execute_plan().
-                After execution of a plan, or to abandon a plan, reset() should
-                be called.
-
-                'pkg_list' is a list of packages to install.
-
-                'recursive_removal' controls whether recursive removal is
-                allowed.
-
-                For all other parameters, refer to the 'plan_install' function
-                for an explanation of their usage and effects.
-
-                This function returns a boolean which indicates whether there
-                is anything to do."""
-
-                self.__plan_common_start("uninstall", noexecute, new_be,
-                    be_name, be_activate)
-                try:
-                        self._img.make_uninstall_plan(pkg_list,
-                            recursive_removal, self.__progresstracker,
-                            self.__check_cancelation, noexecute)
-
-                        assert self._img.imageplan
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__UNINSTALL
-
-                        self.__set_new_be()
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-                        if noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-                        self._img.imageplan.update_index = update_index
-                except:
-                        self.__plan_common_exception()
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res
-
-        def plan_update(self, pkg_list, refresh_catalogs=True,
-            reject_list=misc.EmptyI, noexecute=False, update_index=True,
-            be_name=None, new_be=False, repos=None, be_activate=True):
-                """Constructs a plan to update the packages provided in
-                pkg_list.  Once an operation has been planned, it may be
-                executed by first calling prepare(), and then execute_plan().
-                After execution of a plan, or to abandon a plan, reset() should
-                be called.
-
-                'pkg_list' is a list of packages to update.
-
-                For all other parameters, refer to the 'plan_install' function
-                for an explanation of their usage and effects.
-
-                This function returns a boolean which indicates whether there
-                is anything to do."""
-
-                self.__plan_common_start("update", noexecute, new_be,
-                    be_name, be_activate)
-                try:
-                        if refresh_catalogs:
-                                self.__refresh_publishers()
-
-                        self.__set_img_alt_sources(repos)
-                        self._img.make_update_plan(self.__progresstracker,
-                            self.__check_cancelation, noexecute,
-                            pkg_list=pkg_list, reject_list=reject_list)
-
-                        assert self._img.imageplan
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__UPDATE
-
-                        self.__set_new_be()
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-                        if self._img.imageplan.nothingtodo() or noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-
-                        self._img.imageplan.update_index = update_index
-                except:
-                        self.__plan_common_exception(log_op_end=[
-                            apx.CanceledException, fmri.IllegalFmri,
-                            Exception])
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res
-
-        def __is_pkg5_native_packaging(self):
-                """Helper routine that returns True if this object represents an
-                image where pkg(5) is the native packaging system and needs to
-                be upgraded before the image can be."""
+        def solaris_image(self):
+                """Returns True if the current image is a solaris image, or an
+                image which contains the pkg(5) packaging system."""
 
                 # First check to see if the special package "release/name"
                 # exists and contains metadata saying this is Solaris.
@@ -799,180 +693,864 @@
 
                 return False
 
+        def __ipkg_require_latest(self, noexecute):
+                """Raises an IpkgOutOfDateException if the current image
+                contains the pkg(5) packaging system and a newer version
+                of the pkg(5) packaging system is installable."""
+
+                if not self.solaris_image():
+                        return
+                try:
+                        if self._img.ipkg_is_up_to_date(
+                            self.__check_cancel, noexecute,
+                            refresh_allowed=False,
+                            progtrack=self.__progresstracker):
+                                return
+                except apx.ImageNotFoundException:
+                        # Can't do anything in this
+                        # case; so proceed.
+                        return
+
+                raise apx.IpkgOutOfDateException()
+
+        def __plan_op(self, _op, _accept=False, _ad_kwargs=None,
+            _be_activate=True, _be_name=None, _ipkg_require_latest=False,
+            _li_ignore=None, _li_md_only=False, _li_parent_sync=True,
+            _new_be=False, _noexecute=False, _refresh_catalogs=True,
+            _repos=None, _update_index=True,
+            **kwargs):
+                """Constructs a plan to change the package or linked image
+                state of an image.
+
+                We can raise PermissionsException, PlanCreationException,
+                InventoryException, or LinkedImageException.
+
+                Arguments prefixed with '_' are primarily used within this
+                function.  All other arguments must be specified via keyword
+                assignment and will be passed directly on to the image
+                interfaces being invoked."
+
+                '_op' is the API operation we will perform.
+
+                '_ad_kwargs' is only used during attach or detach and it
+                is a dictionary of arguments that will be passed to the
+                linked image attach/detach interfaces.
+
+                '_ipkg_require_latest' enables a check to verify that the
+                latest installable version of the pkg(5) packaging system is
+                installed before we proceed with the requested operation.
+
+                For all other '_' prefixed parameters, please refer to the
+                'gen_plan_*' functions which invoke this function for an
+                explanation of their usage and effects.
+
+                This function returns a boolean indicating whether there is
+                anything to do."""
+
+                # sanity checks
+                assert _op in api_op_values
+                assert _ad_kwargs == None or \
+                    _op in [API_OP_ATTACH, API_OP_DETACH]
+                assert _ad_kwargs != None or \
+                    _op not in [API_OP_ATTACH, API_OP_DETACH]
+                assert not _li_md_only or \
+                    _op in [API_OP_ATTACH, API_OP_DETACH, API_OP_SYNC]
+                assert not _li_md_only or _li_parent_sync
+
+                # make some perf optimizations
+                if _li_md_only:
+                        _refresh_catalogs = _update_index = False
+                if self.__stage not in [API_STAGE_DEFAULT, API_STAGE_PUBCHECK]:
+                        _li_parent_sync = False
+                if self.__stage not in [API_STAGE_DEFAULT, API_STAGE_PLAN]:
+                        _refresh_catalogs = False
+                        _ipkg_require_latest = False
+
+                # if we have any children we don't support operations using
+                # temporary repositories.
+                if _repos and self._img.linked.list_related(_li_ignore):
+                        raise apx.PlanCreationException(no_tmp_origins=True)
+
+                # All the image interface functions that we invoke have some
+                # common arguments.  Set those up now.
+                args_common = {}
+                args_common["op"] = _op
+                args_common["progtrack"] = self.__progresstracker
+                args_common["check_cancel"] = self.__check_cancel
+                args_common["noexecute"] = _noexecute
+                args_common["ip_mode"] = self.__stage_2_ip_mode[self.__stage]
+
+
+                # make sure there is no overlap between the common arguments
+                # supplied to all api interfaces and the api arguments
+                # that the caller passed to this function.
+                assert (set(args_common) & set(kwargs)) == set(), \
+                    "%s & %s != set()" % (str(set(args_common)),
+                    str(set(kwargs)))
+                kwargs.update(args_common)
+
+                # Lock the current image.
+                self.__plan_common_start(_op, _noexecute, _new_be, _be_name,
+                    _be_activate)
+
+                try:
+                        # reset any child recursion state we might have
+                        self._img.linked.reset_recurse()
+
+                        # prepare to recurse into child images
+                        self._img.linked.init_recurse(_op, _li_ignore,
+                            _accept, _refresh_catalogs, _update_index, kwargs)
+
+                        if _op == API_OP_ATTACH:
+                                self._img.linked.attach_parent(**_ad_kwargs)
+                        elif _op == API_OP_DETACH:
+                                self._img.linked.detach_parent(**_ad_kwargs)
+
+                        if _li_parent_sync:
+                                # try to refresh linked image
+                                # constraints from the parent image.
+                                self._img.linked.syncmd_from_parent(_op)
+
+                        if self.__stage in [API_STAGE_DEFAULT,
+                            API_STAGE_PUBCHECK]:
+
+                                # do a linked image publisher check
+                                self._img.linked.check_pubs(_op)
+                                self._img.linked.do_recurse(API_STAGE_PUBCHECK)
+
+                                if self.__stage == API_STAGE_PUBCHECK:
+                                        # If this was just a publisher check
+                                        # then return immediately.
+                                        return
+
+                        if _refresh_catalogs:
+                                self.__refresh_publishers()
+
+                        if _ipkg_require_latest:
+                                # If this is an image update then make
+                                # sure the latest version of the ipkg
+                                # software is installed.
+                                self.__ipkg_require_latest(_noexecute)
+
+                        self.__set_img_alt_sources(_repos)
+
+                        if _li_md_only:
+                                self._img.make_noop_plan(**args_common)
+                        elif _op in [API_OP_ATTACH, API_OP_DETACH, API_OP_SYNC]:
+                                self._img.make_sync_plan(**kwargs)
+                        elif _op in [API_OP_CHANGE_FACET,
+                            API_OP_CHANGE_VARIANT]:
+                                self._img.make_change_varcets_plan(**kwargs)
+                        elif _op == API_OP_INSTALL:
+                                self._img.make_install_plan(**kwargs)
+                        elif _op == API_OP_REVERT:
+                                self._img.make_revert_plan(**kwargs)
+                        elif _op == API_OP_UNINSTALL:
+                                self._img.make_uninstall_plan(**kwargs)
+                        elif _op == API_OP_UPDATE:
+                                self._img.make_update_plan(**kwargs)
+                        else:
+                                raise RuntimeError("Unknown api op: %s" % _op)
+
+                        self.__api_op = _op
+                        self.__accept = _accept
+                        if not _noexecute:
+                                self.__plan_type = self.__api_op_2_plan[_op]
+
+                        if self._img.imageplan.nothingtodo():
+                                # no package changes mean no index changes
+                                _update_index = False
+
+                        self._disable_cancel()
+                        self.__set_new_be()
+                        self.__plan_desc = PlanDescription(self._img,
+                            self.__new_be, self.__be_activate)
+
+                        # Yield to our caller so they can display our plan
+                        # before we recurse into child images.  Drop the
+                        # activity lock before yielding because otherwise the
+                        # caller can't do things like set the displayed
+                        # license state for pkg plans.
+                        self._activity_lock.release()
+                        yield self.__plan_desc
+                        self._activity_lock.acquire()
+
+                        # plan operation in child images.  eventually these
+                        # will yield plan descriptions objects as well.
+                        if self.__stage in [API_STAGE_DEFAULT, API_STAGE_PLAN]:
+                                self._img.linked.do_recurse(API_STAGE_PLAN,
+                                    ip=self._img.imageplan)
+                        self.__planned_children = True
+
+                except:
+                        if _op in [
+                            API_OP_UPDATE,
+                            API_OP_INSTALL,
+                            API_OP_REVERT,
+                            API_OP_SYNC]:
+                                self.__plan_common_exception(
+                                    log_op_end_all=True)
+                        else:
+                                self.__plan_common_exception()
+                        # NOTREACHED
+
+                stuff_to_do = not self.planned_nothingtodo()
+
+                if not stuff_to_do or _noexecute:
+                        self.log_operation_end(
+                            result=history.RESULT_NOTHING_TO_DO)
+
+                self._img.imageplan.update_index = _update_index
+                self.__plan_common_finish()
+
+        def planned_nothingtodo(self, li_ignore_all=False):
+                """Once an operation has been planned check if there is
+                something todo.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                'li_ignore_all' indicates if we should only report on work
+                todo in the parent image.  (i.e., if an operation was planned
+                and that operation only involves changes to children, and
+                li_ignore_all is true, then we'll report that there's nothing
+                todo.)"""
+
+                if self.__stage == API_STAGE_PUBCHECK:
+                        # if this was just a publisher check then report
+                        # that there is something todo so we continue with
+                        # the operation.
+                        return False
+                if not self._img.imageplan:
+                        # if there's no plan there's nothing to do
+                        return True
+                if not self._img.imageplan.nothingtodo():
+                        return False
+                if not self._img.linked.nothingtodo():
+                        return False
+                if not li_ignore_all:
+                        assert self.__planned_children
+                        if not self._img.linked.recurse_nothingtodo():
+                                return False
+                return True
+
+        def plan_update(self, pkg_list, refresh_catalogs=True,
+            reject_list=None, noexecute=False, update_index=True,
+            be_name=None, new_be=False, repos=None, be_activate=True):
+                """DEPRECATED.  use gen_plan_update()."""
+                for pd in self.gen_plan_update(
+                    pkgs_update=pkg_list, refresh_catalogs=refresh_catalogs,
+                    reject_list=reject_list, noexecute=noexecute,
+                    update_index=update_index, be_name=be_name, new_be=new_be,
+                    repos=repos, be_activate=be_activate):
+                        continue
+                return not self.planned_nothingtodo()
+
         def plan_update_all(self, refresh_catalogs=True,
-            reject_list=misc.EmptyI, noexecute=False, force=False,
+            reject_list=None, noexecute=False, force=False,
             update_index=True, be_name=None, new_be=True, repos=None,
             be_activate=True):
-                """Constructs a plan to update all packages on the system
-                to the latest known versions.  Once an operation has been
-                planned, it may be executed by first calling prepare(), and
-                then execute_plan().  After execution of a plan, or to abandon
-                a plan, reset() should be called.
+                """DEPRECATED.  use gen_plan_update()."""
+                for pd in self.gen_plan_update(
+                    refresh_catalogs=refresh_catalogs, reject_list=reject_list,
+                    noexecute=noexecute, force=force,
+                    update_index=update_index, be_name=be_name, new_be=new_be,
+                    repos=repos, be_activate=be_activate):
+                        continue
+                return (not self.planned_nothingtodo(), self.solaris_image())
+
+        def gen_plan_update(self, pkgs_update=None, accept=False,
+            be_activate=True, be_name=None, force=False, li_ignore=None,
+            li_parent_sync=True, new_be=True, noexecute=False,
+            refresh_catalogs=True, reject_list=None, repos=None,
+            update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                If pkgs_update is not set, constructs a plan to update all
+                packages on the system to the latest known versions.  Once an
+                operation has been planned, it may be executed by first
+                calling prepare(), and then execute_plan().  After execution
+                of a plan, or to abandon a plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                If 'pkgs_update' is set, constructs a plan to update the
+                packages provided in pkgs_update.
+
+                Once an operation has been planned, it may be executed by
+                first calling prepare(), and then execute_plan().
 
                 'force' indicates whether update should skip the package
                 system up to date check.
 
-                For all other parameters, refer to the 'plan_install' function
-                for an explanation of their usage and effects.
-
-                This function returns a tuple of booleans of the form
-                (stuff_to_do, solaris_image)."""
-
-                self.__plan_common_start("update", noexecute, new_be, be_name,
-                    be_activate)
-                try:
-                        if refresh_catalogs:
-                                self.__refresh_publishers()
-
-                        # If the target image is an opensolaris image, we
-                        # activate some special behavior.
-                        opensolaris_image = self.__is_pkg5_native_packaging()
-
-                        if opensolaris_image and not force:
-                                try:
-                                        if not self._img.ipkg_is_up_to_date(
-                                            self.__check_cancelation,
-                                            noexecute,
-                                            refresh_allowed=refresh_catalogs,
-                                            progtrack=self.__progresstracker):
-                                                raise apx.IpkgOutOfDateException()
-                                except apx.ImageNotFoundException:
-                                        # Can't do anything in this
-                                        # case; so proceed.
-                                        pass
-
-                        self.__set_img_alt_sources(repos)
-                        self._img.make_update_plan(self.__progresstracker,
-                            self.__check_cancelation, noexecute,
-                            reject_list=reject_list)
-
-                        assert self._img.imageplan
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__UPDATE
-                        self.__set_new_be()
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-
-                        if self._img.imageplan.nothingtodo() or noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-                        self._img.imageplan.update_index = update_index
-
-                except:
-                        self.__plan_common_exception(
-                            log_op_end=[apx.IpkgOutOfDateException])
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res, opensolaris_image
+                For all other parameters, refer to the 'gen_plan_install'
+                function for an explanation of their usage and effects."""
+
+                if pkgs_update or force:
+                        ipkg_require_latest = False
+                else:
+                        ipkg_require_latest = True
+
+                op = API_OP_UPDATE
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _ipkg_require_latest=ipkg_require_latest,
+                    _li_ignore=li_ignore, _li_parent_sync=li_parent_sync,
+                    _new_be=new_be, _noexecute=noexecute,
+                    _refresh_catalogs=refresh_catalogs, _repos=repos,
+                    _update_index=update_index, pkgs_update=pkgs_update,
+                    reject_list=reject_list)
+
+        def plan_install(self, pkg_list, refresh_catalogs=True,
+            noexecute=False, update_index=True, be_name=None,
+            reject_list=None, new_be=False, repos=None,
+            be_activate=True):
+                """DEPRECATED.  use gen_plan_install()."""
+                for pd in self.gen_plan_install(
+                     pkgs_inst=pkg_list, refresh_catalogs=refresh_catalogs,
+                     noexecute=noexecute, update_index=update_index,
+                     be_name=be_name, reject_list=reject_list, new_be=new_be,
+                     repos=repos, be_activate=be_activate):
+                        continue
+                return not self.planned_nothingtodo()
+
+        def gen_plan_install(self, pkgs_inst, accept=False, be_activate=True,
+            be_name=None, li_ignore=None, li_parent_sync=True, new_be=False,
+            noexecute=False, refresh_catalogs=True, reject_list=None,
+            repos=None, update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Constructs a plan to install the packages provided in
+                pkgs_inst.  Once an operation has been planned, it may be
+                executed by first calling prepare(), and then execute_plan().
+                After execution of a plan, or to abandon a plan, reset()
+                should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                'pkgs_inst' is a list of packages to install.
+
+                'be_name' is a string to use as the name of any new boot
+                environment created during the operation.
+
+                'li_ignore' is either None or a list.  If it's None (the
+                default), the planning operation will attempt to keep all
+                linked children in sync.  If it's an empty list the planning
+                operation will ignore all children.  If this is a list of
+                linked image children names, those children will be ignored
+                during the planning operation.  If a child is ignored during
+                the planning phase it will also be skipped during the
+                preparation and execution phases.
+
+                'li_parent_sync' if the current image is a child image, this
+                flag controls whether the linked image parent metadata will be
+                automatically refreshed.
+
+                'new_be' indicates whether a new boot environment should be
+                created during the operation.  If True, a new boot environment
+                will be created.  If False, and a new boot environment is
+                needed, an ImageUpdateOnLiveImageException will be raised.
+                If None, a new boot environment will be created only if needed.
+
+                'noexecute' determines whether the resulting plan can be
+                executed and whether history will be recorded after
+                planning is finished.
+
+                'refresh_catalogs' controls whether the catalogs will
+                automatically be refreshed.
+
+                'reject_list' is a list of patterns not to be permitted
+                in solution; installed packages matching these patterns
+                are removed.
+
+                'repos' is a list of URI strings or RepositoryURI objects that
+                represent the locations of additional sources of package data to
+                use during the planned operation.  All API functions called
+                while a plan is still active will use this package data.
+
+                'be_activate' is an optional boolean indicating whether any
+                new boot environment created for the operation should be set
+                as the active one on next boot if the operation is successful.
+
+                'update_index' determines whether client search indexes
+                will be updated after operation completion during plan
+                execution."""
+
+                # certain parameters must be specified
+                assert pkgs_inst and type(pkgs_inst) == list
+
+                op = API_OP_INSTALL
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _li_parent_sync=li_parent_sync,
+                    _new_be=new_be, _noexecute=noexecute,
+                    _refresh_catalogs=refresh_catalogs, _repos=repos,
+                    _update_index=update_index, pkgs_inst=pkgs_inst,
+                    reject_list=reject_list)
+
+        def gen_plan_sync(self, accept=False, be_activate=True, be_name=None,
+            li_ignore=None, li_md_only=False, li_parent_sync=True,
+            li_pkg_updates=True, new_be=False, noexecute=False,
+            refresh_catalogs=True, reject_list=None, repos=None,
+            update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Constructs a plan to sync the current image with its
+                linked image constraints.  Once an operation has been planned,
+                it may be executed by first calling prepare(), and then
+                execute_plan().  After execution of a plan, or to abandon a
+                plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                'li_md_only' don't actually modify any packages in the current
+                images, only sync the linked image metadata from the parent
+                image.  If this option is True, 'li_parent_sync' must also be
+                True.
+
+                'li_pkg_updates' when planning a sync operation, allow updates
+                to packages other than the constraints package.  If this
+                option is False, planning a sync will fail if any packages
+                (other than the constraints package) need updating to bring
+                the image in sync with its parent.
+
+                For all other parameters, refer to the 'gen_plan_install'
+                function for an explanation of their usage and effects."""
+
+                # verify that the current image is a linked image by trying to
+                # access its name.
+                self._img.linked.child_name
+
+                op = API_OP_SYNC
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _li_md_only=li_md_only,
+                    _li_parent_sync=li_parent_sync, _new_be=new_be,
+                    _noexecute=noexecute, _refresh_catalogs=refresh_catalogs,
+                    _repos=repos, _update_index=update_index,
+                    li_pkg_updates=li_pkg_updates, reject_list=reject_list)
+
+        def gen_plan_attach(self, lin, li_path, accept=False,
+            allow_relink=False, be_activate=True, be_name=None,
+            force=False, li_ignore=None, li_md_only=False,
+            li_pkg_updates=True, li_props=None, new_be=False,
+            noexecute=False, refresh_catalogs=True, reject_list=None,
+            repos=None, update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Attach a parent image and sync the packages in the current
+                image with the new parent.  Once an operation has been
+                planned, it may be executed by first calling prepare(), and
+                then execute_plan().  After execution of a plan, or to abandon
+                a plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                'lin' a LinkedImageName object that is a name for the current
+                image.
+
+                'li_path' a path to the parent image.
+
+                'allow_relink' allows re-linking of an image that is already a
+                linked image child.  If this option is True we'll overwrite
+                all existing linked image metadata.
+
+                'li_props' optional linked image properties to apply to the
+                child image.
+
+                For all other parameters, refer to the 'gen_plan_install' and
+                'gen_plan_sync' functions for an explanation of their usage
+                and effects."""
+
+                if li_props == None:
+                        li_props = dict()
+
+                op = API_OP_ATTACH
+                ad_kwargs = {
+                    "allow_relink": allow_relink,
+                    "force": force,
+                    "lin": lin,
+                    "path": li_path,
+                    "props": li_props,
+                }
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _li_md_only=li_md_only,
+                    _new_be=new_be, _noexecute=noexecute,
+                    _refresh_catalogs=refresh_catalogs, _repos=repos,
+                    _update_index=update_index, _ad_kwargs=ad_kwargs,
+                    li_pkg_updates=li_pkg_updates, reject_list=reject_list)
+
+        def gen_plan_detach(self, accept=False, be_activate=True,
+            be_name=None, force=False, li_ignore=None, new_be=False,
+            noexecute=False):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Detach from a parent image and remove any constraints
+                package from this image.  Once an operation has been planned,
+                it may be executed by first calling prepare(), and then
+                execute_plan().  After execution of a plan, or to abandon a
+                plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                For all other parameters, refer to the 'gen_plan_install' and
+                'gen_plan_sync' functions for an explanation of their usage
+                and effects."""
+
+                op = API_OP_DETACH
+                ad_kwargs = {
+                    "force": force
+                }
+                return self.__plan_op(op, _accept=accept, _ad_kwargs=ad_kwargs,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _new_be=new_be,
+                    _noexecute=noexecute, _refresh_catalogs=False,
+                    _update_index=False, li_pkg_updates=False)
+
+        def plan_uninstall(self, pkg_list, recursive_removal, noexecute=False,
+            update_index=True, be_name=None, new_be=False, be_activate=True):
+                """DEPRECATED.  use gen_plan_uninstall()."""
+                for pd in self.gen_plan_uninstall(
+                    pkgs_to_uninstall=pkg_list,
+                    recursive_removal=recursive_removal, noexecute=noexecute,
+                    update_index=update_index, be_name=be_name,
+                    new_be=new_be, be_activate=be_activate):
+                        continue
+                return not self.planned_nothingtodo()
+
+        def gen_plan_uninstall(self, pkgs_to_uninstall, recursive_removal=False,
+            accept=False, be_activate=True, be_name=None, li_ignore=None,
+            new_be=False, noexecute=False, update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Constructs a plan to remove the packages provided in
+                pkgs_to_uninstall.  Once an operation has been planned, it may
+                be executed by first calling prepare(), and then
+                execute_plan().  After execution of a plan, or to abandon a
+                plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
+
+                'pkgs_to_uninstall' is a list of packages to uninstall.
+
+                'recursive_removal' controls whether recursive removal is
+                allowed.
+
+                For all other parameters, refer to the 'gen_plan_install'
+                function for an explanation of their usage and effects."""
+
+                # certain parameters must be specified
+                assert pkgs_to_uninstall and type(pkgs_to_uninstall) == list
+
+                op = API_OP_UNINSTALL
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _li_parent_sync=False,
+                    _new_be=new_be, _noexecute=noexecute,
+                    _refresh_catalogs=False, _update_index=update_index,
+                    pkgs_to_uninstall=pkgs_to_uninstall,
+                    recursive_removal=recursive_removal)
 
         def plan_change_varcets(self, variants=None, facets=None,
             noexecute=False, be_name=None, new_be=None, repos=None,
             be_activate=True):
-                """Creates a plan to change the specified variants and/or facets
-                for the image.  After execution of a plan, or to abandon a plan,
-                reset() should be called.
-
-                'variants' is a dict of the variants to change the values of.
+                """DEPRECATED.  use gen_plan_change_varcets()."""
+                for pd in self.gen_plan_change_varcets(
+                    variants=variants, facets=facets, noexecute=noexecute,
+                    be_name=be_name, new_be=new_be, repos=repos,
+                    be_activate=be_activate):
+                        continue
+                return not self.planned_nothingtodo()
+
+        def gen_plan_change_varcets(self, facets=None, variants=None,
+            accept=False, be_activate=True, be_name=None, li_ignore=None,
+            li_parent_sync=True, new_be=None, noexecute=False,
+            refresh_catalogs=True, reject_list=None, repos=None,
+            update_index=True):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Creates a plan to change the specified variants and/or
+                facets for the image.  Once an operation has been planned, it
+                may be executed by first calling prepare(), and then
+                execute_plan().  After execution of a plan, or to abandon a
+                plan, reset() should be called.
+
+                Callers should pass all arguments by name assignment and
+                not by positional order.
 
                 'facets' is a dict of the facets to change the values of.
 
-                For all other parameters, refer to the 'plan_install' function
-                for an explanation of their usage and effects.
-
-                This function returns a boolean which indicates whether there
-                is anything to do.
-                """
-
-                self.__plan_common_start("change-variant", noexecute, new_be,
-                    be_name, be_activate)
+                'variants' is a dict of the variants to change the values of.
+
+                For all other parameters, refer to the 'gen_plan_install'
+                function for an explanation of their usage and effects."""
+
                 if not variants and not facets:
                         raise ValueError, "Nothing to do"
-                try:
-                        self.__refresh_publishers()
-
-                        self.__set_img_alt_sources(repos)
-                        self._img.image_change_varcets(variants,
-                            facets, self.__progresstracker,
-                            self.__check_cancelation, noexecute)
-
-                        assert self._img.imageplan
-                        self.__set_new_be()
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__VARCET
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-
-                        if self._img.imageplan.nothingtodo() or noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-
-                        #
-                        # We always rebuild the search index after a
-                        # variant change
-                        #
-                        self._img.imageplan.update_index = True
-
-                except:
-                        self.__plan_common_exception()
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res
+
+                if variants:
+                        op = API_OP_CHANGE_VARIANT
+                else:
+                        op = API_OP_CHANGE_FACET
+
+                return self.__plan_op(op, _accept=accept,
+                    _be_activate=be_activate, _be_name=be_name,
+                    _li_ignore=li_ignore, _li_parent_sync=li_parent_sync,
+                    _new_be=new_be, _noexecute=noexecute,
+                    _refresh_catalogs=refresh_catalogs, _repos=repos,
+                    _update_index=update_index, variants=variants,
+                    facets=facets, reject_list=reject_list)
 
         def plan_revert(self, args, tagged=False, noexecute=True, be_name=None,
             new_be=None, be_activate=True):
-                """Plan to revert either files or all files tagged with
-                specified values.  Args contains either path names or tag names
-                to be reverted, tagged is True if args contains tags.
-
-                For all other parameters, refer to the 'plan_install' function
-                for an explanation of their usage and effects."""
-
-                self.__plan_common_start("revert", noexecute, new_be, be_name,
-                    be_activate)
-                try:
-                        self._img.make_revert_plan(args,
-                            tagged,
-                            self.__progresstracker,
-                            self.__check_cancelation,
-                            noexecute)
-
-                        assert self._img.imageplan
-
-                        self._disable_cancel()
-
-                        if not noexecute:
-                                self.__plan_type = self.__REVERT
-
-                        self.__set_new_be()
-
-                        self.__plan_desc = PlanDescription(self._img,
-                            self.__new_be, self.__be_activate)
-                        if self._img.imageplan.nothingtodo() or noexecute:
-                                self.log_operation_end(
-                                    result=history.RESULT_NOTHING_TO_DO)
-
-                        self._img.imageplan.update_index = False
-                except:
-                        self.__plan_common_exception(log_op_end=[
-                            apx.CanceledException, fmri.IllegalFmri,
-                            Exception])
-                        # NOTREACHED
-
-                self.__plan_common_finish()
-                res = not self._img.imageplan.nothingtodo()
-                return res
+                """DEPRECATED.  use gen_plan_revert()."""
+                for pd in self.gen_plan_revert(
+                    args=args, tagged=tagged, noexecute=noexecute,
+                    be_name=be_name, new_be=new_be, be_activate=be_activate):
+                        continue
+                return not self.planned_nothingtodo()
+
+        def gen_plan_revert(self, args, tagged=False, noexecute=True,
+            be_activate=True, be_name=None, new_be=None):
+                """This is a generator function that returns PlanDescription
+                objects.
+
+                Plan to revert either files or all files tagged with
+                specified values.  Args contains either path names or tag
+                names to be reverted, tagged is True if args contains tags.
+                Once an operation has been planned, it may be executed by
+                first calling prepare(), and then execute_plan().  After
+                execution of a plan, or to abandon a plan, reset() should be
+                called.
+
+                For all other parameters, refer to the 'gen_plan_install'
+                function for an explanation of their usage and effects."""
+
+                op = API_OP_REVERT
+                return self.__plan_op(op, _be_activate=be_activate,
+                    _be_name=be_name, _li_ignore=[], _new_be=new_be,
+                    _noexecute=noexecute, _refresh_catalogs=False,
+                    _update_index=False, args=args, tagged=tagged)
+
+        def attach_linked_child(self, lin, li_path, li_props=None,
+            accept=False, allow_relink=False, force=False, li_md_only=False,
+            li_pkg_updates=True, noexecute=False,
+            refresh_catalogs=True, show_licenses=False, update_index=True):
+                """Attach an image as a child to the current image (the
+                current image will become a parent image). This operation
+                results in attempting to sync the child image with the parent
+                image.
+
+                'lin' is the name of the child image
+
+                'li_path' is the path to the child image
+
+                'li_props' optional linked image properties to apply to the
+                child image.
+
+                'accept' indicates whether we should accept package licenses
+                for any packages being installed during the child image sync.
+
+                'allow_relink' indicates whether we should allow linking of a
+                child image that is already linked (the child may already
+                be a child or a parent image).
+
+                'force' indicates whether we should allow linking of a child
+                image even if the specified linked image type doesn't support
+                attaching of children.
+
+                'li_md_only' indicates whether we should only update linked
+                image metadata and not actually try to sync the child image.
+
+                'li_pkg_updates' indicates whether we should disallow pkg
+                updates during the child image sync.
+
+                'noexecute' indicates if we should actually make any changes
+                rather than just simulate the operation.
+
+                'refresh_catalogs' controls whether the catalogs will
+                automatically be refreshed.
+
+                'show_licenses' indicates whether we should display package
+                licenses for any packages being installed during the child
+                image sync.
+
+                'update_index' determines whether client search indexes will
+                be updated in the child after the sync operation completes.
+
+                This function returns a tuple of the format (rv, err) where rv
+                is a pkg.client.pkgdefs return value and if an error was
+                encountered err is an exception object which describes the
+                error."""
+
+                return self._img.linked.attach_child(lin, li_path, li_props,
+                    accept=accept, allow_relink=allow_relink, force=force,
+                    li_md_only=li_md_only, li_pkg_updates=li_pkg_updates,
+                    noexecute=noexecute, progtrack=self.__progresstracker,
+                    refresh_catalogs=refresh_catalogs,
+                    show_licenses=show_licenses, update_index=update_index)
+
+        def detach_linked_children(self, li_list, force=False, noexecute=False):
+                """Detach one or more children from the current image. This
+                operation results in the removal of any constraint package
+                from the child images.
+
+                'li_list' a list of linked image name objects which specifies
+                which children to operate on.  If the list is empty then we
+                operate on all children.
+
+                For all other parameters, refer to the 'attach_linked_child'
+                function for an explanation of their usage and effects.
+
+                This function returns a dictionary where the keys are linked
+                image name objects and the values are the result of the
+                specified operation on the associated child image.  The result
+                is a tuple of the format (rv, err) where rv is a
+                pkg.client.pkgdefs return value and if an error was
+                encountered err is an exception object which describes the
+                error."""
+
+                rvdict = self._img.linked.detach_children(li_list,
+                    force=force, noexecute=noexecute,
+                    progtrack=self.__progresstracker)
+                return rvdict
+
+        def detach_linked_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operation on multiple children and merges the results into
+                a single return code."""
+
+                return self._img.linked.detach_rvdict2rv(rvdict)
+
+        def sync_linked_children(self, li_list,
+            accept=False, li_md_only=False,
+            li_pkg_updates=True, noexecute=False, refresh_catalogs=True,
+            show_licenses=False, update_index=True):
+                """Sync one or more children of the current image.
+
+                For all other parameters, refer to the 'attach_linked_child'
+                and 'detach_linked_children' functions for an explanation of
+                their usage and effects.
+
+                For a description of the return value, refer to the
+                'detach_linked_children' function."""
+
+                rvdict = self._img.linked.sync_children(li_list,
+                    accept=accept, li_md_only=li_md_only,
+                    li_pkg_updates=li_pkg_updates, noexecute=noexecute,
+                    progtrack=self.__progresstracker,
+                    refresh_catalogs=refresh_catalogs,
+                    show_licenses=show_licenses, update_index=update_index)
+                return rvdict
+
+        def sync_linked_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operation on multiple children and merges the results into
+                a single return code."""
+
+                return self._img.linked.sync_rvdict2rv(rvdict)
+
+        def audit_linked_children(self, li_list):
+                """Audit one or more children of the current image to see if
+                they are in sync with this image.
+
+                For all parameters, refer to the 'detach_linked_children'
+                functions for an explanation of their usage and effects.
+
+                For a description of the return value, refer to the
+                'detach_linked_children' function."""
+
+                rvdict = self._img.linked.audit_children(li_list)
+                return rvdict
+
+        def audit_linked_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operation on multiple children and merges the results into
+                a single return code."""
+
+                return self._img.linked.audit_rvdict2rv(rvdict)
+
+        def audit_linked(self, li_parent_sync=True):
+                """If the current image is a child image, this function
+                audits the current image to see if it's in sync with its
+                parent.
+
+                For a description of the return value, refer to the
+                'detach_linked_children' function."""
+
+                lin = self._img.linked.child_name
+                rvdict = {}
+                rvdict[lin] = self._img.linked.audit_self(
+                    li_parent_sync=li_parent_sync)
+                return rvdict
+
+        def ischild(self):
+                """Indicates whether the current image is a child image."""
+                return self._img.linked.ischild()
+
+        def get_linked_name(self):
+                """If the current image is a child image, this function
+                returns a linked image name object which represents the name
+                of the current image."""
+                return self._img.linked.child_name
+
+        def get_linked_props(self, lin=None):
+                """Return a dictionary which represents the linked image
+                properties associated with a linked image.
+
+                'lin' is the name of the child image.  If lin is None then
+                the current image is assumed to be a linked image and its
+                properties are returned."""
+
+                return self._img.linked.child_props(lin=lin)
+
+        def list_linked(self, li_ignore=None):
+                """Returns a list of linked images associated with the
+                current image.  This includes both child and parent images.
+
+                For all parameters, refer to the 'gen_plan_install' function
+                for an explanation of their usage and effects.
+
+                The returned value is a list of tuples where each tuple
+                contains (<li name>, <relationship>, <li path>)."""
+
+                return self._img.linked.list_related(li_ignore=li_ignore)
+
+        def parse_linked_name(self, li_name, allow_unknown=False):
+                """Given a string representing a linked image child name,
+                returns linked image name object representing the same name.
+
+                'allow_unknown' indicates whether the name must represent
+                actual children or simply be syntactically correct."""
+
+                return self._img.linked.parse_name(li_name, allow_unknown)
+
+        def parse_linked_name_list(self, li_name_list, allow_unknown=False):
+                """Given a list of strings representing linked image child
+                names, returns a list of linked image name objects
+                representing the same names.
+
+                For all other parameters, refer to the 'parse_linked_name'
+                function for an explanation of their usage and effects."""
+
+                return [
+                    self.parse_linked_name(li_name, allow_unknown)
+                    for li_name in li_name_list
+                ]
 
         def describe(self):
                 """Returns None if no plan is ready yet, otherwise returns
@@ -984,9 +1562,9 @@
                 """Takes care of things which must be done before the plan can
                 be executed.  This includes downloading the packages to disk and
                 preparing the indexes to be updated during execution.  Should
-                only be called once a plan_*() method has been called.  If a
-                plan is abandoned after calling this method, reset() should be
-                called."""
+                only be called once a gen_plan_*() method has been called.  If
+                a plan is abandoned after calling this method, reset() should
+                be called."""
 
                 self._acquire_activity_lock()
                 try:
@@ -999,10 +1577,15 @@
                         if not self._img.imageplan:
                                 raise apx.PlanMissingException()
 
+                        if not self.__planned_children:
+                                # if we never planned children images then we
+                                # didn't finish planning.
+                                raise apx.PlanMissingException()
+
                         if self.__prepared:
                                 raise apx.AlreadyPreparedException()
-
-                        assert self.__plan_type in self.__valid_plan_types
+                        assert self.__plan_type in self.__plan_values, \
+                            "self.__plan_type = %s" % self.__plan_type
 
                         self._enable_cancel()
 
@@ -1052,6 +1635,9 @@
                                 pass
                         self._activity_lock.release()
 
+                if self.__stage in [API_STAGE_DEFAULT, API_STAGE_PREPARE]:
+                        self._img.linked.do_recurse(API_STAGE_PREPARE)
+
         def execute_plan(self):
                 """Executes the plan. This is uncancelable once it begins.
                 Should only be called after the prepare method has been
@@ -1075,7 +1661,8 @@
                         if self.__executed:
                                 raise apx.AlreadyExecutedException()
 
-                        assert self.__plan_type in self.__valid_plan_types
+                        assert self.__plan_type in self.__plan_values, \
+                            "self.__plan_type = %s" % self.__plan_type
 
                         try:
                                 be = bootenv.BootEnv(self._img)
@@ -1109,8 +1696,22 @@
                                         e = apx.UnableToCopyBE()
                                         self.log_operation_end(error=e)
                                         raise e
+
+                        raise_later = None
+
+                        # we're about to execute a plan so change our current
+                        # working directory to / so that we won't fail if we
+                        # try to remove our current working directory
+                        os.chdir(os.sep)
+
                         try:
-                                self._img.imageplan.execute()
+                                try:
+                                        self._img.imageplan.execute()
+                                except apx.WrapIndexingException, e:
+                                        raise_later = e
+
+                                if not self._img.linked.nothingtodo():
+                                        self._img.linked.syncmd()
                         except RuntimeError, e:
                                 if self.__new_be == True:
                                         be.restore_image()
@@ -1137,9 +1738,7 @@
                                 error = apx.ActuatorException(e)
                                 self.log_operation_end(error=error)
                                 raise error
-                        except apx.WrapIndexingException, e:
-                                self.__finished_execution(be)
-                                raise
+
                         except Exception, e:
                                 if self.__new_be == True:
                                         be.restore_image()
@@ -1162,7 +1761,14 @@
                                 self.log_operation_end(error=exc_type)
                                 raise
 
+                        if self.__stage in \
+                            [API_STAGE_DEFAULT, API_STAGE_EXECUTE]:
+                                self._img.linked.do_recurse(API_STAGE_EXECUTE)
+
                         self.__finished_execution(be)
+                        if raise_later:
+                                raise raise_later
+
                 finally:
                         self._img.cleanup_downloads()
                         if self._img.locked:
@@ -1525,7 +2131,7 @@
                         pubs = None
                         try:
                                 pubs = self._img.transport.get_publisherdata(
-                                    repo, ccancel=self.__check_cancelation)
+                                    repo, ccancel=self.__check_cancel)
                         except apx.UnsupportedRepositoryOperation:
                                 raise apx.RepoPubConfigUnavailable(
                                     location=str(repo))
@@ -2685,10 +3291,18 @@
                 # object was created with instead of the current path.
                 self._img = image.Image(self._img_path,
                     progtrack=self.__progresstracker,
-                    user_provided_dir=True)
+                    user_provided_dir=True,
+                    cmdpath=self.cmdpath,
+                    runid=self.__runid)
                 self._img.blocking_locks = self.__blocking_locks
 
+                lin = None
+                if self._img.linked.ischild():
+                        lin = self._img.linked.child_name
+                self.__progresstracker.set_linked_name(lin)
+
                 self.__plan_desc = None
+                self.__planned_children = False
                 self.__plan_type = None
                 self.__prepared = False
                 self.__executed = False
@@ -2696,7 +3310,7 @@
 
                 self._cancel_cleanup_exception()
 
-        def __check_cancelation(self):
+        def __check_cancel(self):
                 """Private method. Provides a callback method for internal
                 code to use to determine whether the current action has been
                 canceled."""
@@ -2925,7 +3539,7 @@
                         try:
                                 res = self._img.transport.do_search(pub,
                                     query_str_and_args_lst,
-                                    ccancel=self.__check_cancelation)
+                                    ccancel=self.__check_cancel)
                         except apx.CanceledException:
                                 raise
                         except apx.NegativeSearchResult:
@@ -3158,7 +3772,7 @@
                 pub = max(pub, repo)
 
                 return self._img.transport.get_publisherdata(pub,
-                    ccancel=self.__check_cancelation)
+                    ccancel=self.__check_cancel)
 
         def get_publishers(self, duplicate=False):
                 """Returns a list of the publisher objects for the current
@@ -3169,16 +3783,11 @@
                 of the originals.
                 """
 
-                names = self._img.cfg.get_property("property",
-                    "publisher-search-order")
-                d = self._img.get_publishers()
-                missing_names = set(d) - set(names)
-                res =  [d[n] for n in names] + \
-                    [d[n] for n in sorted(missing_names)]
+                res = self._img.get_sorted_publishers()
                 if duplicate:
                         return [copy.copy(p) for p in res]
                 return res
-        
+
         def get_publisher_last_update_time(self, prefix=None, alias=None):
                 """Returns a datetime object representing the last time the
                 catalog for a publisher was modified or None."""
@@ -3448,6 +4057,17 @@
                 self._img.history.log_operation_start(name,
                     be_name=be_name, be_uuid=be_uuid)
 
+        def parse_liname(self, name, unknown_ok=False):
+                """Parse a linked image name string and return a
+                LinkedImageName object.  If "unknown_ok" is true then
+                liname must correspond to an existing linked image.  If
+                "unknown_ok" is false and liname doesn't correspond to
+                an existing linked image then liname must be a
+                syntactically valid and fully qualified linked image
+                name."""
+
+                return self._img.linked.parse_name(name, unknown_ok=unknown_ok)
+
         def parse_p5i(self, data=None, fileobj=None, location=None):
                 """Reads the pkg(5) publisher JSON formatted data at 'location'
                 or from the provided file-like object 'fileobj' and returns a
@@ -3734,7 +4354,11 @@
                 moved to.  This method may only be called after plan execution
                 has completed."""
 
-                assert self.__plan.state in (ip.EXECUTED_OK, ip.EXECUTED_ERROR)
+                assert self.__plan.state in (ip.EXECUTED_OK,
+                    ip.EXECUTED_ERROR), \
+                        "self.__plan.state in (ip.EXECUTED_OK, " \
+                        "ip.EXECUTED_ERROR)\nself.__plan.state == %d" % \
+                        self.__plan.state
                 return copy.copy(self.__plan.salvaged)
 
         def get_solver_errors(self):
@@ -3779,7 +4403,8 @@
     cancel_state_callable=None, facets=misc.EmptyDict, force=False,
     mirrors=misc.EmptyI, origins=misc.EmptyI, prefix=None, refresh_allowed=True,
     repo_uri=None, ssl_cert=None, ssl_key=None, user_provided_dir=False,
-    progtrack=None, variants=misc.EmptyDict, props=misc.EmptyDict):
+    progtrack=None, variants=misc.EmptyDict, props=misc.EmptyDict,
+    cmdpath=None):
         """Creates an image at the specified location.
 
         'pkg_client_name' is a string containing the name of the client,
@@ -3869,11 +4494,10 @@
         # repository, or a prefix and origins, or no prefix and no origins.
         assert (prefix and repo_uri) or (not prefix and repo_uri) or (prefix and
             origins or (not prefix and not origins))
-        
-
-        # If prefix isn't provided, and refresh isn't allowed, then auto-config
+
+        # If prefix isn't provided and refresh isn't allowed, then auto-config
         # cannot be done.
-        assert not repo_uri or (repo_uri and refresh_allowed)
+        assert (prefix or refresh_allowed) or not repo_uri
 
         destroy_root = False
         try:
@@ -3888,10 +4512,11 @@
         # needed to retrieve publisher configuration information.
         img = image.Image(root, force=force, imgtype=imgtype,
             progtrack=progtrack, should_exist=False,
-            user_provided_dir=user_provided_dir, props=props)
-
+            user_provided_dir=user_provided_dir, cmdpath=cmdpath,
+            props=props)
         api_inst = ImageInterface(img, version_id,
-            progtrack, cancel_state_callable, pkg_client_name)
+            progtrack, cancel_state_callable, pkg_client_name,
+            cmdpath=cmdpath)
 
         pubs = []
 
@@ -4024,4 +4649,3 @@
         img.cleanup_downloads()
 
         return api_inst
-
--- a/src/modules/client/api_errors.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/api_errors.py	Sat May 07 00:25:10 2011 -0700
@@ -28,6 +28,9 @@
 import os
 import urlparse
 
+# pkg classes
+import pkg.client.pkgdefs as pkgdefs
+
 # EmptyI for argument defaults; can't import from misc due to circular
 # dependency.
 EmptyI = tuple()
@@ -50,6 +53,24 @@
         def verbose_info(self):
                 return self.__verbose_info
 
+class SuidUnsupportedError(ApiException):
+        def __str__(self):
+                return _("""
+The pkg client api module can not be invoked from a setuid executable.""")
+
+class SubprocessError(ApiException):
+
+        def __init__(self, rv, cmd):
+                if type(cmd) == list:
+                        cmd = " ".join(cmd)
+                assert type(cmd) == str
+                self.err = _("The following subprocess returned an "
+                    "unexpected exit code of %(rv)d:\n%(cmd)s") % \
+                    {"rv": rv, "cmd": cmd}
+
+        def __str__(self):
+                return self.err
+
 class ImageLockedError(ApiException):
         """Used to indicate that the image is currently locked by another thread
         or process and cannot be modified."""
@@ -382,37 +403,57 @@
 
 
 class PlanCreationException(ApiException):
-        def __init__(self, unmatched_fmris=EmptyI, multiple_matches=EmptyI,
-            missing_matches=EmptyI, illegal=EmptyI,
-            badarch=EmptyI, installed=EmptyI, multispec=EmptyI,
-            no_solution=False, no_version=EmptyI, missing_dependency=EmptyI,
-            wrong_publishers=EmptyI, obsolete=EmptyI, nofiles=EmptyI,
-            solver_errors=EmptyI, already_installed=EmptyI,
-            would_install=EmptyI, wrong_variants=EmptyI):
+        def __init__(self,
+            already_installed=EmptyI,
+            badarch=EmptyI,
+            illegal=EmptyI,
+            installed=EmptyI,
+            linked_pub_error=EmptyI,
+            missing_dependency=EmptyI,
+            missing_matches=EmptyI,
+            multiple_matches=EmptyI,
+            multispec=EmptyI,
+            no_solution=False,
+            no_tmp_origins=False,
+            no_version=EmptyI,
+            nofiles=EmptyI,
+            obsolete=EmptyI,
+            pkg_updates_required=EmptyI,
+            solver_errors=EmptyI,
+            unmatched_fmris=EmptyI,
+            would_install=EmptyI,
+            wrong_publishers=EmptyI,
+            wrong_variants=EmptyI):
+
                 ApiException.__init__(self)
-                self.unmatched_fmris       = unmatched_fmris
+                self.already_installed     = already_installed
+                self.badarch               = badarch
+                self.illegal               = illegal
+                self.installed             = installed
+                self.linked_pub_error      = linked_pub_error
+                self.missing_dependency    = missing_dependency
+                self.missing_matches       = missing_matches
                 self.multiple_matches      = multiple_matches
-                self.missing_matches       = missing_matches
-                self.illegal               = illegal
-                self.badarch               = badarch
-                self.installed             = installed
                 self.multispec             = multispec
+                self.no_solution           = no_solution
+                self.no_tmp_origins        = no_tmp_origins
+                self.no_version            = no_version
+                self.nofiles               = nofiles
                 self.obsolete              = obsolete
-                self.no_solution           = no_solution
-                self.no_version            = no_version
-                self.missing_dependency    = missing_dependency
+                self.pkg_updates_required  = pkg_updates_required
+                self.solver_errors         = solver_errors
+                self.unmatched_fmris       = unmatched_fmris
+                self.would_install         = would_install
                 self.wrong_publishers      = wrong_publishers
                 self.wrong_variants        = wrong_variants
-                self.nofiles               = nofiles
-                self.solver_errors         = solver_errors
-                self.already_installed     = already_installed
-                self.would_install         = would_install
+
         def __str__(self):
                 res = []
                 if self.unmatched_fmris:
                         s = _("""\
-The following pattern(s) did not match any packages in the current catalog.
-Try relaxing the pattern, refreshing and/or examining the catalogs:""")
+The following pattern(s) did not match any allowable packages.  Try
+using a different matching pattern, or refreshing the image:
+""")
                         res += [s]
                         res += ["\t%s" % p for p in self.unmatched_fmris]
 
@@ -477,9 +518,27 @@
                         if isinstance(self.no_solution, list):
                                 res.extend(self.no_solution)
 
+                if self.pkg_updates_required:
+                        s = _("""\
+Syncing this linked image would require the following package updates:
+""")
+                        res += [s]
+                        for (oldfmri, newfmri) in self.pkg_updates_required:
+                                res += ["%(oldfmri)s -> %(newfmri)s\n" % \
+                                    {"oldfmri": oldfmri, "newfmri": newfmri}]
+
                 if self.no_version:
                         res += self.no_version
 
+                if self.no_tmp_origins:
+                        s = _("""
+The proposed operation on this parent image can not be performed because
+temporary origins were specified and this image has children.  Please either
+retry the operation again without specifying any temporary origins, or if
+packages from additional origins are required, please configure those origins
+persistently.""")
+                        res = [s]
+
                 if self.missing_dependency:
                         res += [_("Package %(pkg)s is missing a dependency: "
                             "%(dep)s") %
@@ -495,13 +554,46 @@
                         res.extend(self.solver_errors)
 
                 if self.already_installed:
-                        res += [_("The following packages are already installed in this image; use uninstall to avoid these:")]
+                        res += [_("The following packages are already "
+                            "installed in this image; use uninstall to "
+                            "avoid these:")]
                         res += [ "\t%s" % s for s in self.already_installed]
 
                 if self.would_install:
-                        res += [_("The following packages are a target of group dependencies; use install to unavoid these:")]
+                        res += [_("The following packages are a target "
+                            "of group dependencies; use install to unavoid "
+                            "these:")]
                         res += [ "\t%s" % s for s in self.would_install]
 
+                def __format_li_pubs(pubs, res):
+                        i = 0
+                        for pub, sticky in pubs:
+                                s = "    %s %d: %s" % (_("PUBLISHER"), i, pub)
+                                mod = []
+                                if not sticky:
+                                        mod.append(_("non-sticky"))
+                                if mod:
+                                        s += " (%s)" % ",".join(mod)
+                                res.append(s)
+                                i += 1
+
+                if self.linked_pub_error:
+                        res = []
+                        (pubs, parent_pubs) = self.linked_pub_error
+
+                        res.append(_("""
+Invalid child image publisher configuration.  Child image publisher
+configuration must be a subset of the parent image publisher configuration.
+Please update the child publisher configuration to match the parent.  If the
+child image is a zone this can be done automatically by detaching and
+attaching the zone.
+
+The parent image has the following enabled publishers:"""))
+                        __format_li_pubs(parent_pubs, res)
+                        res.append(_("""
+The child image has the following enabled publishers:"""))
+                        __format_li_pubs(pubs, res)
+
                 return "\n".join(res)
 
 
@@ -1429,6 +1521,16 @@
                 return _("'%s' is not a valid publisher name.") % self.data
 
 
+class ReservedPublisherPrefix(PublisherError):
+        """Used to indicate that a publisher name is not valid."""
+
+        def __str__(self):
+                fmri = self._args["fmri"]
+                return _("'%(pkg_pub)s' is a reserved publisher and does not "
+                    "contain the requested package: pkg:/%(pkg_name)s") % \
+                    {"pkg_pub": fmri.publisher, "pkg_name": fmri.pkg_name}
+
+
 class BadRepositoryAttributeValue(PublisherError):
         """Used to indicate that the specified repository attribute value is
         invalid."""
@@ -2327,3 +2429,273 @@
         if e.errno == errno.EROFS:
                 return ReadOnlyFileSystemException(e.filename)
         return e
+
+class LinkedImageException(ApiException):
+
+        def __init__(self, bundle=None, lin=None, exitrv=None,
+            attach_bad_prop=None,
+            attach_bad_prop_value=None,
+            attach_child_notsup=None,
+            attach_parent_notsup=None,
+            attach_root_as_child=None,
+            child_bad_img=None,
+            child_diverged=None,
+            child_dup=None,
+            child_nested=None,
+            child_not_in_altroot=None,
+            child_not_nested=None,
+            child_op_failed=None,
+            child_path_eaccess=None,
+            child_path_notabs=None,
+            child_unknown=None,
+            detach_child_notsup=None,
+            detach_from_parent=None,
+            detach_parent_notsup=None,
+            img_linked=None,
+            lin_malformed=False,
+            link_to_self=False,
+            parent_bad_img=None,
+            parent_bad_notabs=None,
+            parent_bad_path=None,
+            parent_not_in_altroot=None,
+            recursive_cmd_fail=None,
+            self_linked=None,
+            self_not_child=None):
+
+                self.attach_bad_prop = attach_bad_prop
+                self.attach_bad_prop_value = attach_bad_prop_value
+                self.attach_child_notsup = attach_child_notsup
+                self.attach_parent_notsup = attach_parent_notsup
+                self.attach_root_as_child = attach_root_as_child
+                self.child_bad_img = child_bad_img
+                self.child_diverged = child_diverged
+                self.child_dup = child_dup
+                self.child_nested = child_nested
+                self.child_not_in_altroot = child_not_in_altroot
+                self.child_not_nested = child_not_nested
+                self.child_op_failed = child_op_failed
+                self.child_path_eaccess = child_path_eaccess
+                self.child_path_notabs = child_path_notabs
+                self.child_unknown = child_unknown
+                self.detach_child_notsup = detach_child_notsup
+                self.detach_from_parent = detach_from_parent
+                self.detach_parent_notsup = detach_parent_notsup
+                self.img_linked = img_linked
+                self.lin_malformed = lin_malformed
+                self.link_to_self = link_to_self
+                self.parent_bad_img = parent_bad_img
+                self.parent_bad_notabs = parent_bad_notabs
+                self.parent_bad_path = parent_bad_path
+                self.parent_not_in_altroot = parent_not_in_altroot
+                self.recursive_cmd_fail = recursive_cmd_fail
+                self.self_linked = self_linked
+                self.self_not_child = self_not_child
+
+                # first deal with an error bundle
+                if bundle:
+                        assert type(bundle) in [tuple, list, set]
+                        for e in bundle:
+                                assert isinstance(e, LinkedImageException)
+
+                        # set default error return value
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_OOPS
+
+                        self.lix_err = None
+                        self.lix_bundle = bundle
+                        self.lix_exitrv = exitrv
+                        return
+
+                err = None
+
+                if attach_bad_prop:
+                        err = _("Invalid linked image attach property: %s") % \
+                            attach_bad_prop
+
+                if attach_bad_prop_value:
+                        assert type(attach_bad_prop_value) in [tuple, list]
+                        assert len(attach_bad_prop_value) == 2
+                        err =  _("Invalid linked image attach property "
+                            "value: %s") % "=".join(attach_bad_prop_value)
+
+                if attach_child_notsup:
+                        err = _("Linked image type does not support child "
+                            "attach: %s") % attach_child_notsup
+
+                if attach_parent_notsup:
+                        err = _("Linked image type does not support parent "
+                            "attach: %s") % attach_parent_notsup
+
+                if attach_root_as_child:
+                        err = _("Cannot attach root image as child")
+
+                if child_bad_img:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_EACCESS
+                        if lin:
+                                err = _("Can't initialize child image "
+                                    "(%(lin)s) at path: %(path)s") % {
+                                        "lin": lin,
+                                        "path": child_bad_img
+                                    }
+                        else:
+                                err = _("Can't initialize child image "
+                                    "at path: %s") % child_bad_img
+
+                if child_diverged:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_DIVERGED
+                        err = _("Linked image is diverged: %s") % \
+                            child_diverged
+
+                if child_dup:
+                        err = _("A linked child image with this name "
+                            "already exists: %s") % child_dup
+
+                if child_nested:
+                        cpath, ipath = child_nested
+                        err = _("Child image '%(cpath)s' is nested "
+                            "within another image: '%(ipath)s'") % {
+                                "cpath": cpath,
+                                "ipath": ipath,
+                            }
+
+                if child_not_in_altroot:
+                        path, altroot = child_not_in_altroot
+                        err = _("Child image '%(path)s' is not located "
+                           "within the parent's altroot '%(altroot)s'") % {
+                                "path": path,
+                                "altroot": altroot
+                            }
+
+                if child_not_nested:
+                        cpath, ppath = child_not_nested
+                        err = _("Child image '%(cpath)s' is not nested "
+                            "within the parent image '%(ppath)s'") % {
+                                "cpath": cpath,
+                                "ppath": ppath,
+                            }
+
+                if child_op_failed:
+                        assert lin
+                        err = _("Linked image %(op)s failed for: %(lin)s") % \
+                            {"op": child_op_failed, "lin": lin}
+
+                if child_path_eaccess:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_EACCESS
+                        if lin:
+                                err = _("Can't access child image "
+                                    "(%(lin)s) at path: %(path)s") % {
+                                        "lin": lin,
+                                        "path": child_path_eaccess
+                                    }
+                        else:
+                                err = _("Can't access child image "
+                                    "at path: %s") % child_path_eaccess
+
+                if child_path_notabs:
+                        err = _("Child path not absolute: %s") % \
+                            child_path_notabs
+
+                if child_unknown:
+                        err = _("Unknown child linked image: %s") % \
+                            child_unknown
+
+                if detach_child_notsup:
+                        err = _("Linked image type does not support "
+                            "child detach: %s") % detach_child_notsup
+
+                if detach_from_parent:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_PARENTOP
+                        err =  _("Parent linked to child, can not detach "
+                            "child: %s") % detach_from_parent
+
+                if detach_parent_notsup:
+                        err = _("Linked image type does not support "
+                            "parent detach: %s") % detach_parent_notsup
+
+                if img_linked:
+                        err = _("Image already a linked child: %s") % \
+                            img_linked
+
+                if lin_malformed:
+                        err = _("Invalid linked image name: %s") % \
+                            lin_malformed
+
+                if link_to_self:
+                        err = _("Can't link image to itself.")
+
+                if parent_bad_img:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_EACCESS
+                        err = _("Can't initialize parent image at path: %s") % \
+                            parent_bad_img
+
+                if parent_bad_notabs:
+                        err = _("Parent path not absolute: %s") % \
+                            parent_bad_notabs
+
+                if parent_bad_path:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_EACCESS
+                        err = _("Can't access parent image at path: %s") % \
+                            parent_bad_path
+
+                if parent_not_in_altroot:
+                        path, altroot = parent_not_in_altroot
+                        err = _("Parent image '%(path)s' is not located "
+                            "within the child's altroot '%(altroot)s'") % {
+                                "path": path,
+                                "altroot": altroot
+                            }
+
+                if recursive_cmd_fail:
+                        if type(recursive_cmd_fail) == list:
+                                recursive_cmd_fail = \
+                                    " ".join(recursive_cmd_fail)
+                        assert type(recursive_cmd_fail) == str
+                        err = _("""
+Recursive linked image operation failed for child '%(lin)s'.
+The following subprocess returned an unexpected exit code of %(exitrv)d:
+    %(recursive_cmd_fail)s"""
+                        ) % {
+                            "lin": lin,
+                            "exitrv": exitrv,
+                            "recursive_cmd_fail": recursive_cmd_fail,
+                        }
+
+                if self_linked:
+                        err = _("Current image already a linked child: %s") % \
+                            self_linked
+
+                if self_not_child:
+                        if exitrv == None:
+                                exitrv = pkgdefs.EXIT_NOPARENT
+                        err = _("Current image is not a linked child: %s") % \
+                            self_not_child
+
+                # set default error return value
+                if exitrv == None:
+                        exitrv = pkgdefs.EXIT_OOPS
+
+                self.lix_err = err
+                self.lix_bundle = None
+                self.lix_exitrv = exitrv
+
+        def __str__(self):
+                assert self.lix_err or self.lix_bundle
+                assert not (self.lix_err and self.lix_bundle), \
+                   "self.lix_err = %s, self.lix_bundle = %s" % \
+                   (str(self.lix_err), str(self.lix_bundle))
+
+                # single error
+                if self.lix_err:
+                        return self.lix_err
+
+                # concatenate multiple errors
+                bundle_str = []
+                for e in self.lix_bundle:
+                        bundle_str.append(str(e))
+                return "\n".join(bundle_str)
--- a/src/modules/client/debugvalues.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/debugvalues.py	Sat May 07 00:25:10 2011 -0700
@@ -23,7 +23,21 @@
 # Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 # Use is subject to license terms.
 
-from pkg.misc import Singleton
+class Singleton(type):
+        """Set __metaclass__ to Singleton to create a singleton.
+        See http://en.wikipedia.org/wiki/Singleton_pattern """
+
+        def __init__(self, name, bases, dictionary):
+                super(Singleton, self).__init__(name, bases, dictionary)
+                self.instance = None
+
+        def __call__(self, *args, **kw):
+                if self.instance is None:
+                        self.instance = super(Singleton, self).__call__(*args,
+                            **kw)
+
+                return self.instance
+
 
 class DebugValues(dict):
         """Singleton dict that returns None if unknown value
@@ -40,4 +54,5 @@
         def set_value(self, key, value):
                 self[key] = value
 
+
 DebugValues=DebugValues()
--- a/src/modules/client/image.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/image.py	Sat May 07 00:25:10 2011 -0700
@@ -24,6 +24,7 @@
 # Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
+import M2Crypto as m2
 import atexit
 import copy
 import datetime
@@ -31,6 +32,7 @@
 import os
 import platform
 import shutil
+import simplejson as json
 import stat
 import tempfile
 import time
@@ -47,6 +49,8 @@
 import pkg.client.history               as history
 import pkg.client.imageconfig           as imageconfig
 import pkg.client.imageplan             as imageplan
+import pkg.client.linkedimage           as li
+import pkg.client.pkgdefs               as pkgdefs
 import pkg.client.pkgplan               as pkgplan
 import pkg.client.progress              as progress
 import pkg.client.publisher             as publisher
@@ -58,13 +62,11 @@
 import pkg.manifest                     as manifest
 import pkg.misc                         as misc
 import pkg.nrlock
+import pkg.pkgsubprocess                as subprocess
 import pkg.portable                     as portable
 import pkg.server.catalog
 import pkg.smf                          as smf
-import pkg.pkgsubprocess                as subprocess
 import pkg.version
-import M2Crypto as m2
-import simplejson as json
 
 from pkg.client.debugvalues import DebugValues
 from pkg.client.imagetypes import IMG_USER, IMG_ENTIRE
@@ -153,7 +155,9 @@
         def __init__(self, root, user_provided_dir=False, progtrack=None,
             should_exist=True, imgtype=None, force=False,
             augment_ta_from_parent_image=True, allow_ondisk_upgrade=None,
-            allow_ambiguous=False, props=misc.EmptyDict):
+            allow_ambiguous=False, props=misc.EmptyDict, cmdpath=None,
+            runid=-1):
+
                 if should_exist:
                         assert(imgtype is None)
                         assert(not force)
@@ -166,6 +170,31 @@
                 self.__alt_known_cat = None
                 self.__alt_pkg_sources_loaded = False
 
+                if (runid < 0):
+                        runid = os.getpid()
+                self.runid = runid
+
+                # Determine identity of client executable if appropriate.
+                if cmdpath == None:
+                        cmdpath = misc.api_cmdpath()
+                self.cmdpath = cmdpath
+
+                if self.cmdpath != None:
+                        self.__cmddir = os.path.dirname(cmdpath)
+
+                # prevent brokeness in the test suite
+                if self.cmdpath and \
+                    "PKG_NO_RUNPY_CMDPATH" in os.environ and \
+                    self.cmdpath.endswith(os.sep + "run.py"):
+                        raise RuntimeError, """
+An Image object was allocated from within ipkg test suite and
+cmdpath was not explicitly overridden.  Please make sure to
+explicitly set cmdpath when allocating an Image object, or
+override cmdpath when allocating an Image object by setting PKG_CMDPATH
+in the environment or by setting simulate_cmdpath in DebugValues."""
+
+                self.linked = None
+
                 # Indicates whether automatic image format upgrades of the
                 # on-disk format are allowed.
                 self.allow_ondisk_upgrade = allow_ondisk_upgrade
@@ -179,10 +208,11 @@
                 self.blocking_locks = False
                 self.cfg = None
                 self.history = history.History()
-                self.imageplan = None # valid after evaluation succeeds
+                self.imageplan = None
                 self.img_prefix = None
                 self.imgdir = None
                 self.index_dir = None
+                self.plandir = None
                 self.root = root
                 self.version = -1
 
@@ -228,6 +258,8 @@
                 self.transport = transport.Transport(
                     transport.ImageTransportCfg(self))
 
+                self.linked = li.LinkedImage(self)
+
                 if should_exist:
                         self.find_root(self.root, user_provided_dir,
                             progtrack)
@@ -245,17 +277,12 @@
                 # locked down umask.
                 os.umask(0022)
 
-                # Determine identity of client executable if appropriate.
-                self.__cmddir = None
-                if global_settings.client_args[0]:
-                        cmdpath = os.path.join(os.getcwd(),
-                            global_settings.client_args[0])
-                        cmdpath = os.path.realpath(cmdpath)
-                        self.__cmddir = os.path.dirname(os.path.realpath(
-                            cmdpath))
-
                 self.augment_ta_from_parent_image = augment_ta_from_parent_image
 
+        @staticmethod
+        def alloc(*args, **kwargs):
+                return Image(*args, **kwargs)
+
         def __catalog_loaded(self, name):
                 """Returns a boolean value indicating whether the named catalog
                 has already been loaded.  This is intended to be used as an
@@ -308,7 +335,8 @@
                 if self.__cmddir and self.augment_ta_from_parent_image:
                         pkg_trust_anchors = Image(self.__cmddir,
                             augment_ta_from_parent_image=False,
-                            allow_ambiguous=True).trust_anchors
+                            allow_ambiguous=True,
+                            cmdpath=self.cmdpath).trust_anchors
                 if not loc_is_dir and os.path.exists(trust_anchor_loc):
                         raise apx.InvalidPropertyValue(_("The trust "
                             "anchors for the image were expected to be found "
@@ -478,10 +506,6 @@
                 # eliminate problem if relative path such as "." is passed in
                 d = os.path.realpath(d)
 
-                live_root = DebugValues.get_value("simulate_live_root")
-                if not live_root:
-                        live_root = "/"
-
                 while True:
                         imgtype = self.image_type(d)
                         if imgtype in (IMG_USER, IMG_ENTIRE):
@@ -490,6 +514,7 @@
                                     os.path.realpath(d):
                                         raise apx.ImageNotFoundException(
                                             exact_match, startd, d)
+                                live_root = misc.liveroot()
                                 if not exact_match and d != live_root and \
                                     not self.allow_ambiguous and \
                                     portable.osname == "sunos":
@@ -504,7 +529,7 @@
                                         raise apx.ImageLocationAmbiguous(d,
                                             live_root=live_root)
                                 self.__set_dirs(imgtype=imgtype, root=d,
-                                    progtrack=progtrack)
+                                    startd=startd, progtrack=progtrack)
                                 return
 
                         # XXX follow symlinks or not?
@@ -599,10 +624,19 @@
                         except EnvironmentError, e:
                                 raise apx._convert_error(e)
 
-        def __set_dirs(self, imgtype, root, progtrack=None, purge=False):
+        def __set_dirs(self, imgtype, root, startd=None, progtrack=None,
+            purge=False):
                 # Ensure upgraded status is reset.
                 self.__upgraded = False
 
+                if not self.__allow_liveroot() and root == misc.liveroot():
+                        if startd == None:
+                                startd = root
+                        raise RuntimeError, \
+                           "Live root image access is disabled but was \
+                           attempted.\nliveroot: %s\nimage path: %s" % \
+                           (misc.liveroot(), startd)
+
                 self.type = imgtype
                 self.root = root
                 if self.type == IMG_USER:
@@ -614,18 +648,12 @@
                 self.transport = transport.Transport(
                     transport.ImageTransportCfg(self))
 
-                # Change directory to the root of the image so that we can
-                # remove any directories beneath us.  If we're changing the
-                # image, don't chdir, as we're likely changing to a new BE
-                # and want to be able to unmount it later.
-                if not self.imgdir and os.path.isdir(root):
+                # cleanup specified path
+                if os.path.isdir(root):
+                        cwd = os.getcwd()
                         os.chdir(root)
-
-                        # The specified root may have been a relative path.
                         self.root = os.getcwd()
-
-                if not os.path.isabs(self.root):
-                        self.root = os.path.abspath(self.root)
+                        os.chdir(cwd)
 
                 # If current image is locked, then it should be unlocked
                 # and then relocked after the imgdir is changed.  This
@@ -733,6 +761,7 @@
                 else:
                         self.__tmpdir = os.path.join(self.imgdir, "tmp")
                 self._statedir = os.path.join(self.imgdir, "state")
+                self.plandir = os.path.join(self.__tmpdir, "plan")
                 self.update_index_dir()
 
                 self.history.root_dir = self.imgdir
@@ -868,6 +897,9 @@
                         # is an image creation operation (hence the purge).
                         self.save_config()
 
+                # Let the linked image subsystem know that root is moving
+                self.linked._init_root()
+
                 # load image avoid pkg set
                 self.__avoid_set_load()
 
@@ -1375,6 +1407,9 @@
 
                 if refresh_allowed:
                         self.refresh_publishers(progtrack=progtrack)
+                else:
+                        # initialize empty catalogs on disk
+                        self.__rebuild_image_catalogs(progtrack=progtrack)
 
                 self.cfg.set_property("property", "publisher-search-order",
                     [p.prefix for p in pubs])
@@ -1384,9 +1419,27 @@
 
                 self.history.log_operation_end()
 
+        @staticmethod
+        def __allow_liveroot():
+                """Check if we're allowed to access the current live root
+                image."""
+
+                # if we're simulating a live root then allow access to it
+                if DebugValues.get_value("simulate_live_root") or \
+                    "PKG_LIVE_ROOT" in os.environ:
+                        return True
+
+                # check if the user disabled access to the live root
+                if DebugValues.get_value("simulate_no_live_root"):
+                        return False
+                if "PKG_NO_LIVE_ROOT" in os.environ:
+                        return False
+
+                # by default allow access to the live root
+                return True
+
         def is_liveroot(self):
-                return bool(self.root == "/" or
-                    self.root == DebugValues.get_value("simulate_live_root"))
+                return bool(self.root == misc.liveroot())
 
         def is_zone(self):
                 return self.cfg.variants["variant.opensolaris.zone"] == \
@@ -1534,28 +1587,26 @@
                                 yield pub
 
         def get_publisher_ranks(self):
-                """Returns dictionary of publishers by name; each
-                entry contains a tuple of search order index starting
-                at 0, and a boolean indicating whether or not
-                this publisher is "sticky", and a boolean indicating
-                whether or not the publisher is enabled"""
-
-                # automatically make disabled publishers not sticky
-                so = copy.copy(self.cfg.get_property("property",
-                    "publisher-search-order"))
-
-                pubs = list(self.gen_publishers())
-                so.extend((p.prefix for p in pubs if p.prefix not in so))
-
+                """Return dictionary of configured + enabled publishers and
+                unconfigured publishers which still have packages installed.
+
+                Each entry contains a tuple of search order index starting at
+                0, and a boolean indicating whether or not this publisher is
+                "sticky", and a boolean indicating whether or not the
+                publisher is enabled"""
+
+                pubs = self.get_sorted_publishers(inc_disabled=False)
                 ret = dict([
-                    (p.prefix, (so.index(p.prefix), p.sticky, True))
-                    for p in self.gen_publishers()
+                    (pubs[i].prefix, (i, pubs[i].sticky, True))
+                    for i in range(0, len(pubs))
                 ])
 
-                # add any publishers for pkgs that are installed,
-                # but have been deleted... so they're not sticky.
+                # Add any publishers for pkgs that are installed,
+                # but have been deleted. These publishers are implicitly
+                # not-sticky and disabled.
                 for pub in self.get_installed_pubs():
-                        ret.setdefault(pub, (len(ret) + 1, False, False))
+                        i = len(ret)
+                        ret.setdefault(pub, (i, False, False))
                 return ret
 
         def get_highest_ranked_publisher(self):
@@ -1611,12 +1662,43 @@
                         self.remove_publisher_metadata(pub, progtrack=progtrack)
                         self.save_config()
 
-        def get_publishers(self):
+        def get_publishers(self, inc_disabled=True):
+                """Return a dictionary of configured publishers.  This doesn't
+                include unconfigured publishers which still have packages
+                installed."""
+
                 return dict(
                     (p.prefix, p)
-                    for p in self.gen_publishers(inc_disabled=True)
+                    for p in self.gen_publishers(inc_disabled=inc_disabled)
                 )
 
+        def get_sorted_publishers(self, inc_disabled=True):
+                """Return a list of configured publishers sorted by rank.
+                This doesn't include unconfigured publishers which still have
+                packages installed."""
+
+                d = self.get_publishers(inc_disabled=inc_disabled)
+                names = self.cfg.get_property("property",
+                    "publisher-search-order")
+
+                #
+                # If someone has been editing the config file we may have
+                # unranked publishers.  Also, as publishers come and go via the
+                # sysrepo we can end up with configured but unranked
+                # publishers.  In either case just sort unranked publishers
+                # alphabetically.
+                #
+                unranked = set(d) - set(names)
+                ret = [
+                    d[n]
+                    for n in names
+                    if n in d
+                ] + [
+                    d[n]
+                    for n in sorted(unranked)
+                ]
+                return ret
+
         def get_publisher(self, prefix=None, alias=None, origin=None):
                 for pub in self.gen_publishers(inc_disabled=True):
                         if prefix and prefix == pub.prefix:
@@ -1929,7 +2011,7 @@
                 # Only after success should the configuration be saved.
                 self.save_config()
 
-        def verify(self, fmri, progresstracker, **args):
+        def verify(self, fmri, progresstracker, **kwargs):
                 """Generator that returns a tuple of the form (action, errors,
                 warnings, info) if there are any error, warning, or other
                 messages about an action contained within the specified
@@ -1941,7 +2023,7 @@
 
                 'progresstracker' is a ProgressTracker object.
 
-                'args' is a dict of additional keyword arguments to be passed
+                'kwargs' is a dict of additional keyword arguments to be passed
                 to each action verification routine."""
 
                 try:
@@ -1976,7 +2058,7 @@
                 for act in manf.gen_actions(
                     self.list_excludes()):
                         errors, warnings, info = act.verify(self, pfmri=fmri,
-                            **args)
+                            **kwargs)
                         progresstracker.verify_add_progress(fmri)
                         actname = act.distinguished_name()
                         if errors:
@@ -1991,59 +2073,6 @@
                         if errors or warnings or info:
                                 yield act, errors, warnings, info
 
-        def __call_imageplan_evaluate(self, ip):
-                # A plan can be requested without actually performing an
-                # operation on the image.
-                if self.history.operation_name:
-                        self.history.operation_start_state = ip.get_plan()
-
-                try:
-                        ip.evaluate()
-                except apx.ConflictingActionErrors:
-                        # Image plan evaluation can fail because of duplicate
-                        # action discovery, but we still want to be able to
-                        # display and log the solved FMRI changes.
-                        self.imageplan = ip
-                        if self.history.operation_name:
-                                self.history.operation_end_state = \
-                                    "Unevaluated: merged plan had errors\n" + \
-                                    ip.get_plan(full=False)
-                        raise
-
-                self.imageplan = ip
-
-                if self.history.operation_name:
-                        self.history.operation_end_state = \
-                            ip.get_plan(full=False)
-
-        def image_change_varcets(self, variants, facets, progtrack, check_cancelation,
-            noexecute):
-
-                # Allow garbage collection of previous plan.
-                self.imageplan = None
-
-                ip = imageplan.ImagePlan(self, progtrack, check_cancelation,
-                    noexecute=noexecute)
-
-                progtrack.evaluate_start()
-
-                # Always start with most current (on-disk) state information.
-                self.__init_catalogs()
-
-                # compute dict of changing variants
-                if variants:
-                        variants = dict(set(variants.iteritems()) - \
-                           set(self.cfg.variants.iteritems()))
-                # facets are always the entire set
-
-                try:
-                        ip.plan_change_varcets(variants, facets)
-                        self.__call_imageplan_evaluate(ip)
-                except apx.ActionExecutionError, e:
-                        raise
-                except pkg.actions.ActionError, e:
-                        raise apx.InvalidPackageErrors([e])
-
         def image_config_update(self, new_variants, new_facets):
                 """update variants in image config"""
 
@@ -2087,7 +2116,7 @@
 
                 # XXX: This (lambda x: False) is temporary until we move pkg fix
                 # into the api and can actually use the
-                # api::__check_cancelation() function.
+                # api::__check_cancel() function.
                 pps = []
                 for fmri, actions in repairs:
                         logger.info("Repairing: %-50s" % fmri.get_pkg_stem())
@@ -3544,24 +3573,24 @@
 
                 return olist, onames
 
-        def avoid_pkgs(self, pat_list, progtrack, check_cancelation):
+        def avoid_pkgs(self, pat_list, progtrack, check_cancel):
                 """Avoid the specified packages... use pattern matching on
                 names; ignore versions."""
 
                 with self.locked_op("avoid"):
-                        ip = imageplan.ImagePlan(self, progtrack, check_cancelation,
+                        ip = imageplan.ImagePlan(self, progtrack, check_cancel,
                             noexecute=False)
 
                         self._avoid_set_save(self.avoid_set_get() |
                             set(ip.match_user_stems(pat_list, ip.MATCH_UNINSTALLED)))
 
-        def unavoid_pkgs(self, pat_list, progtrack, check_cancelation):
+        def unavoid_pkgs(self, pat_list, progtrack, check_cancel):
                 """Unavoid the specified packages... use pattern matching on
                 names; ignore versions."""
 
                 with self.locked_op("unavoid"):
 
-                        ip = imageplan.ImagePlan(self, progtrack, check_cancelation,
+                        ip = imageplan.ImagePlan(self, progtrack, check_cancel,
                             noexecute=False)
                         unavoid_set = set(ip.match_user_stems(pat_list, ip.MATCH_ALL))
                         current_set = self.avoid_set_get()
@@ -3589,9 +3618,34 @@
                                 ret[group].append(fmri.pkg_name)
                 return ret
 
-
-        def __make_plan_common(self, plan_name, progtrack, check_cancelation,
-            noexecute, *args):
+        def __call_imageplan_evaluate(self, ip):
+                # A plan can be requested without actually performing an
+                # operation on the image.
+                if self.history.operation_name:
+                        self.history.operation_start_state = ip.get_plan()
+
+                try:
+                        ip.evaluate()
+                except apx.ConflictingActionErrors:
+                        # Image plan evaluation can fail because of duplicate
+                        # action discovery, but we still want to be able to
+                        # display and log the solved FMRI changes.
+                        self.imageplan = ip
+                        if self.history.operation_name:
+                                self.history.operation_end_state = \
+                                    "Unevaluated: merged plan had errors\n" + \
+                                    ip.get_plan(full=False)
+                        raise
+
+                self.imageplan = ip
+
+                if self.history.operation_name:
+                        self.history.operation_end_state = \
+                            ip.get_plan(full=False)
+
+        def __make_plan_common(self, _op, _progtrack, _check_cancel,
+            _ip_mode, _noexecute, _ip_noop=False,
+            **kwargs):
                 """Private helper function to perform base plan creation and
                 cleanup.
                 """
@@ -3599,18 +3653,39 @@
                 # Allow garbage collection of previous plan.
                 self.imageplan = None
 
-                ip = imageplan.ImagePlan(self, progtrack, check_cancelation,
-                    noexecute=noexecute)
-
-                progtrack.evaluate_start()
+                ip = imageplan.ImagePlan(self, _progtrack, _check_cancel,
+                    noexecute=_noexecute, mode=_ip_mode)
+
+                _progtrack.evaluate_start()
 
                 # Always start with most current (on-disk) state information.
                 self.__init_catalogs()
 
                 try:
                         try:
-                                pfunc = getattr(ip, "plan_%s" % plan_name)
-                                pfunc(*args)
+                                if _ip_noop:
+                                        ip.plan_noop()
+                                elif _op in [
+                                    pkgdefs.API_OP_ATTACH,
+                                    pkgdefs.API_OP_DETACH,
+                                    pkgdefs.API_OP_SYNC]:
+                                        ip.plan_sync(**kwargs)
+                                elif _op in [
+                                    pkgdefs.API_OP_CHANGE_FACET,
+                                    pkgdefs.API_OP_CHANGE_VARIANT]:
+                                        ip.plan_change_varcets(**kwargs)
+                                elif _op == pkgdefs.API_OP_INSTALL:
+                                        ip.plan_install(**kwargs)
+                                elif _op == pkgdefs.API_OP_REVERT:
+                                        ip.plan_revert(**kwargs)
+                                elif _op == pkgdefs.API_OP_UNINSTALL:
+                                        ip.plan_uninstall(**kwargs)
+                                elif _op == pkgdefs.API_OP_UPDATE:
+                                        ip.plan_update(**kwargs)
+                                else:
+                                        raise RuntimeError(
+                                            "Unknown api op: %s" % _op)
+
                         except apx.ActionExecutionError, e:
                                 raise
                         except pkg.actions.ActionError, e:
@@ -3626,45 +3701,82 @@
                 finally:
                         self.__cleanup_alt_pkg_certs()
 
-        def make_install_plan(self, pkg_list, progtrack, check_cancelation,
-            noexecute, reject_list=EmptyI):
-                """Take a list of packages, specified in pkg_list, and attempt
+        def make_install_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute, pkgs_inst=None, reject_list=None):
+                """Take a list of packages, specified in pkgs_inst, and attempt
                 to assemble an appropriate image plan.  This is a helper
                 routine for some common operations in the client.
                 """
 
-                self.__make_plan_common("install", progtrack, check_cancelation,
-                    noexecute, pkg_list, reject_list)
-
-        def make_uninstall_plan(self, fmri_list, recursive_removal,
-            progtrack, check_cancelation, noexecute):
+                self.__make_plan_common(op, progtrack, check_cancel,
+                    ip_mode, noexecute, pkgs_inst=pkgs_inst,
+                    reject_list=reject_list)
+
+        def make_change_varcets_plan(self, op, progtrack, check_cancel,
+            ip_mode, noexecute, facets=None, reject_list=None,
+            variants=None):
+                """Take a list of variants and/or facets and attempt to
+                assemble an image plan which changes them.  This is a helper
+                routine for some common operations in the client."""
+
+                # compute dict of changing variants
+                if variants:
+                        new = set(variants.iteritems())
+                        cur = set(self.cfg.variants.iteritems())
+                        variants = dict(new - cur)
+
+                self.__make_plan_common(op, progtrack, check_cancel, ip_mode,
+                    noexecute, new_variants=variants, new_facets=facets,
+                    reject_list=reject_list)
+
+        def make_sync_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute, li_pkg_updates=True, reject_list=None):
+                """Attempt to create an appropriate image plan to bring an
+                image in sync with its linked image constraints.  This is a
+                helper routine for some common operations in the client."""
+
+                self.__make_plan_common(op, progtrack, check_cancel, ip_mode,
+                    noexecute, reject_list=reject_list,
+                    li_pkg_updates=li_pkg_updates)
+
+        def make_uninstall_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute, pkgs_to_uninstall, recursive_removal):
                 """Create uninstall plan to remove the specified packages;
                 do so recursively iff recursive_removal is set"""
 
-                self.__make_plan_common("uninstall", progtrack,
-                    check_cancelation, noexecute, fmri_list,
-                    recursive_removal)
-
-        def make_update_plan(self, progtrack, check_cancelation, noexecute,
-            pkg_list=None, reject_list=EmptyI):
+                self.__make_plan_common(op, progtrack, check_cancel,
+                    ip_mode, noexecute, pkgs_to_uninstall=pkgs_to_uninstall,
+                    recursive_removal=recursive_removal)
+
+        def make_update_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute, pkgs_update=None, reject_list=None):
                 """Create a plan to update all packages or the specific ones as
                 far as possible.  This is a helper routine for some common
                 operations in the client.
                 """
 
-                self.__make_plan_common("update", progtrack,
-                    check_cancelation, noexecute, pkg_list, reject_list)
-
-        def make_revert_plan(self, args, tagged, progtrack, check_cancelation,
-            noexecute):
+                self.__make_plan_common(op, progtrack, check_cancel,
+                    ip_mode, noexecute, pkgs_update=pkgs_update,
+                    reject_list=reject_list)
+
+        def make_revert_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute, args, tagged):
                 """Revert the specified files, or all files tagged as specified
                 in args to their manifest definitions.
                 """
 
-                self.__make_plan_common("revert", progtrack, check_cancelation,
-                    noexecute, args, tagged)
-
-        def ipkg_is_up_to_date(self, check_cancelation, noexecute,
+                self.__make_plan_common(op, progtrack, check_cancel,
+                    ip_mode, noexecute, args=args, tagged=tagged)
+
+        def make_noop_plan(self, op, progtrack, check_cancel, ip_mode,
+            noexecute):
+                """Create an image plan that doesn't update the image in any
+                way."""
+
+                self.__make_plan_common(op, progtrack, check_cancel,
+                    ip_mode, noexecute, _ip_noop=True)
+
+        def ipkg_is_up_to_date(self, check_cancel, noexecute,
             refresh_allowed=True, progtrack=None):
                 """Test whether the packaging system is updated to the latest
                 version known to be available for this image."""
@@ -3704,7 +3816,7 @@
                         #
                         newimg = Image(self.__cmddir,
                             allow_ondisk_upgrade=False, allow_ambiguous=True,
-                            progtrack=progtrack)
+                            progtrack=progtrack, cmdpath=self.cmdpath)
                         useimg = True
                         if refresh_allowed:
                                 # If refreshing publisher metadata is allowed,
@@ -3733,8 +3845,9 @@
 
                 # XXX call to progress tracker that the package is being
                 # refreshed
-                img.make_install_plan(["pkg:/package/pkg"], progtrack,
-                    check_cancelation, True)
+                img.make_install_plan(pkgdefs.API_OP_INSTALL, progtrack,
+                    check_cancel, pkgdefs.API_STAGE_DEFAULT, noexecute,
+                    pkgs_inst=["pkg:/package/pkg"])
 
                 return img.imageplan.nothingtodo()
 
@@ -3787,7 +3900,7 @@
 
                 if not self.__avoid_set_altered:
                         return
-                
+
 
                 state_file = os.path.join(self._statedir, "avoid_set")
                 tmp_file   = os.path.join(self._statedir, "avoid_set.new")
--- a/src/modules/client/imageconfig.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/imageconfig.py	Sat May 07 00:25:10 2011 -0700
@@ -21,7 +21,7 @@
 #
 
 #
-# Copyright (c) 2007, 2011 Oracle and/or its affiliates.  All rights reserved.
+# Copyright (c) 2007, 2011, Oracle and/or its affiliates.  All rights reserved.
 #
 
 import errno
@@ -35,6 +35,7 @@
 import pkg.client.api_errors as apx
 import pkg.client.publisher as publisher
 import pkg.client.sigpolicy as sigpolicy
+import pkg.client.linkedimage as li
 import pkg.config as cfg
 import pkg.facet as facet
 import pkg.misc as misc
@@ -172,6 +173,7 @@
                     cfg.PropertyTemplate("^facet\..*", prop_type=cfg.PropBool),
                 ]),
                 cfg.PropertySection("variant", properties=[]),
+
                 cfg.PropertySectionTemplate("^authority_.*", properties=[
                     # Base publisher information.
                     cfg.PropPublisher("alias", value_map=_val_map_none),
@@ -217,6 +219,11 @@
                         value_map=_val_map_none),
                     cfg.Property("repo.sort_policy", value_map=_val_map_none),
                 ]),
+                cfg.PropertySectionTemplate("^linked_.*", properties=[
+                    cfg.Property(li.PROP_NAME, value_map=_val_map_none),
+                    cfg.Property(li.PROP_PATH, value_map=_val_map_none),
+                    cfg.PropBool(li.PROP_RECURSE, default=True),
+                ]),
             ],
         }
 
@@ -227,6 +234,7 @@
                 self.__validate = False
                 self.facets = facet.Facets()
                 self.variants = variant.Variants()
+                self.linked_children = {}
                 cfg.FileConfig.__init__(self, cfgpathname,
                     definitions=self.__defs, overrides=overrides,
                     version=version)
@@ -354,6 +362,16 @@
                 if "variant.opensolaris.zone" not in self.variants:
                         self.variants["variant.opensolaris.zone"] = "global"
 
+                # load linked image child properties
+                for s, v in idx.iteritems():
+                        if not re.match("linked_.*", s):
+                                continue
+                        linked_props = self.read_linked(s, v)
+                        if linked_props:
+                                lin = linked_props[li.PROP_NAME]
+                                assert lin not in self.linked_children
+                                self.linked_children[lin] = linked_props
+
                 # Merge disabled publisher file with configuration; the DA_FILE
                 # is used for compatibility with older clients.
                 dafile = os.path.join(os.path.dirname(self.target), DA_FILE)
@@ -469,6 +487,21 @@
                 for f in self.facets:
                         self.set_property("facet", f, self.facets[f])
 
+                # remove all linked image child configuration
+                idx = self.get_index()
+                for s, v in idx.iteritems():
+                        if not re.match("linked_.*", s):
+                                continue
+                        self.remove_section(s)
+
+                # add sections for any known linked children
+                for lin in sorted(self.linked_children):
+                        linked_props = self.linked_children[lin]
+                        s = "linked_%s" % str(lin)
+                        for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]:
+                                self.set_property(s, k, str(linked_props[k]))
+
+
                 # Transfer current publisher information to configuration.
                 for prefix in self.__publishers:
                         pub = self.__publishers[prefix]
@@ -608,6 +641,45 @@
                                         self.set_property("property", name,
                                             DEF_TOKEN)
 
+        def read_linked(self, s, sidx):
+                """Read linked image properties associated with a child image.
+                Zone linked images do not store their properties here in the
+                image config.
+
+                If we encounter an error while parsing property data, then
+                instead of throwing an error/exception which the user would
+                have no way of fixing, we simply return and ignore the child.
+                The child data will be removed from the config file the next
+                time it gets re-written, and if the user want the child back
+                they'll have to re-attach it."""
+
+                linked_props = dict()
+
+                # Check for known properties
+                for k in [li.PROP_NAME, li.PROP_PATH, li.PROP_RECURSE]:
+                        if k not in sidx:
+                                # we're missing a property
+                                return None
+                        linked_props[k] = sidx[k]
+
+                # all children saved in the config file are pushed based
+                linked_props[li.PROP_MODEL] = li.PV_MODEL_PUSH
+
+                # make sure the name is valid
+                try:
+                        lin = li.LinkedImageName(linked_props[li.PROP_NAME])
+                except apx.MalformedLinkedImageName:
+                        # invalid child image name
+                        return None
+                linked_props[li.PROP_NAME] = lin
+
+                # check if this image is already defined
+                if lin in self.linked_children:
+                        # duplicate child linked image data, first copy wins
+                        return None
+
+                return linked_props
+
         def read_publisher(self, sname, sec_idx):
                 # s is the section of the config file.
                 # publisher block has alias, prefix, origin, and mirrors
@@ -1046,6 +1118,15 @@
 
         facets = property(__get_facets, __set_facets)
 
+        def __get_linked_children(self):
+                return self.img_cfg.linked_children
+
+        def __set_linked_children(self, linked_children):
+                self.img_cfg.linked_children = linked_children
+
+        linked_children = property(__get_linked_children,
+            __set_linked_children)
+
         def __is_sys_pub(self, prefix):
                 """Return whether the publisher with the prefix 'prefix' is a
                 system publisher."""
--- a/src/modules/client/imageplan.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/imageplan.py	Sat May 07 00:25:10 2011 -0700
@@ -29,6 +29,8 @@
 import itertools
 import operator
 import os
+import simplejson as json
+import sys
 import traceback
 
 from pkg.client import global_settings
@@ -46,7 +48,6 @@
 import pkg.misc as misc
 import pkg.search_errors as se
 import pkg.version
-import sys
 
 from pkg.client.debugvalues import DebugValues
 
@@ -61,6 +62,18 @@
 
 ActionPlan = namedtuple("ActionPlan", "p src dst")
 
+IP_MODE_DEFAULT = "default"
+IP_MODE_SAVE    = "save"
+IP_MODE_LOAD    = "load"
+ip_mode_values = frozenset([
+    IP_MODE_DEFAULT,
+    IP_MODE_SAVE,
+    IP_MODE_LOAD,
+])
+
+STATE_FILE_PKGS = "pkgs"
+STATE_FILE_ACTIONS = "actions"
+
 class ImagePlan(object):
         """ImagePlan object contains the plan for changing the image...
         there are separate routines for planning the various types of
@@ -68,20 +81,36 @@
         and buildig lists of removeal, install and update actions
         and their execution is all common code"""
 
+        PLANNED_FIX       = "fix"
+        PLANNED_INSTALL   = "install"
+        PLANNED_NOOP      = "no-op"
         PLANNED_NOTHING   = "no-plan"
-        PLANNED_INSTALL   = "install"
+        PLANNED_REVERT    = "revert"
+        PLANNED_SYNC      = "sync"
         PLANNED_UNINSTALL = "uninstall"
         PLANNED_UPDATE    = "update"
-        PLANNED_FIX       = "fix"
         PLANNED_VARIANT   = "change-variant"
-        PLANNED_REVERT    = "revert"
+        __planned_values  = frozenset([
+                PLANNED_FIX,
+                PLANNED_INSTALL,
+                PLANNED_NOTHING,
+                PLANNED_REVERT,
+                PLANNED_SYNC,
+                PLANNED_UNINSTALL,
+                PLANNED_UPDATE,
+                PLANNED_VARIANT,
+        ])
 
         MATCH_ALL           = 0
         MATCH_INST_VERSIONS = 1
         MATCH_INST_STEMS    = 2
         MATCH_UNINSTALLED   = 3
 
-        def __init__(self, image, progtrack, check_cancel, noexecute=False):
+        def __init__(self, image, progtrack, check_cancel, noexecute=False,
+            mode=IP_MODE_DEFAULT):
+
+                assert mode in ip_mode_values
+
                 self.image = image
                 self.pkg_plans = []
 
@@ -126,7 +155,7 @@
                 self.__old_excludes = image.list_excludes()
                 self.__new_excludes = self.__old_excludes
 
-                self.__check_cancelation = check_cancel
+                self.__check_cancel = check_cancel
 
                 self.__actuators = actuator.Actuator()
 
@@ -137,11 +166,36 @@
                 self.__pkg_solver = None
                 self.__new_variants = None
                 self.__new_facets = None
-                self.__variant_change = False
-                self.__references = {} # dict of fmri -> pattern
+                self.__varcets_change = False
+                self.__match_inst = {} # dict of fmri -> pattern
+                self.__match_rm = {} # dict of fmri -> pattern
+                self.__match_update = {} # dict of fmri -> pattern
                 self.__need_boot_archive = None
                 self.__new_avoid_obs = (None, None)
                 self.__salvaged = []
+                self.__mode = mode
+
+                if noexecute:
+                        return
+
+                # generate filenames for state files
+                self.__planfile = dict()
+                self.__planfile[STATE_FILE_PKGS] = \
+                    "%s.%d.json" % (STATE_FILE_PKGS, image.runid)
+                self.__planfile[STATE_FILE_ACTIONS] = \
+                    "%s.%d.json" % (STATE_FILE_ACTIONS, image.runid)
+
+                # delete any pre-existing state files
+                rm_paths = []
+                if mode in [IP_MODE_DEFAULT, IP_MODE_SAVE]:
+                        rm_paths.append(self.__planfile[STATE_FILE_PKGS])
+                        rm_paths.append(self.__planfile[STATE_FILE_ACTIONS])
+                for path in rm_paths:
+                        try:
+                                os.remove(path)
+                        except OSError, e:
+                                if e.errno != errno.ENOENT:
+                                        raise
 
         def __str__(self):
 
@@ -156,15 +210,15 @@
 
                 s += "Package version changes:\n"
 
-                for pp in self.pkg_plans:
-                        s += "%s -> %s\n" % (pp.origin_fmri, pp.destination_fmri)
+                for oldfmri, newfmri in self.__fmri_changes:
+                        s += "%s -> %s\n" % (oldfmri, newfmri)
 
                 if self.__actuators:
-                        s = s + "Actuators:\n%s\n" % self.__actuators
+                        s = s + "\nActuators:\n%s\n" % self.__actuators
 
                 if self.__old_excludes != self.__new_excludes:
-                        s = s + "Variants/Facet changes: %s -> %s\n" % (self.__old_excludes,
-                            self.__new_excludes)
+                        s = s + "\nVariants/Facet changes:\n %s -> %s\n" % \
+                            (self.__old_excludes, self.__new_excludes)
 
                 return s
 
@@ -228,6 +282,60 @@
 
                 return self._planned_op
 
+        @property
+        def plan_desc(self):
+                """Get the proposed fmri changes."""
+                return self.__fmri_changes
+
+        def __vector_2_fmri_changes(self, installed_dict, vector,
+            li_pkg_updates=True, new_variants=None, new_facets=None):
+                """Given an installed set of packages, and a proposed vector
+                of package changes determine what, if any, changes should be
+                made to the image.  This takes into account different
+                behaviors during operations like variant changes, and updates
+                where the only packages being updated are linked image
+                constraints, etc."""
+
+                cat = self.image.get_catalog(self.image.IMG_CATALOG_KNOWN)
+
+                fmri_updates = []
+                for a, b in ImagePlan.__dicts2fmrichanges(installed_dict,
+                    ImagePlan.__fmris2dict(vector)):
+                        if a != b:
+                                fmri_updates.append((a, b))
+                                continue
+                        if new_facets or new_variants:
+                                #
+                                # In the case of a facet change we reinstall
+                                # packages since any action in a package could
+                                # have a facet attached to it.
+                                #
+                                # In the case of variants packages should
+                                # declare what variants they contain.  Hence,
+                                # theoretically, we should be able to reduce
+                                # the number of package reinstalls by removing
+                                # re-installs of packages that don't declare
+                                # variants.  But unfortunately we've never
+                                # enforced this requirement that packages with
+                                # action variant tags declare their variants.
+                                # So now we're stuck just re-installing every
+                                # pacakge.  sigh.
+                                #
+                                fmri_updates.append((a, b))
+                                continue
+
+                if not fmri_updates:
+                        # no planned fmri changes
+                        return []
+
+                if fmri_updates and not li_pkg_updates:
+                        # oops.  the caller requested no package updates and
+                        # we couldn't satisfy that request.
+                        raise api_errors.PlanCreationException(
+                            pkg_updates_required=fmri_updates)
+
+                return fmri_updates
+
         def __plan_op(self, op):
                 """Private helper method used to mark the start of a planned
                 operation."""
@@ -235,10 +343,22 @@
                 self._planned_op = op
                 self._image_lm = self.image.get_last_modified()
 
-        def plan_install(self, pkgs_to_install, reject_list):
-                """Determine the fmri changes needed to install the specified
-                pkgs"""
-                self.__plan_op(self.PLANNED_INSTALL)
+        def __plan_install_solver(self, li_pkg_updates=True, li_sync_op=False,
+            new_facets=None, new_variants=None, pkgs_inst=None,
+            reject_list=None):
+                """Use the solver to determine the fmri changes needed to
+                install the specified pkgs, sync the specified image, and/or
+                change facets/variants within the current image."""
+
+                if not (new_variants or new_facets or pkgs_inst or li_sync_op):
+                        # nothing to do
+                        self.__fmri_changes = []
+                        return
+
+                if new_variants or new_facets:
+                        self.__varcets_change = True
+                        self.__new_variants = new_variants
+                        self.__new_facets   = new_facets
 
                 # get ranking of publishers
                 pub_ranks = self.image.get_publisher_ranks()
@@ -247,46 +367,107 @@
                 installed_dict = ImagePlan.__fmris2dict(
                     self.image.gen_installed_pkgs())
 
-                # build installed publisher dictionary
-                installed_pubs = dict((
-                    (f.pkg_name, f.get_publisher())
-                    for f in installed_dict.values()
-                ))
-
-                proposed_dict, self.__references = self.match_user_fmris(
-                    pkgs_to_install, self.MATCH_ALL, pub_ranks=pub_ranks,
-                    installed_pubs=installed_pubs,
-                    installed_pkgs=installed_dict)
-
-                reject_set = self.match_user_stems(reject_list, self.MATCH_ALL)
+                if pkgs_inst:
+                        inst_dict, references = self.__match_user_fmris(
+                            pkgs_inst, self.MATCH_ALL, pub_ranks=pub_ranks,
+                            installed_pkgs=installed_dict)
+                        self.__match_inst = references
+                else:
+                        inst_dict = {}
+
+                if reject_list:
+                        reject_set = self.match_user_stems(reject_list,
+                            self.MATCH_ALL)
+                else:
+                        reject_set = set()
+
+                self.__new_excludes = self.image.list_excludes(new_variants,
+                    new_facets)
+
+                if new_variants:
+                        variants = new_variants
+                else:
+                        variants = self.image.get_variants()
 
                 # instantiate solver
                 self.__pkg_solver = pkg_solver.PkgSolver(
                     self.image.get_catalog(self.image.IMG_CATALOG_KNOWN),
                     installed_dict,
                     pub_ranks,
-                    self.image.get_variants(),
+                    variants,
                     self.image.avoid_set_get(),
-                    self.__progtrack
-                    )
+                    self.image.linked.parent_fmris(),
+                    self.image.linked.extra_dep_actions(self.__new_excludes),
+                    self.__progtrack)
 
                 # Solve... will raise exceptions if no solution is found
-                new_vector, self.__new_avoid_obs = self.__pkg_solver.solve_install(
-                    [], proposed_dict, self.__new_excludes,
-                    reject_set=reject_set)
-
-                self.__fmri_changes = [
-                    (a, b)
-                    for a, b in ImagePlan.__dicts2fmrichanges(installed_dict,
-                        ImagePlan.__fmris2dict(new_vector))
-                    if a != b
-                ]
+                new_vector, self.__new_avoid_obs = \
+                    self.__pkg_solver.solve_install([], inst_dict,
+                        new_variants=new_variants, new_facets=new_facets,
+                        excludes=self.__new_excludes, reject_set=reject_set,
+                        relax_all=li_sync_op)
+
+                self.__fmri_changes = self.__vector_2_fmri_changes(
+                    installed_dict, new_vector,
+                    li_pkg_updates=li_pkg_updates,
+                    new_variants=new_variants, new_facets=new_facets)
+
+        def __plan_install(self, li_pkg_updates=True, li_sync_op=False,
+            new_facets=None, new_variants=None, pkgs_inst=None,
+            reject_list=None):
+                """Determine the fmri changes needed to install the specified
+                pkgs, sync the image, and/or change facets/variants within the
+                current image."""
+
+                # someone better have called __plan_op()
+                assert self._planned_op in self.__planned_values
+
+                plandir = self.image.plandir
+
+                if self.__mode in [IP_MODE_DEFAULT, IP_MODE_SAVE]:
+                        self.__plan_install_solver(
+                            li_pkg_updates=li_pkg_updates,
+                            li_sync_op=li_sync_op,
+                            new_facets=new_facets,
+                            new_variants=new_variants,
+                            pkgs_inst=pkgs_inst,
+                            reject_list=reject_list)
+
+                        if self.__mode == IP_MODE_SAVE:
+                                self.__save(STATE_FILE_PKGS)
+                else:
+                        assert self.__mode == IP_MODE_LOAD
+                        self.__fmri_changes = self.__load(STATE_FILE_PKGS)
 
                 self.state = EVALUATED_PKGS
 
+        def plan_install(self, pkgs_inst=None, reject_list=None):
+                """Determine the fmri changes needed to install the specified
+                pkgs"""
+
+                self.__plan_op(self.PLANNED_INSTALL)
+                self.__plan_install(pkgs_inst=pkgs_inst,
+                     reject_list=reject_list)
+
+        def plan_change_varcets(self, new_facets=None, new_variants=None,
+            reject_list=None):
+                """Determine the fmri changes needed to change the specified
+                facets/variants."""
+
+                self.__plan_op(self.PLANNED_VARIANT)
+                self.__plan_install(new_facets=new_facets,
+                     new_variants=new_variants, reject_list=reject_list)
+
+        def plan_sync(self, li_pkg_updates=True, reject_list=None):
+                """Determine the fmri changes needed to sync the image."""
+
+                self.__plan_op(self.PLANNED_SYNC)
+                self.__plan_install(li_pkg_updates=li_pkg_updates,
+                    li_sync_op=True, reject_list=reject_list)
+
         def plan_uninstall(self, pkgs_to_uninstall, recursive_removal=False):
                 self.__plan_op(self.PLANNED_UNINSTALL)
-                proposed_dict, self.__references = self.match_user_fmris(
+                proposed_dict, self.__match_rm = self.__match_user_fmris(
                     pkgs_to_uninstall, self.MATCH_INST_VERSIONS)
                 # merge patterns together
                 proposed_removals = set([
@@ -296,10 +477,8 @@
                 ])
 
                 # build installed dict
-                installed_dict = dict([
-                    (f.pkg_name, f)
-                    for f in self.image.gen_installed_pkgs()
-                ])
+                installed_dict = ImagePlan.__fmris2dict(
+                    self.image.gen_installed_pkgs())
 
                 # instantiate solver
                 self.__pkg_solver = pkg_solver.PkgSolver(
@@ -308,6 +487,8 @@
                     self.image.get_publisher_ranks(),
                     self.image.get_variants(),
                     self.image.avoid_set_get(),
+                    self.image.linked.parent_fmris(),
+                    self.image.linked.extra_dep_actions(self.__new_excludes),
                     self.__progtrack)
 
                 new_vector, self.__new_avoid_obs = self.__pkg_solver.solve_uninstall([],
@@ -322,11 +503,10 @@
 
                 self.state = EVALUATED_PKGS
 
-        def plan_update(self, pkgs_to_update=None, reject_list=None):
-                """Determine the fmri changes needed to update the specified
-                pkgs or all packages if none were specified."""
-                self.__plan_op(self.PLANNED_UPDATE)
-
+        def __plan_update_solver(self, pkgs_update=None, reject_list=None):
+                """Use the solver to determine the fmri changes needed to
+                update the specified pkgs or all packages if none were
+                specified."""
                 # get ranking of publishers
                 pub_ranks = self.image.get_publisher_ranks()
 
@@ -336,22 +516,17 @@
 
                 # If specific packages or patterns were provided, then
                 # determine the proposed set to pass to the solver.
-                proposed_dict = None
-                removal_list = []
-
-                if pkgs_to_update or reject_list:
-                        # build installed publisher dictionary
-                        installed_pubs = dict((
-                            (f.pkg_name, f.get_publisher())
-                            for f in installed_dict.values()
-                        ))
-                if pkgs_to_update:
-                        proposed_dict, self.__references = self.match_user_fmris(
-                            pkgs_to_update, self.MATCH_INST_STEMS, pub_ranks=pub_ranks,
-                            installed_pubs=installed_pubs,
-                            installed_pkgs=installed_dict)
-
-                reject_set = self.match_user_stems(reject_list, self.MATCH_ALL)
+                if pkgs_update:
+                        update_dict, references = self.__match_user_fmris(
+                            pkgs_update, self.MATCH_INST_STEMS,
+                            pub_ranks=pub_ranks, installed_pkgs=installed_dict)
+                        self.__match_update = references
+
+                if reject_list:
+                        reject_set = self.match_user_stems(reject_list,
+                            self.MATCH_ALL)
+                else:
+                        reject_set = set()
 
                 # instantiate solver
                 self.__pkg_solver = pkg_solver.PkgSolver(
@@ -360,26 +535,44 @@
                     pub_ranks,
                     self.image.get_variants(),
                     self.image.avoid_set_get(),
+                    self.image.linked.parent_fmris(),
+                    self.image.linked.extra_dep_actions(self.__new_excludes),
                     self.__progtrack)
 
-                # Solve... will raise exceptions if no solution is found
-                if pkgs_to_update:
-                        new_vector, self.__new_avoid_obs = self.__pkg_solver.solve_update([],
-                            proposed_dict, excludes=self.__new_excludes,
-                            reject_set=reject_set)
+                if pkgs_update:
+                        new_vector, self.__new_avoid_obs = \
+                            self.__pkg_solver.solve_install([],
+                                update_dict, excludes=self.__new_excludes,
+                                reject_set=reject_set,
+                                trim_proposed_installed=False)
                 else:
                         # Updating all installed packages requires a different
                         # solution path.
-                        new_vector, self.__new_avoid_obs = self.__pkg_solver.solve_update_all([],
-                            excludes=self.__new_excludes,
-                            reject_set=reject_set)
-
-                self.__fmri_changes = [
-                    (a, b)
-                    for a, b in ImagePlan.__dicts2fmrichanges(installed_dict,
-                        ImagePlan.__fmris2dict(new_vector))
-                    if a != b
-                ]
+                        new_vector, self.__new_avoid_obs = \
+                            self.__pkg_solver.solve_update_all([],
+                                excludes=self.__new_excludes,
+                                reject_set=reject_set)
+
+                self.__fmri_changes = self.__vector_2_fmri_changes(
+                    installed_dict, new_vector)
+
+        def plan_update(self, pkgs_update=None, reject_list=None):
+                """Determine the fmri changes needed to update the specified
+                pkgs or all packages if none were specified."""
+                self.__plan_op(self.PLANNED_UPDATE)
+
+                plandir = self.image.plandir
+
+                if self.__mode in [IP_MODE_DEFAULT, IP_MODE_SAVE]:
+                        self.__plan_update_solver(
+                            pkgs_update=pkgs_update,
+                            reject_list=reject_list)
+
+                        if self.__mode == IP_MODE_SAVE:
+                                self.__save(STATE_FILE_PKGS)
+                else:
+                        assert self.__mode == IP_MODE_LOAD
+                        self.__fmri_changes = self.__load(STATE_FILE_PKGS)
 
                 self.state = EVALUATED_PKGS
 
@@ -446,9 +639,10 @@
                                         needs_change.append(act)
                         if needs_change:
                                 pp = pkgplan.PkgPlan(self.image,
-                                    self.__progtrack, self.__check_cancelation)
+                                    self.__progtrack, self.__check_cancel)
                                 pp.propose_repair(f, m, needs_change)
-                                pp.evaluate(self.__new_excludes, self.__new_excludes)
+                                pp.evaluate(self.__new_excludes,
+                                    self.__new_excludes)
                                 self.pkg_plans.append(pp)
 
                 self.__fmri_changes = []
@@ -458,6 +652,12 @@
                 """Create the list of pkgs to fix"""
                 self.__plan_op(self.PLANNED_FIX)
 
+        def plan_noop(self):
+                """Create a plan that doesn't change the package contents of
+                the current image."""
+                self.__plan_op(self.PLANNED_NOOP)
+                self.__fmri_changes = []
+                self.state = EVALUATED_PKGS
 
         @staticmethod
         def __fmris2dict(fmri_list):
@@ -473,50 +673,6 @@
                     for k in set(olddict.keys() + newdict.keys())
                 ]
 
-        def plan_change_varcets(self, variants, facets):
-                """Determine the fmri changes needed to change
-                the specified variants/facets"""
-                self.__plan_op(self.PLANNED_VARIANT)
-
-                if variants == None and facets == None: # nothing to do
-                        self.state = EVALUATED_PKGS
-                        return
-
-                self.__variant_change = True
-
-                # build installed dict
-                installed_dict = dict([
-                    (f.pkg_name, f)
-                    for f in self.image.gen_installed_pkgs()
-                ])
-
-                # instantiate solver
-                self.__pkg_solver = pkg_solver.PkgSolver(
-                    self.image.get_catalog(self.image.IMG_CATALOG_KNOWN),
-                    installed_dict,
-                    self.image.get_publisher_ranks(),
-                    self.image.get_variants(),
-                    self.image.avoid_set_get(),
-                    self.__progtrack)
-
-                self.__new_excludes = self.image.list_excludes(variants, facets)
-
-                new_vector, self.__new_avoid_obs = \
-                    self.__pkg_solver.solve_change_varcets([],
-                    variants, facets, self.__new_excludes)
-
-                self.__new_variants = variants
-                self.__new_facets   = facets
-
-                self.__fmri_changes = [
-                    (a, b)
-                    for a, b in ImagePlan.__dicts2fmrichanges(installed_dict,
-                       ImagePlan.__fmris2dict(new_vector))
-                ]
-
-                self.state = EVALUATED_PKGS
-                return
-
         def reboot_needed(self):
                 """Check if evaluated imageplan requires a reboot"""
                 assert self.state >= MERGED_OK
@@ -855,7 +1011,7 @@
                 pp, al = self.__fixups.get(pfmri, (None, []))
                 if pp is None:
                         # XXX The lambda: False is temporary until fix is moved
-                        # into the API and self.__check_cancelation can be used.
+                        # into the API and self.__check_cancel can be used.
                         pp = pkgplan.PkgPlan(self.image, self.__progtrack,
                             lambda: False)
                         self.__fixups[pfmri] = pp, [action]
@@ -1136,7 +1292,7 @@
                 """Return manifest for pfmri"""
                 if pfmri:
                         return self.image.get_manifest(pfmri,
-                            all_variants=all_variants or self.__variant_change,
+                            all_variants=all_variants or self.__varcets_change,
                             intent=intent)
                 else:
                         return manifest.NullFactoredManifest
@@ -1152,23 +1308,32 @@
                 if self.__noexecute:
                         return None, None
 
+                __match_intent = dict()
+                __match_intent.update(self.__match_inst)
+                __match_intent.update(self.__match_rm)
+                __match_intent.update(self.__match_update)
+
                 if new_fmri:
-                        reference = self.__references.get(new_fmri, None)
+                        reference = __match_intent.get(new_fmri, None)
                         # don't leak prev. version info across publishers
                         if old_fmri:
                                 if old_fmri.get_publisher() != \
                                     new_fmri.get_publisher():
                                         old_fmri = "unknown"
                                 else:
-                                        old_fmri = old_fmri.get_fmri(anarchy=True)
-                        new_fmri = new_fmri.get_fmri(anarchy=True)# don't send pub
+                                        old_fmri = \
+                                            old_fmri.get_fmri(anarchy=True)
+                        # don't send pub
+                        new_fmri = new_fmri.get_fmri(anarchy=True)
                 else:
-                        reference = self.__references.get(old_fmri, None)
+                        reference = __match_intent.get(old_fmri, None)
                         # don't try to send intent info to disabled publisher
                         if old_fmri.get_publisher() in enabled_publishers:
-                                old_fmri = old_fmri.get_fmri(anarchy=True)# don't send pub
+                                # don't send pub
+                                old_fmri = old_fmri.get_fmri(anarchy=True)
                         else:
                                 old_fmri = None
+
                 info = {
                     "operation": self._planned_op,
                     "old_fmri" : old_fmri,
@@ -1221,12 +1386,20 @@
                         # plan is no longer valid.
                         raise api_errors.InvalidPlanError()
 
-                if self.image.has_boot_archive():
-                        ramdisk_prefixes = tuple(self.image.get_ramdisk_filelist())
-                        if not ramdisk_prefixes:
-                                self.__need_boot_archive = False
+                plandir = self.image.plandir
+                if self.__mode in [IP_MODE_DEFAULT, IP_MODE_SAVE]:
+                        self.evaluate_pkg_plans()
+                        if self.__mode == IP_MODE_SAVE:
+                                self.__save(STATE_FILE_ACTIONS)
                 else:
-                        self.__need_boot_archive = False
+                        assert self.__mode == IP_MODE_LOAD
+                        self.pkg_plans = self.__load(STATE_FILE_ACTIONS)
+
+                self.merge_actions()
+
+        def evaluate_pkg_plans(self):
+                """Internal helper function that does the work of converting
+                fmri changes into pkg plans."""
 
                 # prefetch manifests
                 prefetch_mfsts = [] # manifest, intents to be prefetched
@@ -1254,32 +1427,26 @@
 
                 # No longer needed.
                 del enabled_publishers
-                self.__references = None
+                self.__match_inst = {}
+                self.__match_rm = {}
 
                 self.image.transport.prefetch_manifests(prefetch_mfsts,
-                    ccancel=self.__check_cancelation)
+                    ccancel=self.__check_cancel)
 
                 # No longer needed.
                 del prefetch_mfsts
 
                 for oldfmri, old_in, newfmri, new_in in eval_list:
                         pp = pkgplan.PkgPlan(self.image, self.__progtrack,
-                            self.__check_cancelation)
-
-                        pp.propose(oldfmri,
-                            self.__get_manifest(oldfmri, old_in),
+                            self.__check_cancel)
+
+                        pp.propose(
+                            oldfmri, self.__get_manifest(oldfmri, old_in),
                             newfmri, self.__get_manifest(newfmri, new_in,
                             all_variants=True))
 
                         pp.evaluate(self.__old_excludes, self.__new_excludes)
 
-                        if pp.origin_fmri and pp.destination_fmri:
-                                self.__target_update_count += 1
-                        elif pp.destination_fmri:
-                                self.__target_install_count += 1
-                        elif pp.origin_fmri:
-                                self.__target_removal_count += 1
-
                         self.pkg_plans.append(pp)
                         pp = None
                         self.__progtrack.evaluate_progress()
@@ -1287,6 +1454,32 @@
                 # No longer needed.
                 del eval_list
 
+        def merge_actions(self):
+                """Given a set of fmri changes and their associated pkg plan,
+                merge all the resultant actions for the packages being
+                updated."""
+
+                if self.image.has_boot_archive():
+                        ramdisk_prefixes = tuple(
+                            self.image.get_ramdisk_filelist())
+                        if not ramdisk_prefixes:
+                                self.__need_boot_archive = False
+                else:
+                        self.__need_boot_archive = False
+
+                # now combine all actions together to create a synthetic
+                # single step upgrade operation, and handle editable
+                # files moving from package to package.  See theory
+                # comment in execute, below.
+
+                for pp in self.pkg_plans:
+                        if pp.origin_fmri and pp.destination_fmri:
+                                self.__target_update_count += 1
+                        elif pp.destination_fmri:
+                                self.__target_install_count += 1
+                        elif pp.origin_fmri:
+                                self.__target_removal_count += 1
+
                 # we now have a workable set of pkgplans to add/upgrade/remove
                 # now combine all actions together to create a synthetic single
                 # step upgrade operation, and handle editable files moving from
@@ -1623,8 +1816,7 @@
                 self.state = EVALUATED_OK
 
         def nothingtodo(self):
-                """ Test whether this image plan contains any work to do """
-                # handle case w/ -n no verbose
+                """Test whether this image plan contains any work to do """
                 if self.state == EVALUATED_PKGS:
                         return not (self.__fmri_changes or self.__new_variants
                             or self.__new_facets or self.pkg_plans)
@@ -1885,7 +2077,7 @@
                                     executed_pp, self.__progtrack)
 
                                 # write out variant changes to the image config
-                                if self.__variant_change:
+                                if self.__varcets_change:
                                         self.image.image_config_update(
                                             self.__new_variants,
                                             self.__new_facets)
@@ -2172,9 +2364,8 @@
 
                 return set(matchdict.keys())
 
-        def match_user_fmris(self, patterns, match_type,
-            pub_ranks=misc.EmptyDict, installed_pubs=misc.EmptyDict,
-            installed_pkgs=misc.EmptyDict):
+        def __match_user_fmris(self, patterns, match_type,
+            pub_ranks=misc.EmptyDict, installed_pkgs=misc.EmptyDict):
                 """Given a user-specified list of patterns, return a dictionary
                 of matching fmris:
 
@@ -2240,7 +2431,14 @@
 
                 # ignore dups
                 patterns = list(set(patterns))
-                # print patterns, match_type, pub_ranks, installed_pubs
+
+                installed_pubs = misc.EmptyDict
+                if match_type in [self.MATCH_INST_STEMS, self.MATCH_ALL]:
+                        # build installed publisher dictionary
+                        installed_pubs = dict((
+                            (f.pkg_name, f.get_publisher())
+                            for f in installed_pkgs.values()
+                        ))
 
                 # figure out which kind of matching rules to employ
                 brelease = self.image.attrs["Build-Release"]
@@ -2303,6 +2501,7 @@
                                 pubs.append(fmri.publisher)
                                 versions.append(fmri.version)
                                 fmris.append(fmri)
+
                         except (pkg.fmri.FmriError,
                             pkg.version.VersionError), e:
                                 illegals.append(e)
@@ -2590,3 +2789,104 @@
                         ])
 
                 return proposed_dict, references
+
+        # We must save the planned fmri change or the pkg_plans
+        class __save_encode(json.JSONEncoder):
+
+                def default(self, obj):
+                        """Required routine that overrides the default base
+                        class version and attempts to serialize 'obj' when
+                        attempting to save 'obj' json format."""
+
+                        if isinstance(obj, pkg.fmri.PkgFmri):
+                                return str(obj)
+                        if isinstance(obj, pkg.client.pkgplan.PkgPlan):
+                                return obj.getstate()
+                        return json.JSONEncoder.default(self, obj)
+
+        def __save(self, filename):
+                """Json encode fmri changes or pkg plans and save them to a
+                file."""
+
+                assert filename in [STATE_FILE_PKGS, STATE_FILE_ACTIONS]
+                if not os.path.isdir(self.image.plandir):
+                        os.makedirs(self.image.plandir)
+
+                # write the output file to a temporary file
+                pathtmp = os.path.join(self.image.plandir,
+                    "%s.%d.%d.json" % (filename, self.image.runid, os.getpid()))
+                oflags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY
+                try:
+                        fobj = os.fdopen(os.open(pathtmp, oflags, 0644), "wb")
+                        if filename == STATE_FILE_PKGS:
+                                json.dump(self.__fmri_changes, fobj,
+                                    encoding="utf-8", cls=self.__save_encode)
+                        elif filename == STATE_FILE_ACTIONS:
+                                json.dump(self.pkg_plans, fobj,
+                                    encoding="utf-8", cls=self.__save_encode)
+                        fobj.close()
+                except OSError, e:
+                        raise api_errors._convert_error(e)
+
+                # atomically create the desired file
+                path = os.path.join(self.image.plandir,
+                    "%s.%d.json" % (filename, self.image.runid))
+
+                try:
+                        os.rename(pathtmp, path)
+                except OSError, e:
+                        raise api_errors._convert_error(e)
+
+        def __load_decode(self, dct):
+                """Routine that takes a loaded json dictionary and converts
+                any keys and/or values from unicode strings into ascii
+                strings.  (Keys or values of other types are left
+                unchanged.)"""
+
+                # Replace unicode keys/values with strings
+                rvdct = {}
+                for k, v in dct.items():
+                        # unicode must die
+                        if type(k) == unicode:
+                                k = k.encode("utf-8")
+                        if type(v) == unicode:
+                                v = v.encode("utf-8")
+                        rvdct[k] = v
+                return rvdct
+
+        def __load(self, filename):
+                """Load Json encoded fmri changes or pkg plans."""
+
+                assert filename in [STATE_FILE_PKGS, STATE_FILE_ACTIONS]
+
+                path = os.path.join(self.image.plandir,
+                    "%s.%d.json" % (filename, self.image.runid))
+
+                # load the json file
+                try:
+                        with open(path) as fobj:
+                                # fobj will be closed when we exit this loop
+                                data = json.load(fobj, encoding="utf-8",
+                                    object_hook=self.__load_decode)
+                except OSError, e:
+                        raise api_errors._convert_error(e)
+
+                if filename == STATE_FILE_PKGS:
+                        assert(type(data) == list)
+                        tuples = []
+                        for (old, new) in data:
+                                if old:
+                                        old = pkg.fmri.PkgFmri(str(old))
+                                if new:
+                                        new = pkg.fmri.PkgFmri(str(new))
+                                tuples.append((old, new))
+                        return tuples
+
+                elif filename == STATE_FILE_ACTIONS:
+                        pkg_plans = []
+                        for item in data:
+                                pp = pkgplan.PkgPlan(self.image,
+                                    self.__progtrack, self.__check_cancel)
+                                pp.setstate(item)
+                                pkg_plans.append(pp)
+                        return pkg_plans
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/client/linkedimage/__init__.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+Initialize the linked image module.  Consumers of linked image functionality
+should never import anything other than "pkg.client.linkedimage".  Here we'll
+import everything in linkedimage/common.py into our namespace (since that's
+where most of our code lives.) We'll also hard code which linked image plugin
+modules are supported below.
+"""
+
+# standard python classes
+import inspect
+import os
+
+# import linked image common code
+# W0401 Wildcard import
+# W0403 Relative import
+from common import * # pylint: disable-msg=W0401,W0403
+
+# names of linked image plugins
+p_types = [ "zone", "system" ]
+
+# map of plugin names to their associated LinkedImagePlugin derived class
+p_classes = {}
+
+# map of plugin names to their associated LinkedImageChildPlugin derived class
+p_classes_child = {}
+
+# initialize temporary variables
+_modname = _module = _nvlist = _classes = _i = None
+
+# initialize p_classes and p_classes_child
+for _modname in p_types:
+        _module = __import__("%s.%s" % (__name__, _modname),
+            globals(), locals(), [_modname])
+
+        # Find all the classes actually defined in this module.
+        _nvlist = inspect.getmembers(_module, inspect.isclass)
+        _classes = [
+            _i[1]
+            for _i in _nvlist
+            if _i[1].__module__ == ("%s.%s" % (__name__, _modname))
+        ]
+
+        for _i in _classes:
+                if LinkedImagePlugin in inspect.getmro(_i):
+                        p_classes[_modname] = _i
+                elif LinkedImageChildPlugin in inspect.getmro(_i):
+                        p_classes_child[_modname] = _i
+                else:
+                        raise RuntimeError("""
+Invalid linked image plugin class '%s' for plugin '%s'""" %
+                             (_i.__name__, _modname))
+
+# Clean up temporary variables
+del _modname, _module, _nvlist, _classes, _i
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/client/linkedimage/common.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,2739 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+Linked image module classes.
+
+The following classes for manipulating linked images are defined here:
+
+        LinkedImage
+        LinkedImageChild
+
+The following template classes which linked image plugins should inherit from
+are also defined here:
+
+        LinkedImagePlugin
+        LinkedImageChildPlugin
+
+"""
+
+#
+# Too many lines in module; pylint: disable-msg=C0302
+#
+
+# standard python classes
+import operator
+import os
+import simplejson as json
+import sys
+
+# pkg classes
+import pkg.actions
+import pkg.altroot as ar
+import pkg.catalog
+import pkg.client.api_errors as apx
+import pkg.client.bootenv as bootenv
+import pkg.client.linkedimage
+import pkg.client.pkgdefs as pkgdefs
+import pkg.client.pkgplan as pkgplan
+import pkg.fmri
+import pkg.misc as misc
+import pkg.pkgsubprocess
+import pkg.version
+
+from pkg.client import global_settings
+from pkg.client.debugvalues import DebugValues
+from pkg.misc import EmptyI
+
+logger = global_settings.logger
+
+# linked image relationship types (returned by LinkedImage.list_related())
+REL_PARENT = "parent"
+REL_SELF   = "self"
+REL_CHILD  = "child"
+
+# linked image properties
+PROP_NAME           = "li-name"
+PROP_ALTROOT        = "li-altroot"
+PROP_PARENT_PATH    = "li-parent"
+PROP_PATH           = "li-path"
+PROP_MODEL          = "li-model"
+PROP_RECURSE        = "li-recurse"
+prop_values         = frozenset([
+    PROP_ALTROOT,
+    PROP_NAME,
+    PROP_PATH,
+    PROP_MODEL,
+    PROP_PARENT_PATH,
+    PROP_RECURSE,
+])
+
+# properties that never get saved
+temporal_props = frozenset([
+    PROP_ALTROOT,
+])
+
+# special linked image name values (PROP_NAME)
+PV_NAME_NONE = "-"
+
+# linked image model values (PROP_MODEL)
+PV_MODEL_PUSH = "push"
+PV_MODEL_PULL = "pull"
+model_values = frozenset([
+    PV_MODEL_PUSH,
+    PV_MODEL_PULL,
+])
+
+# files which contain linked image data
+__DATA_DIR     = "linked"
+PATH_PPKGS     = os.path.join(__DATA_DIR, "linked_ppkgs")
+PATH_PROP      = os.path.join(__DATA_DIR, "linked_prop")
+PATH_PUBS      = os.path.join(__DATA_DIR, "linked_ppubs")
+
+class LinkedImagePlugin(object):
+        """This class is a template that all linked image plugins should
+        inherit from.  Linked image plugins derived from this class are
+        designed to manage linked aspects of the current image (vs managing
+        linked aspects of a specific child of the current image).
+
+        All the interfaces exported by this class and its descendants are
+        private to the linked image subsystem and should not be called
+        directly by any other subsystem."""
+
+        # functionality flags
+        support_attach = False
+        support_detach = False
+
+        def __init__(self, pname, linked):
+                """Initialize a linked image plugin.
+
+                'pname' is the name of the plugin class derived from this
+                base class.
+
+                'linked' is the LinkedImage object initializing this plugin.
+                """
+
+                return
+
+        def init_root(self, old_altroot):
+                """Called when the path to the image that we're operating on
+                is changing.  This normally occurs when we clone an image
+                after we've planned and prepared to do an operation."""
+
+                # return value: None
+                raise NotImplementedError
+
+        def get_altroot(self):
+                """If the linked image plugin is able to detect that we're
+                operating on an image in an alternate root then return the
+                path of the alternate root."""
+
+                # return value: string or None
+                raise NotImplementedError
+
+        def get_child_list(self, nocache=False):
+                """Return a list of the child images associated with the
+                current image."""
+
+                # return value: list
+                raise NotImplementedError
+
+        def get_child_props(self, lin):
+                """Get the linked image properties associated with the
+                specified child image."""
+
+                # return value: dict
+                raise NotImplementedError
+
+        def attach_child_inmemory(self, props, allow_relink):
+                """Attach the specified child image. This operation should
+                only affect in-memory state of the current image. It should
+                not update any persistent on-disk linked image state or access
+                the child image in any way. This routine should assume that
+                the linked image properties have already been validated."""
+
+                # return value: None
+                raise NotImplementedError
+
+        def detach_child_inmemory(self, lin):
+                """Detach the specified child image. This operation should
+                only affect in-memory state of the current image. It should
+                not update any persistent on-disk linked image state or access
+                the child image in any way."""
+
+                # return value: None
+                raise NotImplementedError
+
+        def sync_children_todisk(self):
+                """Sync out the in-memory linked image state of this image to
+                disk."""
+
+                # return value: tuple:
+                #    (pkgdefs EXIT_* return value, exception object or None)
+                raise NotImplementedError
+
+
+class LinkedImageChildPlugin(object):
+        """This class is a template that all linked image child plugins should
+        inherit from.  Linked image child plugins derived from this class are
+        designed to manage linked aspects of children of the current image.
+        (vs managing linked aspects of the current image itself).
+
+        All the interfaces exported by this class and its descendants are
+        private to the linked image subsystem and should not be called
+        directly by any other subsystem."""
+
+        def __init__(self, lic):
+                """Initialize a linked image child plugin.
+
+                'lic' is the LinkedImageChild object initializing this plugin.
+                """
+
+                return
+
+        def munge_props(self, props):
+                """Called before a parent image saves linked image properties
+                into a child image.  Gives the linked image child plugin a
+                chance to update the properties that will be saved within the
+                child image."""
+
+                # return value: None
+                raise NotImplementedError
+
+
+class LinkedImageName(object):
+        """A class for naming child linked images.  Linked image names are
+        used for all child images (and only child images), and they encode two
+        pieces of information.  The name of the plugin used to manage the
+        image and a linked image name.  Linked image names have the following
+        format "<linked_image_plugin>:<linked_image_name>"."""
+
+        def __init__(self, name):
+                assert type(name) == str
+
+                self.lin_type = self.lin_name = None
+
+                try:
+                        self.lin_type, self.lin_name = name.split(":")
+                except ValueError:
+                        raise apx.LinkedImageException(lin_malformed=name)
+
+                if len(self.lin_type) == 0 or len(self.lin_name) == 0 :
+                        raise apx.LinkedImageException(lin_malformed=name)
+
+                if self.lin_type not in pkg.client.linkedimage.p_types:
+                        raise apx.LinkedImageException(lin_malformed=name)
+
+        def __str__(self):
+                return "%s:%s" % (self.lin_type, self.lin_name)
+
+        def __len__(self):
+                return len(self.__str__())
+
+        def __cmp__(self, other):
+                assert (type(self) == LinkedImageName)
+                if not other:
+                        return 1
+                if other == PV_NAME_NONE:
+                        return 1
+                assert type(other) == LinkedImageName
+                c = cmp(self.lin_type, other.lin_type)
+                if c != 0:
+                        return c
+                c = cmp(self.lin_name, other.lin_name)
+                return c
+
+        def __hash__(self):
+                return hash(str(self))
+
+        def __eq__(self, other):
+                if not isinstance(other, LinkedImageName):
+                        return False
+
+                return str(self) == str(other)
+
+        def __ne__(self, other):
+                return not self.__eq__(other)
+
+class LinkedImage(object):
+        """A LinkedImage object is used to manage the linked image aspects of
+        an image.  This image could be a child image, a parent image, or both
+        a parent and child.  This object allows for access to linked image
+        properties and also provides routines that allow operations to be
+        performed on child images."""
+
+        # Too many instance attributes; pylint: disable-msg=R0902
+        # Too many public methods; pylint: disable-msg=R0904
+
+        # Properties that a parent image with push children should save locally.
+        __parent_props = frozenset([
+            PROP_PATH
+        ])
+
+        # Properties that a pull child image should save locally.
+        __pull_child_props = frozenset([
+            PROP_NAME,
+            PROP_PATH,
+            PROP_MODEL,
+            PROP_PARENT_PATH,
+        ])
+
+        # Properties that a parent image with push children should save in
+        # those children.
+        __push_child_props = frozenset([
+            PROP_NAME,
+            PROP_PATH,
+            PROP_MODEL,
+            PROP_RECURSE,
+        ])
+
+        # make sure there is no invalid overlap
+        assert not (temporal_props & (
+            __parent_props |
+            __pull_child_props |
+            __push_child_props))
+
+        def __init__(self, img):
+                """Initialize a new LinkedImage object."""
+
+                # globals
+                self.__img = img
+
+                # variables reset by self.__update_props()
+                self.__props = dict()
+                self.__ppkgs = frozenset()
+                self.__ppubs = None
+                self.__pimg = None
+
+                # variables reset by self.reset_recurse()
+                self.__lic_list = []
+
+                # variables reset by self._init_root()
+                self.__root = None
+                self.__path_ppkgs = None
+                self.__path_prop = None
+                self.__path_ppubs = None
+
+                # initialize with no properties
+                self.__update_props()
+                self.reset_recurse()
+
+                # initialize linked image plugin objects
+                self.__plugins = dict()
+                for p in pkg.client.linkedimage.p_types:
+                        self.__plugins[p] = \
+                            pkg.client.linkedimage.p_classes[p](p, self)
+
+                # if the image has a path setup, we can load data from it.
+                if self.__img.imgdir:
+                        self._init_root()
+
+        @property
+        def image(self):
+                """Get a pointer to the image object associated with this
+                linked image object."""
+                return self.__img
+
+        def _init_root(self):
+                """Called during object initialization and by
+                image.py`__set_root() to let us know when we're changing the
+                root location of the image.  (The only time we change the root
+                path is when we change BEs during operations which clone BEs.
+                So when this happens most of our metadata shouldn't actually
+                change.)"""
+
+                assert self.__img.root, \
+                    "root = %s" % str(self.__img.root)
+                assert self.__img.imgdir, \
+                    "imgdir = %s" % str(self.__img.imgdir)
+
+                # save the old root image path
+                old_root = None
+                if self.__root:
+                        old_root = self.__root
+
+                # figure out the new root image path
+                new_root = self.__img.root.rstrip(os.sep)
+                if new_root == "":
+                        new_root = os.sep
+
+                # initialize paths for linked image data files
+                self.__root = new_root
+                imgdir = self.__img.imgdir.rstrip(os.sep)
+                self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS)
+                self.__path_prop = os.path.join(imgdir, PATH_PROP)
+                self.__path_ppubs = os.path.join(imgdir, PATH_PUBS)
+
+                # if this isn't a reset, then load data from the image
+                if not old_root:
+                        self.__load()
+
+                # if we're not linked or we're not changing root paths, we're done
+                if not old_root or not self.__props:
+                        return
+
+                # get the old altroot directory
+                old_altroot = self.altroot()
+
+                # update the altroot property
+                self.__set_altroot(self.__props, old_root=old_root)
+
+                # Tell linked image plugins about the updated paths
+                # Unused variable 'plugin'; pylint: disable-msg=W0612
+                for plugin, lip in self.__plugins.iteritems():
+                # pylint: enable-msg=W0612
+                        lip.init_root(old_altroot)
+
+                # Tell linked image children about the updated paths
+                for lic in self.__lic_list:
+                        lic.child_init_root(old_altroot)
+
+        def __update_props(self, props=None):
+                """Internal helper routine used when we want to update any
+                linked image properties.  This routine sanity checks the
+                new properties, updates them, and resets any cached state
+                that is affected by property values."""
+
+                if props == None:
+                        props = dict()
+                elif props:
+                        self.__verify_props(props)
+
+                        # all temporal properties must exist
+                        assert (temporal_props - set(props)) == set(), \
+                            "%s - %s == set()" % (temporal_props, set(props))
+
+                # update state
+                self.__props = props
+                self.__ppkgs = frozenset()
+                self.__ppubs = None
+                self.__pimg = None
+
+        def __verify_props(self, props):
+                """Perform internal consistency checks for a set of linked
+                image properties.  Don't update any state."""
+
+                props_set = set(props)
+
+                # if we're not a child image ourselves, then we're done
+                if (props_set - temporal_props) == self.__parent_props:
+                        return props
+
+                # make sure PROP_NAME was specified
+                if PROP_NAME not in props:
+                        _rterr(path=self.__root,
+                            missing_props=[PROP_NAME])
+
+                # validate the linked image name
+                try:
+                        lin = LinkedImageName(str(props[PROP_NAME]))
+                except apx.LinkedImageException:
+                        _rterr(path=self.__root,
+                            bad_prop=(PROP_NAME, props[PROP_NAME]))
+
+                if lin.lin_type not in self.__plugins:
+                        _rterr(path=self.__root, lin=lin,
+                            bad_lin_type=lin.lin_type)
+
+                # make sure PROP_MODEL was specified
+                if PROP_MODEL not in props:
+                        _rterr(path=self.__root, lin=lin,
+                            missing_props=[PROP_MODEL])
+
+                model = props[PROP_MODEL]
+                if model not in model_values:
+                        _rterr(path=self.__root, lin=lin,
+                            bad_prop=(PROP_MODEL, model))
+
+                if model == PV_MODEL_PUSH:
+                        missing = self.__push_child_props - props_set
+                        if missing:
+                                _rterr(path=self.__root, lin=lin,
+                                    missing_props=missing)
+
+                if model == PV_MODEL_PULL:
+                        missing = self.__pull_child_props - props_set
+                        if missing:
+                                _rterr(path=self.__root, lin=lin,
+                                    missing_props=missing)
+
+        @staticmethod
+        def __unset_altroot(props):
+                """Given a set of linked image properties, strip out any
+                altroot properties.  This involves removing the altroot
+                component from the image path property.  This is normally done
+                before we write image properties to disk."""
+
+                # get the current altroot
+                altroot = props[PROP_ALTROOT]
+
+                # remove it from the image path
+                props[PROP_PATH] = rm_altroot_path(
+                    props[PROP_PATH], altroot)
+
+                if PROP_PARENT_PATH in props:
+                        # remove it from the parent image path
+                        props[PROP_PARENT_PATH] = rm_altroot_path(
+                            props[PROP_PARENT_PATH], altroot)
+
+                # delete the current altroot
+                del props[PROP_ALTROOT]
+
        def __set_altroot(self, props, old_root=None):
                """Given a set of linked image properties, the image paths
                stored within those properties may not match the actual image
                paths if we're executing within an alternate root environment.
                We try to detect this condition here, and if this situation
                occurs we update the linked image paths to reflect the current
                image paths and we fabricate a new linked image altroot
                property that points to the new path prefix that was
                pre-pended to the image paths.

                'props' is a linked image property dictionary; it is
                updated in place.

                'old_root' is an optional previously-recorded image root;
                when supplied, the altroot already stored in 'props' is
                stripped from it to recover the real image path before the
                new altroot is computed against self.__root."""

                # we may have to update the parent image path as well
                p_path = None
                if PROP_PARENT_PATH in props:
                        p_path = props[PROP_PARENT_PATH]

                if old_root:
                        # get the old altroot
                        altroot = props[PROP_ALTROOT]

                        # remove the altroot from the image paths
                        path = rm_altroot_path(old_root, altroot)
                        if p_path:
                                p_path = rm_altroot_path(p_path, altroot)

                        # derive the new altroot by comparing the current
                        # root against the real (altroot-free) image path
                        altroot = get_altroot_path(self.__root, path)
                else:
                        path = props[PROP_PATH]
                        altroot = get_altroot_path(self.__root, path)

                # update properties with altroot
                props[PROP_ALTROOT] = altroot
                props[PROP_PATH] = add_altroot_path(path, altroot)
                if p_path:
                        props[PROP_PARENT_PATH] = \
                            add_altroot_path(p_path, altroot)
+
+        def __guess_altroot(self):
+                """If we're initializing parent linked image properties for
+                the first time (or if those properties somehow got deleted)
+                then we need to know if the parent image that we're currently
+                operating on is located within an alternate root.  One way to
+                do this is to ask our linked image plugins if they can
+                determine this (the zones linked image plugin usually can
+                if the image is a global zone)."""
+
+                # ask each plugin if we're operating in an alternate root
+                p_altroots = []
+                for plugin, lip in self.__plugins.iteritems():
+                        p_altroot = lip.get_altroot()
+                        if p_altroot:
+                                p_altroots.append((plugin, p_altroot))
+
+                if not p_altroots:
+                        # no altroot suggested by plugins
+                        return os.sep
+
+                # check for conflicting altroots
+                altroots = list(set([
+                        p_altroot
+                        # Unused variable; pylint: disable-msg=W0612
+                        for pname, p_altroot in p_altroots
+                        # pylint: enable-msg=W0612
+                ]))
+
+                if len(altroots) == 1:
+                        # we have an altroot from our plugins
+                        return altroots[0]
+
+                # we have conflicting altroots, time to die
+                _rterr(li=self, multiple_altroots=p_altroots)
+
+        def __fabricate_parent_props(self):
+                """Fabricate the minimum set of properties required for a
+                parent image."""
+
+                props = dict()
+                props[PROP_PATH] = self.__img.root
+                props[PROP_ALTROOT] = self.__guess_altroot()
+                return props
+
        def __load_ondisk_props(self, tmp=True):
                """Load linked image properties from disk and return them to
                the caller.  We sanity check the properties, but we don't
                update any internal linked image state.

                Returns the property dictionary (with PROP_NAME, if
                present, converted into a LinkedImageName object), or None
                if no property file exists on disk.

                'tmp' determines if we should read/write to the official
                linked image metadata files, or if we should access temporary
                versions (which have ".<runid>" appended to them."""

                path = self.__path_prop
                path_tmp = "%s.%d" % (self.__path_prop, self.__img.runid)

                # read the linked image properties from disk, preferring a
                # temporary (in-progress operation) copy if one exists
                if tmp and path_exists(path_tmp):
                        path = path_tmp
                        props = load_data(path)
                elif path_exists(path):
                        props = load_data(path)
                else:
                        return None

                # make sure there are no saved temporal properties
                # (temporal properties are runtime-only and must never be
                # written out)
                assert not (set(props) & temporal_props)

                if PROP_NAME in props:
                        # convert PROP_NAME into a linked image name obj
                        name = props[PROP_NAME]
                        try:
                                lin = LinkedImageName(name)
                                props[PROP_NAME] = lin
                        except apx.LinkedImageException:
                                # a malformed on-disk name is fatal
                                _rterr(path=self.__root,
                                    bad_prop=(PROP_NAME, name))

                # sanity check our properties
                self.__verify_props(props)
                return props
+
+        def __load_ondisk_ppkgs(self, tmp=True):
+                """Load linked image parent constraints from disk.
+                Don't update any internal state.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them."""
+
+                path = "%s.%d" % (self.__path_ppkgs, self.__img.runid)
+                if tmp and path_exists(path):
+                        return frozenset([
+                            pkg.fmri.PkgFmri(str(s))
+                            for s in load_data(path, missing_val=EmptyI)
+                        ])
+
+                path = self.__path_ppkgs
+                if path_exists(path):
+                        return frozenset([
+                            pkg.fmri.PkgFmri(str(s))
+                            for s in load_data(path, missing_val=EmptyI)
+                        ])
+
+                return None
+
+        def __load_ondisk_ppubs(self, tmp=True):
+                """Load linked image parent publishers from disk.
+                Don't update any internal state.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them."""
+
+                path = "%s.%d" % (self.__path_ppubs, self.__img.runid)
+                if tmp and path_exists(path):
+                        return load_data(path)
+
+                path = self.__path_ppubs
+                if path_exists(path):
+                        return load_data(path)
+
+                return None
+
        def __load(self):
                """Load linked image properties and constraints from disk.
                Update the linked image internal state with the loaded data."""

                # load properties
                props = self.__load_ondisk_props()
                if not props and not self.isparent():
                        # we're not linked
                        return

                if not props:
                        # there are no properties on disk but we're a parent
                        # image so we're missing properties.  oops.  rather
                        # than die (which would prevent the user from being
                        # able to fix the problem) fabricate up some props
                        # with reasonably guessed values which the user can
                        # subsequently change and/or fix.
                        props = self.__fabricate_parent_props()
                else:
                        # patch up the image paths (and fabricate the
                        # altroot property) in case we're running within
                        # an alternate root
                        self.__set_altroot(props)

                self.__update_props(props)

                # constraints data is mandatory for a child image; if it's
                # missing our metadata is corrupt and we can't continue.
                ppkgs = self.__load_ondisk_ppkgs()
                if self.ischild() and ppkgs == None:
                        _rterr(li=self, err="Constraints data missing.")
                if self.ischild():
                        self.__ppkgs = ppkgs

                # load parent publisher data. if publisher data is missing
                # continue along and we'll just skip the publisher checks,
                # it's better than failing and preventing any image updates.
                self.__ppubs = self.__load_ondisk_ppubs()
+
+        @staticmethod
+        def __validate_prop_recurse(v):
+                """Verify property value for PROP_RECURSE."""
+                if v in [True, False]:
+                        return True
+                if type(v) == str and v.lower() in ["true", "false"]:
+                        return True
+                return False
+
+        def __validate_attach_props(self, model, props):
+                """Validate user supplied linked image attach properties.
+                Don't update any internal state."""
+
+                # make sure that only attach time options have been
+                # specified, and that they have allowed values.
+                validate_props = {
+                        PROP_RECURSE: self.__validate_prop_recurse
+                }
+
+                if model == PV_MODEL_PUSH:
+                        allowed_props = self.__push_child_props
+                else:
+                        assert model == PV_MODEL_PULL
+                        allowed_props = self.__pull_child_props
+
+                errs = []
+
+                # check each property the user specified.
+                for k, v in props.iteritems():
+
+                        # did the user specify an allowable property?
+                        if k not in validate_props:
+                                errs.append(apx.LinkedImageException(
+                                    attach_bad_prop=k))
+                                continue
+
+                        # did the user specify a valid property value?
+                        if not validate_props[k](v):
+                                errs.append(apx.LinkedImageException(
+                                    attach_bad_prop_value=(k, v)))
+                                continue
+
+                        # is this property valid for this type of image?
+                        if k not in allowed_props:
+                                errs.append(apx.LinkedImageException(
+                                    attach_bad_prop=k))
+                                continue
+
+                if errs:
+                        raise apx.LinkedImageException(bundle=errs)
+
+        def __init_pimg(self, path):
+                """Initialize an Image object which can be used to access a
+                parent image."""
+
+                try:
+                        os.stat(path)
+                except OSError:
+                        raise apx.LinkedImageException(parent_bad_path=path)
+
+                try:
+                        pimg = self.__img.alloc(
+                            root=path,
+                            runid=self.__img.runid,
+                            user_provided_dir=True,
+                            cmdpath=self.__img.cmdpath)
+                except apx.ImageNotFoundException:
+                        raise apx.LinkedImageException(parent_bad_img=path)
+
+                return pimg
+
+        def altroot(self):
+                """Return the altroot path prefix for the current image."""
+
+                return self.__props.get(PROP_ALTROOT, os.sep)
+
        def nothingtodo(self):
                """If our in-memory linked image state matches the on-disk
                linked image state then there's nothing to do.  If the state
                differs then there is stuff to do since the new state needs
                to be saved to disk.  Returns True when the states match."""

                # compare in-memory and on-disk properties.  tmp=False so
                # that we compare against the official metadata files, not
                # any temporary in-progress copies.
                li_ondisk_props = self.__load_ondisk_props(tmp=False)
                if li_ondisk_props == None:
                        li_ondisk_props = dict()
                # normalize the in-memory properties the same way syncmd()
                # would before writing them out: strip altroot prefixes
                # and temporal properties.
                li_inmemory_props = self.__props.copy()
                if li_inmemory_props:
                        self.__unset_altroot(li_inmemory_props)
                li_inmemory_props = rm_dict_ent(li_inmemory_props,
                    temporal_props)
                if li_ondisk_props != li_inmemory_props:
                        return False

                # compare in-memory and on-disk constraints
                li_ondisk_ppkgs = self.__load_ondisk_ppkgs(tmp=False)
                if li_ondisk_ppkgs == None:
                        li_ondisk_ppkgs = frozenset()
                if self.__ppkgs != li_ondisk_ppkgs:
                        return False

                # compare in-memory and on-disk parent publishers
                li_ondisk_ppubs = self.__load_ondisk_ppubs(tmp=False)
                if self.__ppubs != li_ondisk_ppubs:
                        return False

                return True
+
+        def get_pubs(self, img=None):
+                """Return publisher information for the specified image.  If
+                no image is specified we return publisher information for the
+                current image.
+
+                Publisher information is returned in a sorted list of lists
+                of the format:
+                        <publisher name>, <sticky>
+
+                Where:
+                        <publisher name> is a string
+                        <sticky> is a boolean
+
+                The tuples are sorted by publisher rank.
+                """
+
+                # default to ourselves
+                if img == None:
+                        img = self.__img
+
+                # get a sorted list of the images publishers
+                pubs = img.get_sorted_publishers(inc_disabled=False)
+
+                rv = []
+                for p in pubs:
+                        rv.append([str(p), p.sticky])
+                return rv
+
+        def check_pubs(self, op):
+                """If we're a child image's, verify that the parent image
+                publisher configuration is a subset of the child images
+                publisher configuration.  This means that all publishers
+                configured within the parent image must also be configured
+                within the child image with the same:
+
+                        - publisher rank
+                        - sticky and disabled settings
+
+                The child image may have additional publishers configured but
+                they must all be lower ranked than the parent's publishers.
+                """
+
+                # if we're not a child image then bail
+                if not self.ischild():
+                        return
+
+                # if we're using the sysrepo then don't bother
+                if self.__img.cfg.get_policy("use-system-repo"):
+                        return
+
+                if op in [pkgdefs.API_OP_DETACH]:
+                        # we don't need to do a pubcheck for detach
+                        return
+
+                pubs = self.get_pubs()
+                ppubs = self.__ppubs
+
+                if ppubs == None:
+                        # parent publisher data is missing, press on and hope
+                        # for the best.
+                        return
+
+                # child image needs at least as many publishers as the parent
+                if len(pubs) < len(ppubs):
+                        raise apx.PlanCreationException(
+                            linked_pub_error=(pubs, ppubs))
+
+                # check rank, sticky, and disabled settings
+                for (p, pp) in zip(pubs, ppubs):
+                        if p == pp:
+                                continue
+                        raise apx.PlanCreationException(
+                            linked_pub_error=(pubs, ppubs))
+
        def syncmd_from_parent(self, op=None):
                """Update linked image constraint, publisher data, and
                state from our parent image.

                'op' is the pkgdefs.API_OP_* operation being planned (if
                any); during attach the refreshed metadata is kept in
                memory only rather than being written to disk here."""

                if not self.ischild():
                        # we're not a child image, nothing to do
                        return

                if self.__props[PROP_MODEL] == PV_MODEL_PUSH:
                        # parent pushes data to us, nothing to do
                        return

                # initalize the parent image
                if not self.__pimg:
                        path = self.__props[PROP_PARENT_PATH]
                        self.__pimg = self.__init_pimg(path)

                # generate new constraints from the set of packages
                # installed in the parent image
                cati = self.__pimg.get_catalog(self.__img.IMG_CATALOG_INSTALLED)
                ppkgs = frozenset(cati.fmris())

                # generate new publishers
                ppubs = self.get_pubs(img=self.__pimg)

                # check if anything has changed
                need_sync = False

                if self.__ppkgs != ppkgs:
                        # we have new constraints
                        self.__ppkgs = ppkgs
                        need_sync = True

                if self.__ppubs != ppubs:
                        # parent has new publishers
                        self.__ppubs = ppubs
                        need_sync = True

                if not need_sync:
                        # nothing changed
                        return

                # if we're not planning an image attach operation then write
                # the linked image metadata to disk.
                if op != pkgdefs.API_OP_ATTACH:
                        self.syncmd()
+
+        def syncmd(self):
+                """Write in-memory linked image state to disk."""
+
+                # create a list of metadata file paths
+                paths = [self.__path_ppkgs, self.__path_prop,
+                    self.__path_ppubs]
+
+                # cleanup any temporary files
+                for path in paths:
+                        path = "%s.%d" % (path, self.__img.runid)
+                        path_unlink(path, noent_ok=True)
+
+                if not self.ischild() and not self.isparent():
+                        # we're no longer linked; delete metadata
+                        for path in paths:
+                                path_unlink(path, noent_ok=True)
+                        return
+
+                # save our properties, but first remove altroot path prefixes
+                # and any temporal properties
+                props = self.__props.copy()
+                self.__unset_altroot(props)
+                props = rm_dict_ent(props, temporal_props)
+                save_data(self.__path_prop, props)
+
+                if not self.ischild():
+                        # if we're not a child we don't have constraints
+                        path_unlink(self.__path_ppkgs, noent_ok=True)
+                        return
+
+                # we're a child so save our latest constraints
+                save_data(self.__path_ppkgs, self.__ppkgs)
+                save_data(self.__path_ppubs, self.__ppubs)
+
+        @property
+        def child_name(self):
+                """If the current image is a child image, this function
+                returns a linked image name object which represents the name
+                of the current image."""
+
+                if not self.ischild():
+                        raise self.__apx_not_child()
+                return self.__props[PROP_NAME]
+
+        def ischild(self):
+                """Indicates whether the current image is a child image."""
+
+                return PROP_NAME in self.__props
+
+        def isparent(self):
+                """Indicates whether the current image is a parent image."""
+
+                return len(self.__list_children()) > 0
+
+        def child_props(self, lin=None):
+                """Return a dictionary which represents the linked image
+                properties associated with a linked image.
+
+                'lin' is the name of the child image.  If lin is None then
+                the current image is assumed to be a linked image and it's
+                properties are returned.
+
+                Always returns a copy of the properties in case the caller
+                tries to update them."""
+
+                if lin == None:
+                        # If we're not linked we'll return an empty
+                        # dictionary.  That's ok.
+                        return self.__props.copy()
+
+                # make sure the specified child exists
+                self.__verify_child_name(lin, raise_except=True)
+
+                # make a copy of the props in case they are updated
+                lip = self.__plugins[lin.lin_type]
+                props = lip.get_child_props(lin).copy()
+
+                # add temporal properties
+                props[PROP_ALTROOT] = self.altroot()
+                return props
+
+        def __apx_not_child(self):
+                """Raise an exception because the current image is not a child
+                image."""
+
+                return apx.LinkedImageException(self_not_child=self.__root)
+
+        def __verify_child_name(self, lin, raise_except=False):
+                """Check if a specific child image exists."""
+
+                assert type(lin) == LinkedImageName, \
+                    "%s == LinkedImageName" % type(lin)
+
+                for i in self.__list_children():
+                        if i[0] == lin:
+                                return True
+
+                if raise_except:
+                        raise apx.LinkedImageException(child_unknown=lin)
+                return False
+
+        def parent_fmris(self):
+                """A set of the fmris installed in our parent image."""
+
+                if not self.ischild():
+                        # We return None since frozenset() would indicate
+                        # that there are no packages installed in the parent
+                        # image.
+                        return None
+
+                return self.__ppkgs
+
+        def parse_name(self, name, allow_unknown=False):
+                """Given a string representing a linked image child name,
+                returns linked image name object representing the same name.
+
+                'allow_unknown' indicates whether the name must represent
+                actual children or simply be syntactically correct."""
+
+                assert type(name) == str
+
+                lin = LinkedImageName(name)
+                if not allow_unknown:
+                        self.__verify_child_name(lin, raise_except=True)
+                return lin
+
+        def __list_children(self, li_ignore=None):
+                """Returns a list of linked child images associated with the
+                current image.
+
+                'li_ignore' see list_related() for a description.
+
+                The returned value is a list of tuples where each tuple
+                contains (<li name>, <li path>)."""
+
+                if li_ignore == []:
+                        # ignore all children
+                        return []
+
+                li_children = [
+                    entry
+                    for p in pkg.client.linkedimage.p_types
+                    for entry in self.__plugins[p].get_child_list()
+                ]
+
+                # sort by linked image name
+                li_children = sorted(li_children, key=operator.itemgetter(0))
+
+                if li_ignore == None:
+                        # don't ignore any children
+                        return li_children
+
+                li_all = set([lin for lin, path in li_children])
+                errs = [
+                    apx.LinkedImageException(child_unknown=lin)
+                    for lin in (set(li_ignore) - li_all)
+                ]
+                if errs:
+                        raise apx.LinkedImageException(bundle=errs)
+
+                return [
+                    (lin, path)
+                    for lin, path in li_children
+                    if lin not in li_ignore
+                ]
+
+        def list_related(self, li_ignore=None):
+                """Returns a list of linked images associated with the
+                current image.  This includes both child and parent images.
+
+                'li_ignore' is either None or a list.  If it's None (the
+                default), all children will be listed.  If it's an empty list
+                no children will be listed.  Otherwise, any children listed
+                in li_ignore will be ommited from the results.
+
+                The returned value is a list of tuples where each tuple
+                contains (<li name>, <relationship>, <li path>)."""
+
+                li_children = self.__list_children(li_ignore=li_ignore)
+                li_list = [
+                    (lin, REL_CHILD, path)
+                    for lin, path in li_children
+                ]
+
+                if not li_list and not self.ischild():
+                        # we're not linked
+                        return []
+
+                # we're linked so append ourself to the list
+                lin = PV_NAME_NONE
+                if self.ischild():
+                        lin = self.child_name
+                li_self = (lin, REL_SELF, self.__props[PROP_PATH])
+                li_list.append(li_self)
+
+                # if we have a path to our parent then append that as well.
+                if PROP_PARENT_PATH in self.__props:
+                        li_parent = (PV_NAME_NONE, REL_PARENT,
+                            self.__props[PROP_PARENT_PATH])
+                        li_list.append(li_parent)
+
+                # sort by linked image name
+                li_list = sorted(li_list, key=operator.itemgetter(0))
+
+                return li_list
+
        def attach_parent(self, lin, path, props, allow_relink=False,
            force=False):
                """Attach the current image to a parent image as a pull
                child.  We only update in-memory state; nothing is written to
                disk, to sync linked image state to disk call syncmd.

                'lin' is a LinkedImageName object naming this child.

                'path' is an absolute path to the parent image.

                'props' is an optional dictionary of attach-time linked
                image properties (may be None).

                'allow_relink' permits attaching even when we're already a
                child image.

                'force' skips the plugin attach-support check."""

                assert type(lin) == LinkedImageName
                assert type(path) == str
                assert props == None or type(props) == dict, \
                    "type(props) == %s" % type(props)
                if props == None:
                        props = dict()

                lip = self.__plugins[lin.lin_type]

                # refuse to re-link an existing child unless told otherwise
                if self.ischild() and not allow_relink:
                        raise apx.LinkedImageException(self_linked=self.__root)

                if not lip.support_attach and not force:
                        raise apx.LinkedImageException(
                            attach_parent_notsup=lin.lin_type)

                # Path must be an absolute path.
                if not os.path.isabs(path):
                        raise apx.LinkedImageException(parent_path_notabs=path)

                # we don't bother to cleanup the path to the parent image here
                # because when we allocate an Image object for the parent
                # image, it will do that work for us.
                pimg = self.__init_pimg(path)

                # make sure we're not linking to ourselves
                if self.__img.root == pimg.root:
                        raise apx.LinkedImageException(link_to_self=True)

                # make sure we're not linking the root image as a child
                if self.__img.root == misc.liveroot():
                        raise apx.LinkedImageException(
                            attach_root_as_child=True)

                # get the cleaned up parent image path.
                path = pimg.root

                # If we're in an alternate root, the parent must also be within
                # that alternate root.
                if not check_altroot_path(path, self.altroot()):
                        raise apx.LinkedImageException(
                            parent_not_in_altroot=(path, self.altroot()))

                # reject any user properties that don't apply to pull children
                self.__validate_attach_props(PV_MODEL_PULL, props)

                # make a copy of the properties and fill in the core linked
                # image properties for a pull child
                props = props.copy()
                props[PROP_NAME] = lin
                props[PROP_PARENT_PATH] = path
                props[PROP_PATH] = self.__img.root
                props[PROP_MODEL] = PV_MODEL_PULL
                props[PROP_ALTROOT] = self.altroot()

                # fill in any plugin-specific attach property defaults the
                # user didn't specify
                for k, v in lip.attach_props_def.iteritems():
                        if k not in self.__pull_child_props:
                                # this prop doesn't apply to pull images
                                continue
                        if k not in props:
                                props[k] = v

                self.__update_props(props)
                self.__pimg = pimg
+
+        def detach_parent(self, force=False):
+                """We only update in memory state; nothing is written to
+                disk, to sync linked image state to disk call syncmd."""
+
+                lin = self.child_name
+                lip = self.__plugins[lin.lin_type]
+                if not force:
+                        if self.__props[PROP_MODEL] == PV_MODEL_PUSH:
+                                raise apx.LinkedImageException(
+                                    detach_from_parent=self.__root)
+
+                        if not lip.support_detach:
+                                raise apx.LinkedImageException(
+                                    detach_parent_notsup=lin.lin_type)
+
+                # Generate a new set of linked image properties.  If we have
+                # no children then we don't need any more properties.
+                props = None
+
+                # If we have children we'll need to keep some properties.
+                if self.isparent():
+                        strip = prop_values - \
+                            (self.__parent_props | temporal_props)
+                        props = rm_dict_ent(self.__props, strip)
+
+                # Update our linked image properties.
+                self.__update_props(props)
+
        def __insync(self):
                """Determine if an image is in sync with its constraints.
                Returns True if every installed package that declares a
                parent dependency is satisfied by the version installed in
                the parent image."""

                assert self.ischild()

                cat = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED)
                # extra dependency actions fabricated at runtime, keyed
                # by fmri
                extra_deps = self.extra_dep_actions(installed_catalog=True)
                excludes = [ self.__img.cfg.variants.allow_action ]

                # find all the installed packages with parent dependencies;
                # only those constrain us against the parent image.
                sync_fmris = []

                for fmri in cat.fmris():
                        # get parent dependencies from the catalog
                        parent_deps = [
                            a
                            for a in cat.get_entry_actions(fmri,
                                [pkg.catalog.Catalog.DEPENDENCY],
                                excludes=excludes)
                            if a.name == "depend" and \
                                a.attrs["type"] == "parent"
                        ]

                        # get extra parent dependencies
                        parent_deps += [
                            a
                            for a in extra_deps.get(fmri, [])
                            if a.name == "depend" and \
                                a.attrs["type"] == "parent"
                        ]

                        if parent_deps:
                                sync_fmris.append(fmri)

                if not sync_fmris:
                        # No packages to sync
                        return True

                # create a dictionary of packages installed in the parent
                ppkgs_dict = dict([
                        (fmri.pkg_name, fmri)
                        for fmri in self.parent_fmris()
                ])

                # each constrained package must be installed in the parent
                # at the same version, or at a version of which the
                # parent's copy is a CONSTRAINT_AUTO successor.
                for fmri in sync_fmris:
                        if fmri.pkg_name not in ppkgs_dict:
                                return False
                        pfmri = ppkgs_dict[fmri.pkg_name]
                        if fmri.version != pfmri.version and \
                            not pfmri.version.is_successor(fmri.version,
                                pkg.version.CONSTRAINT_AUTO):
                                return False
                return True
+
+        def audit_self(self, li_parent_sync=True):
+                """If the current image is a child image, this function
+                audits the current image to see if it's in sync with its
+                parent.
+
+                'li_parent_sync' when true, first refresh our linked image
+                constraint metadata from the parent image.
+
+                Returns a (rv, err) tuple: (EXIT_OK, None) when in sync,
+                (EXIT_DIVERGED, LinkedImageException) when out of sync, or
+                another exit code with the exception that prevented the
+                audit."""
+
+                if not self.ischild():
+                        return (pkgdefs.EXIT_OOPS, self.__apx_not_child())
+
+                try:
+                        if li_parent_sync:
+                                # try to refresh linked image constraints from
+                                # the parent image.
+                                self.syncmd_from_parent()
+
+                except apx.LinkedImageException, e:
+                        return (e.lix_exitrv, e)
+
+                if not self.__insync():
+                        e = apx.LinkedImageException(
+                            child_diverged=self.child_name)
+                        return (pkgdefs.EXIT_DIVERGED, e)
+
+                return (pkgdefs.EXIT_OK, None)
+
+        @staticmethod
+        def __rvdict2rv(rvdict, rv_map=None):
+                """Internal helper function that takes a dictionary returned
+                from an operations on multiple children and merges the results
+                into a single return code.
+
+                'rvdict' maps LinkedImageName -> (rv, err) per child.
+
+                'rv_map' is an optional list of (rv_set, rv) tuples: if the
+                set of return values seen across all children exactly equals
+                rv_set, the merged return value is rv (with no error).
+
+                Returns a single (rv, err) tuple; errors from unmapped
+                return values are bundled into one LinkedImageException."""
+
+                assert not rvdict or type(rvdict) == dict
+                for k, (rv, err) in rvdict.iteritems():
+                        assert type(k) == LinkedImageName
+                        assert type(rv) == int
+                        assert err is None or \
+                            isinstance(err, apx.LinkedImageException)
+                if type(rv_map) != type(None):
+                        assert type(rv_map) == list
+                        for (rv_set, rv) in rv_map:
+                                assert(type(rv_set) == set)
+                                assert(type(rv) == int)
+
+                if not rvdict:
+                        # no children were operated on
+                        return (pkgdefs.EXIT_OK, None)
+
+                if not rv_map:
+                        # default mapping: all-OK merges to OK
+                        rv_map = [(set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK)]
+
+                rv_mapped = set()
+                rv_seen = set([rv for (rv, e) in rvdict.itervalues()])
+                for (rv_map_set, rv_map_rv) in rv_map:
+                        if (rv_seen == rv_map_set):
+                                return (rv_map_rv, None)
+                        # keep track of all the return values that are mapped
+                        rv_mapped |= rv_map_set
+
+                # the mappings better have included pkgdefs.EXIT_OK
+                assert pkgdefs.EXIT_OK in rv_mapped
+
+                # if we had errors for unmapped return values, bundle them up
+                errs = [
+                        e
+                        for (rv, e) in rvdict.itervalues()
+                        if e and rv not in rv_mapped
+                ]
+                if errs:
+                        err = apx.LinkedImageException(bundle=errs)
+                else:
+                        err = None
+
+                if len(rv_seen) == 1:
+                        # we have one consistent return value
+                        return (list(rv_seen)[0], err)
+
+                # mixed, unmapped return values
+                return (pkgdefs.EXIT_PARTIAL, err)
+
+        def audit_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operations on multiple children and merges the results into
+                a single return code.
+
+                For audits: any combination of EXIT_OK and EXIT_DIVERGED
+                containing a divergence merges to EXIT_DIVERGED."""
+
+                rv_map = [
+                    (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK),
+                    (set([pkgdefs.EXIT_DIVERGED]), pkgdefs.EXIT_DIVERGED),
+                    (set([pkgdefs.EXIT_OK, pkgdefs.EXIT_DIVERGED]),
+                        pkgdefs.EXIT_DIVERGED),
+                ]
+                return self.__rvdict2rv(rvdict, rv_map)
+
+        def sync_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operations on multiple children and merges the results into
+                a single return code.
+
+                For syncs: a mix of EXIT_OK and EXIT_NOP merges to EXIT_OK;
+                all-NOP stays EXIT_NOP."""
+
+                rv_map = [
+                    (set([pkgdefs.EXIT_OK]), pkgdefs.EXIT_OK),
+                    (set([pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]), pkgdefs.EXIT_OK),
+                    (set([pkgdefs.EXIT_NOP]), pkgdefs.EXIT_NOP),
+                ]
+                return self.__rvdict2rv(rvdict, rv_map)
+
+        def detach_rvdict2rv(self, rvdict):
+                """Convenience function that takes a dictionary returned from
+                an operations on multiple children and merges the results into
+                a single return code.
+
+                For detaches: uses the default mapping (all-OK merges to
+                EXIT_OK)."""
+
+                return self.__rvdict2rv(rvdict)
+
+        def __validate_child_attach(self, lin, path, props,
+            allow_relink=False):
+                """Sanity check the parameters associated with a child image
+                that we are trying to attach.
+
+                'lin' is the name for the new child.
+
+                'path' is the (absolute) path to the child image.
+
+                'props' are the proposed linked image properties.
+
+                'allow_relink' permits re-attaching a child that is already
+                linked.
+
+                Raises a LinkedImageException describing the first problem
+                found; returns nothing on success."""
+
+                assert type(lin) == LinkedImageName
+                assert type(props) == dict
+                assert type(path) == str
+
+                # check the name to make sure it doesn't already exist
+                if self.__verify_child_name(lin) and not allow_relink:
+                        raise apx.LinkedImageException(child_dup=lin)
+
+                self.__validate_attach_props(PV_MODEL_PUSH, props)
+
+                # Path must be an absolute path.
+                if not os.path.isabs(path):
+                        raise apx.LinkedImageException(child_path_notabs=path)
+
+                # If we're in an alternate root, the child must also be within
+                # that alternate root
+                if not check_altroot_path(path, self.altroot()):
+                        raise apx.LinkedImageException(
+                            child_not_in_altroot=(path, self.altroot()))
+
+                # path must be an image
+                try:
+                        img_prefix = ar.ar_img_prefix(path)
+                except OSError:
+                        raise apx.LinkedImageException(child_path_eaccess=path)
+                if not img_prefix:
+                        raise apx.LinkedImageException(child_bad_img=path)
+
+                # Does the parent image (ourselves) reside in clonable BE?
+                # Unused variable 'be_uuid'; pylint: disable-msg=W0612
+                (be_name, be_uuid) = bootenv.BootEnv.get_be_name(self.__root)
+                # pylint: enable-msg=W0612
+                if be_name:
+                        img_is_clonable = True
+                else:
+                        img_is_clonable = False
+
+                # If the parent image is clonable then the new child image
+                # must be nested within the parents filesystem namespace.
+                path = path.rstrip(os.sep) + os.sep
+                p_root = self.__root.rstrip(os.sep) + os.sep
+                if img_is_clonable and not path.startswith(p_root):
+                        raise apx.LinkedImageException(
+                            child_not_nested=(path, p_root))
+
+                # Find the common parent directory of the both parent and the
+                # child image.
+                dir_common = os.path.commonprefix([p_root, path])
+                # NOTE(review): str.rstrip() returns a new string and the
+                # result is discarded here, so this statement is a no-op and
+                # dir_common keeps its trailing separator — confirm whether
+                # the loops below rely on that (e.g. to terminate when
+                # dir_common is os.sep).
+                dir_common.rstrip(os.sep)
+
+                # Make sure there are no additional images in between the
+                # parent and the child. (Ie, prevent linking of images if one
+                # of the images is nested within another unrelated image.)
+                # This is done by looking at all the parent directories for
+                # both the parent and the child image until we reach a common
+                # ancestor.
+
+                # First check the parent directories of the child.
+                d = os.path.dirname(path.rstrip(os.sep))
+                while d != dir_common and d.startswith(dir_common):
+                        try:
+                                tmp = ar.ar_img_prefix(d)
+                        except OSError, e:
+                                # W0212 Access to a protected member
+                                # pylint: disable-msg=W0212
+                                raise apx._convert_error(e)
+                        if not tmp:
+                                d = os.path.dirname(d)
+                                continue
+                        raise apx.LinkedImageException(child_nested=(path, d))
+
+                # Then check the parent directories of the parent.
+                d = os.path.dirname(p_root.rstrip(os.sep))
+                while d != dir_common and d.startswith(dir_common):
+                        try:
+                                tmp = ar.ar_img_prefix(d)
+                        except OSError, e:
+                                # W0212 Access to a protected member
+                                # pylint: disable-msg=W0212
+                                raise apx._convert_error(e)
+                        if not tmp:
+                                d = os.path.dirname(d)
+                                continue
+                        raise apx.LinkedImageException(child_nested=(path, d))
+
+                # Child image should not already be linked
+                img_li_data_props = os.path.join(img_prefix, PATH_PROP)
+                try:
+                        exists = ar.ar_exists(path, img_li_data_props)
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+                if exists and not allow_relink:
+                        raise apx.LinkedImageException(img_linked=path)
+
+        def attach_child(self, lin, path, props,
+            accept=False, allow_relink=False, force=False, li_md_only=False,
+            li_pkg_updates=True, noexecute=False, progtrack=None,
+            refresh_catalogs=True, show_licenses=False, update_index=True):
+                """Attach an image as a child to the current image (the
+                current image will become a parent image. This operation
+                results in attempting to sync the child image with the parent
+                image.
+
+                Returns a (rv, err) tuple where rv is a pkgdefs exit code and
+                err is None or a LinkedImageException."""
+
+                # Too many arguments; pylint: disable-msg=R0913
+                # Too many return statements; pylint: disable-msg=R0911
+
+                assert type(lin) == LinkedImageName
+                assert type(path) == str
+                assert props == None or type(props) == dict, \
+                    "type(props) == %s" % type(props)
+                if props == None:
+                        props = dict()
+
+                # metadata-only attach implies no package updates
+                if li_md_only:
+                        li_pkg_updates = False
+
+                lip = self.__plugins[lin.lin_type]
+                if not lip.support_attach and not force:
+                        e = apx.LinkedImageException(
+                            attach_child_notsup=lin.lin_type)
+                        return (e.lix_exitrv, e)
+
+                # Path must be an absolute path.
+                if not os.path.isabs(path):
+                        e = apx.LinkedImageException(child_path_notabs=path)
+                        return (e.lix_exitrv, e)
+
+                # cleanup specified path: chdir into it and back so that
+                # getcwd() gives us a canonical form of the path.
+                cwd = os.getcwd()
+                try:
+                        os.chdir(path)
+                except OSError, e:
+                        e = apx.LinkedImageException(child_path_eaccess=path)
+                        return (e.lix_exitrv, e)
+                path = os.getcwd()
+                os.chdir(cwd)
+
+                # make sure we're not linking to ourselves
+                # NOTE(review): unlike the validation failures above, this
+                # check and the next raise instead of returning (rv, e) —
+                # confirm callers expect exceptions for these two cases.
+                if self.__img.root == path:
+                        raise apx.LinkedImageException(link_to_self=True)
+
+                # make sure we're not linking the root image as a child
+                if path == misc.liveroot():
+                        raise apx.LinkedImageException(
+                            attach_root_as_child=True)
+
+                # if the current image isn't linked yet then we need to
+                # generate some linked image properties for ourselves
+                if PROP_PATH not in self.__props:
+                        p_props = self.__fabricate_parent_props()
+                        self.__update_props(p_props)
+
+                # sanity check the input
+                try:
+                        self.__validate_child_attach(lin, path, props,
+                            allow_relink=allow_relink)
+                except apx.LinkedImageException, e:
+                        return (e.lix_exitrv, e)
+
+                # make a copy of the options and start updating them
+                child_props = props.copy()
+                child_props[PROP_NAME] = lin
+                child_props[PROP_PATH] = path
+                child_props[PROP_MODEL] = PV_MODEL_PUSH
+                child_props[PROP_ALTROOT] = self.altroot()
+
+                # fill in any missing defaults options
+                for k, v in lip.attach_props_def.iteritems():
+                        if k not in child_props:
+                                child_props[k] = v
+
+                # attach the child in memory
+                lip.attach_child_inmemory(child_props, allow_relink)
+
+                if noexecute and li_md_only:
+                        # we've validated parameters, nothing else to do
+                        return (pkgdefs.EXIT_OK, None)
+
+                # update the child
+                try:
+                        lic = LinkedImageChild(self, lin)
+                except apx.LinkedImageException, e:
+                        return (e.lix_exitrv, e)
+
+                rv, e = self.__sync_child(lin, lic,
+                    accept=accept, li_attach_sync=True, li_md_only=li_md_only,
+                    li_pkg_updates=li_pkg_updates, noexecute=noexecute,
+                    progtrack=progtrack, refresh_catalogs=refresh_catalogs,
+                    show_licenses=show_licenses, update_index=update_index)
+
+                assert isinstance(e, (type(None), apx.LinkedImageException))
+
+                if rv not in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]:
+                        return (rv, e)
+
+                if noexecute:
+                        # if noexecute then we're done
+                        return (pkgdefs.EXIT_OK, None)
+
+                # save child image properties
+                rv, e = lip.sync_children_todisk()
+                assert isinstance(e, (type(None), apx.LinkedImageException))
+                if e:
+                        return (pkgdefs.EXIT_OOPS, e)
+
+                # save parent image properties
+                self.syncmd()
+
+                return (pkgdefs.EXIT_OK, None)
+
+        def audit_children(self, lin_list, **kwargs):
+                """Audit one or more children of the current image to see if
+                they are in sync with this image.
+
+                An empty 'lin_list' means all children (expanded by
+                __children_op()).  Returns a dictionary mapping each child
+                name to a (rv, err) tuple."""
+
+                return self.__children_op(lin_list,
+                    self.__audit_child, **kwargs)
+
+        def sync_children(self, lin_list, **kwargs):
+                """Sync one or more children of the current image.
+
+                An empty 'lin_list' means all children (expanded by
+                __children_op()).  Returns a dictionary mapping each child
+                name to a (rv, err) tuple."""
+
+                return self.__children_op(lin_list,
+                    self.__sync_child, **kwargs)
+
+        def detach_children(self, lin_list, **kwargs):
+                """Detach one or more children from the current image. This
+                operation results in the removal of any constraint package
+                from the child images.
+
+                An empty 'lin_list' means all children.  Returns a
+                dictionary mapping each child name to a (rv, err) tuple."""
+
+                # get parameter meant for __detach_child().  (they are left
+                # in kwargs as well so __detach_child() also receives them.)
+                force = noexecute = False
+                if "force" in kwargs:
+                        force = kwargs["force"]
+                if "noexecute" in kwargs:
+                        noexecute = kwargs["noexecute"]
+
+                # expand lin_list before calling __detach_child()
+                if not lin_list:
+                        lin_list = [i[0] for i in self.__list_children()]
+
+                rvdict = self.__children_op(lin_list,
+                    self.__detach_child, **kwargs)
+
+                for lin in lin_list:
+                        # if the detach failed leave metadata in parent
+                        # Unused variable 'rv'; pylint: disable-msg=W0612
+                        rv, e = rvdict[lin]
+                        # pylint: enable-msg=W0612
+                        assert e == None or \
+                            (isinstance(e, apx.LinkedImageException))
+                        if e and not force:
+                                continue
+
+                        # detach the child in memory
+                        lip = self.__plugins[lin.lin_type]
+                        lip.detach_child_inmemory(lin)
+
+                        if not noexecute:
+                                # sync out the fact that we detached the child
+                                rv2, e2 = lip.sync_children_todisk()
+                                assert e2 == None or \
+                                    (isinstance(e2, apx.LinkedImageException))
+                                if not e:
+                                        # don't overwrite previous errors
+                                        rvdict[lin] = (rv2, e2)
+
+                if not (self.ischild() or self.isparent()):
+                        # we're not linked anymore, so delete all our linked
+                        # properties.
+                        self.__update_props()
+                        self.syncmd()
+
+                return rvdict
+
+        def __children_op(self, lin_list, op, **kwargs):
+                """Perform a linked image operation on multiple children.
+
+                'lin_list' is the list of child names to operate on; an
+                empty list is expanded to all children.
+
+                'op' is the per-child callable; it is invoked with 'lin' and
+                'lic' keyword arguments (plus any caller-supplied kwargs)
+                and must return a (rv, err) tuple.
+
+                Returns a dictionary mapping each child name to its
+                (rv, err) result; a LinkedImageException raised while
+                instantiating or operating on a child is captured as that
+                child's result."""
+
+                assert type(lin_list) == list
+                assert type(kwargs) == dict
+                assert "lin" not in kwargs
+                assert "lic" not in kwargs
+
+                if not lin_list:
+                        lin_list = [i[0] for i in self.__list_children()]
+
+                rvdict = dict()
+                for lin in lin_list:
+                        try:
+                                lic = LinkedImageChild(self, lin)
+
+                                # perform the requested operation
+                                kwargs["lin"] = lin
+                                kwargs["lic"] = lic
+                                rvdict[lin] = op(**kwargs)
+
+                                # Unused variable; pylint: disable-msg=W0612
+                                rv, e = rvdict[lin]
+                                # pylint: enable-msg=W0612
+                                assert e == None or \
+                                    (isinstance(e, apx.LinkedImageException))
+
+                        except apx.LinkedImageException, e:
+                                rvdict[lin] = (e.lix_exitrv, e)
+
+                return rvdict
+
+        @staticmethod
+        def __audit_child(lin, lic):
+                """Recurse into a child image and audit it.
+
+                Returns the (rv, err) tuple from the child audit."""
+
+                # Unused argument 'lin'; pylint: disable-msg=W0613
+                return lic.child_audit()
+
+        @staticmethod
+        def __sync_child(lin, lic, accept=False, li_attach_sync=False,
+            li_md_only=False, li_pkg_updates=True, noexecute=False,
+            progtrack=None, refresh_catalogs=True, show_licenses=False,
+            update_index=True):
+                """Recurse into a child image and sync it.
+
+                Returns the (rv, err) tuple from the child sync."""
+
+                # Too many arguments; pylint: disable-msg=R0913
+                # Unused argument 'lin'; pylint: disable-msg=W0613
+
+                # no new constraints currently implies no pkg updates.
+                if li_md_only:
+                        li_pkg_updates = False
+
+                # now try to sync the image
+                return lic.child_sync(
+                    accept=accept,
+                    li_attach_sync=li_attach_sync,
+                    li_md_only=li_md_only,
+                    li_pkg_updates=li_pkg_updates,
+                    noexecute=noexecute,
+                    progtrack=progtrack,
+                    refresh_catalogs=refresh_catalogs,
+                    show_licenses=show_licenses,
+                    update_index=update_index)
+
+        def __detach_child(self, lin, lic, force=False, noexecute=False,
+            progtrack=None):
+                """Recurse into a child image and detach it.
+
+                'force' allows detaching even when the child's image type
+                plugin doesn't support detach.
+
+                Returns a (rv, err) tuple."""
+
+                lip = self.__plugins[lin.lin_type]
+                if not force and not lip.support_detach:
+                        # we can't detach this type of image.
+                        e = apx.LinkedImageException(
+                            detach_child_notsup=lin.lin_type)
+                        return (pkgdefs.EXIT_OOPS, e)
+
+                # remove linked data from the child
+                return lic.child_detach(noexecute=noexecute,
+                    progtrack=progtrack)
+
+        def reset_recurse(self):
+                """Reset all child recursion state."""
+
+                self.__lic_list = []
+
+        def init_recurse(self, op, li_ignore, accept, refresh_catalogs,
+            update_index, args):
+                """When planning changes on a parent image, prepare to
+                recurse into all child images and operate on them as well.
+
+                'op' is the pkgdefs API operation being performed on the
+                parent; 'li_ignore' lists children to skip; the remaining
+                parameters are forwarded to each child's
+                child_init_recurse()."""
+
+                if op == pkgdefs.API_OP_DETACH:
+                        # we don't need to recurse for these operations
+                        self.__lic_list = []
+                        return
+
+                if PROP_RECURSE in self.__props and \
+                    not self.__props[PROP_RECURSE]:
+                        # don't bother to recurse into children
+                        self.__lic_list = []
+                        return
+
+                self.__lic_list = []
+                # Unused variable 'path'; pylint: disable-msg=W0612
+                for (lin, path) in self.__list_children(li_ignore):
+                # pylint: enable-msg=W0612
+                        self.__lic_list.append(LinkedImageChild(self, lin))
+
+                if not self.__lic_list:
+                        # no child images to recurse into
+                        return
+
+                # given the api operation being performed on the
+                # current image, figure out what api operation should
+                # be performed on child images.
+                if op == pkgdefs.API_OP_UPDATE:
+                        pkg_op = pkgdefs.PKG_OP_UPDATE
+                else:
+                        pkg_op = pkgdefs.PKG_OP_SYNC
+
+                for lic in self.__lic_list:
+                        lic.child_init_recurse(pkg_op, accept,
+                            refresh_catalogs, update_index, args)
+
+        def do_recurse(self, stage, ip=None):
+                """When planning changes within a parent image, recurse into
+                all child images and operate on them as well.
+
+                'stage' is the pkgdefs API stage to execute in each child;
+                'ip' is an optional image plan passed through to
+                child_do_recurse()."""
+
+                assert stage in pkgdefs.api_stage_values
+                assert stage != pkgdefs.API_STAGE_DEFAULT
+
+                for lic in self.__lic_list:
+                        lic.child_do_recurse(stage=stage, ip=ip)
+
+        def recurse_nothingtodo(self):
+                """Return True if there is no planned work to do on child
+                image."""
+
+                # True only if every child reports nothing to do.
+                for lic in self.__lic_list:
+                        if not lic.child_nothingtodo():
+                                return False
+                return True
+
+        @staticmethod
+        def __has_parent_dep(fmri, cat, excludes):
+                """Check if a package has a parent dependency.
+
+                Returns True if any dependency action of 'fmri' in catalog
+                'cat' (after applying 'excludes') has type "parent"."""
+
+                for a in cat.get_entry_actions(fmri,
+                    [pkg.catalog.Catalog.DEPENDENCY], excludes=excludes):
+                        if a.name == "depend" and a.attrs["type"] == "parent":
+                                return True
+                return False
+
+        def extra_dep_actions(self, excludes=EmptyI,
+            installed_catalog=False):
+                """Since we don't publish packages with parent dependencies
+                yet, but we want to be able to sync packages between zones,
+                we'll need to fake up some extra package parent dependencies.
+
+                Here we'll inspect the catalog to find packages that we think
+                should have parent dependencies and then we'll return a
+                dictionary, indexed by fmri, which contains the extra
+                dependency actions that should be added to each package."""
+
+                # create a parent dependency action with a nonglobal zone
+                # variant tag.
+                attrs = dict()
+                attrs["type"] = "parent"
+                attrs["fmri"] = pkg.actions.depend.DEPEND_SELF
+                attrs["variant.opensolaris.zone"] = "nonglobal"
+
+                # Used * or ** magic; pylint: disable-msg=W0142
+                pda = pkg.actions.depend.DependencyAction(**attrs)
+                # pylint: enable-msg=W0142
+
+                if not pda.include_this(excludes):
+                        # we're not operating on a nonglobal zone image so we
+                        # don't need to fabricate parent zone dependencies
+                        return dict()
+
+                if not self.ischild():
+                        # we're not a child image so parent dependencies are
+                        # irrelevant
+                        return dict()
+
+                if installed_catalog:
+                        cat_name = self.__img.IMG_CATALOG_INSTALLED
+                else:
+                        cat_name = self.__img.IMG_CATALOG_KNOWN
+                cat = self.__img.get_catalog(cat_name)
+
+                # assume that the osnet and ips incorporations should always
+                # have a parent dependencies.
+                inc_fmris = set()
+                for tgt in [
+                    "consolidation/osnet/osnet-incorporation",
+                    "consolidation/ips/ips-incorporation"]:
+                        for (ver, fmris) in cat.fmris_by_version(tgt):
+                                inc_fmris |= set(fmris)
+                                for fmri in fmris:
+                                        if not self.__has_parent_dep(fmri, cat,
+                                            excludes):
+                                                inc_fmris |= set([fmri])
+
+                # find the fmris that each osnet/ips incorporation incorporates
+                inc_pkgs = set()
+                for fmri in inc_fmris:
+                        for a in cat.get_entry_actions(fmri,
+                            [pkg.catalog.Catalog.DEPENDENCY],
+                            excludes=excludes):
+                                if (a.name != "depend") or \
+                                    (a.attrs["type"] != "incorporate"):
+                                        continue
+
+                                # create an fmri for the incorporated package
+                                build_release = str(fmri.version.build_release)
+                                inc_pkgs |= set([pkg.fmri.PkgFmri(
+                                    a.attrs["fmri"],
+                                    build_release=build_release)])
+
+                # translate the incorporated package fmris into actual
+                # packages in the known catalog
+                dep_fmris = set()
+                for fmri in inc_pkgs:
+                        for (ver, fmris) in cat.fmris_by_version(fmri.pkg_name):
+                                if ver == fmri.version or ver.is_successor(
+                                    fmri.version, pkg.version.CONSTRAINT_AUTO):
+                                        dep_fmris |= set(fmris)
+
+                # all the fmris we want to add dependencies to.
+                all_fmris = inc_fmris | dep_fmris
+
+                # eliminate renamed or obsoleted fmris
+                rm_fmris = set()
+                for pfmri in all_fmris:
+                        entry = cat.get_entry(pfmri)
+                        state = entry["metadata"]["states"]
+                        if self.__img.PKG_STATE_OBSOLETE in state or \
+                            self.__img.PKG_STATE_RENAMED in state:
+                                rm_fmris |= set([pfmri])
+                all_fmris -= rm_fmris
+
+                return dict([(fmri, [pda]) for fmri in all_fmris])
+
+
+class LinkedImageChild(object):
+        """A LinkedImageChild object is used when a parent image wants to
+        access a child image.  These accesses may include things like:
+        saving/pushing linked image metadata into a child image, syncing or
+        auditing a child image, or recursing into a child image to keep it in
+        sync with planned changes in the parent image."""
+
+        # Too many instance attributes; pylint: disable-msg=R0902
+
+        def __init__(self, li, lin):
+                """Initialize access to the child named 'lin' of the
+                LinkedImage 'li'.
+
+                Raises LinkedImageException if the child's path is
+                inaccessible or does not contain an image."""
+
+                assert isinstance(li, LinkedImage), \
+                    "isinstance(%s, LinkedImage)" % type(li)
+                assert isinstance(lin, LinkedImageName), \
+                    "isinstance(%s, LinkedImageName)" % type(lin)
+
+                # globals
+                self.__linked = li
+                self.__img = li.image
+
+                # cache properties.
+                self.__props = self.__linked.child_props(lin)
+                assert self.__props[PROP_NAME] == lin
+
+                # the child path must be accessible and contain an image
+                try:
+                        imgdir = ar.ar_img_prefix(self.child_path)
+                except OSError:
+                        raise apx.LinkedImageException(
+                            lin=lin, child_path_eaccess=self.child_path)
+
+                if not imgdir:
+                        raise apx.LinkedImageException(
+                            lin=lin, child_bad_img=self.child_path)
+
+                # initialize paths for linked image data files
+                self.__path_ppkgs = os.path.join(imgdir, PATH_PPKGS)
+                self.__path_prop = os.path.join(imgdir, PATH_PROP)
+                self.__path_ppubs = os.path.join(imgdir, PATH_PUBS)
+
+                # initialize a linked image child plugin
+                self.__plugin = \
+                    pkg.client.linkedimage.p_classes_child[lin.lin_type](self)
+
+                # variables reset by self.child_reset_recurse()
+                self.__r_op = None
+                self.__r_args = None
+                self.__r_progtrack = None
+                self.__r_rv_nop = False
+                self.child_reset_recurse()
+
+        @property
+        def child_name(self):
+                """Get the name (LinkedImageName) associated with a child
+                image."""
+                return self.__props[PROP_NAME]
+
+        @property
+        def child_path(self):
+                """Get the filesystem path associated with a child image."""
+                return self.__props[PROP_PATH]
+
+        @property
+        def child_pimage(self):
+                """Get the parent image object associated with this
+                child."""
+                return self.__img
+
+        def __push_data(self, root, path, data, tmp, test):
+                """Write data to a child image.
+
+                'tmp' indicates we're only writing a temporary copy of
+                the data (the ".<runid>" file is left in place and the
+                real file at 'path' is not updated).
+
+                'test' indicates we only want to check whether the data
+                differs from what's already at 'path'; no files are
+                changed.
+
+                Returns True if the data differs from what is currently
+                stored at 'path' (i.e., an update happened or would
+                happen), False otherwise."""
+
+                # first save our data to a temporary file
+                path_tmp = "%s.%s" % (path, self.__img.runid)
+                save_data(path_tmp, data, root=root)
+
+                # check if we're updating the data
+                updated = True
+
+                try:
+                        exists = ar.ar_exists(root, path)
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+                if exists:
+                        try:
+                                updated = ar.ar_diff(root, path, path_tmp)
+                        except OSError, e:
+                                # W0212 Access to a protected member
+                                # pylint: disable-msg=W0212
+                                raise apx._convert_error(e)
+
+                # if we're not actually updating any data, or if we were just
+                # doing a test to see if the data has changed, then delete the
+                # temporary data file
+                if not updated or test:
+                        ar.ar_unlink(root, path_tmp)
+                        return updated
+
+                if not tmp:
+                        # we are updating the real data.
+                        try:
+                                ar.ar_rename(root, path_tmp, path)
+                        except OSError, e:
+                                # W0212 Access to a protected member
+                                # pylint: disable-msg=W0212
+                                raise apx._convert_error(e)
+
+                return True
+
+        def __push_ppkgs(self, tmp=False, test=False, ip=None):
+                """Sync linked image parent constraint data to a child image.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them)."""
+
+                # there has to be an image plan to export
+                cati = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED)
+                ppkgs = set(cati.fmris())
+
+                if ip != None and ip.plan_desc:
+                        # if there's an image plan then we need to update the
+                        # installed packages based on that plan.
+                        for src, dst in ip.plan_desc:
+                                if src == dst:
+                                        continue
+                                if src:
+                                        assert src in ppkgs
+                                        ppkgs -= set([src])
+                                if dst:
+                                        assert dst not in ppkgs
+                                        ppkgs |= set([dst])
+
+                # paranoia
+                ppkgs = frozenset(ppkgs)
+
+                # save the planned cips
+                return self.__push_data(self.child_path, self.__path_ppkgs,
+                    ppkgs, tmp, test)
+
+        def __push_props(self, tmp=False, test=False):
+                """Sync linked image properties data to a child image.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them)."""
+
+                # make a copy of the props we want to push
+                props = self.__props.copy()
+                assert PROP_PARENT_PATH not in props
+
+                # let the plugin adjust the properties before pushing
+                self.__plugin.munge_props(props)
+
+                # delete temporal properties
+                props = rm_dict_ent(props, temporal_props)
+
+                return self.__push_data(self.child_path, self.__path_prop,
+                    props, tmp, test)
+
+        def __push_ppubs(self, tmp=False, test=False):
+                """Sync linked image parent publisher data to a child image.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them)."""
+
+                ppubs = self.__linked.get_pubs()
+                return self.__push_data(self.child_path, self.__path_ppubs,
+                    ppubs, tmp, test)
+
+        def __syncmd(self, tmp=False, test=False, ip=None):
+                """Sync linked image data to a child image.
+
+                'tmp' determines if we should read/write to the official
+                linked image metadata files, or if we should access temporary
+                versions (which have ".<runid>" appended to them).
+
+                Returns True if any of the pushed data (packages,
+                properties, or publishers) changed."""
+
+                # an image plan implies planned (not yet committed) state,
+                # so only write temporary metadata files in that case.
+                if ip:
+                        tmp = True
+
+                ppkgs_updated = self.__push_ppkgs(tmp, test, ip=ip)
+                props_updated = self.__push_props(tmp, test)
+                pubs_updated = self.__push_ppubs(tmp, test)
+
+                return (props_updated or ppkgs_updated or pubs_updated)
+
+        @staticmethod
+        def __flush_output():
+                """We flush stdout and stderr before and after operating on
+                child images to avoid any out-of-order output problems that
+                could be caused by caching of output."""
+
+                try:
+                        sys.stdout.flush()
+                except IOError:
+                        # best-effort flush; ignore I/O errors
+                        pass
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+                try:
+                        sys.stderr.flush()
+                except IOError:
+                        # best-effort flush; ignore I/O errors
+                        pass
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+        def __pkg_cmd(self, pkg_op, pkg_args, stage=None, progtrack=None):
+                """Perform a pkg(1) operation on a child image.
+
+                'pkg_op' is the pkg subcommand to run; 'pkg_args' are its
+                arguments.  'stage' selects an API stage (defaults to
+                pkgdefs.API_STAGE_DEFAULT).  'progtrack', if given, gets
+                li_recurse_start/li_recurse_end notifications.
+
+                Returns a tuple of (child exit status, command vector)."""
+
+                if stage == None:
+                        stage = pkgdefs.API_STAGE_DEFAULT
+                assert stage in pkgdefs.api_stage_values
+
+                #
+                # Build up a command line to execute.  Note that we take care
+                # to try to run the exact same pkg command that we were
+                # executed with.  We do this because pkg commonly tries to
+                # access the image that the command is being run from.
+                #
+                pkg_bin = "pkg"
+                cmdpath = self.__img.cmdpath
+                if cmdpath and os.path.basename(cmdpath) == "pkg":
+                        try:
+                                # check if the currently running pkg command
+                                # exists and is accessible.
+                                os.stat(cmdpath)
+                                pkg_bin = cmdpath
+                        except OSError:
+                                pass
+
+                pkg_cmd = [
+                    pkg_bin,
+                    "-R", str(self.child_path),
+                    "--runid=%s" % self.__img.runid,
+                ]
+
+                # propagate certain debug options
+                for k in [
+                    "allow-overlays",
+                    "broken-conflicting-action-handling",
+                    "disp_linked_cmds",
+                    "plan"]:
+                        if DebugValues[k]:
+                                pkg_cmd.append("-D")
+                                pkg_cmd.append("%s=1" % k)
+
+                # add the subcommand argument
+                pkg_cmd.append(pkg_op)
+
+                # propagate stage option
+                if stage != pkgdefs.API_STAGE_DEFAULT:
+                        pkg_cmd.append("--stage=%s" % stage)
+
+                # add the subcommand argument options
+                pkg_cmd.extend(pkg_args)
+
+                if progtrack:
+                        progtrack.li_recurse_start(self.child_name)
+
+                # flush all output before recursing into child
+                self.__flush_output()
+
+                # the debug value or the environment can both enable
+                # display of the commands run against children.
+                disp_linked_cmds = DebugValues.get_value("disp_linked_cmds")
+                if not disp_linked_cmds and \
+                    "PKG_DISP_LINKED_CMDS" in os.environ:
+                        disp_linked_cmds = True
+                if disp_linked_cmds:
+                        logger.info("child cmd: %s" % " ".join(pkg_cmd))
+                else:
+                        logger.debug("child cmd: %s" % " ".join(pkg_cmd))
+
+                # Start the operation on the child
+                try:
+                        p = pkg.pkgsubprocess.Popen(pkg_cmd)
+                        p.wait()
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+                # flush all output generated by the child
+                self.__flush_output()
+
+                if progtrack:
+                        progtrack.li_recurse_end(self.child_name)
+
+                return (p.returncode, pkg_cmd)
+
+        def child_detach(self, noexecute=False, progtrack=None):
+                """Detach a child image.
+
+                Returns a tuple of (exit status, exception or None).
+
+                NOTE(review): 'progtrack' defaults to None but
+                progtrack.verbose/.quiet are accessed unconditionally
+                below — callers must always pass a progress tracker."""
+
+                # When issuing a detach from a parent we must always use the
+                # force flag. (Normally a child will refuse to detach from a
+                # parent unless it attached to the parent, which is never the
+                # case here.)
+                pkg_args = ["-f"]
+                pkg_args.extend(["-v"] * progtrack.verbose)
+                if progtrack.quiet:
+                        pkg_args.append("-q")
+                if noexecute:
+                        pkg_args.append("-n")
+
+                # Unused variable 'pkg_cmd'; pylint: disable-msg=W0612
+                rv, pkg_cmd = self.__pkg_cmd(pkgdefs.PKG_OP_DETACH,
+                    pkg_args)
+                # pylint: enable-msg=W0612
+
+                # if the detach command ran, return its status.
+                if rv in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOPARENT]:
+                        return (pkgdefs.EXIT_OK, None)
+
+                e = apx.LinkedImageException(lin=self.child_name,
+                    exitrv=rv, child_op_failed=pkgdefs.PKG_OP_DETACH)
+                return (rv, e)
+
+        def child_audit(self):
+                """Audit a child image to see if it's in sync with its
+                constraints.
+
+                Returns a tuple of (exit status, exception or None)."""
+
+                # first sync our metadata
+                self.__syncmd()
+
+                # recurse into the child image
+                pkg_args = ["-q"]
+
+                # Unused variable 'pkg_cmd'; pylint: disable-msg=W0612
+                rv, pkg_cmd = self.__pkg_cmd(pkgdefs.PKG_OP_AUDIT_LINKED,
+                    pkg_args)
+                # pylint: enable-msg=W0612
+
+                # if the audit command ran, return its status.
+                if rv in [pkgdefs.EXIT_OK, pkgdefs.EXIT_DIVERGED]:
+                        return (rv, None)
+
+                # something went unexpectedly wrong.
+                e = apx.LinkedImageException(lin=self.child_name,
+                    exitrv=rv, child_op_failed=pkgdefs.PKG_OP_AUDIT_LINKED)
+                return (rv, e)
+
+        def child_sync(self, accept=False, li_attach_sync=False,
+            li_md_only=False, li_pkg_updates=True, progtrack=None,
+            noexecute=False, refresh_catalogs=True, show_licenses=False,
+            update_index=True):
+                """Try to bring a child image into sync with its
+                constraints.
+
+                Returns a tuple of (exit status, exception or None).
+
+                NOTE(review): 'progtrack' defaults to None but
+                progtrack.verbose/.quiet are accessed unconditionally
+                below — callers must always pass a progress tracker."""
+
+                # Too many arguments; pylint: disable-msg=R0913
+
+                if li_md_only:
+                        # we're not going to recurse into the child image,
+                        # we're just going to update its metadata.
+                        try:
+                                updated = self.__syncmd(test=noexecute)
+                        except apx.LinkedImageException, e:
+                                return (e.lix_exitrv, e)
+
+                        if updated:
+                                return (pkgdefs.EXIT_OK, None)
+                        else:
+                                return (pkgdefs.EXIT_NOP, None)
+
+                # first sync the metadata
+                try:
+                        # if we're doing this sync as part of an attach, then
+                        # temporarily sync the metadata since we don't know
+                        # yet if the attach will succeed.  if the attach
+                        # doesn't succeed this means we don't have to delete
+                        # any metadata.  if the attach succeeds the child will
+                        # make the temporary metadata permanent as part of the
+                        # commit.
+                        self.__syncmd(tmp=li_attach_sync)
+                except apx.LinkedImageException, e:
+                        return (e.lix_exitrv, e)
+
+                pkg_args = []
+                pkg_args.extend(["-v"] * progtrack.verbose)
+                if progtrack.quiet:
+                        pkg_args.append("-q")
+                if noexecute:
+                        pkg_args.append("-n")
+                if accept:
+                        pkg_args.append("--accept")
+                if show_licenses:
+                        pkg_args.append("--licenses")
+                if not refresh_catalogs:
+                        pkg_args.append("--no-refresh")
+                if not update_index:
+                        pkg_args.append("--no-index")
+                if not li_pkg_updates:
+                        pkg_args.append("--no-pkg-updates")
+
+                # Unused variable 'pkg_cmd'; pylint: disable-msg=W0612
+                rv, pkg_cmd = self.__pkg_cmd(pkgdefs.PKG_OP_SYNC, pkg_args,
+                    progtrack=progtrack)
+                # pylint: enable-msg=W0612
+
+                # if the sync command ran, return its status.
+                if rv in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]:
+                        return (rv, None)
+
+                # something went unexpectedly wrong.
+                e = apx.LinkedImageException(lin=self.child_name,
+                    exitrv=rv, child_op_failed=pkgdefs.PKG_OP_SYNC)
+                return (rv, e)
+
+        def child_init_root(self, old_altroot):
+                """Our image path is being updated, so figure out our new
+                child image paths.  This interface only gets invoked when:
+
+                - We're doing a packaging operation on a parent image and
+                  we've just cloned that parent to create a new BE that we're
+                  going to update.  This clone also cloned all the children
+                  and so now we need to update our paths to point to the newly
+                  created children.
+
+                - We tried to update a cloned image (as described above) and
+                  our update failed, hence we're changing paths back to the
+                  original images that were the source of the clone."""
+
+                # get the image path without the altroot
+                altroot_path = self.__props[PROP_PATH]
+                path = rm_altroot_path(altroot_path, old_altroot)
+
+                # update the path with the current altroot
+                altroot = self.__linked.altroot()
+                path = add_altroot_path(path, altroot)
+
+                # update properties with altroot
+                self.__props[PROP_PATH] = path
+                self.__props[PROP_ALTROOT] = altroot
+
+                # we don't bother to update PROP_PARENT_PATH since
+                # that is only used when reading constraint data from the
+                # parent image, and this interface is only invoked when we're
+                # starting or finishing execution of a plan on a cloned image
+                # (at which point we have no need to access the parent
+                # anymore).
+
+        def child_nothingtodo(self):
+                """Return True if the child image reported during planning
+                that it has no changes to make (see child_do_recurse)."""
+                return self.__r_rv_nop
+
+        def child_reset_recurse(self):
+                """Reset child recursion state for child."""
+
+                self.__r_op = None
+                self.__r_args = None
+                self.__r_progtrack = None
+                self.__r_rv_nop = False
+
+        def child_init_recurse(self, pkg_op, accept, refresh_catalogs,
+            update_index, args):
+                """When planning changes on a parent image, prepare to
+                recurse into a child image.
+
+                'pkg_op' is the operation to run in the child (sync or
+                update).  'args' is a dictionary that must contain
+                "progtrack" and "noexecute" entries, and may contain
+                "li_pkg_updates".  The computed command state is saved on
+                the object for a later child_do_recurse() call."""
+
+                assert pkg_op in [pkgdefs.PKG_OP_SYNC, pkgdefs.PKG_OP_UPDATE]
+
+                progtrack = args["progtrack"]
+                noexecute = args["noexecute"]
+
+                pkg_args = []
+
+                pkg_args.extend(["-v"] * progtrack.verbose)
+                if progtrack.quiet:
+                        pkg_args.append("-q")
+                if noexecute:
+                        pkg_args.append("-n")
+
+                # W0511 XXX / FIXME Comments; pylint: disable-msg=W0511
+                # XXX: also need to support --licenses.
+                # pylint: enable-msg=W0511
+                if accept:
+                        pkg_args.append("--accept")
+                if not refresh_catalogs:
+                        pkg_args.append("--no-refresh")
+                if not update_index:
+                        pkg_args.append("--no-index")
+
+                # options specific to: attach, set-property-linked, sync
+                if "li_pkg_updates" in args and not args["li_pkg_updates"]:
+                        pkg_args.append("--no-pkg-updates")
+
+                if pkg_op == pkgdefs.PKG_OP_UPDATE:
+                        # skip ipkg up to date check for child images
+                        pkg_args.append("-f")
+
+                self.__r_op = pkg_op
+                self.__r_args = pkg_args
+                self.__r_progtrack = progtrack
+
+        def child_do_recurse(self, stage, ip=None):
+                """When planning changes within a parent image, recurse into
+                a child image.
+
+                Requires that child_init_recurse() was called first.
+                Returns the child's exit status (EXIT_OK or EXIT_NOP);
+                raises apx.LinkedImageException on any other status."""
+
+                assert stage in pkgdefs.api_stage_values
+                assert stage != pkgdefs.API_STAGE_DEFAULT
+                assert stage != pkgdefs.API_STAGE_PLAN or ip != None
+
+                assert self.__r_op != None
+                assert self.__r_args != None
+
+                if stage == pkgdefs.API_STAGE_PUBCHECK:
+                        self.__syncmd()
+
+                if stage == pkgdefs.API_STAGE_PLAN:
+                        # sync our metadata
+                        self.__syncmd(ip=ip)
+
+                if self.__r_rv_nop:
+                        if stage == pkgdefs.API_STAGE_EXECUTE:
+                                self.child_reset_recurse()
+                        # the child image told us it has no changes planned.
+                        return pkgdefs.EXIT_NOP
+
+                rv, pkg_cmd = self.__pkg_cmd(self.__r_op,
+                    self.__r_args, stage=stage,
+                    progtrack=self.__r_progtrack)
+
+                if rv in [pkgdefs.EXIT_OK, pkgdefs.EXIT_NOP]:
+                        # common case (we hope)
+                        pass
+                else:
+                        self.child_reset_recurse()
+                        raise apx.LinkedImageException(
+                            lin=self.child_name,
+                            exitrv=rv, recursive_cmd_fail=pkg_cmd)
+
+                # remember a NOP result so later stages can be skipped
+                if stage == pkgdefs.API_STAGE_PLAN and rv == pkgdefs.EXIT_NOP:
+                        self.__r_rv_nop = True
+
+                if stage == pkgdefs.API_STAGE_EXECUTE:
+                        # we're done with this operation
+                        self.child_reset_recurse()
+
+                return rv
+
+
+# ---------------------------------------------------------------------------
+# Utility Functions
+#
+def save_data(path, data, root="/"):
+        """Save JSON encoded linked image metadata to a file.
+
+        Writes to a ".<pid>.tmp" file first and then renames it into
+        place so readers never see a partially written file."""
+
+        # make sure the directory we're about to save data into exists.
+        path_dir = os.path.dirname(path)
+        pathtmp = "%s.%d.tmp" % (path, os.getpid())
+
+        try:
+                if not ar.ar_exists(root, path_dir):
+                        ar.ar_mkdir(root, path_dir, misc.PKG_DIR_MODE)
+
+                # write the output to a temporary file
+                fd = ar.ar_open(root, pathtmp, os.O_WRONLY,
+                    mode=0644, create=True, truncate=True)
+                fobj = os.fdopen(fd, "w")
+                json.dump(data, fobj, encoding="utf-8",
+                    cls=pkg.client.linkedimage.PkgEncoder)
+                fobj.close()
+
+                # atomically create the desired file
+                ar.ar_rename(root, pathtmp, path)
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+
+def load_data(path, missing_val=None):
+        """Load JSON encoded linked image metadata from a file.
+
+        If 'missing_val' is not None it is returned when 'path' does not
+        exist (instead of failing on the open)."""
+
+        try:
+                if (missing_val != None) and not path_exists(path):
+                        return missing_val
+                fobj = open(path)
+                data = json.load(fobj, encoding="utf-8",
+                    object_hook=pkg.client.linkedimage.PkgDecoder)
+                fobj.close()
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+        return data
+
+
+class PkgEncoder(json.JSONEncoder):
+        """Utility class used when json encoding linked image metadata."""
+
+        # E0202 An attribute inherited from JSONEncoder hide this method
+        # pylint: disable-msg=E0202
+        def default(self, obj):
+                """Required routine that overrides the default base
+                class version.  This routine must serialize 'obj' when
+                attempting to save 'obj' in json format.
+
+                FMRIs and linked image names serialize as strings;
+                package plans via their getstate(); sets as lists."""
+
+                if isinstance(obj, (pkg.fmri.PkgFmri,
+                    pkg.client.linkedimage.common.LinkedImageName)):
+                        return str(obj)
+
+                if isinstance(obj, pkgplan.PkgPlan):
+                        return obj.getstate()
+
+                if isinstance(obj, (set, frozenset)):
+                        return list(obj)
+
+                # let the base class raise TypeError for anything else
+                return json.JSONEncoder.default(self, obj)
+
+
+def PkgDecoder(dct):
+        """Utility function used as a json object_hook when decoding
+        linked image metadata."""
+        # Replace unicode keys/values with strings
+        rvdct = {}
+        for k, v in dct.iteritems():
+
+                # unicode must die
+                if type(k) == unicode:
+                        k = k.encode("utf-8")
+                if type(v) == unicode:
+                        v = v.encode("utf-8")
+
+                # convert boolean strings values back into booleans
+                if type(v) == str:
+                        if v.lower() == "true":
+                                v = True
+                        elif v.lower() == "false":
+                                v = False
+
+                rvdct[k] = v
+        return rvdct
+
+def rm_dict_ent(d, keys):
+        """Return a copy of dictionary 'd' with the entries named in
+        'keys' removed ('d' itself is not modified)."""
+        return dict([
+                (k, v)
+                for k, v in d.iteritems()
+                if k not in keys
+        ])
+
+def _rterr(li=None, lic=None, lin=None, path=None, err=None,
+    bad_cp=None,
+    bad_iup=None,
+    bad_lin_type=None,
+    bad_prop=None,
+    missing_props=None,
+    multiple_altroots=None,
+    saved_temporal_props=None):
+        """Oops.  We hit a runtime error.  Die with a nice informative
+        message.  Note that runtime errors should never happen and usually
+        indicate bugs (or possibly corrupted linked image metadata), so they
+        are not localized (just like asserts are not localized).
+
+        At most one of the 'bad_*'/'missing_props'/... keyword arguments
+        may be set; if none is set then 'err' must supply the message.
+        'li'/'lic' (mutually exclusive) or 'lin'/'path' identify the
+        image the error applies to."""
+        # Too many arguments; pylint: disable-msg=R0913
+
+        assert not (li and lic)
+        assert not ((lin or path) and li)
+        assert not ((lin or path) and lic)
+        assert path == None or type(path) == str
+
+        if bad_cp:
+                assert err == None
+                err = "Invalid linked content policy: %s" % bad_cp
+        elif bad_iup:
+                assert err == None
+                err = "Invalid linked image update policy: %s" % bad_iup
+        elif bad_lin_type:
+                assert err == None
+                err = "Invalid linked image type: %s" % bad_lin_type
+        elif bad_prop:
+                assert err == None
+                err = "Invalid linked property value: %s=%s" % bad_prop
+        elif missing_props:
+                assert err == None
+                err = "Missing required linked properties: %s" % \
+                    ", ".join(missing_props)
+        elif multiple_altroots:
+                assert err == None
+                err = "Multiple plugins reported different altroots:"
+                for plugin, altroot in multiple_altroots:
+                        err += "\n\t%s = %s" % (plugin, altroot)
+        elif saved_temporal_props:
+                assert err == None
+                err = "Found saved temporal linked properties: %s" % \
+                    ", ".join(saved_temporal_props)
+        else:
+                assert err != None
+
+        if li:
+                if li.ischild():
+                        lin = li.child_name
+                path = li.image.root
+
+        if lic:
+                lin = lic.child_name
+                path = lic.child_path
+
+        err_prefix = "Linked image error: "
+        if lin:
+                err_prefix = "Linked image (%s) error: " % (str(lin))
+
+        err_suffix = ""
+        if path and lin:
+                err_suffix = "\nLinked image (%s) path: %s" % (str(lin), path)
+        elif path:
+                err_suffix = "\nLinked image path: %s" % (path)
+
+        raise RuntimeError(
+            "%s: %s%s" % (err_prefix, err, err_suffix))
+
+# ---------------------------------------------------------------------------
+# Functions for accessing files in the current root
+#
+def path_exists(path):
+        """Simple wrapper for accessing files in the current root.
+        Wraps ar.ar_exists(), converting OSError to an API error."""
+
+        try:
+                return ar.ar_exists("/", path)
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+
+def path_isdir(path):
+        """Simple wrapper for accessing files in the current root.
+        Wraps ar.ar_isdir(), converting OSError to an API error."""
+
+        try:
+                return ar.ar_isdir("/", path)
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+
+def path_mkdir(path, mode):
+        """Simple wrapper for accessing files in the current root.
+        Wraps ar.ar_mkdir(), converting OSError to an API error."""
+
+        try:
+                return ar.ar_mkdir("/", path, mode)
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+
+def path_unlink(path, noent_ok=False):
+        """Simple wrapper for accessing files in the current root.
+        Wraps ar.ar_unlink(), converting OSError to an API error."""
+
+        try:
+                return ar.ar_unlink("/", path, noent_ok=noent_ok)
+        except OSError, e:
+                # W0212 Access to a protected member
+                # pylint: disable-msg=W0212
+                raise apx._convert_error(e)
+
+# ---------------------------------------------------------------------------
+# Functions for managing images which may be in alternate roots
+#
+def check_altroot_path(path, altroot):
+        """Return True if 'path' is nested within (or equal to)
+        'altroot'.  Both arguments must be absolute paths."""
+
+        assert os.path.isabs(path), "os.path.isabs(%s)" % path
+        assert os.path.isabs(altroot), "os.path.isabs(%s)" % altroot
+
+        # make sure both paths have one trailing os.sep.
+        altroot = altroot.rstrip(os.sep) + os.sep
+        path = path.rstrip(os.sep) + os.sep
+
+        # check for nested or equal paths
+        if path.startswith(altroot):
+                return True
+        return False
+
+def add_altroot_path(path, altroot):
+        """Return a path where 'path' is nested within 'altroot'.
+        Both arguments must be absolute paths."""
+
+        assert os.path.isabs(path), "os.path.isabs(%s)" % path
+        assert os.path.isabs(altroot), "os.path.isabs(%s)" % altroot
+
+        altroot = altroot.rstrip(os.sep) + os.sep
+        path = path.lstrip(os.sep)
+        altroot_path = altroot + path
+
+        # sanity check
+        assert check_altroot_path(altroot_path, altroot), \
+            "check_altroot_path(%s, %s)" % (altroot_path, altroot)
+
+        return altroot_path
+
+def rm_altroot_path(path, altroot):
+        """Return the relative portion of 'path', which must be nested
+        within 'altroot'.  The result is still an absolute path (the
+        'altroot' prefix is replaced with "/")."""
+
+        assert os.path.isabs(path), "not os.path.isabs(%s)" % path
+        assert os.path.isabs(altroot), "not os.path.isabs(%s)" % altroot
+
+        assert check_altroot_path(path, altroot), \
+            "not check_altroot_path(%s, %s)" % (path, altroot)
+
+        rv = path[len(altroot.rstrip(os.sep)):]
+        if rv == "":
+                rv = "/"
+        assert os.path.isabs(rv)
+        return rv
+
+def get_altroot_path(path, path_suffix):
+        """Given 'path', and a relative path 'path_suffix' that must match
+        the suffix of 'path', return the unmatched prefix of 'path'.
+        If 'path_suffix' isn't actually a suffix of 'path' (or they are
+        equal), "/" is returned."""
+
+        assert os.path.isabs(path), "os.path.isabs(%s)" % path
+        assert os.path.isabs(path_suffix), "os.path.isabs(%s)" % path_suffix
+
+        # make sure both paths have one trailing os.sep.
+        path = path.rstrip(os.sep) + os.sep
+        path_suffix = path_suffix.rstrip(os.sep) + os.sep
+
+        i = path.rfind(path_suffix)
+        if i <= 0:
+                # path and path_suffix are either unrelated or equal
+                altroot = os.sep
+        else:
+                altroot = path[:i]
+
+        # sanity check
+        assert check_altroot_path(path, altroot), \
+            "check_altroot_path(%s, %s)" % (path, altroot)
+
+        return altroot
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/client/linkedimage/system.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+System linked image module classes.  System linked images support both child
+and parent linking.  System linked image child configuration information is
+stored within a parent image's pkg5.image configuration file.
+"""
+
+# pkg classes
+import pkg.client.pkgdefs as pkgdefs
+
+# import linked image common code
+import common as li # Relative import; pylint: disable-msg=W0403
+
+
+class LinkedImageSystemPlugin(li.LinkedImagePlugin):
+        """See parent class for docstring."""
+
+        # specify what functionality we support
+        support_attach = True
+        support_detach = True
+
+        # default attach property values
+        attach_props_def = {
+            li.PROP_RECURSE:        True
+        }
+
+        def __init__(self, pname, linked):
+                """See parent class for docstring."""
+                li.LinkedImagePlugin.__init__(self, pname, linked)
+
+                # globals
+                self.__pname = pname
+                self.__linked = linked
+                self.__img = linked.image
+
+        def init_root(self, old_altroot):
+                """See parent class for docstring."""
+                # nothing to do
+                return
+
+        def get_altroot(self):
+                """See parent class for docstring."""
+                # nothing to do
+                return None
+
+        def get_child_list(self, nocache=False):
+                """See parent class for docstring."""
+
+                if not self.__img.cfg:
+                        # this may be a new image that hasn't actually been
+                        # created yet
+                        return []
+
+                rv = []
+                for lin in self.__img.cfg.linked_children:
+                        path = self.get_child_props(lin)[li.PROP_PATH]
+                        rv.append([lin, path])
+
+                for lin, path in rv:
+                        assert lin.lin_type == self.__pname
+
+                return rv
+
+        def get_child_props(self, lin):
+                """See parent class for docstring."""
+
+                # make a copy of the properties
+                props = self.__img.cfg.linked_children[lin].copy()
+
+                # update path to include any altroot
+                altroot = self.__linked.altroot()
+                props[li.PROP_PATH] = \
+                    li.add_altroot_path(props[li.PROP_PATH], altroot)
+
+                return props
+
+        def attach_child_inmemory(self, props, allow_relink):
+                """See parent class for docstring."""
+
+                # make sure this child doesn't already exist
+                lin = props[li.PROP_NAME]
+                lin_list = [i[0] for i in self.get_child_list()]
+                assert lin not in lin_list or allow_relink
+
+                # make a copy of the properties
+                props = props.copy()
+
+                # update path to remove any altroot
+                altroot = self.__linked.altroot()
+                props[li.PROP_PATH] = \
+                    li.rm_altroot_path(props[li.PROP_PATH], altroot)
+
+                # delete temporal properties
+                props = li.rm_dict_ent(props, li.temporal_props)
+
+                self.__img.cfg.linked_children[lin] = props
+
+        def detach_child_inmemory(self, lin):
+                """See parent class for docstring."""
+
+                # make sure this child exists
+                assert lin in [i[0] for i in self.get_child_list()]
+
+                # Delete this linked image
+                del self.__img.cfg.linked_children[lin]
+
+        def sync_children_todisk(self):
+                """See parent class for docstring."""
+
+                self.__img.cfg.write()
+
+                return (pkgdefs.EXIT_OK, None)
+
+
+class LinkedImageSystemChildPlugin(li.LinkedImageChildPlugin):
+        """See parent class for docstring."""
+
+        def __init__(self, lic):
+                """See parent class for docstring."""
+                li.LinkedImageChildPlugin.__init__(self, lic)
+
+                # globals
+                self.__linked = lic.child_pimage.linked
+
+        def munge_props(self, props):
+                """See parent class for docstring."""
+
+                # update path to remove any altroot
+                altroot = self.__linked.altroot()
+                props[li.PROP_PATH] = \
+                    li.rm_altroot_path(props[li.PROP_PATH], altroot)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/client/linkedimage/zone.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+Zone linked image module classes.  Zone linked images only support
+child attach.  Zone linked image child configuration information is all
+derived from zonecfg(1m) options and this plugin, no child configuration
+information is stored within a parent image.
+"""
+
+# standard python classes
+import os
+import tempfile
+
+# pkg classes
+import pkg.client.api_errors as apx
+import pkg.client.pkgdefs as pkgdefs
+import pkg.pkgsubprocess
+
+from pkg.client.debugvalues import DebugValues
+
+# import linked image common code
+import common as li # Relative import; pylint: disable-msg=W0403
+
+# W0511 XXX / FIXME Comments; pylint: disable-msg=W0511
+# XXX: should be defined by libzonecfg python wrapper
+# pylint: enable-msg=W0511
+
+ZONE_GLOBAL                  = "global"
+
+ZONE_STATE_STR_CONFIGURED    = "configured"
+ZONE_STATE_STR_INCOMPLETE    = "incomplete"
+ZONE_STATE_STR_INSTALLED     = "installed"
+ZONE_STATE_STR_READY         = "ready"
+ZONE_STATE_STR_MOUNTED       = "mounted"
+ZONE_STATE_STR_RUNNING       = "running"
+ZONE_STATE_STR_SHUTTING_DOWN = "shutting_down"
+ZONE_STATE_STR_DOWN          = "down"
+
+zone_installed_states = [
+    ZONE_STATE_STR_INSTALLED,
+    ZONE_STATE_STR_READY,
+    ZONE_STATE_STR_MOUNTED,
+    ZONE_STATE_STR_RUNNING,
+    ZONE_STATE_STR_SHUTTING_DOWN,
+    ZONE_STATE_STR_DOWN
+]
+
+
+#
+# If we're operating on a zone image it's very tempting to want to know
+# the zone name of that image.  Unfortunately, there's no good way to
+# determine this, and more importantly, there is no real need to know
+# the zone name.  The only reason to know the name of a linked image is
+# so that we can import linked image properties from the associated
+# parent image.  But for zones we should never do this.  When operating
+# on zone images we may not have access to the associated parent image
+# (for example, when running inside a zone).  So every zone image must
+# contain all the information needed to do a pkg operation at the start
+# of that operation.  i.e., the linked image information required for
+# operations must be pushed (or exported) from the parent image.  We can
+# not pull (or import) this information from the parent image (in the
+# cases where the parent image is accessible).
+#
+#
+# There are lots of possible execution modes that we can find ourselves
+# in.  Here are some of the possibilities:
+#
+# 1) in a gz operating on /
+# 2) in a gz operating on a zone linked image via pkg -R
+# 3) in a ngz operating on /
+# 4) in a ngz operating on an alternate BE image via pkg -R
+#    (not supported yet, but we'd like to support it).
+# 5) in a ngz operating on a linked image via pkg -R
+#    (this could be a default or user image linked to
+#    the zone.)
+# 6) in a ngz operating on an unlinked image via pkg -R
+#
+# The only scenarios that we really care about in this plugin are 2,
+# 3, and 4.  While it's tempting to try and detect these scenarios by
+# looking at image paths, private zone files, or libbe uuids, all those
+# methods have problems.  We can't even check the image zone variant to
+# determine if we're dealing with a zone, since in the future if we want
+# to support user images within a zone, it's likely they will have the
+# zone variant also set to nonglobal.  There's really only one way
+# to detect if we're working on a zone image, and that is via the image
+# metadata.  Ie, either via a pkg cfg_cache linked image property, or
+# via linked image properties exported to us by our associated parent
+# image.
+#
+
+
+class LinkedImageZonePlugin(li.LinkedImagePlugin):
+        """See parent class for docstring."""
+
+        # default attach property values
+        attach_props_def = {
+            li.PROP_RECURSE:        False
+        }
+
+        __zone_pkgs = frozenset([
+            frozenset(["system/zones"]),
+            frozenset(["SUNWzoner", "SUNWzoneu"])
+        ])
+
+        def __init__(self, pname, linked):
+                """See parent class for docstring."""
+                li.LinkedImagePlugin.__init__(self, pname, linked)
+
+                # globals
+                self.__pname = pname
+                self.__linked = linked
+                self.__img = linked.image
+
+                # check if we're running in the gz
+                try:
+                        self.__in_gz = (_zonename() == ZONE_GLOBAL)
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+                # keep track of our freshly attach children
+                self.__children = dict()
+
+                # cache zoneadm output
+                self.__zoneadm_list_cache = None
+
+        def __zones_supported(self):
+                """Check to see if zones are supported in the current image.
+                i.e. can the current image have zone children."""
+
+                # first check if the image variant is global
+                variant = "variant.opensolaris.zone"
+                value = self.__img.cfg.variants[variant]
+                if value != "global":
+                        return False
+
+                # get a set of installed packages
+                cati = self.__img.get_catalog(self.__img.IMG_CATALOG_INSTALLED)
+                pkgs_inst = frozenset([
+                        stem
+                        # Unused variable 'pub'; pylint: disable-msg=W0612
+                        for pub, stem in cati.pkg_names()
+                        # pylint: enable-msg=W0612
+                ])
+
+                # check if the zones packages are installed
+                for pkgs in self.__zone_pkgs:
+                        if (pkgs & pkgs_inst) == pkgs:
+                                return True
+
+                return False
+
+        def __list_zones_cached(self, nocache=False):
+                """List the zones associated with the current image.  Since
+                this involves forking and running zone commands, cache the
+                results."""
+
+                # if nocache is set then delete any cached children
+                if nocache:
+                        self.__zoneadm_list_cache = None
+
+                # try to return the cached children
+                if self.__zoneadm_list_cache != None:
+                        assert type(self.__zoneadm_list_cache) == list
+                        return self.__zoneadm_list_cache
+
+                # see if the target image supports zones
+                if not self.__zones_supported():
+                        self.__zoneadm_list_cache = []
+                        return self.__list_zones_cached()
+
+                # zones are only visible when running in the global zone
+                if not self.__in_gz:
+                        self.__zoneadm_list_cache = []
+                        return self.__list_zones_cached()
+
+                # find zones
+                try:
+                        zdict = _list_zones(self.__img.root)
+                except OSError, e:
+                        # W0212 Access to a protected member
+                        # pylint: disable-msg=W0212
+                        raise apx._convert_error(e)
+
+                # convert zone names into LinkedImageName objects
+                zlist = []
+                for zone, path in zdict.iteritems():
+                        lin = li.LinkedImageName("%s:%s" % (self.__pname, zone))
+                        zlist.append([lin, path])
+
+                self.__zoneadm_list_cache = zlist
+                return self.__list_zones_cached()
+
+        def init_root(self, old_altroot):
+                """See parent class for docstring."""
+                # nuke any cached children
+                self.__zoneadm_list_cache = None
+
+        def get_altroot(self):
+                """See parent class for docstring."""
+
+                zlist = self.__list_zones_cached(nocache=True)
+                if not zlist:
+                        return None
+
+                # only global zones can have zone children, and global zones
+                # always execute with "/" as their root.  so if the current
+                # image path is not "/", then assume we're in an alternate
+                # root.
+                root = self.__img.root.rstrip(os.sep)
+                if root == "":
+                        root = os.sep
+                return root
+
+        def get_child_list(self, nocache=False):
+                """See parent class for docstring."""
+
+                inmemory = []
+                # find any newly attached zone images
+                for lin in self.__children:
+                        path = self.__children[lin][li.PROP_PATH]
+                        inmemory.append([lin, path])
+
+                ondisk = []
+                for (lin, path) in self.__list_zones_cached(nocache):
+                        if lin in [i[0] for i in inmemory]:
+                                # we re-attached a zone in memory.
+                                continue
+                        ondisk.append([lin, path])
+
+                rv = []
+                rv.extend(ondisk)
+                rv.extend(inmemory)
+
+                for lin, path in rv:
+                        assert lin.lin_type == self.__pname
+
+                return rv
+
+        def get_child_props(self, lin):
+                """See parent class for docstring."""
+
+                if lin in self.__children:
+                        return self.__children[lin]
+
+                props = dict()
+                props[li.PROP_NAME] = lin
+                for i_lin, i_path in self.get_child_list():
+                        if lin == i_lin:
+                                props[li.PROP_PATH] = i_path
+                                break
+                assert li.PROP_PATH in props
+
+                props[li.PROP_MODEL] = li.PV_MODEL_PUSH
+                for k, v in self.attach_props_def.iteritems():
+                        if k not in props:
+                                props[k] = v
+
+                return props
+
+        def attach_child_inmemory(self, props, allow_relink):
+                """See parent class for docstring."""
+
+                # make sure this child doesn't already exist
+                lin = props[li.PROP_NAME]
+                lin_list = [i[0] for i in self.get_child_list()]
+                assert lin not in lin_list or allow_relink
+
+                # make a copy of the properties
+                props = props.copy()
+
+                # Cache this linked image
+                self.__children[lin] = props
+
+        def detach_child_inmemory(self, lin):
+                """See parent class for docstring."""
+
+                # make sure this child exists
+                assert lin in [i[0] for i in self.get_child_list()]
+
+                # Delete this linked image
+                del self.__children[lin]
+
+        def sync_children_todisk(self):
+                """See parent class for docstring."""
+
+                # nothing to do
+                return (pkgdefs.EXIT_OK, None)
+
+
+class LinkedImageZoneChildPlugin(li.LinkedImageChildPlugin):
+        """See parent class for docstring."""
+
+        def __init__(self, lic):
+                """See parent class for docstring."""
+                li.LinkedImageChildPlugin.__init__(self, lic)
+
+        def munge_props(self, props):
+                """See parent class for docstring."""
+
+                #
+                # For zones we always update the pushed child image path to
+                # be '/' (Since any linked children of the zone will be
+                # relative to that zone's root).
+                #
+                props[li.PROP_PATH] = "/"
+
+
+def _zonename():
+        """Get the zonename of the current system."""
+
+        cmd = DebugValues.get_value("zone_name")
+        if not cmd:
+                cmd = ["/bin/zonename"]
+
+        # if the command doesn't exist then bail.
+        if not li.path_exists(cmd[0]):
+                return
+
+        f = tempfile.TemporaryFile()
+        p = pkg.pkgsubprocess.Popen(cmd, stdout=f)
+        p.wait()
+        if (p.returncode != 0):
+                raise apx.SubprocessError(rv=p.returncode, cmd=cmd)
+
+        # parse the command output
+        f.seek(0)
+        l = f.readlines()[0].rstrip()
+        return l
+
+def _list_zones(root):
+        """Get the zones associated with the image located at 'root'.  We
+        return a dictionary where the keys are zone names and the values are
+        zone root paths.  The global zone is excluded from the results.
+        Solaris10 branded zones are excluded from the results.  """
+
+        rv = dict()
+        cmd = ["/usr/sbin/zoneadm"]
+
+        # if the command doesn't exist then bail.
+        if not li.path_exists(cmd[0]):
+                return rv
+
+        # create the zoneadm command line
+        if (root and (root != "/")):
+                cmd.extend(["-R", str(root)])
+        cmd.extend(["list", "-cp"])
+
+        # execute zoneadm and save its output to a file
+        f = tempfile.TemporaryFile()
+        p = pkg.pkgsubprocess.Popen(cmd, stdout=f)
+        p.wait()
+        if (p.returncode != 0):
+                raise apx.SubprocessError(rv=p.returncode, cmd=cmd)
+
+        # parse the command output
+        f.seek(0)
+        for l in f.readlines():
+                l = l.rstrip()
+
+                # Unused variable; pylint: disable-msg=W0612
+                z_id, z_name, z_state, z_path, z_uuid, z_brand, \
+                    z_iptype = l.strip().split(':', 6)
+                # pylint: enable-msg=W0612
+                z_rootpath = os.path.join(z_path, "root")
+
+                # we don't care about the global zone.
+                if (z_name == "global"):
+                        continue
+
+                # W0511 XXX / FIXME Comments; pylint: disable-msg=W0511
+                # XXX: don't hard code brand names, use a brand attribute
+                # pylint: enable-msg=W0511
+                if z_brand not in ["ipkg", "sn1", "labeled"]:
+                        continue
+
+                # we only care about zones that have been installed
+                if z_state not in zone_installed_states:
+                        continue
+
+                rv[z_name] = z_rootpath
+
+        return rv
--- a/src/modules/client/pkg_solver.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/pkg_solver.py	Sat May 07 00:25:10 2011 -0700
@@ -23,12 +23,17 @@
 #
 # Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
 #
-import pkg.client.api_errors as api_errors
+
+import time
+
+import pkg.actions
 import pkg.catalog           as catalog
+import pkg.client.api_errors as api_errors
 import pkg.client.image
+import pkg.fmri
+import pkg.misc as misc
 import pkg.solver
 import pkg.version           as version
-import time
 
 from collections import defaultdict
 from pkg.client.debugvalues import DebugValues
@@ -57,16 +62,18 @@
 
 class PkgSolver(object):
 
-        def __init__(self, cat, installed_fmris, pub_ranks, variants, avoids, progtrack):
-                """Create a PkgSolver instance; catalog
-                should contain all known pkgs, installed fmris
-                should be a dict of fmris indexed by name that define
-                pkgs current installed in the image. Pub_ranks dict contains
-                (rank, stickiness, enabled) for each publisher.
-                variants are the current image variants; avoids is the
-                set of pkg stems being avoided in the image"""
+        def __init__(self, cat, installed_dict, pub_ranks, variants, avoids,
+            parent_pkgs, extra_deps, progtrack):
+                """Create a PkgSolver instance; catalog should contain all
+                known pkgs, installed fmris should be a dict of fmris indexed
+                by name that define pkgs current installed in the image.
+                Pub_ranks dict contains (rank, stickiness, enabled) for each
+                publisher.  variants are the current image variants; avoids is
+                the set of pkg stems being avoided in the image; extra_deps is
+                a dictionary, indexed by fmris, of extra dependency actions
+                that should be added to packages."""
+
                 self.__catalog = cat
-                self.__installed_fmris = {}	# indexed by stem
                 self.__publisher = {}		# indexed by stem
                 self.__possible_dict = defaultdict(list) # indexed by stem
                 self.__pub_ranks = pub_ranks    # rank indexed by pub
@@ -74,15 +81,19 @@
                                                 # consideration
 
 
+                self.__installed_dict = installed_dict.copy() # indexed by stem
+                self.__installed_pkgs = frozenset(self.__installed_dict)
+                self.__installed_fmris = frozenset(
+                    self.__installed_dict.values())
+
                 self.__pub_trim = {}		# pkg names already
                                                 # trimmed by pub.
-                self.__installed_fmris = installed_fmris.copy()
-                self.__removal_list = []        # installed fmris we're
+                self.__removal_fmris = set()    # installed fmris we're
                                                 # going to remove
 
                 self.__req_pkg_names = set()    # package names that must be
                                                 # present in solution by spec.
-                for f in installed_fmris.values(): # record only sticky pubs
+                for f in self.__installed_fmris: # record only sticky pubs
                         pub = f.get_publisher()
                         if self.__pub_ranks[pub][1]:
                                 self.__publisher[f.pkg_name] = f.get_publisher()
@@ -121,6 +132,18 @@
                 self.__obs_set = None           #
                 self.__reject_set = set()       # set of stems we're rejecting
 
+                assert isinstance(parent_pkgs, (type(None), frozenset))
+                self.__parent_pkgs = parent_pkgs
+                self.__parent_dict = dict()
+                if self.__parent_pkgs != None:
+                        self.__parent_dict = dict([
+                            (f.pkg_name, f)
+                            for f in self.__parent_pkgs
+                        ])
+
+                assert isinstance(extra_deps, dict)
+                self.__extra_deps = extra_deps
+
         def __str__(self):
 
                 s = "Solver: ["
@@ -150,18 +173,20 @@
                 be performed after a solution is successfully returned."""
 
                 self.__catalog = None
-                self.__installed_fmris = None
-                self.__publisher = None
-                self.__possible_dict = None
+                self.__installed_dict = {}
+                self.__installed_pkgs = frozenset()
+                self.__installed_fmris = frozenset()
+                self.__publisher = {}
+                self.__possible_dict = {}
                 self.__pub_ranks = None
-                self.__pub_trim = None
-                self.__publisher = None
+                self.__pub_trim = {}
+                self.__removal_fmris = set()
                 self.__id2fmri = None
                 self.__fmri2id = None
                 self.__solver = None
                 self.__poss_set = None
                 self.__progtrack = None
-                self.__addclause_failure = None
+                self.__addclause_failure = False
                 self.__variant_dict = None
                 self.__variants = None
                 self.__cache = None
@@ -188,49 +213,89 @@
                         self.__timings.append((phase, now - self.__start_time))
                         self.__start_time = now
 
-        def __solve_install(self, existing_freezes, proposed_dict,
-            excludes=EmptyI, reject_set=frozenset(), trim_proposed_installed=
-            True):
-                """Share logic for install solutions.  Existing_freezes is a
-                list of incorp. style FMRIs that constrain pkg motion,
-                proposed_dict contains user specified FMRIs indexed by pkg_name;
-                returns FMRIs to be installed / upgraded in system.
+        def solve_install(self, existing_freezes, proposed_dict,
+            new_variants=None, new_facets=None, excludes=EmptyI,
+            reject_set=frozenset(), trim_proposed_installed=True,
+            relax_all=False):
+                """Logic to install packages, change variants, and/or change
+                facets.
+
+                Returns FMRIs to be installed / upgraded in system and a new
+                set of packages to be avoided.
+
+                'existing_freezes' is a list of incorp. style FMRIs that
+                constrain package motion.
+
+                'proposed_dict' contains user specified FMRIs indexed by
+                pkg_name that should be installed within an image.
 
-                'trim_proposed_installed' is an optional boolean indicating
-                whether the solver should elide versions of proposed packages
-                older than those installed from the set of possible solutions.
-                If False, package downgrades are allowed, but only for installed
+                'new_variants' a dictionary containing variants which are
+                being updated.  (It should not contain existing variants which
+                are not changing.)
+
+                'new_facets' a dictionary containing all the facets for an
+                image.  (This includes facets which are changing and also
+                facets which are not.)
+
+                'reject_set' contains user specified package names that should
+                not be present within the final image.  (These packages may or
+                may not be currently installed.)
+
+                'trim_proposed_installed' is a boolean indicating whether the
+                solver should elide versions of proposed packages older than
+                those installed from the set of possible solutions.  If False,
+                package downgrades are allowed, but only for installed
                 packages matching those in the proposed_dict.
+
+                'relax_all' indicates if the solver should relax all install
+                holds, or only install holds specified by proposed packages.
                 """
 
                 # Once solution has been returned or failure has occurred, a new
                 # solver must be used.
                 assert self.__state == SOLVER_INIT
-
                 self.__state = SOLVER_OXY
 
+                proposed_pkgs = set(proposed_dict)
+
                 self.__progtrack.evaluate_progress()
+                self.__timeit()
+
+                if new_variants:
+                        self.__variants = new_variants
 
-                self.__timeit()
+                        #
+                        # Entire packages can be tagged with variants thereby
+                        # making those packages uninstallable in certain
+                        # images.  So if we're changing variants such that
+                        # some currently installed packages are becoming
+                        # uninstallable add them to the removal package set.
+                        #
+                        for f in self.__installed_fmris:
+                                d = self.__get_variant_dict(f)
+                                for k in new_variants:
+                                        if k in d and \
+                                            new_variants[k] not in d[k]:
+                                                self.__removal_fmris |= set([f])
 
                 # proposed_dict already contains publisher selection logic,
                 # so prevent any further trimming of named packages based
                 # on publisher if they are installed.
                 for name in proposed_dict:
-                        if name in self.__installed_fmris:
+                        if name in self.__installed_dict:
                                 self.__mark_pub_trimmed(name)
                         else:
                                 self.__publisher[name] = \
                                     proposed_dict[name][0].get_publisher()
 
-                self.__removal_list = [
-                    self.__installed_fmris[name]
+                self.__removal_fmris |= set([
+                    self.__installed_dict[name]
                     for name in reject_set
-                    if name in self.__installed_fmris
-                ]
+                    if name in self.__installed_dict
+                ])
 
                 # remove packages to be installed from avoid_set
-                self.__avoid_set -= set(proposed_dict.keys())
+                self.__avoid_set -= proposed_pkgs
                 self.__reject_set = reject_set
 
                 # trim fmris that user explicitly disallowed
@@ -238,16 +303,22 @@
                         self.__trim(self.__get_catalog_fmris(name),
                             N_("This version rejected by user request"))
 
-                self.__req_pkg_names = (set(self.__installed_fmris.keys()) |
-                    set(proposed_dict.keys())) - reject_set
+                self.__req_pkg_names = (self.__installed_pkgs |
+                    proposed_pkgs) - reject_set
 
                 self.__progtrack.evaluate_progress()
 
                 # find list of incorps we don't let change as a side
                 # effect of other changes; exclude any specified on
                 # command line
+                # compute the set of packages whose incorporation
+                if relax_all:
+                        relax_pkgs = self.__installed_pkgs
+                else:
+                        relax_pkgs = proposed_pkgs
+
                 inc_list, con_lists = self.__get_installed_unbound_inc_list(
-                    proposed_dict, excludes=excludes)
+                    relax_pkgs, excludes=excludes)
 
                 self.__inc_list = inc_list
                 self.__progtrack.evaluate_progress()
@@ -255,9 +326,7 @@
                 # If requested, trim any proposed fmris older than those of
                 # corresponding installed packages.
                 self.__timeit("phase 1")
-                for f in self.__installed_fmris.values():
-                        if f in self.__removal_list:
-                                continue
+                for f in self.__installed_fmris - self.__removal_fmris:
                         if not trim_proposed_installed and \
                             f.pkg_name in proposed_dict:
                                 # Don't trim versions if newest version in
@@ -312,29 +381,32 @@
                 self.__timeit("phase 5")
                 for name in proposed_dict:
                         tv = self.__dotrim(proposed_dict[name])
-                        if not tv:
-                                ret = [_("No matching version of %s can be "
-                                    "installed:") % name]
-                                ret.extend(self.__fmri_list_errors(proposed_dict[name]))
-                                solver_errors = None
-                                if DebugValues["plan"]:
-                                        solver_errors = self.get_trim_errors()
-                                raise api_errors.PlanCreationException(
-                                    no_version=ret, solver_errors=solver_errors)
-                        proposed_dict[name] = tv
+                        if tv:
+                                proposed_dict[name] = tv
+                                continue
+
+                        ret = [_("No matching version of %s can be "
+                            "installed:") % name]
+                        ret.extend(self.__fmri_list_errors(proposed_dict[name]))
+                        solver_errors = None
+                        if DebugValues["plan"]:
+                                solver_errors = self.get_trim_errors()
+                        raise api_errors.PlanCreationException(
+                            no_version=ret, solver_errors=solver_errors)
 
                 self.__progtrack.evaluate_progress()
 
                 # build set of possible pkgs
                 self.__timeit("phase 6")
 
-                possible_set = set()
+                # generate set of possible fmris
+                #
                 # ensure existing pkgs stay installed; explicitly add in
                 # installed fmris in case publisher change has occurred and
                 # some pkgs aren't part of new publisher
-                for f in self.__installed_fmris.values():
-                        if f not in self.__removal_list:
-                                possible_set |= self.__comb_newer_fmris(f)[0] | set([f])
+                possible_set = set()
+                for f in self.__installed_fmris - self.__removal_fmris:
+                        possible_set |= self.__comb_newer_fmris(f)[0] | set([f])
 
                 # add the proposed fmris
                 for flist in proposed_dict.values():
@@ -344,29 +416,31 @@
                 possible_set.update(self.__generate_dependency_closure(
                     possible_set, excludes=excludes))
 
-                # remove any that cannot be installed due to origin dependencies
+                # remove any possibles that must be excluded because of
+                # origin and parent dependencies
                 for f in possible_set.copy():
                         if not self.__trim_nonmatching_origins(f, excludes):
                                 possible_set.remove(f)
-
+                        elif not self.__trim_nonmatching_parents(f, excludes):
+                                possible_set.remove(f)
 
                 # remove any versions from proposed_dict that are in trim_dict
                 # as trim dict has been updated w/ missing dependencies
                 self.__timeit("phase 8")
                 for name in proposed_dict:
                         tv = self.__dotrim(proposed_dict[name])
-                        if not tv:
-                                ret = [_("No matching version of %s can be "
-                                    "installed:") % name]
+                        if tv:
+                                proposed_dict[name] = tv
+                                continue
 
-                                ret.extend(self.__fmri_list_errors(proposed_dict[name]))
-                                solver_errors = None
-                                if DebugValues["plan"]:
-                                        solver_errors = self.get_trim_errors()
-                                raise api_errors.PlanCreationException(
-                                    no_version=ret, solver_errors=solver_errors)
-
-                        proposed_dict[name] = tv
+                        ret = [_("No matching version of %s can be "
+                            "installed:") % name]
+                        ret.extend(self.__fmri_list_errors(proposed_dict[name]))
+                        solver_errors = None
+                        if DebugValues["plan"]:
+                                solver_errors = self.get_trim_errors()
+                        raise api_errors.PlanCreationException(
+                            no_version=ret, solver_errors=solver_errors)
 
                 self.__timeit("phase 9")
                 self.__progtrack.evaluate_progress()
@@ -377,18 +451,18 @@
                 # generate clauses for only one version of each package, and for
                 # dependencies for each package.  Do so for all possible fmris.
 
-                for name in self.__possible_dict.keys():
+                for name in self.__possible_dict:
                         self.__progtrack.evaluate_progress()
-                        # Ensure that at most one version of a package is
-                        # installed.
+                        # Ensure only one version of a package is installed
                         self.__addclauses(self.__gen_highlander_clauses(
                             self.__possible_dict[name]))
                         # generate dependency clauses for each pkg
                         for fmri in self.__possible_dict[name]:
                                 for da in self.__get_dependency_actions(fmri,
                                     excludes=excludes):
-                                        self.__addclauses(self.__gen_dependency_clauses(
-                                            fmri, da))
+                                        self.__addclauses(
+                                            self.__gen_dependency_clauses(fmri,
+                                            da))
 
                 self.__timeit("phase 10")
 
@@ -399,19 +473,34 @@
 
                 for name in proposed_dict:
                         self.__progtrack.evaluate_progress()
-                        self.__addclauses(self.__gen_one_of_these_clauses(
-                            set(proposed_dict[name]) & set(self.__possible_dict[name])))
+                        self.__addclauses(
+                            self.__gen_one_of_these_clauses(
+                                set(proposed_dict[name]) &
+                                set(self.__possible_dict[name])))
 
-                for name in set(self.__installed_fmris.keys()) - set(proposed_dict.keys()):
-                        if name in reject_set:
+                for name in self.__installed_pkgs - proposed_pkgs - \
+                    reject_set - self.__avoid_set:
+                        if (self.__installed_dict[name] in
+                            self.__removal_fmris):
                                 continue
 
-                        if name in self.__avoid_set:
+                        if name in self.__possible_dict:
+                                self.__progtrack.evaluate_progress()
+                                self.__addclauses(
+                                    self.__gen_one_of_these_clauses(
+                                        self.__possible_dict[name]))
                                 continue
 
-                        self.__progtrack.evaluate_progress()
-                        self.__addclauses(self.__gen_one_of_these_clauses(
-                            self.__possible_dict[name]))
+                        # no version of this package is allowed
+                        ret = [_("The installed package %s is not "
+                            "permissible.") % name]
+                        ret.extend(self.__fmri_list_errors(
+                            [self.__installed_dict[name]]))
+                        solver_errors = None
+                        if DebugValues["plan"]:
+                                solver_errors = self.get_trim_errors()
+                        raise api_errors.PlanCreationException(
+                            no_version=ret, solver_errors=solver_errors)
 
                 # save a solver instance so we can come back here
                 # this is where errors happen...
@@ -469,24 +558,6 @@
 
                 self.__timeit("phase 11")
 
-
-                # check to see if we actually got anything done
-                # that we requested... it is possible that the
-                # solver cannot find anything to do for the
-                # requested packages, but actually just
-                # picked some other packages to upgrade
-
-                installed_set = set(self.__installed_fmris.values())
-                proposed_changes = [
-                        f
-                        for f in saved_solution - installed_set
-                        if f.pkg_name in proposed_dict
-                ]
-
-                if not (proposed_changes or self.__removal_list):
-                        return (self.__cleanup(installed_set),
-                            (self.__avoid_set, self.__obs_set))
-
                 # we have a solution that works... attempt to
                 # reduce collateral damage to other packages
                 # while still keeping command line pkgs at their
@@ -515,9 +586,9 @@
                 # and drive forward again w/ the remainder
                 self.__restore_solver(saved_solver)
 
-                for fmri in (saved_solution & set(self.__installed_fmris.values())):
-                        self.__addclauses(self.__gen_one_of_these_clauses([
-                            fmri]))
+                for fmri in (saved_solution & self.__installed_fmris):
+                        self.__addclauses(
+                            self.__gen_one_of_these_clauses([fmri]))
 
                 solution = self.__solve()
 
@@ -527,50 +598,35 @@
                 return self.__cleanup((self.__elide_possible_renames(solution,
                     excludes), (self.__avoid_set, self.__obs_set)))
 
-        def solve_install(self, existing_freezes, proposed_dict,
-            excludes=EmptyI, reject_set=frozenset()):
-                """Existing_freezes is a list of incorp. style FMRIs that
-                constrain pkg motion, proposed_dict contains user specified
-                FMRIs indexed by pkg_name; reject_set contains user specified
-                pkg_names that are not permitted in solution;
-                returns FMRIs to be installed or upgraded in system,
-                new set of packages to be avoided."""
-
-                return self.__solve_install(existing_freezes, proposed_dict,
-                    excludes=excludes, reject_set=reject_set)
-
-        def solve_update(self, existing_freezes, proposed_dict, excludes=EmptyI,
-            reject_set=frozenset()):
-                """Existing_freezes is a list of incorp. style FMRIs that
-                constrain pkg motion, proposed_dict contains user specified
-                FMRIs indexed by pkg_name; reject_set contains user specified
-                pkg_names that are not permitted in solution;
-                returns FMRIs to be installed, upgraded, downgraded, or removed
-                in system, new avoid set."""
-
-                return self.__solve_install(existing_freezes, proposed_dict,
-                    excludes=excludes, reject_set=reject_set,
-                    trim_proposed_installed=False)
-
         def solve_update_all(self, existing_freezes, excludes=EmptyI,
             reject_set=frozenset()):
-                """Existing_freezes is a list of incorp. style FMRIs that
-                constrain pkg motion; reject_set contains user specified
-                pkg_names that are not permitted in solution;
-                returns FMRIs to be installed, upgraded, or removed in system,
-                new set of packages to be avoided."""
+                """Logic to update all packages within an image to the latest
+                versions possible.
+
+                Returns FMRIs to be installed / upgraded in system and a new
+                set of packages to be avoided.
+
+                'existing_freezes' is a list of incorp. style FMRIs that
+                constrain pkg motion
+
+                'reject_set' contains user specified FMRIs that should not be
+                present within the final image.  (These packages may or may
+                not be currently installed.)
+                """
 
                 # Once solution has been returned or failure has occurred, a new
                 # solver must be used.
                 assert self.__state == SOLVER_INIT
+                self.__state = SOLVER_OXY
 
+                self.__progtrack.evaluate_progress()
                 self.__timeit()
 
-                self.__removal_list = [
-                    self.__installed_fmris[name]
+                self.__removal_fmris = frozenset([
+                    self.__installed_dict[name]
                     for name in reject_set
-                    if name in self.__installed_fmris
-                ]
+                    if name in self.__installed_dict
+                ])
                 self.__reject_set = reject_set
 
                 # trim fmris that user explicitly disallowed
@@ -578,64 +634,80 @@
                         self.__trim(self.__get_catalog_fmris(name),
                             N_("This version rejected by user request"))
 
-                self.__req_pkg_names = set(self.__installed_fmris.keys()) - \
-                    reject_set
+                self.__req_pkg_names = self.__installed_pkgs - reject_set
 
                 # trim fmris we cannot install because they're older
-                for f in self.__installed_fmris.values():
+                for f in self.__installed_fmris:
                         self.__trim_older(f)
 
                 self.__timeit("phase 1")
 
                 # generate set of possible fmris
                 possible_set = set()
-
-                for f in self.__installed_fmris.values():
-                        if f in self.__removal_list:
-                                continue
+                for f in self.__installed_fmris - self.__removal_fmris:
                         matching = self.__comb_newer_fmris(f)[0]
                         if not matching:            # disabled publisher...
                                 matching = set([f]) # staying put is an option
-                        possible_set |=  matching
+                        possible_set |= matching
 
                 self.__timeit("phase 2")
 
-                possible_set.update(self.__generate_dependency_closure(possible_set,
-                    excludes=excludes))
+                possible_set.update(self.__generate_dependency_closure(
+                    possible_set, excludes=excludes))
 
-                # remove any possibles that must be excluded because of origin dependencies
+                # remove any possibles that must be excluded because of
+                # origin and parent dependencies
                 for f in possible_set.copy():
                         if not self.__trim_nonmatching_origins(f, excludes):
                                 possible_set.remove(f)
+                        elif not self.__trim_nonmatching_parents(f, excludes):
+                                possible_set.remove(f)
 
                 self.__timeit("phase 3")
 
                 # generate ids, possible_dict for clause generation
                 self.__assign_fmri_ids(possible_set)
 
-                # generate clauses for only one version of each package, and
-                # for dependencies for each package.  Do so for all possible fmris.
+                # generate clauses for only one version of each package, and for
+                # dependencies for each package.  Do so for all possible fmris.
 
-                for name in self.__possible_dict.keys():
-                        # insure that at most one version of a package is installed
+                for name in self.__possible_dict:
+                        # Ensure only one version of a package is installed
                         self.__addclauses(self.__gen_highlander_clauses(
                             self.__possible_dict[name]))
                         # generate dependency clauses for each pkg
                         for fmri in self.__possible_dict[name]:
                                 for da in self.__get_dependency_actions(fmri,
                                     excludes=excludes):
-                                        self.__addclauses(self.__gen_dependency_clauses(
-                                            fmri, da))
+                                        self.__addclauses(
+                                            self.__gen_dependency_clauses(fmri,
+                                                da))
                 self.__timeit("phase 4")
 
                 # generate clauses for installed pkgs
-
-                for name in self.__installed_fmris.keys():
-                        if name in self.__avoid_set or name in self.__reject_set:
+                for name in self.__installed_pkgs - self.__avoid_set:
+                        if (self.__installed_dict[name] in
+                            self.__removal_fmris):
+                                # we're uninstalling this package
                                 continue
 
-                        self.__addclauses(self.__gen_one_of_these_clauses(
-                            self.__possible_dict[name]))
+                        if name in self.__possible_dict:
+                                self.__progtrack.evaluate_progress()
+                                self.__addclauses(
+                                    self.__gen_one_of_these_clauses(
+                                    self.__possible_dict[name]))
+                                continue
+
+                        # no version of this package is allowed
+                        ret = [_("The installed package %s is not "
+                            "permissible.") % name]
+                        ret.extend(self.__fmri_list_errors(
+                            [self.__installed_dict[name]]))
+                        solver_errors = None
+                        if DebugValues["plan"]:
+                                solver_errors = self.get_trim_errors()
+                        raise api_errors.PlanCreationException(
+                            no_version=ret, solver_errors=solver_errors)
 
                 self.__timeit("phase 5")
 
@@ -648,9 +720,7 @@
                                 solution.remove(f)
 
                 # check if we cannot upgrade (heuristic)
-                installed_set = set(self.__installed_fmris.values())
-
-                if solution == installed_set:
+                if solution == self.__installed_fmris:
                         # no solution can be found.
                         incorps = self.__get_installed_upgradeable_incorps(excludes)
                         if incorps:
@@ -695,7 +765,7 @@
                 # generate list of installed pkgs w/ possible renames removed to forestall
                 # failing removal due to presence of unneeded renamed pkg
 
-                orig_installed_set = set(self.__installed_fmris.values())
+                orig_installed_set = self.__installed_fmris
                 renamed_set = orig_installed_set - \
                     self.__elide_possible_renames(orig_installed_set, excludes)
 
@@ -720,58 +790,9 @@
                 # Run it through the solver; w/ more complex dependencies we're
                 # going to be out of luck w/o it.
 
-                return self.__solve_install(existing_freezes, {},
+                return self.solve_install(existing_freezes, {},
                     excludes=excludes, reject_set=reject_set)
 
-        def solve_change_varcets(self, existing_freezes, new_variants, new_facets, new_excludes):
-                """Compute packaging changes needed to effect
-                desired variant and or facet change"""
-
-                # Once solution has been returned or failure has occurred, a new
-                # solver must be used.
-                assert self.__state == SOLVER_INIT
-
-                # First, determine if there are any packages that are
-                # not compatible w/ the new variants, and compute
-                # their removal
-
-                keep_set = set()
-                removal_set = set()
-
-                if new_variants:
-                        self.__variants = new_variants
-                #self.__excludes = new_excludes #must include facet changes
-
-                if new_variants:
-                        for f in self.__installed_fmris.values():
-                                d = self.__get_variant_dict(f)
-                                for k in new_variants.keys():
-                                        if k in d and new_variants[k] not in \
-                                            d[k]:
-                                                removal_set.add(f)
-                                                break
-                                else:
-                                        keep_set.add(f)
-                else: # keep the same pkgs as a starting point for facet changes only
-                        keep_set = set(self.__installed_fmris.values())
-
-                # XXX check existing freezes to see if they permit removals
-
-                # recompute solution as if a blank image was being
-                # considered; if a generic package depends on a
-                # architecture specific one, the operation will fail.
-
-                if not keep_set:
-                        # in case this deletes our last package
-                        return self.__cleanup(([], (self.__avoid_set, self.__obs_set)))
-
-                blank_solver = PkgSolver(self.__catalog, {} , self.__pub_ranks,
-                    self.__variants, self.__avoid_set, self.__progtrack)
-
-                proposed_dict = dict([(f.pkg_name, [f]) for f in keep_set])
-                return self.__cleanup(blank_solver.solve_install(
-                    existing_freezes, proposed_dict, new_excludes))
-
         def __update_solution_set(self, solution, excludes):
                 """Update avoid set w/ any missing packages (due to reject).
                 Remove obsolete packages from solution.
@@ -815,7 +836,8 @@
                 self.__iterations = 0
 
         def __solve(self, older=False, max_iterations=2000):
-                """Perform iterative solution; try for newest pkgs unless older=True"""
+                """Perform iterative solution; try for newest pkgs unless
+                older=True"""
                 solution_vector = []
                 self.__state = SOLVER_FAIL
                 eliminated = set()
@@ -826,26 +848,25 @@
                                 break
 
                         solution_vector = self.__get_solution_vector()
+                        if not solution_vector:
+                                break
 
                         # prevent the selection of any older pkgs
                         for fid in solution_vector:
+                                pfmri = self.__getfmri(fid)
+                                matching, remaining = \
+                                    self.__comb_newer_fmris(pfmri)
                                 if not older:
-                                        for f in self.__comb_newer_fmris(
-                                            self.__getfmri(fid))[1]:
-                                                if f not in eliminated:
-                                                        eliminated.add(f)
-                                                        self.__addclauses([[-self.__getid(f)]])
+                                        remove = remaining
                                 else:
-                                        pfmri = self.__getfmri(fid)
-                                        for f in self.__comb_newer_fmris(pfmri)[0] - \
-                                            set([pfmri]):
-                                                if f not in eliminated:
-                                                        eliminated.add(f)
-                                                        self.__addclauses([[-self.__getid(f)]])
+                                        remove = matching - set([pfmri]) - \
+                                            eliminated
+                                for f in remove:
+                                        self.__addclauses([[-self.__getid(f)]])
 
-                        # prevent the selection of this exact combo; permit [] solution
-                        if not solution_vector:
-                                break
+
+                        # prevent the selection of this exact combo;
+                        # permit [] solution
                         self.__addclauses([[-i for i in solution_vector]])
 
                 if not self.__iterations:
@@ -863,14 +884,16 @@
 
         def __get_solution_vector(self):
                 """Return solution vector from solver"""
-                return sorted([
+                return frozenset([
                     (i + 1) for i in range(self.__solver.get_variables())
                     if self.__solver.dereference(i)
                 ])
 
         def __assign_fmri_ids(self, possible_set):
                 """ give a set of possible fmris, assign ids"""
+
                 # generate dictionary of possible pkgs fmris by pkg stem
+
                 self.__possible_dict.clear()
                 self.__poss_set |= possible_set
 
@@ -918,33 +941,40 @@
                                 for f in tp[1]
                                 ]
 
-
         def __comb_newer_fmris(self, fmri, dotrim=True, obsolete_ok=True):
                 """Returns tuple of set of fmris that are matched within
-                CONSTRAINT.NONE of specified version and set of remaining fmris."""
-                return self.__comb_common(fmri, dotrim, version.CONSTRAINT_NONE, obsolete_ok)
+                CONSTRAINT.NONE of specified version and set of remaining
+                fmris."""
+                return self.__comb_common(fmri, dotrim,
+                    version.CONSTRAINT_NONE, obsolete_ok)
 
         def __comb_common(self, fmri, dotrim, constraint, obsolete_ok):
                 """Underlying impl. of other comb routines"""
                 tp = (fmri, dotrim, constraint, obsolete_ok) # cache index
                 # determine if the data is cacheable or cached:
                 if (not self.__trimdone and dotrim) or tp not in self.__cache:
+
+                        # use frozensets so callers don't inadvertently update
+                        # these sets (which may be cached).
                         all_fmris = set(self.__get_catalog_fmris(fmri.pkg_name))
-                        matching = set([
-                                        f
-                                        for f in all_fmris
-                                        if f not in self.__trim_dict or not dotrim
-                                        if not fmri.version or
-                                        fmri.version == f.version or
-                                        f.version.is_successor(fmri.version,
-                                            constraint=constraint)
-                                        if obsolete_ok or not self.__fmri_is_obsolete(f)
-                                        ])
-                        # if we haven't finished triming, don't cache this
+                        matching = frozenset([
+                            f
+                            for f in all_fmris
+                            if f not in self.__trim_dict or not dotrim
+                            if not fmri.version or
+                                fmri.version == f.version or
+                                f.version.is_successor(fmri.version,
+                                    constraint=constraint)
+                            if obsolete_ok or not self.__fmri_is_obsolete(f)
+                        ])
+                        remaining = frozenset(all_fmris - matching)
+
+                        # if we haven't finished trimming, don't cache this
                         if not self.__trimdone:
-                                return matching, all_fmris - matching
+                                return matching, remaining
                         # cache the result
-                        self.__cache[tp] = (matching, all_fmris - matching)
+                        self.__cache[tp] = (matching, remaining)
+
                 return self.__cache[tp]
 
         def __comb_older_fmris(self, fmri, dotrim=True, obsolete_ok=True):
@@ -967,7 +997,7 @@
                         return older - trimmed_older, newer | trimmed_older
 
         def __comb_auto_fmris(self, fmri, dotrim=True, obsolete_ok=True):
-                """Returns tuple of set of fmris that are match witinin
+                """Returns tuple of set of fmris that are matched within
                 CONSTRAINT.AUTO of specified version and set of remaining fmris."""
                 return self.__comb_common(fmri, dotrim, version.CONSTRAINT_AUTO, obsolete_ok)
 
@@ -1010,7 +1040,7 @@
                 """Return list of all dependency actions for this fmri"""
 
                 try:
-                        return [
+                        return self.__extra_deps.get(fmri, []) + [
                             a
                             for a in self.__catalog.get_entry_actions(fmri,
                             [catalog.Catalog.DEPENDENCY], excludes=excludes)
@@ -1027,7 +1057,7 @@
                         else:
                                 raise
 
-        def __get_variant_dict(self, fmri, excludes=EmptyI):
+        def __get_variant_dict(self, fmri):
                 """Return dictionary of variants suppported by fmri"""
                 try:
                         if fmri not in self.__variant_dict:
@@ -1051,8 +1081,8 @@
                         fmri = needs_processing.pop()
                         self.__progtrack.evaluate_progress()
                         already_processed.add(fmri)
-                        needs_processing |= (self.__generate_dependencies(fmri, excludes,
-                            dotrim) - already_processed)
+                        needs_processing |= (self.__generate_dependencies(fmri,
+                            excludes, dotrim) - already_processed)
                 return already_processed
 
         def __generate_dependencies(self, fmri, excludes=EmptyI, dotrim=True):
@@ -1061,8 +1091,10 @@
                 try:
                         return set([
                              f
-                             for da in self.__get_dependency_actions(fmri, excludes)
-                             for f in self.__parse_dependency(da, dotrim, check_req=True)[1]
+                             for da in self.__get_dependency_actions(fmri,
+                                 excludes)
+                             for f in self.__parse_dependency(da, fmri,
+                                 dotrim, check_req=True)[1]
                         ])
 
                 except DependencyException, e:
@@ -1116,13 +1148,16 @@
                 on specified installed fmri"""
                 if self.__dependents is None:
                         self.__dependents = {}
-                        for f in self.__installed_fmris.values():
-                                for da in self.__get_dependency_actions(f, excludes):
-                                        if da.attrs["type"] == "require":
-                                                self.__dependents.setdefault(
-                                                    self.__installed_fmris[pkg.fmri.PkgFmri(
-                                                    da.attrs["fmri"], "5.11").pkg_name],
-                                                    set()).add(f)
+                        for f in self.__installed_fmris:
+                                for da in self.__get_dependency_actions(f,
+                                    excludes):
+                                        if da.attrs["type"] != "require":
+                                                continue
+                                        pkg_name = pkg.fmri.PkgFmri(
+                                            da.attrs["fmri"], "5.11").pkg_name
+                                        self.__dependents.setdefault(
+                                            self.__installed_dict[pkg_name],
+                                            set()).add(f)
                 return self.__dependents.get(pfmri, set())
 
         def __trim_recursive_incorps(self, fmri_list, excludes):
@@ -1171,19 +1206,26 @@
                 dictionary containing (matching, non matching fmris),
                 indexed by pkg name"""
                 ret = dict()
-                for da in self.__get_dependency_actions(fmri, excludes=excludes):
+                for da in self.__get_dependency_actions(fmri,
+                    excludes=excludes):
                         if da.attrs["type"] != "incorporate":
                                 continue
-                        nm, m, c, d, r = self.__parse_dependency(da, dotrim=False)
+                        nm, m, c, d, r = self.__parse_dependency(da, fmri,
+                            dotrim=False)
                         for n in nm:
-                                ret.setdefault(n.pkg_name, (set(), set()))[1].add(n)
+                                ret.setdefault(n.pkg_name,
+                                    (set(), set()))[1].add(n)
                         for n in m:
-                                ret.setdefault(n.pkg_name, (set(), set()))[0].add(n)
+                                ret.setdefault(n.pkg_name,
+                                    (set(), set()))[0].add(n)
                 return ret
 
-        def __parse_dependency(self, dependency_action, dotrim=True, check_req=False):
-                """Return tuple of (disallowed fmri list, allowed fmri list, conditional_list,
-                    dependency_type, required)"""
+        def __parse_dependency(self, dependency_action, fmri,
+            dotrim=True, check_req=False):
+
+                """Return tuple of (disallowed fmri list, allowed fmri list,
+                conditional_list, dependency_type, required)"""
+
                 dtype = dependency_action.attrs["type"]
                 fmris = [pkg.fmri.PkgFmri(f, "5.11") for f in dependency_action.attrlist("fmri")]
                 fmri = fmris[0]
@@ -1238,6 +1280,18 @@
                                 matching.extend(m)
                                 nonmatching.extend(nm)
 
+                elif dtype == "parent":
+                        if self.__parent_pkgs == None:
+                                # ignore this dependency
+                                matching = nonmatching = frozenset()
+                        else:
+                                matching, nonmatching = \
+                                    self.__comb_auto_fmris(fmri, dotrim=False,
+                                    obsolete_ok=True)
+
+                        # not required in the planned image
+                        required = False
+
                 elif dtype == "origin":
                         matching, nonmatching = \
                             self.__comb_newer_fmris(fmri, dotrim=False,
@@ -1260,7 +1314,7 @@
                         raise api_errors.InvalidPackageErrors(
                             "Unknown dependency type %s" % dtype)
 
-                # cheeck if we're throwing exceptions and we didn't find any
+                # check if we're throwing exceptions and we didn't find any
                 # matches on a required package
 
                 if not check_req or matching or not required:
@@ -1361,9 +1415,7 @@
                 """Generate list of strings describing why currently
                 installed packages cannot be installed, or empty list"""
                 ret = []
-                for f in self.__installed_fmris.values():
-                        if f in self.__removal_list:
-                                continue
+                for f in self.__installed_fmris - self.__removal_fmris:
                         matching, nonmatching = \
                             self.__comb_newer_fmris(f, dotrim=True, obsolete_ok=True)
                         if matching:
@@ -1441,7 +1493,8 @@
 
                 for a in self.__get_dependency_actions(fmri, excludes):
                         try:
-                                match = self.__parse_dependency(a, check_req=True)[1]
+                                match = self.__parse_dependency(a, fmri,
+                                   check_req=True)[1]
                         except DependencyException, e:
                                 self.__trim(fmri, e.reason, e.fmris)
                                 s = _("No suitable version of required package %s found:") % fmri
@@ -1454,7 +1507,8 @@
 
         def __gen_dependency_clauses(self, fmri, da, dotrim=True):
                 """Return clauses to implement this dependency"""
-                nm, m, cond, dtype, req = self.__parse_dependency(da, dotrim)
+                nm, m, cond, dtype, req = self.__parse_dependency(da, fmri,
+                    dotrim)
 
                 if dtype == "require" or dtype == "require-any":
                         return self.__gen_require_clauses(fmri, m)
@@ -1465,8 +1519,9 @@
                                 return self.__gen_require_clauses(fmri, m)
                 elif dtype == "conditional":
                         return self.__gen_require_conditional_clauses(fmri, m, cond)
-                elif dtype == "origin":
-                        return [] # handled by trimming proposed set, not by solver
+                elif dtype in ["origin", "parent"]:
+                        # handled by trimming proposed set, not by solver
+                        return []
                 else:
                         return self.__gen_negation_clauses(fmri, nm)
 
@@ -1537,7 +1592,10 @@
                 #   [!a.1 | !b.2]
                 # ]
                 fmri_id = self.__getid(fmri)
-                return [[-fmri_id, -self.__getid(f)] for f in non_matching_fmri_list]
+                return [
+                    [-fmri_id, -self.__getid(f)]
+                    for f in non_matching_fmri_list
+                ]
 
         def __gen_one_of_these_clauses(self, fmri_list):
                 """generate clauses such that at least one of the fmri_list
@@ -1557,15 +1615,14 @@
                                         self.__addclause_failure = True
                                 self.__clauses += 1
                         except TypeError:
-                                raise TypeError, "List of integers, not %s, expected" % c
+                                e = _("List of integers, not %s, expected") % c
+                                raise TypeError, e
 
         def __get_installed_upgradeable_incorps(self, excludes=EmptyI):
                 """Return the latest version of installed upgradeable incorporations w/ install holds"""
                 installed_incs = []
 
-                for f in self.__installed_fmris.values():
-                        if f in self.__removal_list:
-                                continue
+                for f in self.__installed_fmris - self.__removal_fmris:
                         for d in self.__catalog.get_entry_actions(f,
                             [catalog.Catalog.DEPENDENCY],
                             excludes=excludes):
@@ -1580,7 +1637,7 @@
                                 ret.append(latest)
                 return ret
 
-        def __get_installed_unbound_inc_list(self, proposed_fmris, excludes=EmptyI):
+        def __get_installed_unbound_inc_list(self, proposed_pkgs, excludes=EmptyI):
                 """Return the list of incorporations that are to not to change
                 during this install operation, and the lists of fmris they constrain."""
 
@@ -1593,9 +1650,7 @@
                 # determine those packages that are depended on by explict version,
                 # and those that have pkg.depend.install-hold values.
 
-                for f in self.__installed_fmris.values():
-                        if f in self.__removal_list:
-                                continue
+                for f in self.__installed_fmris - self.__removal_fmris:
                         for d in self.__catalog.get_entry_actions(f,
                             [catalog.Catalog.DEPENDENCY],
                             excludes=excludes):
@@ -1614,7 +1669,7 @@
                 # find install holds that appear on command line and are thus relaxed
                 relaxed_holds = set([
                         install_holds[name]
-                        for name in proposed_fmris
+                        for name in proposed_pkgs
                         if name in install_holds
                         ])
                 # add any other install holds that are relaxed because they have values
@@ -1637,10 +1692,10 @@
                 # 2) are not in the set of versioned_dependents and 3) do
                 # not explicitly appear on the install command line.
                 ret = [
-                    self.__installed_fmris[pkg_name]
+                    self.__installed_dict[pkg_name]
                     for pkg_name in incorps - versioned_dependents
-                    if pkg_name not in proposed_fmris
-                    if self.__installed_fmris[pkg_name] not in self.__removal_list
+                    if pkg_name not in proposed_pkgs
+                    if self.__installed_dict[pkg_name] not in self.__removal_fmris
                 ]
                 # For each incorporation above that will not change, return a list
                 # of the fmris that incorporation constrains
@@ -1675,7 +1730,7 @@
 
                 if pkg_name in self.__publisher:
                         acceptable_pubs = [self.__publisher[pkg_name]]
-                        if pkg_name in self.__installed_fmris:
+                        if pkg_name in self.__installed_dict:
                                 reason = (N_("Currently installed package '{0}' is from sticky publisher '{1}'."),
                                     (pkg_name, self.__publisher[pkg_name]))
                         else:
@@ -1708,7 +1763,7 @@
                 # avoid multiple publishers w/ the exact same fmri to prevent
                 # thrashing in the solver due to many equiv. solutions.
 
-                inst_f = self.__installed_fmris.get(pkg_name, None)
+                inst_f = self.__installed_dict.get(pkg_name, None)
 
                 if inst_f:
                         version_dict[inst_f.version] = [inst_f]
@@ -1755,9 +1810,81 @@
 
                                 self.__trim(fmri, reason)
 
+        def __trim_nonmatching_parents1(self, pkg_fmri, fmri):
+                if fmri in self.__parent_pkgs:
+                        # exact fmri installed in parent
+                        return True
+
+                if fmri.pkg_name not in self.__parent_dict:
+                        # package is not installed in parent
+                        reason = (N_("Package is not installed in "
+                            "parent image: {0}"), (fmri.pkg_name,))
+                        self.__trim(pkg_fmri, reason)
+                        return False
+
+                pf = self.__parent_dict[fmri.pkg_name]
+                if fmri.publisher and fmri.publisher != pf.publisher:
+                        # package is from a different publisher in the parent
+                        reason = (N_("Package in parent is from a "
+                            "different publisher: {0}"), (pf,))
+                        self.__trim(pkg_fmri, reason)
+                        return False
+
+                if pf.version == fmri.version or pf.version.is_successor(
+                    fmri.version, version.CONSTRAINT_AUTO):
+                        # parent dependency is satisfied
+                        return True
+
+                # version mismatch
+                if pf.version.is_successor(fmri.version,
+                    version.CONSTRAINT_NONE):
+                        reason = (N_("Parent image has an incompatible newer "
+                            "version: {0}"), (pf,))
+                else:
+                        reason = (N_("Parent image has an older version of "
+                            "package: {0}"), (pf,))
+
+                self.__trim(pkg_fmri, reason)
+                return False
+
+        def __trim_nonmatching_parents(self, pkg_fmri, excludes):
+                """Trim any pkg_fmri that contains a parent dependency that
+                is not satisfied by the parent image."""
+
+                # the fmri for the package should include a publisher
+                assert pkg_fmri.publisher
+
+                # if we're not a child then ignore "parent" dependencies.
+                if self.__parent_pkgs == None:
+                        return True
+
+                # Find all the fmris that we depend on in our parent.
+                # Use a set() to eliminate any dups.
+                pkg_deps = set([
+                    pkg.fmri.PkgFmri(f, "5.11")
+                    for da in self.__get_dependency_actions(pkg_fmri, excludes)
+                    if da.attrs["type"] == "parent"
+                    for f in da.attrlist("fmri")
+                ])
+
+                if not pkg_deps:
+                        # no parent dependencies.
+                        return True
+
+                allowed = True
+                for f in pkg_deps:
+                        fmri = f
+                        if f.pkg_name == pkg.actions.depend.DEPEND_SELF:
+                                # check if this package depends on itself.
+                                fmri = pkg_fmri
+                        if not self.__trim_nonmatching_parents1(pkg_fmri, fmri):
+                                allowed = False
+                return allowed
+
         def __trim_nonmatching_origins(self, fmri, excludes):
                 """Trim any fmri that contains a origin dependency that is
                 not satisfied by the current image or root-image"""
+
                 for da in self.__get_dependency_actions(fmri, excludes):
 
                         if da.attrs["type"] != "origin":
@@ -1767,23 +1894,31 @@
 
                         if da.attrs.get("root-image", "").lower() == "true":
                                 if self.__root_fmris is None:
-                                        root_img = pkg.client.image.Image("/",
-                                            allow_ondisk_upgrade=False)
+                                        img = pkg.client.image.Image(
+                                            misc.liveroot(),
+                                            allow_ondisk_upgrade=False,
+                                            user_provided_dir=True,
+                                            should_exist=True)
                                         self.__root_fmris = dict([
                                             (f.pkg_name, f)
-                                            for f in root_img.gen_installed_pkgs()
+                                            for f in img.gen_installed_pkgs()
                                         ])
 
-                                installed = self.__root_fmris.get(req_fmri.pkg_name, None)
-                                reason = (N_("Installed version in root image is "
-                                         "too old for origin dependency %s"), (req_fmri,))
+                                installed = self.__root_fmris.get(
+                                    req_fmri.pkg_name, None)
+                                reason = (N_("Installed version in root image "
+                                    "is too old for origin dependency %s"),
+                                    (req_fmri,))
                         else:
-                                installed = self.__installed_fmris.get(req_fmri.pkg_name, None)
-                                reason = (N_("Installed version in image being upgraded is "
-                                         "too old for origin dependency %s"), (req_fmri,))
+                                installed = self.__installed_dict.get(
+                                    req_fmri.pkg_name, None)
+                                reason = (N_("Installed version in image "
+                                    "being upgraded is too old for origin "
+                                    "dependency %s"), (req_fmri,))
 
-                        # assumption is that for root-image, publishers align; otherwise
-                        # these sorts of cross-environment dependencies don't work well
+                        # assumption is that for root-image, publishers align;
+                        # otherwise these sorts of cross-environment
+                        # dependencies don't work well
 
                         if not installed or \
                             not req_fmri.version or \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/client/pkgdefs.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+"""
+Definitions for values used by the pkg(1) client.
+"""
+
+# pkg exit codes
+EXIT_OK        =  0 # Command succeeded.
+EXIT_OOPS      =  1 # An error occurred.
+EXIT_BADOPT    =  2 # Invalid command line options were specified.
+EXIT_PARTIAL   =  3 # Multiple ops were requested, but not all succeeded.
+EXIT_NOP       =  4 # No changes were made - nothing to do.
+EXIT_NOTLIVE   =  5 # The requested op cannot be performed on a live image.
+EXIT_LICENSE   =  6 # License acceptance required for requested op.
+EXIT_LOCKED    =  7 # Image is currently locked by another process
+EXIT_EACCESS   =  8 # Can't access requested image
+EXIT_DIVERGED  =  9 # Image is not in sync with its constraints
+EXIT_NOPARENT  = 10 # Image is not linked to a parent image
+EXIT_PARENTOP  = 11 # Linked operation must be done from parent
+
+# package operations
+PKG_OP_ATTACH          = "attach-linked"
+PKG_OP_AUDIT_LINKED    = "audit-linked"
+PKG_OP_CHANGE_FACET    = "change-facet"
+PKG_OP_CHANGE_VARIANT  = "change-variant"
+PKG_OP_DETACH          = "detach-linked"
+PKG_OP_INSTALL         = "install"
+PKG_OP_LIST            = "list"
+PKG_OP_LIST_LINKED     = "list-linked"
+PKG_OP_PROP_LINKED     = "property-linked"
+PKG_OP_SET_PROP_LINKED = "set-property-linked"
+PKG_OP_SYNC            = "sync-linked"
+PKG_OP_UNINSTALL       = "uninstall"
+PKG_OP_UPDATE          = "update"
+pkg_op_values          = frozenset([
+    PKG_OP_ATTACH,
+    PKG_OP_AUDIT_LINKED,
+    PKG_OP_CHANGE_FACET,
+    PKG_OP_CHANGE_VARIANT,
+    PKG_OP_DETACH,
+    PKG_OP_INSTALL,
+    PKG_OP_LIST,
+    PKG_OP_LIST_LINKED,
+    PKG_OP_PROP_LINKED,
+    PKG_OP_SET_PROP_LINKED,
+    PKG_OP_SYNC,
+    PKG_OP_UNINSTALL,
+    PKG_OP_UPDATE,
+])
+
+API_OP_ATTACH         = "attach-linked"
+API_OP_CHANGE_FACET   = "change-facet"
+API_OP_CHANGE_VARIANT = "change-variant"
+API_OP_DETACH         = "detach-linked"
+API_OP_INSTALL        = "install"
+API_OP_REVERT         = "revert"
+API_OP_SYNC           = "sync-linked"
+API_OP_UNINSTALL      = "uninstall"
+API_OP_UPDATE         = "update"
+api_op_values         = frozenset([
+    API_OP_ATTACH,
+    API_OP_CHANGE_FACET,
+    API_OP_CHANGE_VARIANT,
+    API_OP_DETACH,
+    API_OP_INSTALL,
+    API_OP_REVERT,
+    API_OP_SYNC,
+    API_OP_UNINSTALL,
+    API_OP_UPDATE
+])
+
+API_STAGE_DEFAULT  = "default"
+API_STAGE_PUBCHECK = "pubcheck"
+API_STAGE_PLAN     = "plan"
+API_STAGE_PREPARE  = "prepare"
+API_STAGE_EXECUTE  = "execute"
+api_stage_values  = frozenset([
+    API_STAGE_PUBCHECK,
+    API_STAGE_DEFAULT,
+    API_STAGE_PLAN,
+    API_STAGE_PREPARE,
+    API_STAGE_EXECUTE,
+])
--- a/src/modules/client/pkgplan.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/pkgplan.py	Sat May 07 00:25:10 2011 -0700
@@ -27,6 +27,7 @@
 import errno
 import itertools
 import os
+import cPickle as pickle
 
 from pkg.client import global_settings
 logger = global_settings.logger
@@ -76,10 +77,8 @@
 
         def __str__(self):
                 s = "%s -> %s\n" % (self.origin_fmri, self.destination_fmri)
-
                 for src, dest in itertools.chain(*self.actions):
                         s += "  %s -> %s\n" % (src, dest)
-
                 return s
 
         def __add_license(self, src, dest):
@@ -97,6 +96,74 @@
                     "displayed": False,
                 }
 
+        @staticmethod
+        def __pickle_actions(actions):
+                """Return a list of pickled actions."""
+                action_list = []
+                for pair in actions:
+                        newpair = [None, None]
+                        if pair[0]:
+                                newpair[0] = pickle.dumps(pair[0])
+                        if pair[1]:
+                                newpair[1] = pickle.dumps(pair[1])
+                        action_list.append(newpair)
+                return action_list
+
+        @staticmethod
+        def __unpickle_actions(pickled_actions):
+                """Return a list of unpickled actions."""
+                action_list=[]
+                for pair in pickled_actions:
+                        newpair = [None, None]
+                        if pair[0]:
+                                newpair[0] = pickle.loads(str(pair[0]))
+                        if pair[1]:
+                                newpair[1] = pickle.loads(str(pair[1]))
+                        action_list.append(newpair)
+                return action_list
+
+        def setstate(self, state):
+                """Update the state of this object using the contents of
+                the supplied dictionary."""
+
+                import pkg.fmri
+
+                # if there is no origin, don't allocate an fmri obj
+                if state["src"]:
+                        state["src"] = pkg.fmri.PkgFmri(state["src"])
+
+                # if there is no destination, don't allocate an fmri obj
+                if state["dst"]:
+                        state["dst"] = pkg.fmri.PkgFmri(state["dst"])
+
+                self.origin_fmri = state["src"]
+                self.destination_fmri = state["dst"]
+                self.pkg_summary = state["summary"]
+                self.actions = manifest.ManifestDifference([], [], [])
+                self.actions.added.extend(
+                    self.__unpickle_actions(state["add"]))
+                self.actions.changed.extend(
+                    self.__unpickle_actions(state["change"]))
+                self.actions.removed.extend(
+                    self.__unpickle_actions(state["remove"]))
+                for src, dest in itertools.chain(self.gen_update_actions(),
+                    self.gen_install_actions()):
+                        if dest.name == "license":
+                                self.__add_license(src, dest)
+
+        def getstate(self):
+                """Returns a dictionary containing the state of this object
+                so that it can be easily stored using JSON, pickle, etc."""
+
+                state = {}
+                state["src"] = self.origin_fmri
+                state["dst"] = self.destination_fmri
+                state["summary"] = self.pkg_summary
+                state["add"] = self.__pickle_actions(self.actions.added)
+                state["change"] = self.__pickle_actions(self.actions.changed)
+                state["remove"] = self.__pickle_actions(self.actions.removed)
+                return state
+
         def propose(self, of, om, df, dm):
                 """Propose origin and dest fmri, manifest"""
                 self.origin_fmri = of
--- a/src/modules/client/progress.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/client/progress.py	Sat May 07 00:25:10 2011 -0700
@@ -57,9 +57,17 @@
             External consumers should base their subclasses on the
             NullProgressTracker class. """
 
-        def __init__(self):
+        def __init__(self, quiet=False, verbose=0):
+
+                self.quiet = quiet
+                self.verbose = verbose
+
                 self.reset()
 
+        def set_linked_name(self, lin):
+                """Called once an image determines its linked image name."""
+                return
+
         def reset_download(self):
                 self.dl_started = False
                 self.dl_goal_nfiles = 0
@@ -406,6 +414,18 @@
                 raise NotImplementedError("eval_output_done() not implemented "
                     "in superclass")
 
+        def li_recurse_start(self, lin):
+                """Called when we recurse into a child linked image."""
+
+                raise NotImplementedError("li_recurse_start() not implemented "
+                    "in superclass")
+
+        def li_recurse_end(self, lin):
+                """Called when we return from a child linked image."""
+
+                raise NotImplementedError("li_recurse_end() not implemented "
+                    "in superclass")
+
         def ver_output(self):
                 raise NotImplementedError("ver_output() not implemented in "
                     "superclass")
@@ -494,7 +514,7 @@
             intended to be "quiet"  See also NullProgressTracker below. """
 
         def __init__(self):
-                ProgressTracker.__init__(self)
+                ProgressTracker.__init__(self, quiet=True)
 
         def cat_output_start(self):
                 return
@@ -523,6 +543,12 @@
         def eval_output_done(self):
                 return
 
+        def li_recurse_start(self, lin):
+                return
+
+        def li_recurse_end(self, lin):
+                return
+
         def ver_output(self):
                 return
 
@@ -596,9 +622,16 @@
             and so is appropriate for sending through a pipe.  This code
             is intended to be platform neutral. """
 
-        def __init__(self):
-                ProgressTracker.__init__(self)
+        def __init__(self, quiet=False, verbose=0):
+                ProgressTracker.__init__(self, quiet=quiet,
+                    verbose=verbose)
                 self.last_printed_pkg = None
+                self.msg_prefix = ""
+
+        def set_linked_name(self, lin):
+                self.msg_prefix = ""
+                if lin:
+                        self.msg_prefix = _("Image %s ") % lin
 
         def cat_output_start(self):
                 return
@@ -627,6 +660,30 @@
         def eval_output_done(self):
                 return
 
+        def li_recurse_start(self, lin):
+                msg = _("Recursing into linked image: %s") % lin
+                msg = "%s%s" % (self.msg_prefix, msg)
+
+                try:
+                        print "%s\n" % msg
+                        sys.stdout.flush()
+                except IOError, e:
+                        if e.errno == errno.EPIPE:
+                                raise PipeError, e
+                        raise
+
+        def li_recurse_end(self, lin):
+                msg = _("Returning from linked image: %s") % lin
+                msg = "%s%s" % (self.msg_prefix, msg)
+
+                try:
+                        print "%s\n" % msg
+                        sys.stdout.flush()
+                except IOError, e:
+                        if e.errno == errno.EPIPE:
+                                raise PipeError, e
+                        raise
+
         def ver_output(self):
                 return
 
@@ -746,14 +803,15 @@
         #
         TERM_DELAY = 0.10
 
-        def __init__(self):
-                ProgressTracker.__init__(self)
+        def __init__(self, quiet=False, verbose=0):
+                ProgressTracker.__init__(self, quiet=quiet, verbose=verbose)
 
                 self.act_started = False
                 self.ind_started = False
                 self.item_started = False
                 self.last_print_time = 0
                 self.clear_eol = ""
+                self.msg_prefix = ""
 
                 try:
                         import curses
@@ -775,6 +833,11 @@
                 self.spinner_chars = "/-\|"
                 self.curstrlen = 0
 
+        def set_linked_name(self, lin):
+                self.msg_prefix = ""
+                if lin:
+                        self.msg_prefix = _("Image %s ") % lin
+
         def __generic_start(self, msg):
                 # Ensure the last message displayed is flushed in case the
                 # corresponding operation did not complete successfully.
@@ -828,13 +891,14 @@
                         print self.cr,
                         print " " * self.curstrlen,
                         print self.cr,
-                        s = _("Refreshing catalog %(current)d/%(total)d "
+                        msg = _("Refreshing catalog %(current)d/%(total)d "
                             "%(publisher)s") % {
                             "current": self.refresh_cur_pub_cnt,
                             "total": self.refresh_pub_cnt,
                             "publisher": self.refresh_cur_pub }
-                        self.curstrlen = len(s)
-                        print "%s" % s,
+                        msg = "%s%s" % (self.msg_prefix, msg)
+                        self.curstrlen = len(msg)
+                        print "%s" % msg,
                         self.needs_cr = True
                         sys.stdout.flush()
                 except IOError, e:
@@ -850,10 +914,12 @@
                 # corresponding operation did not complete successfully.
                 self.__generic_done()
 
-                s = _("Creating Plan")
-                self.curstrlen = len(s)
+                msg = _("Creating Plan")
+                msg = "%s%s" % (self.msg_prefix, msg)
+
+                self.curstrlen = len(msg)
                 try:
-                        print "%s" % s,
+                        print "%s" % msg,
                         self.needs_cr = True
                         sys.stdout.flush()
                 except IOError, e:
@@ -869,10 +935,12 @@
                 self.spinner = (self.spinner + 1) % len(self.spinner_chars)
                 try:
                         print self.cr,
-                        s = _("Creating Plan %c") % self.spinner_chars[
+                        msg = _("Creating Plan %c") % self.spinner_chars[
                             self.spinner]
-                        self.curstrlen = len(s)
-                        print "%s" % s,
+                        msg = "%s%s" % (self.msg_prefix, msg)
+
+                        self.curstrlen = len(msg)
+                        print "%s" % msg,
                         self.needs_cr = True
                         sys.stdout.flush()
                 except IOError, e:
@@ -884,6 +952,33 @@
                 self.__generic_done()
                 self.last_print_time = 0
 
+        def li_recurse_start(self, lin):
+                """Announce recursion into the linked image 'lin', first
+                flushing any partially-drawn progress line."""
+                self.__generic_done()
+
+                msg = _("Recursing into linked image: %s") % lin
+                msg = "%s%s" % (self.msg_prefix, msg)
+
+                try:
+                        print "%s" % msg, self.cr
+                        # remember the printed width so the next update can
+                        # blank this line cleanly.
+                        self.curstrlen = len(msg)
+                        sys.stdout.flush()
+                except IOError, e:
+                        # EPIPE means our output consumer went away;
+                        # translate it into the client's PipeError.
+                        if e.errno == errno.EPIPE:
+                                raise PipeError, e
+                        raise
+
+        def li_recurse_end(self, lin):
+                """Announce return from operating on the linked image
+                'lin'."""
+                msg = _("Returning from linked image: %s") % lin
+                msg = "%s%s" % (self.msg_prefix, msg)
+
+                try:
+                        print "%s" % msg, self.cr
+                        sys.stdout.flush()
+                except IOError, e:
+                        # EPIPE means our output consumer went away;
+                        # translate it into the client's PipeError.
+                        if e.errno == errno.EPIPE:
+                                raise PipeError, e
+                        raise
+
         def ver_output(self):
                 try:
                         assert self.ver_cur_fmri != None
@@ -1142,7 +1237,10 @@
                 self.ind_started = False
                 self.last_print_time = 0
                 try:
-                        print _("Optimizing Index...")
+                        msg = _("Optimizing Index...")
+                        msg = "%s%s" % (self.msg_prefix, msg)
+
+                        print msg
                         sys.stdout.flush()
                 except IOError, e:
                         if e.errno == errno.EPIPE:
--- a/src/modules/lint/engine.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/lint/engine.py	Sat May 07 00:25:10 2011 -0700
@@ -21,7 +21,7 @@
 #
 
 #
-# Copyright (c) 2010, 2011 Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 import pkg.client.api
@@ -39,7 +39,7 @@
 import sys
 
 PKG_CLIENT_NAME = "pkglint"
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 pkg.client.global_settings.client_name = PKG_CLIENT_NAME
 
 class LintEngineException(Exception):
--- a/src/modules/manifest.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/manifest.py	Sat May 07 00:25:10 2011 -0700
@@ -264,6 +264,65 @@
                                 out += "%s -> %s\n" % (src, dest)
                 return out
 
+        @staticmethod
+        def _gen_dirs_to_str(dirs):
+                """ from a dictionary of paths, generate contents of dircache
+                file"""
+
+                # yield one "dir" action line per (path, variant-combination)
+                # pair; 'dirs' maps each path to a list of variant dicts.
+                for d in dirs:
+                        for v in dirs[d]:
+                                yield "dir path=%s %s\n" % \
+                                    (d, " ".join("%s=%s" % t \
+                                    for t in v.iteritems()))
+
+        def _actions_to_dirs(self):
+                """ create dictionary of all directories referenced
+                by actions explicitly or implicitly from self.actions...
+                include variants as values; collapse variants where possible"""
+
+                dirs = {}
+                # build a dictionary containing all directories tagged w/
+                # variants
+                for a in self.actions:
+                        # 'variants' maps each variant/facet name present on
+                        # the action to its value.
+                        v, f = a.get_varcet_keys()
+                        variants = dict((name, a.attrs[name]) for name in v + f)
+                        for d in expanddirs(a.directory_references()):
+                                if d not in dirs:
+                                        dirs[d] = [variants]
+                                elif variants not in dirs[d]:
+                                        dirs[d].append(variants)
+
+                # remove any tags if any entries are always installed (NULL)
+                for d in dirs:
+                        # an empty dict means some action delivers this
+                        # directory unconditionally, so the other variant
+                        # tags are irrelevant.
+                        if {} in dirs[d]:
+                                dirs[d] = [{}]
+                                continue
+                        # could collapse dirs where all variants are present
+                return dirs
+
+        def get_directories(self, excludes):
+                """ return a list of directories implicitly or
+                explicitly referenced by this object"""
+
+                try:
+                        alist = self.dir_alist
+                except:
+                        # generate actions that contain directories
+                        self.dir_alist = [
+                            actions.fromstr(s.strip())
+                            for s in Manifest._gen_dirs_to_str(
+                                self._actions_to_dirs())
+                        ]
+                        alist = self.dir_alist
+
+                s = set([
+                    a.attrs["path"]
+                    for a in alist
+                    if a.include_this(excludes)
+                ])
+
+                return list(s)
+
         def gen_actions(self, excludes=EmptyI):
                 """Generate actions in manifest through ordered callable list"""
                 for a in self.actions:
@@ -838,9 +897,9 @@
                 fd, fn = tempfile.mkstemp(dir=t_dir,
                     prefix="manifest.dircache.")
                 f = os.fdopen(fd, "wb")
-                dirs = self.__actions_to_dirs()
+                dirs = self._actions_to_dirs()
 
-                for s in self.__gen_dirs_to_str(dirs):
+                for s in Manifest._gen_dirs_to_str(dirs):
                         f.write(s)
 
                 f.close()
@@ -848,42 +907,6 @@
                 portable.rename(fn, self.__cache_path("manifest.dircache"))
 
         @staticmethod
-        def __gen_dirs_to_str(dirs):
-                """ from a dictionary of paths, generate contents of dircache
-                file"""
-                for d in dirs:
-                        for v in dirs[d]:
-                                yield "dir path=%s %s\n" % \
-                                    (d, " ".join("%s=%s" % t \
-                                    for t in v.iteritems()))
-
-        def __actions_to_dirs(self):
-                """ create dictionary of all directories referenced
-                by actions explicitly or implicitly from self.actions...
-                include variants as values; collapse variants where possible"""
-                assert self.loaded
-
-                dirs = {}
-                # build a dictionary containing all directories tagged w/
-                # variants
-                for a in self.actions:
-                        v, f = a.get_varcet_keys()
-                        variants = dict((name, a.attrs[name]) for name in v + f)
-                        for d in expanddirs(a.directory_references()):
-                                if d not in dirs:
-                                        dirs[d] = [variants]
-                                elif variants not in dirs[d]:
-                                        dirs[d].append(variants)
-
-                # remove any tags if any entries are always installed (NULL)
-                for d in dirs:
-                        if {} in dirs[d]:
-                                dirs[d] = [{}]
-                                continue
-                        # could collapse dirs where all variants are present
-                return dirs
-
-        @staticmethod
         def clear_cache(cache_root):
                 """Remove all manifest cache files found in the given directory
                 (excluding the manifest itself).
@@ -913,23 +936,14 @@
                         if not self.loaded:
                                 # need to load from disk
                                 self.__load()
-                        # generate actions that contain directories
-                        alist = [
-                            actions.fromstr(s.strip())
-                            for s in self.__gen_dirs_to_str(
-                                self.__actions_to_dirs())
-                        ]
+                        assert self.loaded
                 else:
                         # we have cached copy on disk; use it
                         f = file(mpath)
-                        alist = [actions.fromstr(s.strip()) for s in f]
+                        self.dir_alist = [actions.fromstr(s.strip()) for s in f]
                         f.close()
-                s = set([
-                    a.attrs["path"]
-                    for a in alist
-                    if a.include_this(excludes)
-                ])
-                return list(s)
+
+                return Manifest.get_directories(self, excludes)
 
         def gen_actions_by_type(self, atype, excludes=EmptyI):
                 """ generate actions of the specified type;
--- a/src/modules/misc.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/misc.py	Sat May 07 00:25:10 2011 -0700
@@ -22,13 +22,14 @@
 
 # Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
 
+import OpenSSL.crypto as osc
+import cStringIO
 import calendar
-import cStringIO
 import datetime
 import errno
+import getopt
 import hashlib
 import locale
-import OpenSSL.crypto as osc
 import operator
 import os
 import pkg.client.api_errors as api_errors
@@ -44,9 +45,11 @@
 import urlparse
 import zlib
 
-from pkg.pkggzip import PkgGzipFile
+from pkg import VERSION
+from pkg.client import global_settings
+from pkg.client.debugvalues import DebugValues
 from pkg.client.imagetypes import img_type_names, IMG_NONE
-from pkg import VERSION
+from pkg.pkggzip import PkgGzipFile
 
 # Minimum number of days to issue warning before a certificate expires
 MIN_WARN_DAYS = datetime.timedelta(days=30)
@@ -296,7 +299,7 @@
         """Return its argument; used to mark strings for localization when
         their use is delayed by the program."""
         return message
- 
+
 def bytes_to_str(bytes, format=None):
         """Returns a human-formatted string representing the number of bytes
         in the largest unit possible.  If provided, 'format' should be a string
@@ -601,7 +604,7 @@
                                 raise AttributeError, "can't iterate"
                         return self.__iter(self.__obj)
 
-        def __init__(self, fget=None, fset=None, fdel=None, iteritems=None, 
+        def __init__(self, fget=None, fset=None, fdel=None, iteritems=None,
             keys=None, values=None, iterator=None, doc=None, fgetdefault=None,
             fsetdefault=None, update=None, pop=None):
                 self.__fget = fget
@@ -620,12 +623,12 @@
         def __get__(self, obj, objtype=None):
                 if obj is None:
                         return self
-                return self.__InternalProxy(obj, self.__fget, self.__fset, 
+                return self.__InternalProxy(obj, self.__fget, self.__fset,
                     self.__fdel, self.__iteritems, self.__keys, self.__values,
                     self.__iter, self.__fgetdefault, self.__fsetdefault,
                     self.__update, self.__pop)
 
-        
+
 def build_cert(path, uri=None, pub=None):
         """Take the file given in path, open it, and use it to create
         an X509 certificate object.
@@ -826,14 +829,15 @@
         def __init__(self, name, bases, dictionary):
                 super(Singleton, self).__init__(name, bases, dictionary)
                 self.instance = None
- 
+
         def __call__(self, *args, **kw):
                 if self.instance is None:
                         self.instance = super(Singleton, self).__call__(*args,
                             **kw)
- 
+
                 return self.instance
 
+
 EmptyDict = ImmutableDict()
 
 # Setting the python file buffer size to 128k gives substantial performance
@@ -862,3 +866,200 @@
                 for name in filenames:
                         path = os.path.join(dirpath, name)
                         portable.chown(path, uid, gid)
+def opts_parse(op, api_inst, args, table, pargs_limit, usage_cb):
+        """Generic table-based options parsing function.  Returns a tuple
+        consisting of a dictionary of parsed options and the remaining
+        unparsed options.
+
+        'op' is the operation being performed.
+
+        'api_inst' is an image api object that is passed to options handling
+        callbacks (passed in via 'table').
+
+        'args' is the arguments that should be parsed.
+
+        'table' is a list of options and callbacks.Each entry is either a
+        a tuple or a callback function.
+
+        tuples in 'table' specify allowable options and have the following
+        format:
+
+                (<short opt>, <long opt>, <key>, <default value>)
+
+        An example of a short opt is "f", which maps to a "-f" option.  An
+        example of a long opt is "foo", which maps to a "--foo" option.  Key
+        is the value of this option in the parsed option dictionary.  The
+        default value not only represents the default value assigned to the
+        option, but it also implicitly determines how the option is parsed.  If
+        the default value is True or False, the option doesn't take any
+        arguments, can only be specified once, and if specified it inverts the
+        default value.  If the default value is 0, the option doesn't take any
+        arguments, can be specified multiple times, and if specified its value
+        will be the number of times it was seen.  If the default value is
+        None, the option requires an argument, can only be specified once, and
+        if specified its value will be its argument string.  If the default
+        value is an empty list, the option requires an argument, may be
+        specified multiple times, and if specified its value will be a list
+        with all the specified argument values.
+
+        callbacks in 'table' specify callback functions that are invoked after
+        all options have been parsed.  Callback functions must have the
+        following signature:
+                callback(api_inst, opts, opts_new)
+
+        The opts parameter is a dictionary containing all the raw, parsed
+        options.  Callbacks should never update the contents of this
+        dictionary.  The opts_new parameter is a dictionary which is initially
+        a copy of the opts dictionary.  This is the dictionary that will be
+        returned to the caller of opts_parse().  If a callback function wants
+        to update the arguments dictionary that will be returned to the
+        caller, they should make all their updates to the opts_new dictionary.
+
+        'pargs_limit' specified how to handle extra arguments not parsed by
+        getops.  A value of -1 indicates that we allow an unlimited number of
+        extra arguments.  A value of 0 or greater indicates the number of
+        allowed additional unparsed options.
+
+        'usage_cb' is a function pointer that should display usage information
+        and will be invoked if invalid arguments are detected."""
+
+
+        assert type(table) == list
+
+        # return dictionary
+        rv = dict()
+
+        # option string passed to getopt
+        opts_s_str = ""
+        # long options list passed to getopt
+        opts_l_list = list()
+
+        # dict to map options returned by getopt to keys
+        opts_keys = dict()
+
+        # sanity checking to make sure each option is unique
+        opts_s_set = set()
+        opts_l_set = set()
+        opts_seen = dict()
+
+        # callbacks to invoke after processing options
+        callbacks = []
+
+        # process each option entry
+        for entry in table:
+                # check for a callback
+                if type(entry) != tuple:
+                        callbacks.append(entry)
+                        continue
+
+                # decode the table entry
+                # s: a short option, ex: -f
+                # l: a long option, ex: --foo
+                # k: the key value for the options dictionary
+                # v: the default value
+                (s, l, k, v) = entry
+
+                # make sure an option was specified
+                assert s or l
+                # sanity check the default value
+                assert (v == None) or (v == []) or \
+                    (type(v) == bool) or (type(v) == int)
+                # make sure each key is unique
+                assert k not in rv
+                # initialize the default return dictionary entry.
+                rv[k] = v
+                if l:
+                        # make sure each option is unique
+                        assert set([l]) not in opts_l_set
+                        opts_l_set |= set([l])
+
+                        if type(v) == bool:
+                                v = not v
+                                opts_l_list.append("%s" % l)
+                        elif type(v) == int:
+                                opts_l_list.append("%s" % l)
+                        else:
+                                opts_l_list.append("%s=" % l)
+                        opts_keys["--%s" % l] = k
+                if s:
+                        # make sure each option is unique
+                        assert set([s]) not in opts_s_set
+                        opts_s_set |= set([s])
+
+                        if type(v) == bool:
+                                v = not v
+                                opts_s_str += "%s" % s
+                        elif type(v) == int:
+                                opts_s_str += "%s" % s
+                        else:
+                                opts_s_str += "%s:" % s
+                        opts_keys["-%s" % s] = k
+
+        # parse options
+        try:
+                opts, pargs = getopt.getopt(args, opts_s_str, opts_l_list)
+        except getopt.GetoptError, e:
+                usage_cb(_("illegal option -- %s") % e.opt, cmd=op)
+
+        if (pargs_limit >= 0) and (pargs_limit < len(pargs)):
+                usage_cb(_("illegal argument -- %s") % pargs[pargs_limit],
+                    cmd=op)
+
+        # update options dictionary with the specified options
+        for opt, arg in opts:
+                k = opts_keys[opt]
+                v = rv[k]
+
+                # check for duplicate options
+                if k in opts_seen and (type(v) != list and type(v) != int):
+                        if opt == opts_seen[k]:
+                                opts_err_repeated(opt, op)
+                        usage_cb(_("'%s' and '%s' have the same meaning") %
+                            (opts_seen[k], opt), cmd=op)
+                opts_seen[k] = opt
+
+                # update the return dict value
+                if type(v) == bool:
+                        rv[k] = not rv[k]
+                elif type(v) == list:
+                        rv[k].append(arg)
+                elif type(v) == int:
+                        rv[k] += 1
+                else:
+                        rv[k] = arg
+
+        # invoke callbacks (cast to set() to eliminate dups)
+        rv_updated = rv.copy()
+        for cb in set(callbacks):
+                cb(op, api_inst, rv, rv_updated)
+
+        return (rv_updated, pargs)
+
+def api_cmdpath():
+        """Returns the path to the executable that is invoking the api client
+        interfaces.  May return None if no client path can be determined."""
+
+        cmdpath = None
+
+        # start from the client's argv[0], resolved relative to the python
+        # path head.
+        if global_settings.client_args[0]:
+                cmdpath = os.path.realpath(os.path.join(sys.path[0],
+                    os.path.basename(global_settings.client_args[0])))
+
+        # an explicit PKG_CMDPATH environment setting overrides the
+        # computed path.
+        if "PKG_CMDPATH" in os.environ:
+                cmdpath = os.environ["PKG_CMDPATH"]
+
+        # a "simulate_cmdpath" debug value overrides everything else.
+        if DebugValues.get_value("simulate_cmdpath"):
+                cmdpath = DebugValues.get_value("simulate_cmdpath")
+
+        return cmdpath
+
+def liveroot():
+        """Return path to the current live root image, i.e. the image
+        that we are running from."""
+
+        # precedence: the "simulate_live_root" debug value, then the
+        # PKG_LIVE_ROOT environment variable, then "/".
+        live_root = DebugValues.get_value("simulate_live_root")
+        if not live_root and "PKG_LIVE_ROOT" in os.environ:
+                live_root = os.environ["PKG_LIVE_ROOT"]
+        if not live_root:
+                live_root = "/"
+        return live_root
--- a/src/modules/pkgsubprocess.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/modules/pkgsubprocess.py	Sat May 07 00:25:10 2011 -0700
@@ -21,8 +21,7 @@
 #
 
 #
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 import os
@@ -45,15 +44,10 @@
 STDOUT = subprocess.STDOUT
 
 class Popen(subprocess.Popen):
-        def __init__(self, args, bufsize=128 * 1024, executable=None,
-            stdin=None, stdout=None, stderr=None, preexec_fn=None,
-            close_fds=False, shell=False, cwd=None, env=None,
-            universal_newlines=False, startupinfo=None, creationflags=0):
-
-                subprocess.Popen.__init__(self, args, bufsize, executable,
-                    stdin, stdout, stderr, preexec_fn, close_fds, shell, cwd,
-                    env, universal_newlines, startupinfo, creationflags)
-
+        def __init__(self, args, **kwargs):
+                # Default to a large (128k) I/O buffer unless the caller
+                # explicitly chose one; all other arguments are passed
+                # through to subprocess.Popen unchanged.
+                if "bufsize" not in kwargs:
+                        kwargs["bufsize"] = 128 * 1024
+                subprocess.Popen.__init__(self, args, **kwargs)
 
         if "posix_spawnp" in globals():
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/modules/syscallat.c	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,154 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <Python.h>
+
+PyDoc_STRVAR(pmkdirat_doc,
+"mkdirat(fd, path, mode)\n\
+\n\
+Invoke mkdirat(2).");
+
+/*
+ * Python binding for mkdirat(2): create a directory at 'path' relative
+ * to the directory referenced by 'fd'.  Raises OSError on failure.
+ */
+/*ARGSUSED*/
+static PyObject *
+pmkdirat(PyObject *self, PyObject *args)
+{
+	int		fd, rv;
+	char		*path;
+	mode_t		mode;
+
+	/* arguments: (int fd, str path, unsigned int mode) */
+	rv = PyArg_ParseTuple(args, "isI", &fd, &path, &mode);
+	if (rv == 0) {
+		PyErr_SetString(PyExc_ValueError, "could not parse argument");
+		return (NULL);
+	}
+
+	rv = mkdirat(fd, path, mode);
+	if (rv != 0)
+		return PyErr_SetFromErrno(PyExc_OSError);
+
+	Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(popenat_doc,
+"openat(fd, path, oflag, mode) -> fd\n\
+\n\
+Invoke openat(2).");
+
+/*
+ * Python binding for openat(2): open 'path' relative to the directory
+ * referenced by 'fd'.  Returns the new file descriptor as an int;
+ * raises OSError on failure.  All four arguments are required (the
+ * keyword list allows them to be passed by name).
+ */
+/*ARGSUSED*/
+static PyObject *
+popenat(PyObject *self, PyObject *args, PyObject *kwds)
+{
+	int		fd, oflag, rv;
+	char		*path;
+	mode_t		mode;
+
+	/* Python based arguments to this function */
+	static char	*kwlist[] = {"fd", "path", "oflag", "mode", NULL};
+
+	rv = PyArg_ParseTupleAndKeywords(args, kwds, "isiI", kwlist,
+	    &fd, &path, &oflag, &mode);
+	if (rv == 0) {
+		PyErr_SetString(PyExc_ValueError, "could not parse argument");
+		return (NULL);
+	}
+
+	rv = openat(fd, path, oflag, mode);
+	if (rv < 0)
+		return PyErr_SetFromErrno(PyExc_OSError);
+
+	return (PyInt_FromLong(rv));
+}
+
+PyDoc_STRVAR(prenameat_doc,
+"renameat(fromfd, old, tofd, new)\n\
+\n\
+Invoke renameat(2).");
+
+/*
+ * Python binding for renameat(2): rename 'old' (relative to directory
+ * 'fromfd') to 'new' (relative to directory 'tofd').  Raises OSError
+ * on failure.
+ */
+/*ARGSUSED*/
+static PyObject *
+prenameat(PyObject *self, PyObject *args)
+{
+	int		fromfd, tofd, rv;
+	char		*old, *new;
+
+	/* arguments: (int fromfd, str old, int tofd, str new) */
+	rv = PyArg_ParseTuple(args, "isis", &fromfd, &old, &tofd, &new);
+	if (rv == 0) {
+		PyErr_SetString(PyExc_ValueError, "could not parse argument");
+		return (NULL);
+	}
+
+	rv = renameat(fromfd, old, tofd, new);
+	if (rv != 0)
+		return PyErr_SetFromErrno(PyExc_OSError);
+
+	Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(punlinkat_doc,
+"unlinkat(fd, path, flag)\n\
+\n\
+Invoke unlinkat(2).");
+
+/*
+ * Python binding for unlinkat(2): remove 'path' relative to the
+ * directory referenced by 'fd'.  'flag' may include AT_REMOVEDIR to
+ * remove a directory.  Raises OSError on failure.
+ */
+/*ARGSUSED*/
+static PyObject *
+punlinkat(PyObject *self, PyObject *args)
+{
+	int		fd, flags, rv;
+	char		*path;
+
+	/* arguments: (int fd, str path, int flag) */
+	rv = PyArg_ParseTuple(args, "isi", &fd, &path, &flags);
+	if (rv == 0) {
+		PyErr_SetString(PyExc_ValueError, "could not parse argument");
+		return (NULL);
+	}
+
+	rv = unlinkat(fd, path, flags);
+	if (rv != 0)
+		return PyErr_SetFromErrno(PyExc_OSError);
+
+	Py_RETURN_NONE;
+}
+
+static PyMethodDef methods[] = {
+	{ "mkdirat", (PyCFunction)pmkdirat, METH_VARARGS, pmkdirat_doc },
+	/*
+	 * popenat() parses its arguments with
+	 * PyArg_ParseTupleAndKeywords(), so it must be registered with
+	 * METH_VARARGS | METH_KEYWORDS; METH_KEYWORDS alone is not a
+	 * valid calling convention and causes CPython to raise a
+	 * "bad call flags" SystemError when the function is invoked.
+	 */
+	{ "openat", (PyCFunction)popenat, METH_VARARGS | METH_KEYWORDS,
+	    popenat_doc },
+	{ "renameat", (PyCFunction)prenameat, METH_VARARGS, prenameat_doc },
+	{ "unlinkat", (PyCFunction)punlinkat, METH_VARARGS, punlinkat_doc },
+	{ NULL, NULL }
+};
+
+/*
+ * Module initialization entry point, called by the Python 2 runtime the
+ * first time "import syscallat" executes.
+ */
+void
+initsyscallat() {
+	if (Py_InitModule("syscallat", methods) == NULL) {
+		/* Initialization failed */
+		return;
+	}
+}
--- a/src/pkg/manifests/developer%2Fopensolaris%2Fpkg5.p5m	Fri May 06 17:24:48 2011 -0700
+++ b/src/pkg/manifests/developer%2Fopensolaris%2Fpkg5.p5m	Sat May 07 00:25:10 2011 -0700
@@ -32,6 +32,7 @@
 depend fmri=pkg:/developer/object-file type=require
 depend fmri=pkg:/developer/gnome/gettext type=require
 depend fmri=pkg:/developer/gnome/gnome-doc-utils type=require
+depend fmri=pkg:/developer/python/pylint type=require
 depend fmri=pkg:/developer/swig type=require
 depend fmri=pkg:/developer/versioning/mercurial type=require
 depend fmri=pkg:/gnome/accessibility/gnome-a11y-libs type=require
--- a/src/pkg/manifests/package%2Fpkg.p5m	Fri May 06 17:24:48 2011 -0700
+++ b/src/pkg/manifests/package%2Fpkg.p5m	Sat May 07 00:25:10 2011 -0700
@@ -82,6 +82,7 @@
 file path=$(PYDIRVP)/pkg/actions/signature.py
 file path=$(PYDIRVP)/pkg/actions/unknown.py
 file path=$(PYDIRVP)/pkg/actions/user.py
+file path=$(PYDIRVP)/pkg/altroot.py
 file path=$(PYDIRVP)/pkg/api_common.py
 file path=$(PYDIRVP)/pkg/arch.so
 dir path=$(PYDIRVP)/pkg/bundle
@@ -106,7 +107,13 @@
 file path=$(PYDIRVP)/pkg/client/imageplan.py
 file path=$(PYDIRVP)/pkg/client/imagetypes.py
 file path=$(PYDIRVP)/pkg/client/indexer.py
+dir path=$(PYDIRVP)/pkg/client/linkedimage
+file path=$(PYDIRVP)/pkg/client/linkedimage/__init__.py
+file path=$(PYDIRVP)/pkg/client/linkedimage/common.py
+file path=$(PYDIRVP)/pkg/client/linkedimage/system.py
+file path=$(PYDIRVP)/pkg/client/linkedimage/zone.py
 file path=$(PYDIRVP)/pkg/client/pkg_solver.py
+file path=$(PYDIRVP)/pkg/client/pkgdefs.py
 file path=$(PYDIRVP)/pkg/client/pkgplan.py
 file path=$(PYDIRVP)/pkg/client/progress.py
 file path=$(PYDIRVP)/pkg/client/publisher.py
@@ -192,6 +199,7 @@
 file path=$(PYDIRVP)/pkg/server/transaction.py
 file path=$(PYDIRVP)/pkg/smf.py
 file path=$(PYDIRVP)/pkg/solver.so
+file path=$(PYDIRVP)/pkg/syscallat.so
 file path=$(PYDIRVP)/pkg/sysvpkg.py
 file path=$(PYDIRVP)/pkg/updatelog.py
 file path=$(PYDIRVP)/pkg/variant.py
--- a/src/pkg/manifests/system%2Fzones%2Fbrand%2Fipkg.p5m	Fri May 06 17:24:48 2011 -0700
+++ b/src/pkg/manifests/system%2Fzones%2Fbrand%2Fipkg.p5m	Sat May 07 00:25:10 2011 -0700
@@ -45,7 +45,7 @@
 file path=usr/lib/brand/ipkg/clone mode=0755
 file path=usr/lib/brand/ipkg/common.ksh
 file path=usr/lib/brand/ipkg/detach mode=0755
-file path=usr/lib/brand/ipkg/fmri_compare mode=0755
+file path=usr/lib/brand/ipkg/developerenv.ksh
 file path=usr/lib/brand/ipkg/halt mode=0755
 file path=usr/lib/brand/ipkg/image_install mode=0755
 file path=usr/lib/brand/ipkg/p2v mode=0755
--- a/src/pkg/pkglint_whitelist.txt	Fri May 06 17:24:48 2011 -0700
+++ b/src/pkg/pkglint_whitelist.txt	Sat May 07 00:25:10 2011 -0700
@@ -16,6 +16,7 @@
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/gnome/gettext for pkg://pkg5-nightly/developer/opensolaris/pkg5
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/gnome/gnome-doc-utils for pkg://pkg5-nightly/system/trusted/trusted-nonglobal
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/object-file for pkg://pkg5-nightly/developer/opensolaris/pkg5
+WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/python/pylint for pkg://pkg5-nightly/developer/opensolaris/pkg5
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/swig for pkg://pkg5-nightly/developer/opensolaris/pkg5
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/developer/versioning/mercurial for pkg://pkg5-nightly/developer/opensolaris/pkg5
 WARNING pkglint.action005.1       obsolete dependency check skipped: unable to find dependency pkg:/documentation/gnome/gnome-user-docs for pkg://pkg5-nightly/system/trusted/trusted-nonglobal
--- a/src/pkgdep.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/pkgdep.py	Sat May 07 00:25:10 2011 -0700
@@ -41,7 +41,7 @@
 import pkg.publish.dependencies as dependencies
 from pkg.misc import msg, emsg, PipeError
 
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 PKG_CLIENT_NAME = "pkgdepend"
 
 DEFAULT_SUFFIX = ".res"
--- a/src/setup.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/setup.py	Sat May 07 00:25:10 2011 -0700
@@ -19,7 +19,7 @@
 #
 # CDDL HEADER END
 #
-# Copyright (c) 2008, 2011 Oracle and/or its affiliates.  All rights reserved.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates.  All rights reserved.
 #
 
 import errno
@@ -45,6 +45,7 @@
 from distutils.command.build_py import build_py as _build_py
 from distutils.command.bdist import bdist as _bdist
 from distutils.command.clean import clean as _clean
+from distutils.dist import Distribution
 
 from distutils.sysconfig import get_python_inc
 import distutils.file_util as file_util
@@ -296,6 +297,7 @@
         'pkg.actions',
         'pkg.bundle',
         'pkg.client',
+        'pkg.client.linkedimage',
         'pkg.client.transport',
         'pkg.file_layout',
         'pkg.flavor',
@@ -305,6 +307,12 @@
         'pkg.server'
         ]
 
+pylint_targets = [
+        'pkg.altroot',
+        'pkg.client.linkedimage',
+        'pkg.client.pkgdefs',
+        ]
+
 web_files = []
 for entry in os.walk("web"):
         web_dir, dirs, files = entry
@@ -356,6 +364,9 @@
         ]
 execattrd_files = ['util/misc/exec_attr.d/SUNWipkg']
 authattrd_files = ['util/misc/auth_attr.d/SUNWipkg']
+syscallat_srcs = [
+        'modules/syscallat.c'
+        ]
 pspawn_srcs = [
         'modules/pspawn.c'
         ]
@@ -378,12 +389,53 @@
 if osname == 'sunos':
         solver_link_args = ["-ztext"] + solver_link_args
 
+# Runs pylint over the IPS Python source code
+class pylint_func(Command):
+        description = "Runs pylint tools over IPS python source code"
+        user_options = []
+
+        def initialize_options(self):
+                pass
+
+        def finalize_options(self):
+                pass
+
+        # Make string shell-friendly
+        @staticmethod
+        def escape(astring):
+                return astring.replace(' ', '\\ ')
+
+        def run(self):
+                proto = os.path.join(root_dir, py_install_dir)
+                sys.path.insert(0, proto)
+
+                # Insert tests directory onto sys.path so any custom checkers
+                # can be found.
+                sys.path.insert(0, os.path.join(pwd, 'tests'))
+                # assumes pylint is accessible on the sys.path
+                from pylint import lint
+
+                #
+                # For some reason, the load-plugins option, when used in the
+                # rcfile, does not work, so we put it here instead, to load
+                # our custom checkers.
+                #
+                # Unfortunately, pylint seems pretty fragile and will crash if
+                # we try to run it over all the current pkg source.  Hence for
+                # now we only run it over a subset of the source.  As source
+                # files are made pylint clean they should be added to the
+                # pylint_targets list.
+                #
+                lint.Run(['--load-plugins=multiplatform', '--rcfile',
+                          os.path.join(pwd, 'tests', 'pylintrc')] +
+                          pylint_targets)
+
 include_dirs = [ 'modules' ]
 lint_flags = [ '-u', '-axms', '-erroff=E_NAME_DEF_NOT_USED2' ]
 
 # Runs lint on the extension module source code
-class lint_func(Command):
-        description = "Runs various lint tools over IPS extension source code"
+class clint_func(Command):
+        description = "Runs lint tools over IPS C extension source code"
         user_options = []
 
         def initialize_options(self):
@@ -417,6 +469,10 @@
                             ["%s%s" % ("-I", k) for k in include_dirs] + \
                             ['-I' + self.escape(get_python_inc())] + \
                             pspawn_srcs
+                        syscallatcmd = ['lint'] + lint_flags + ['-D_FILE_OFFSET_BITS=64'] + \
+                            ["%s%s" % ("-I", k) for k in include_dirs] + \
+                            ['-I' + self.escape(get_python_inc())] + \
+                            syscallat_srcs
 
                         print(" ".join(archcmd))
                         os.system(" ".join(archcmd))
@@ -426,30 +482,29 @@
                         os.system(" ".join(_actionscmd))
                         print(" ".join(pspawncmd))
                         os.system(" ".join(pspawncmd))
+                        print(" ".join(syscallatcmd))
+                        os.system(" ".join(syscallatcmd))
 
-                        proto = os.path.join(root_dir, py_install_dir)
-                        sys.path.insert(0, proto)
 
-                        # Insert tests directory onto sys.path so any custom checkers
-                        # can be found.
-                        sys.path.insert(0, os.path.join(pwd, 'tests'))
-                        print(sys.path)
+# Runs both C and Python lint
+class lint_func(Command):
+        description = "Runs C and Python lint checkers"
+        user_options = []
+
+        def initialize_options(self):
+                pass
 
-                # assumes pylint is accessible on the sys.path
-                from pylint import lint
-                scriptlist = [ 'setup.py' ]
-                for d, m in scripts_sunos.items():
-                        for a in m:
-                                # specify the filenames of the scripts, in addition
-                                # to the package names themselves
-                                scriptlist.append(os.path.join(root_dir, d, a[1]))
+        def finalize_options(self):
+                pass
 
-                # For some reason, the load-plugins option, when used in the
-                # rcfile, does not work, so we put it here instead, to load
-                # our custom checkers.
-                lint.Run(['--load-plugins=multiplatform', '--rcfile',
-                          os.path.join(pwd, 'tests', 'pylintrc')] +
-                          scriptlist + packages)
+        # Make string shell-friendly
+        @staticmethod
+        def escape(astring):
+                return astring.replace(' ', '\\ ')
+
+        def run(self):
+                clint_func(Distribution()).run()
+                pylint_func(Distribution()).run()
 
 class install_func(_install):
         def initialize_options(self):
@@ -514,7 +569,7 @@
 
                 prep_sw(CP, CPARC, CPDIR, CPURL, CPHASH)
                 install_sw(CP, CPDIR, CPIDIR)
-		if osname == "sunos" and platform.uname()[2] == "5.11":
+                if osname == "sunos" and platform.uname()[2] == "5.11":
                         prep_sw(LDTP, LDTPARC, LDTPDIR, LDTPURL,
                             LDTPHASH)
                         saveenv = os.environ.copy()
@@ -523,7 +578,7 @@
                         install_ldtp(LDTP, LDTPDIR, LDTPIDIR)
                         os.environ = saveenv
 
-		if "BUILD_PYOPENSSL" in os.environ and \
+                if "BUILD_PYOPENSSL" in os.environ and \
                     os.environ["BUILD_PYOPENSSL"] != "":
                         #
                         # Include /usr/sfw/lib in the build environment
@@ -908,6 +963,8 @@
         'build_py': build_py_func,
         'bdist': dist_func,
         'lint': lint_func,
+        'clint': clint_func,
+        'pylint': pylint_func,
         'clean': clean_func,
         'clobber': clobber_func,
         'test': test_func,
@@ -973,6 +1030,14 @@
                             extra_link_args = link_args,
                             define_macros = [('_FILE_OFFSET_BITS', '64')]
                             ),
+                    Extension(
+                            'syscallat',
+                            syscallat_srcs,
+                            include_dirs = include_dirs,
+                            extra_compile_args = compile_args,
+                            extra_link_args = link_args,
+                            define_macros = [('_FILE_OFFSET_BITS', '64')]
+                            ),
                     ]
         else:
             elf_libraries += [ 'ssl' ]
--- a/src/sysrepo.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/sysrepo.py	Sat May 07 00:25:10 2011 -0700
@@ -53,7 +53,7 @@
 orig_cwd = None
 
 PKG_CLIENT_NAME = "pkg.sysrepo"
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 pkg.client.global_settings.client_name = PKG_CLIENT_NAME
 
 # exit codes
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tests/api/t_altroot.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+import testutils
+if __name__ == "__main__":
+        testutils.setup_environment("../../../proto")
+import pkg5unittest
+
+import errno
+import os
+import sys
+import traceback
+import unittest
+
+import pkg.altroot as ar
+import pkg.client.image as image
+
+class TestAltroot(pkg5unittest.CliTestCase):
+        persistent_setup = True
+
+        def setUp(self):
+                self.i_count = 4
+                pkg5unittest.CliTestCase.setUp(self, image_count=self.i_count)
+
+                # image path
+                self.i = []
+
+                # image files and directories
+                self.p_f1 = "f1"
+                self.p_f2 = "f2"
+                self.p_none = "none"
+                self.p_d = "d"
+                self.p_d_f1 = os.path.join(self.p_d, "f1")
+                self.p_d_f2 = os.path.join(self.p_d, "f2")
+                self.p_d_none = os.path.join(self.p_d, "none")
+                self.p_f1_redir = "f1_redir"
+                self.p_f2_redir = "f2_redir"
+                self.p_d_redir = "d_redir"
+                self.p_d_f1_redir = os.path.join(self.p_d_redir, "f1")
+                self.p_d_f2_redir = os.path.join(self.p_d_redir, "f2")
+                self.p_d_none_redir = os.path.join(self.p_d_redir, "none")
+
+                for i in range(0, self.i_count):
+                        # first assign paths.  we'll use the image paths even
+                        # though we're not actually doing any testing with
+                        # real images.
+                        r = self.img_path(i)
+                        self.i.insert(i, r)
+
+                        os.makedirs(r)
+                        if i == 0:
+                                # simulate a root image
+                                os.makedirs(
+                                    os.path.join(r, image.img_user_prefix))
+                        elif i == 1:
+                                # simulate a user image
+                                os.makedirs(
+                                    os.path.join(r, image.img_root_prefix))
+                        elif i == 2:
+                                # corrupt image: both root and user
+                                os.makedirs(
+                                    os.path.join(r, image.img_user_prefix))
+                                os.makedirs(
+                                    os.path.join(r, image.img_root_prefix))
+
+                for i in range(0, self.i_count):
+                        r = self.i[i]
+                        if i > 0:
+                                r_alt = self.i[i - 1]
+                        else:
+                                r_alt = self.i[self.i_count - 1]
+                        r_redir = os.path.basename(r_alt)
+
+                        # create directories and files within the image
+                        self.make_file(os.path.join(r, self.p_f1), "foo")
+                        self.make_file(os.path.join(r, self.p_f2), "foo")
+                        self.make_file(os.path.join(r, self.p_d_f1), "bar")
+                        self.make_file(os.path.join(r, self.p_d_f2), "bar")
+
+                        # create sym links that point outside that image
+                        os.symlink(os.path.join("..", r_redir, self.p_f1),
+                            os.path.join(r, self.p_f1_redir))
+
+                        os.symlink(os.path.join("..", r_redir, self.p_f2),
+                            os.path.join(r, self.p_f2_redir))
+
+                        os.symlink(os.path.join("..", r_redir, self.p_d),
+                            os.path.join(r, self.p_d_redir))
+
+        def __eremote(self, func, args):
+                e = None
+                try:
+                        func(*args)
+                except:
+                        e_type, e, e_traceback = sys.exc_info()
+
+                if isinstance(e, OSError) and e.errno == errno.EREMOTE:
+                        return
+
+                if e == None:
+                        e_str = str(None)
+                else:
+                        e_str = traceback.format_exc()
+
+                args = ", ".join([str(a) for a in args])
+                self.fail(
+                    "altroot call didn't return OSError EREMOTE exception\n"
+                    "call: %s(%s)\n"
+                    "exception: %s\n" %
+                    (func.__name__, args, e_str))
+
+        def test_ar_err_eremote(self):
+                """Verify that all altroot accessor functions return EREMOTE
+                if they traverse a path which contains a symlink that points
+                somewhere outside the specified altroot namespace."""
+
+                r = self.i[0]
+                invoke = [
+                    (ar.ar_open, (r, self.p_f1_redir, os.O_RDONLY)),
+                    (ar.ar_open, (r, self.p_d_f1_redir, os.O_RDONLY)),
+
+                    (ar.ar_unlink, (r, self.p_d_f1_redir)),
+
+                    (ar.ar_rename, (r, self.p_d_f1_redir, self.p_d_f1)),
+                    (ar.ar_rename, (r, self.p_d_f1, self.p_d_f1_redir)),
+                    (ar.ar_rename, (r, self.p_d_f1_redir, self.p_d_f2_redir)),
+
+                    (ar.ar_mkdir, (r, self.p_d_none_redir, 0777)),
+
+                    (ar.ar_stat, (r, self.p_f1_redir)),
+                    (ar.ar_stat, (r, self.p_d_f1_redir)),
+
+                    (ar.ar_isdir, (r, self.p_d_redir)),
+                    (ar.ar_isdir, (r, self.p_d_f1_redir)),
+
+                    (ar.ar_exists, (r, self.p_f1_redir)),
+                    (ar.ar_exists, (r, self.p_d_redir)),
+                    (ar.ar_exists, (r, self.p_d_f1_redir)),
+
+                    (ar.ar_diff, (r, self.p_f1, self.p_f2_redir)),
+                    (ar.ar_diff, (r, self.p_f1_redir, self.p_f2)),
+                    (ar.ar_diff, (r, self.p_d_f1, self.p_d_f2_redir)),
+                    (ar.ar_diff, (r, self.p_d_f1_redir, self.p_d_f2)),
+                ]
+                for func, args in invoke:
+                        self.__eremote(func, args)
+
+        def __bad_img_prefix(self, func, args):
+                rv = func(*args)
+                if rv == None:
+                        return
+
+                args = ", ".join([str(a) for a in args])
+                self.fail(
+                    "altroot call didn't return None\n"
+                    "call: %s(%s)\n"
+                    "rv: %s\n" %
+                    (func.__name__, args, str(rv)))
+
+        def test_ar_err_img_prefix(self):
+                """Verify that ar_img_prefix() returns None if we have a
+                corrupt image.  image 2 has both user and root image
+                repositories.  image 3 is not an image, it's an empty
+                directory."""
+
+                invoke = [
+                    (ar.ar_img_prefix, (self.i[2],)),
+                    (ar.ar_img_prefix, (self.i[3],)),
+                ]
+                for func, args in invoke:
+                        self.__bad_img_prefix(func, args)
+
+if __name__ == "__main__":
+        unittest.main()
--- a/src/tests/api/t_api.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_api.py	Sat May 07 00:25:10 2011 -0700
@@ -33,7 +33,6 @@
 import os
 import pkg.client.api as api
 import pkg.client.api_errors as api_errors
-import pkg.client.progress as progress
 import pkg.facet as facet
 import pkg.fmri as fmri
 import sys
@@ -41,9 +40,6 @@
 import time
 import unittest
 
-CLIENT_API_VERSION = 58
-PKG_CLIENT_NAME = "pkg"
-
 class TestPkgApi(pkg5unittest.SingleDepotTestCase):
         # restart the depot for every test
         persistent_setup = False
@@ -57,6 +53,18 @@
             add file libc.so.1 mode=0555 owner=root group=bin path=/lib/libc.so.1
             close """
 
+        foo11v = """
+            open [email protected],5.11-0
+            add set name=variant.arch value=i386 value=sparc
+            add file libc.so.1 mode=0555 owner=root group=bin path=/lib/libc.so.1 variant.arch=i386
+            close """
+
+        foo12v = """
+            open [email protected],5.11-0
+            add set name=variant.arch value=i386 value=sparc
+            add file libc.so.1 mode=0555 owner=root group=bin path=/lib/libc.so.1 variant.arch=i386
+            close """
+
         bar10 = """
             open [email protected],5.11-0
             close """
@@ -155,14 +163,19 @@
         def __try_bad_installs(self, api_obj):
 
                 self.assertRaises(api_errors.PlanExistsException,
-                    api_obj.plan_install,["foo"])
-
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_install(*args, **kwargs)),
+                    ["foo"])
                 self.assertRaises(api_errors.PlanExistsException,
-                    api_obj.plan_uninstall,["foo"], False)
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["foo"], False)
                 self.assertRaises(api_errors.PlanExistsException,
-                    api_obj.plan_update_all, sys.argv[0])
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)))
                 try:
-                        api_obj.plan_update_all(sys.argv[0])
+                        for pd in api_obj.gen_plan_update():
+                                continue
                 except api_errors.PlanExistsException:
                         pass
                 else:
@@ -197,7 +210,8 @@
                 self.assertRaises(api_errors.PlanMissingException,
                     api_obj.prepare)
 
-                api_obj.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
                 self.__try_bad_combinations_and_complete(api_obj)
                 api_obj.reset()
 
@@ -209,7 +223,8 @@
                 self.pkgsend_bulk(self.rurl, self.foo12)
                 api_obj.refresh(immediate=True)
 
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 self.__try_bad_combinations_and_complete(api_obj)
                 api_obj.reset()
 
@@ -217,7 +232,8 @@
                     api_obj.prepare)
                 self.assert_(api_obj.describe() is None)
 
-                api_obj.plan_uninstall(["foo"], False)
+                for pd in api_obj.gen_plan_uninstall(["foo"], False):
+                        continue
                 self.__try_bad_combinations_and_complete(api_obj)
                 api_obj.reset()
 
@@ -234,19 +250,23 @@
                 recursive_removal = False
 
                 facets = facet.Facets({ "facet.devel": True })
-                api_obj.plan_change_varcets(facets=facets)
+                for pd in api_obj.gen_plan_change_varcets(facets=facets):
+                        continue
                 self._api_finish(api_obj)
 
-                api_obj.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.execute_plan()
@@ -259,16 +279,19 @@
                 self.pkgsend_bulk(self.rurl, self.foo12)
                 api_obj.refresh(immediate=True)
 
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.execute_plan()
@@ -278,16 +301,22 @@
                 self.pkg("list")
                 self.pkg("verify")
 
-                api_obj.plan_uninstall(["foo"], recursive_removal)
+                for pd in api_obj.gen_plan_uninstall(["foo"],
+                    recursive_removal):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_uninstall(["foo"], recursive_removal)
+                for pd in api_obj.gen_plan_uninstall(["foo"],
+                    recursive_removal):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.reset()
                 self.assert_(api_obj.describe() is None)
-                api_obj.plan_uninstall(["foo"], recursive_removal)
+                for pd in api_obj.gen_plan_uninstall(["foo"],
+                    recursive_removal):
+                        continue
                 self.assert_(api_obj.describe() is not None)
                 api_obj.prepare()
                 api_obj.execute_plan()
@@ -355,10 +384,7 @@
                 # Now install a package, and verify that the entries in the
                 # known catalog for installed packages exist in the installed
                 # catalog and are identical.
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), 
-                    CLIENT_API_VERSION, progresstracker, lambda x: False, 
-                    PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
                 img = api_obj.img
 
                 # Get image catalogs.
@@ -372,7 +398,8 @@
                 self.assertTrue("foo" not in icat.names())
 
                 # Install the packages.
-                api_obj.plan_install(["[email protected]"])
+                for pd in api_obj.gen_plan_install(["[email protected]"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -401,10 +428,7 @@
                 self.dc.stop()
                 self.dc.start()
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), 
-                    CLIENT_API_VERSION, progresstracker, 
-                    lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
                 api_obj.refresh(immediate=True)
                 img = api_obj.img
 
@@ -469,7 +493,7 @@
                 accessible and return expected values."""
 
                 api_obj = self.image_create(self.rurl, prefix="bobcat")
-                self.assertEqual(api_obj.root, self.img_path)
+                self.assertEqual(api_obj.root, self.img_path())
 
         def test_publisher_apis(self):
                 """Verify that the publisher api methods work as expected.
@@ -717,6 +741,86 @@
                 self.assertRaises(api_errors.InvalidP5IFile, api_obj.parse_p5i,
                     location=lcpath)
 
+        def test_deprecated(self):
+                """Test deprecated api interfaces."""
+
+                self.pkgsend_bulk(self.rurl, self.foo10)
+                api_obj = self.image_create(self.rurl, prefix="bobcat",
+                    variants={"variant.arch": "i386"})
+                api_obj.reset()
+
+                # verify the old install interface
+                stuff_to_do = api_obj.plan_install(["foo"], noexecute=False)
+                self.assertTrue(stuff_to_do)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
+                self.pkgsend_bulk(self.rurl, self.foo11v)
+                self.pkgsend_bulk(self.rurl, self.foo12v)
+                api_obj.refresh(immediate=True)
+
+                # verify the old update interface
+                stuff_to_do = api_obj.plan_update(
+                    ["[email protected],5.11-0"], noexecute=False)
+                self.assertTrue(stuff_to_do)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
+                # verify the old update interface
+                stuff_to_do, s_image = api_obj.plan_update_all(noexecute=False)
+                self.assertTrue(stuff_to_do)
+                self.assertFalse(s_image)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
+                # remove a file from the image
+                os.remove(os.path.join(self.img_path(), "lib/libc.so.1"))
+
+                # verify the old revert interface
+                stuff_to_do = api_obj.plan_revert(["/lib/libc.so.1"],
+                    noexecute=False)
+                self.assertTrue(stuff_to_do)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
+                # verify the old change varcets interface
+                stuff_to_do = api_obj.plan_change_varcets(
+                    variants={"variant.arch": "sparc"}, noexecute=False)
+                self.assertTrue(stuff_to_do)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
+                # verify the old change uninstall interface
+                stuff_to_do = api_obj.plan_uninstall(["foo"], False,
+                    noexecute=False)
+                self.assertTrue(stuff_to_do)
+                api_obj.prepare()
+                try:
+                        api_obj.execute_plan()
+                except api_errors.WrapSuccessfulIndexingException:
+                        pass
+                api_obj.reset()
+
         def test_license(self):
                 """ Send various packages and then verify that install and
                 update operations will raise the correct exceptions or
@@ -728,7 +832,8 @@
 
                 # First, test the basic install case to see if expected license
                 # data is returned.
-                api_obj.plan_install(["[email protected]"])
+                for pd in api_obj.gen_plan_install(["[email protected]"]):
+                        continue
 
                 def lic_sort(a, b):
                         adest = a[2]
@@ -783,7 +888,8 @@
                 api_obj.reset()
 
                 # Next, check that an upgrade produces expected license data.
-                api_obj.plan_install(["[email protected]"])
+                for pd in api_obj.gen_plan_install(["[email protected]"]):
+                        continue
 
                 plan = api_obj.describe()
                 lics = sorted(plan.get_licenses(), cmp=lic_sort)
@@ -829,7 +935,8 @@
 
                 # Plan will have to be re-created first before continuing.
                 api_obj.reset()
-                api_obj.plan_install(["[email protected]"])
+                for pd in api_obj.gen_plan_install(["[email protected]"]):
+                        continue
                 plan = api_obj.describe()
 
                 # Set the copyright as having been displayed.
@@ -850,7 +957,8 @@
 
                 # Next, check that an update produces expected license
                 # data.
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 plan = api_obj.describe()
                 lics = [l for l in plan.get_licenses()]
@@ -897,7 +1005,8 @@
 
                 # Plan will have to be re-created first before continuing.
                 api_obj.reset()
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 plan = api_obj.describe()
 
                 # Set the license status of only one license.
@@ -924,7 +1033,8 @@
                 # Next, verify that an update to a newer version of a package
                 # where the license hasn't changed and it previously required
                 # acceptance is treated as already having been accepted.
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 plan = api_obj.describe()
                 pfmri = fmri.PkgFmri(plist[5])
                 lics = sorted(plan.get_licenses(), cmp=lic_sort)
@@ -941,7 +1051,8 @@
 
                 # Finally, verify that an uninstall won't trigger license
                 # errors as acceptance should never be applied to it.
-                api_obj.plan_uninstall(["*"], False)
+                for pd in api_obj.gen_plan_uninstall(["*"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
--- a/src/tests/api/t_api_info.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_api_info.py	Sat May 07 00:25:10 2011 -0700
@@ -131,7 +131,8 @@
                 self.assert_(not ret[api.ImageInterface.INFO_FOUND])
                 self.assert_(len(ret[api.ImageInterface.INFO_MISSING]) == 1)
                 
-                api_obj.plan_install(["jade"])
+                for pd in api_obj.gen_plan_install(["jade"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
--- a/src/tests/api/t_api_list.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_api_list.py	Sat May 07 00:25:10 2011 -0700
@@ -39,14 +39,10 @@
 
 import pkg.client.api as api
 import pkg.client.api_errors as api_errors
-import pkg.client.progress as progress
 import pkg.fmri as fmri
 import pkg.misc as misc
 import pkg.version as version
 
-CLIENT_API_VERSION = 58
-PKG_CLIENT_NAME = "pkg"
-
 class TestApiList(pkg5unittest.ManyDepotTestCase):
         # Only start/stop the depot once (instead of for every test)
         persistent_setup = True
@@ -375,10 +371,7 @@
             pubs=misc.EmptyI, variants=False):
 
                 if not api_obj:
-                        progresstracker = progress.NullProgressTracker()
-                        api_obj = api.ImageInterface(self.get_img_path(),
-                            CLIENT_API_VERSION, progresstracker, lambda x: False,
-                            PKG_CLIENT_NAME)
+                        api_obj = self.get_img_api_obj()
 
                 # Set of states exposed by the API.
                 exp_states = set([api.PackageInfo.FROZEN,
@@ -423,9 +416,7 @@
                 """Verify the sort order and content of a full list and
                 combinations thereof."""
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), CLIENT_API_VERSION,
-                    progresstracker, lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 # First check all variants case.
                 returned = self.__get_returned(api_obj.LIST_ALL,
@@ -506,10 +497,7 @@
                 # Verify that LIST_NEWEST will allow version-specific
                 # patterns such that the newest version allowed by the
                 # pattern is what is listed.
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(),
-                    CLIENT_API_VERSION, progresstracker, lambda x: False,
-                    PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 returned = self.__get_returned(api_obj.LIST_NEWEST,
                     api_obj=api_obj, patterns=["[email protected]", "bat/bar",
@@ -591,9 +579,7 @@
                 """Verify the sort order and content of a list containing
                 only installed packages and combinations thereof."""
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), CLIENT_API_VERSION,
-                    progresstracker, lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 # Verify no installed packages case.
                 returned = self.__get_returned(api_obj.LIST_INSTALLED,
@@ -605,8 +591,9 @@
                 # won't be installed.
                 af = self.__get_pub_entry("test1", 3, "apple",
                     "1.2.0,5.11-0")[0]
-                api_obj.plan_install(["entire", af.get_fmri(), "corge",
-                    "obsolete", "qux"])
+                for pd in api_obj.gen_plan_install(
+                    ["entire", af.get_fmri(), "corge", "obsolete", "qux"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -746,7 +733,8 @@
 
                 # Verify the results for LIST_INSTALLED_NEWEST after
                 # uninstalling 'quux' and 'qux'.
-                api_obj.plan_uninstall(["quux"], False)
+                for pd in api_obj.gen_plan_uninstall(["quux"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -781,7 +769,8 @@
 
                 # Verify the results for LIST_INSTALLED_NEWEST after
                 # all packages have been uninstalled.
-                api_obj.plan_uninstall(["*"], False)
+                for pd in api_obj.gen_plan_uninstall(["*"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -892,7 +881,9 @@
                 # Test results after installing packages and only listing the
                 # installed packages.
                 af = self.__get_pub_entry("test1", 1, "apple", "1.0,5.11-0")[0]
-                api_obj.plan_install([af.get_fmri(), "[email protected]", "[email protected]"])
+                for pd in api_obj.gen_plan_install(
+                    [af.get_fmri(), "[email protected]", "[email protected]"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -955,13 +946,15 @@
                 # LIST_INSTALLED_NEWEST.  corge, grault, qux, and
                 # quux should be listed since none of them are
                 # listed in an installed incorporation.
-                api_obj.plan_uninstall(["corge"], False)
+                for pd in api_obj.gen_plan_uninstall(["corge"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
 
                 af = self.__get_pub_entry("test1", 1, "apple", "1.0,5.11-0")[0]
-                api_obj.plan_install(["pkg://test2/grault"])
+                for pd in api_obj.gen_plan_install(["pkg://test2/grault"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -996,12 +989,14 @@
                 # Now verify that publisher search order determines the entries
                 # that are listed when those entries are part of an installed
                 # incorporation.
-                api_obj.plan_uninstall(["*"], False)
+                for pd in api_obj.gen_plan_uninstall(["*"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
 
-                api_obj.plan_install(["entire"])
+                for pd in api_obj.gen_plan_install(["entire"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -1134,7 +1129,8 @@
                 # Now install one of the incorporated packages and check
                 # that test2 is still listed for the remaining package
                 # for the non-filtered case.
-                api_obj.plan_install(["apple"])
+                for pd in api_obj.gen_plan_install(["apple"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -1196,7 +1192,8 @@
                 self.assertPrettyEqual(returned, expected)
 
                 # Reset image state for following tests.
-                api_obj.plan_uninstall(["*"], False)
+                for pd in api_obj.gen_plan_uninstall(["*"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -1205,9 +1202,7 @@
                 """Verify the sort order and content of a list containing
                 only upgradable packages and combinations thereof."""
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), CLIENT_API_VERSION,
-                    progresstracker, lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 # Verify no installed packages case.
                 returned = self.__get_returned(api_obj.LIST_UPGRADABLE,
@@ -1218,7 +1213,9 @@
                 # installed, upgradable packages.
                 af = self.__get_pub_entry("test1", 3, "apple",
                     "1.2.0,5.11-0")[0]
-                api_obj.plan_install([af.get_fmri(), "bat/bar", "qux"])
+                for pd in api_obj.gen_plan_install(
+                    [af.get_fmri(), "bat/bar", "qux"]):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -1235,7 +1232,8 @@
                 self.assertPrettyEqual(returned, expected)
 
                 # Reset image state for following tests.
-                api_obj.plan_uninstall(["*"], False)
+                for pd in api_obj.gen_plan_uninstall(["*"], False):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 api_obj.reset()
@@ -1243,9 +1241,7 @@
         def test_list_07_get_pkg_categories(self):
                 """Verify that get_pkg_categories returns expected results."""
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), CLIENT_API_VERSION,
-                    progresstracker, lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 # Verify no installed packages case.
                 returned = api_obj.get_pkg_categories(installed=True)
@@ -1313,7 +1309,8 @@
                             f.get_fmri(anarchy=True, include_scheme=False)
                             for f in combo
                         ]
-                        api_obj.plan_install(pkgs)
+                        for pd in api_obj.gen_plan_install(pkgs):
+                                continue
                         api_obj.prepare()
                         api_obj.execute_plan()
                         api_obj.reset()
@@ -1328,11 +1325,12 @@
 
                         # Prepare for next test.
                         # skip corge since it's renamed
-                        api_obj.plan_uninstall([
-                                                p
-                                                for p in pkgs
-                                                if not p.startswith("[email protected]")
-                                                ], False)
+                        for pd in api_obj.gen_plan_uninstall([
+                                p
+                                for p in pkgs
+                                if not p.startswith("[email protected]")
+                            ], False):
+                                continue
                         api_obj.prepare()
                         api_obj.execute_plan()
                         api_obj.reset()
@@ -1340,9 +1338,7 @@
         def test_list_08_patterns(self):
                 """Verify that pattern filtering works as expected."""
 
-                progresstracker = progress.NullProgressTracker()
-                api_obj = api.ImageInterface(self.get_img_path(), CLIENT_API_VERSION,
-                    progresstracker, lambda x: False, PKG_CLIENT_NAME)
+                api_obj = self.get_img_api_obj()
 
                 # First, check all variants, but with multiple patterns for the
                 # partial, exact, and wildcard match cases.
--- a/src/tests/api/t_api_search.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_api_search.py	Sat May 07 00:25:10 2011 -0700
@@ -1181,7 +1181,7 @@
                 shutil.move(index_dir_tmp, index_dir)
 
         def _get_index_dirs(self):
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
                 index_dir_tmp = index_dir + "TMP"
                 return index_dir, index_dir_tmp
@@ -1312,7 +1312,7 @@
 
                 self._api_install(api_obj, ["[email protected]"])
 
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
                 shutil.rmtree(index_dir)
 
@@ -1360,7 +1360,7 @@
 
                 self._api_install(api_obj, ["[email protected]"])
 
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
 
                 first = True
@@ -1391,7 +1391,7 @@
                 api_obj = self.image_create(durl)
                 self._api_install(api_obj, ["[email protected]"])
 
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
 
                 first = True
@@ -1419,13 +1419,15 @@
                         self._overwrite_version_number(orig_path)
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_uninstall, api_obj, ["example_pkg"])
+                            self._api_uninstall, api_obj, ["example_pkg"],
+                            catch_wsie=False)
                         api_obj.reset()
                         self._search_op(api_obj, False, "example_pkg", set())
                         self._overwrite_version_number(orig_path)
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_install, api_obj, ["example_pkg"])
+                            self._api_install, api_obj, ["example_pkg"],
+                            catch_wsie=False)
                         api_obj.reset()
                         self._search_op(api_obj, False, "example_pkg",
                             self.res_local_pkg)
@@ -1444,7 +1446,8 @@
                     self.res_local_pkg)
                 self._overwrite_hash(ffh_path)
                 self.assertRaises(api_errors.WrapSuccessfulIndexingException,
-                    self._api_uninstall, api_obj, ["example_pkg"])
+                    self._api_uninstall, api_obj, ["example_pkg"],
+                    catch_wsie=False)
                 self._search_op(api_obj, False, "example_pkg", set())
 
         def test_080_weird_patterns(self):
@@ -1467,7 +1470,7 @@
                 durl = self.dc.get_depot_url()
                 api_obj = self.image_create(durl)
 
-                tmp_dir = os.path.join(self.img_path, "var", "pkg",
+                tmp_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index", "TMP")
                 self._api_install(api_obj, ["example_pkg"])
                 api_obj.rebuild_search_index()
@@ -1534,7 +1537,7 @@
                 self._run_remove_root_search(self._search_op_multi, False,
                     api_obj, ip)
 
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
                 shutil.rmtree(index_dir)
                 # Do slow local searches
@@ -1637,7 +1640,8 @@
 
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_uninstall, api_obj, ["example_pkg"])
+                            self._api_uninstall, api_obj, ["example_pkg"],
+                            catch_wsie=False)
 
                         self.image_destroy()
 
@@ -1659,7 +1663,8 @@
 
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_uninstall, api_obj, ["another_pkg"])
+                            self._api_uninstall, api_obj, ["another_pkg"],
+                            catch_wsie=False)
 
                         self.image_destroy()
 
@@ -1682,7 +1687,8 @@
 
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_uninstall, api_obj, ["example_pkg"])
+                            self._api_uninstall, api_obj, ["example_pkg"],
+                            catch_wsie=False)
 
                         self.image_destroy()
 
@@ -1705,7 +1711,7 @@
 
                         self.assertRaises(
                             api_errors.WrapSuccessfulIndexingException,
-                            self._api_image_update, api_obj)
+                            self._api_image_update, api_obj, catch_wsie=False)
 
                         self.image_destroy()
 
@@ -1859,7 +1865,7 @@
                 self.pkgsend_bulk(durl, self.example_pkg10)
                 api_obj = self.image_create(durl)
 
-                index_dir = os.path.join(self.img_path, "var", "pkg",
+                index_dir = os.path.join(self.img_path(), "var", "pkg",
                     "cache", "index")
 
                 orig_fn = os.path.join(index_dir,
@@ -1872,7 +1878,8 @@
 
                 portable.rename(orig_fn, dest_fn)
                 self.assertRaises(api_errors.WrapSuccessfulIndexingException,
-                    self._api_uninstall, api_obj, ["example_pkg"])
+                    self._api_uninstall, api_obj, ["example_pkg"],
+                    catch_wsie=False)
 
         def test_bug_8492(self):
                 """Tests that field queries and phrase queries work together.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tests/api/t_linked_image.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,857 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+import testutils
+if __name__ == "__main__":
+	testutils.setup_environment("../../../proto")
+import pkg5unittest
+
+import os
+import sys
+import traceback
+
+import unittest
+
+import pkg.actions
+import pkg.client.api as api
+import pkg.client.api_errors as apx
+import pkg.client.linkedimage as li
+import pkg.client.progress as progress
+import pkg.client.publisher as publisher
+
+from pkg.client.debugvalues import DebugValues
+from pkg.client.pkgdefs import *
+
+p_update_index = 0
+
+def apx_verify(e, e_type, e_member=None):
+
+        if e == None:
+                raise RuntimeError("""
+Expected %s exception.
+Didn't get any exception.
+"""
+                % (str(e_type)))
+
+        if type(e) != e_type:
+                raise RuntimeError("""
+Expected %s exception.
+Got a %s exception:
+
+%s
+"""
+                % (str(e_type),
+                        str(type(e)), traceback.format_exc()))
+
+        if e_member == None:
+                return
+
+        if not getattr(e, e_member):
+                raise RuntimeError("""
+Expected %s exception of type "%s".
+Got a %s exception with a differnt type:
+
+%s
+"""
+                    % (str(e_type), e_member,
+                        str(type(e)), traceback.format_exc()))
+
+def assertRaises(validate_cb, func, *args, **kwargs):
+        (validate_func, validate_args) = validate_cb
+        sys.exc_clear()
+
+        e = None
+        try:
+                func(*args, **kwargs)
+        except:
+                e_type, e, e_tb = sys.exc_info()
+                pass
+        validate_func(e, **validate_args)
+
+
+class TestLinkedImageName(pkg5unittest.Pkg5TestCase):
+
+        def test_linked_name(self):
+
+                # setup bad linked image names
+                bad_name = []
+                bad_name.append("too:many:colons")
+                bad_name.append("notenoughcolons")
+                bad_name.append(":img2")   # no type
+                bad_name.append("system:")   # no name
+                bad_name.append("badtype:img4")
+
+                good_name = ["system:img1", "zone:img1"]
+
+                for name in bad_name:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.LinkedImageException,
+                                "e_member": "lin_malformed"}),
+                                li.LinkedImageName, name)
+
+                for name in good_name:
+                       li.LinkedImageName(name)
+
+        def test_linked_zone_name(self):
+                DebugValues["zone_name"] = ["/bin/false"]
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.SubprocessError}),
+                        li.zone._zonename)
+
+
+class TestApiLinked(pkg5unittest.ManyDepotTestCase):
+        # Only start/stop the depot once (instead of for every test)
+        persistent_setup = True
+
+        pub1 = "bobcat"
+        pub2 = "lolcat"
+        pub3 = "pussycat"
+
+        p_all = []
+        p_vers = [
+            "@1.2,5.11-145:19700101T000001Z",
+            "@1.2,5.11-145:19700101T000000Z", # old time
+            "@1.1,5.11-145:19700101T000000Z", # old ver
+            "@1.1,5.11-144:19700101T000000Z", # old build
+            "@1.0,5.11-144:19700101T000000Z", # oldest
+        ]
+        p_files = [
+            "tmp/bar",
+            "tmp/baz",
+        ]
+
+        # generate packages that don't need to be synced
+        p_foo1_name_gen = "foo1"
+        pkgs = [p_foo1_name_gen + ver for ver in p_vers]
+        p_foo1_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_foo1_name:
+                p_data = "open %s\n" % p_foo1_name[i]
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=foo_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=foo_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        # generate packages that do need to be synced
+        p_sync1_name_gen = "sync1"
+        pkgs = [p_sync1_name_gen + ver for ver in p_vers]
+        p_sync1_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_sync1_name:
+                p_data = "open %s\n" % p_sync1_name[i]
+                p_data += "add depend type=parent fmri=%s" % \
+                    pkg.actions.depend.DEPEND_SELF
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=sync1_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=sync1_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        def setUp(self):
+                self.i_count = 5
+                pkg5unittest.ManyDepotTestCase.setUp(self,
+                    [self.pub1, self.pub2, self.pub3],
+                    image_count=self.i_count)
+
+                # create files that go in packages
+                self.make_misc_files(self.p_files)
+
+                # get repo urls
+                self.rurl1 = self.dcs[1].get_repo_url()
+                self.rurl2 = self.dcs[2].get_repo_url()
+                self.rurl3 = self.dcs[3].get_repo_url()
+
+                # populate repositories
+                self.pkgsend_bulk(self.rurl1, self.p_all)
+
+                # setup image names and paths
+                self.i_path = []
+                self.i_lin = []
+                self.i_lin2index = {}
+                for i in range(self.i_count):
+                        lin = li.LinkedImageName("system:img%d" % i)
+                        self.i_lin.insert(i, lin)
+                        self.i_lin2index[lin] = i
+                        self.set_image(i)
+                        self.i_path.insert(i, self.img_path())
+
+        def _cat_update(self):
+                global p_update_index
+                p_update_name = "update@%d.0,5.11-143:19700101T000000Z" % \
+                    p_update_index
+                p_update_index += 1
+
+                p_data = "open %s\n" % p_update_name
+                p_data += """
+                    close\n"""
+
+                self.pkgsend_bulk(self.rurl1, [p_data])
+
+        def _list_packages(self, apio):
+                pkg_list = apio.get_pkg_list(api.ImageInterface.LIST_ALL)
+                return set(sorted([
+                        "pkg://%s/%s@%s" % (pfmri[0], pfmri[1], pfmri[2])
+                        for pfmri, summ, cats, states in pkg_list
+                ]))
+
+        # utility functions for use by test cases
+        def _imgs_create(self, limit, **ic_opts):
+                variants = { "variant.foo": "bar" }
+
+                rv = []
+
+                for i in range(0, limit):
+                        self.set_image(i)
+                        api_obj = self.image_create(self.rurl1,
+                            prefix=self.pub1, variants=variants, **ic_opts)
+                        rv.insert(i, api_obj)
+
+                for i in range(limit, self.i_count):
+                        self.set_image(i)
+                        self.image_destroy()
+
+                self.set_image(0)
+                self.api_objs = rv
+                return rv
+
+        def _parent_attach(self, i, cl, **args):
+                assert i not in cl
+
+                for c in cl:
+                        self._api_attach(self.api_objs[c],
+                            lin=self.i_lin[i], li_path=self.i_path[i], **args)
+
+        def _children_attach(self, i, cl, rv=None, rvdict=None, **args):
+                assert i not in cl
+                assert rvdict == None or type(rvdict) == dict
+                assert rv == None or rvdict == None
+
+                if rv == None:
+                        rv = EXIT_OK
+                if rvdict == None:
+                        rvdict = {}
+                        for c in cl:
+                                rvdict[c] = rv
+                assert (set(rvdict) | set(cl)) == set(cl)
+
+                # attach each child to parent
+                for c in cl:
+                        rv = rvdict.get(c, EXIT_OK)
+                        (c_rv, c_err) = self.api_objs[i].attach_linked_child(
+                            lin=self.i_lin[c], li_path=self.i_path[c], **args)
+                        self.assertEqual(c_rv, rv)
+                        self.api_objs[c].reset()
+
+        def _children_op(self, i, cl, op, rv=None, rvdict=None, **args):
+                assert i not in cl
+                assert type(op) == str
+                assert rv == None or type(rv) == int
+                assert rvdict == None or type(rvdict) == dict
+                assert rv == None or rvdict == None
+
+                if rv == None:
+                        rv = EXIT_OK
+                if rvdict == None:
+                        rvdict = {}
+                        for c in cl:
+                                rvdict[c] = rv
+
+                # sync each child from parent
+                li_list = [self.i_lin[c] for c in cl]
+
+                # get a pointer to the function we're invoking
+                func = getattr(self.api_objs[i], op)
+                c_rvdict = func(li_list=li_list, **args)
+
+                # check that the actual return values match up with expected
+                # return values in rvdict
+                for c_lin, (c_rv, c_err) in c_rvdict.items():
+                        rv = rvdict.get(self.i_lin2index[c_lin], EXIT_OK)
+                        self.assertEqual(c_rv, rv)
+
+                if rvdict:
+                        # make sure that we actually got a return value for
+                        # each image that we're expecting a return value from
+                        c_i = [self.i_lin2index[c_lin] for c_lin in c_rvdict]
+                        self.assertEqual(sorted(c_i), sorted(rvdict))
+
+        def _verify_pkg(self, api_objs, i, pfmri):
+                apio = api_objs[i]
+                progtrack = progress.NullProgressTracker()
+
+                for act, err, warn, pinfo in apio.img.verify(pfmri, progtrack,
+                    verbose=True):
+                        self.assertEqual(len(err), 0, """
+unexpected verification error for pkg: %s
+action: %s
+error: %s
+warning: %s
+pinfo: %s""" % \
+                            (pfmri, str(act), str(err), str(warn), str(pinfo)))
+
+
+        def assertKnownPkgCount(self, api_objs, i, pl_init, offset=0):
+                apio = api_objs[i]
+                pl = self._list_packages(apio)
+
+                pl_removed = pl_init - pl
+                pl_added = pl - pl_init
+
+                self.assertEqual(len(pl_init), len(pl) - offset, """
+unexpected packages known in image[%d]: %s
+packages removed:
+    %s
+packages added:
+    %s
+packages known:
+    %s""" % \
+                    (i, self.i_path[i], "\n    ".join(pl_removed),
+                    "\n    ".join(pl_added), "\n    ".join(pl)))
+
+        def test_attach_err_link_to_self(self):
+                api_objs = self._imgs_create(1)
+
+                lin = self.i_lin[0]
+                path = self.i_path[0]
+
+                # Attach p2c, link to ourselves
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "link_to_self"}),
+                    api_objs[0].attach_linked_child, lin=lin, li_path=path)
+
+                # Attach c2p, link to ourselves
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "link_to_self"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_attach(*args, **kwargs)),
+                        lin=lin, li_path=path)
+
+        def test_attach_err_liveroot_as_child(self):
+                api_objs = self._imgs_create(2)
+
+                lin = self.i_lin[1]
+                path = self.i_path[1]
+
+                #
+                # The test harness will clear all DebugValues variables for
+                # us after each test run.
+                #
+
+                # Attach p2c, child is liveroot
+                DebugValues["simulate_live_root"] = self.i_path[1]
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "attach_root_as_child"}),
+                    api_objs[0].attach_linked_child,
+                        lin=lin, li_path=path)
+
+                # Attach c2p, child is liveroot
+                # We also need to temporarily disable PKG_NO_LIVE_ROOT.
+                del os.environ["PKG_NO_LIVE_ROOT"]
+                DebugValues["simulate_live_root"] = self.i_path[0]
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "attach_root_as_child"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_attach(*args, **kwargs)),
+                        lin=lin, li_path=path)
+
+                os.environ["PKG_NO_LIVE_ROOT"] = "1"
+                del DebugValues["simulate_live_root"]
+
+        def test_linked_p2c_recurse_flags_1_no_refresh_via_attach(self):
+                """test no-refresh option when no catalog is present"""
+
+                # create images but don't cache any catalogs
+                api_objs = self._imgs_create(3, refresh_allowed=False)
+
+                # Attach p2c, 0 -> 1
+                api_objs[0].attach_linked_child(
+                    lin=self.i_lin[1], li_path=self.i_path[1],
+                    refresh_catalogs=False)
+
+                # Attach c2p, 2 -> 0
+                self._api_attach(api_objs[2],
+                    lin=self.i_lin[2], li_path=self.i_path[0],
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure the parent didn't refresh
+                # the parent doesn't know about any packages
+                # the child only knows about the constraints package
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, set())
+
+        def test_linked_p2c_recurse_flags_1_no_refresh_via_sync(self):
+                """test no-refresh option when no catalog is present"""
+
+                # create images but don't cache any catalogs
+                api_objs = self._imgs_create(3, refresh_allowed=False)
+
+                # Attach p2c, 0 -> 1
+                api_objs[0].attach_linked_child(
+                    lin=self.i_lin[1], li_path=self.i_path[1],
+                    refresh_catalogs=False, li_md_only=True)
+
+                # Attach c2p, 2 -> 0
+                self._api_attach(api_objs[2],
+                    lin=self.i_lin[2], li_path=self.i_path[0],
+                    refresh_catalogs=False, li_md_only=True)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # Sync 1
+                api_objs[0].sync_linked_children(li_list=[],
+                    refresh_catalogs=False)
+
+                # Sync 2
+                self._api_sync(api_objs[2],
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure the parent didn't refresh
+                # the parent doesn't know about any packages
+                # the child only knows about the constraints package
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, set())
+
+        def test_linked_p2c_recurse_flags_2_no_refresh_via_attach(self):
+                """test no-refresh option when catalog is updated"""
+
+                # create images
+                api_objs = self._imgs_create(3)
+
+                # get a list of all known packages
+                pl_init = dict()
+                for i in range(3):
+                        pl_init[i] = self._list_packages(api_objs[i])
+
+                # update the catalog with a new package
+                self._cat_update()
+
+                # Attach p2c, 0 -> 1
+                api_objs[0].attach_linked_child(
+                    lin=self.i_lin[1], li_path=self.i_path[1],
+                    refresh_catalogs=False)
+
+                # Attach c2p, 2 -> 0
+                self._api_attach(api_objs[2],
+                    lin=self.i_lin[2], li_path=self.i_path[0],
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure that none of the images refreshed their
+                # catalogs: each image should still know only about
+                # the packages it knew before the catalog update
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, pl_init[i])
+
+                return (api_objs, pl_init)
+
+        def test_linked_p2c_recurse_flags_2_no_refresh_via_other(self):
+                """test no-refresh option when catalog is updated"""
+
+                # don't need to test uninstall and change-varcets since
+                # they don't accept the refresh_catalogs option
+
+                # create images
+                api_objs = self._imgs_create(3)
+
+                # install different synced packages into each image
+                for i in [0, 1, 2]:
+                        self._api_install(api_objs[i],
+                            [self.p_sync1_name[i + 2]])
+
+                # Attach p2c, 0 -> 1
+                api_objs[0].attach_linked_child(
+                    lin=self.i_lin[1], li_path=self.i_path[1],
+                    li_md_only=True)
+
+                # Attach c2p, 2 -> 0
+                self._api_attach(api_objs[2],
+                    lin=self.i_lin[2], li_path=self.i_path[0],
+                    li_md_only=True)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # get a list of all known packages
+                pl_init = dict()
+                for i in range(3):
+                        pl_init[i] = self._list_packages(api_objs[i])
+
+                # update the catalog with a new package
+                self._cat_update()
+
+                # Sync 1
+                api_objs[0].sync_linked_children(li_list=[],
+                    refresh_catalogs=False)
+
+                # Sync 2
+                self._api_sync(api_objs[2],
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure all the images are unaware of new packages
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, pl_init[i])
+
+                # Install newer package in 0 and 1
+                self._api_install(api_objs[0], [self.p_sync1_name[1]],
+                    refresh_catalogs=False)
+
+                # Install newer package in 2
+                self._api_install(api_objs[2], [self.p_sync1_name[1]],
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure all the images are unaware of new packages
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, pl_init[i])
+
+                # Update to newest package in 0 and 1
+                self._api_image_update(api_objs[0], refresh_catalogs=False)
+
+                # Update to newest package in 2
+                self._api_image_update(api_objs[2], refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure all the images are unaware of new packages
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, pl_init[i])
+
+                # change variant in 0
+                self._api_change_varcets(api_objs[0],
+                    variants={"variant.foo": "baz"},
+                    refresh_catalogs=False)
+
+                # change variant in 2
+                self._api_change_varcets(api_objs[2],
+                    variants={"variant.foo": "baz"},
+                    refresh_catalogs=False)
+
+                for i in range(3):
+                        api_objs[i].reset()
+
+                # make sure all the images are unaware of new packages
+                for i in range(3):
+                        self.assertKnownPkgCount(api_objs, i, pl_init[i])
+
+        def test_err_toxic_pkg(self):
+                # create images
+                api_objs = self._imgs_create(2)
+
+                # install a synced package into 1
+                self._api_install(api_objs[1], [self.p_sync1_name[1]])
+
+                # Attach c2p, 1 -> 0
+                self._api_attach(api_objs[1],
+                    lin=self.i_lin[1], li_path=self.i_path[0],
+                    li_md_only=True)
+
+                # try to modify image.
+                # no version of synced package is in the parent
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_update(*args, **kwargs)))
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_sync(*args, **kwargs)))
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_change_varcets(*args, **kwargs)),
+                        variants={"variant.foo": "baz"})
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_install(*args, **kwargs)),
+                        [self.p_sync1_name[0]])
+
+                # install an older synced package into 0 (the parent)
+                self._api_install(api_objs[0], [self.p_sync1_name[2]],
+                    li_ignore=[])
+
+                # try to modify image.
+                # an older version of synced package is in the parent
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_update(*args, **kwargs)))
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_sync(*args, **kwargs)))
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_change_varcets(*args,
+                            **kwargs)),
+                        variants={"variant.foo": "baz"})
+
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.PlanCreationException,
+                        "e_member": "no_version"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[1].gen_plan_install(*args, **kwargs)),
+                        [self.p_sync1_name[0]])
+
+        def test_err_pubcheck(self):
+                """Verify the linked image publisher sync check."""
+
+                def configure_pubs1(self):
+                        """change the publishers config in our images."""
+
+                        # add pub 2 to image 0
+                        self.api_objs[0].add_publisher(self.po2)
+
+                        # add pubs 2 and 3 to image 1
+                        self.api_objs[1].add_publisher(self.po2)
+                        self.api_objs[1].add_publisher(self.po3)
+
+                        # leave image 2 alone
+
+                        # add pub 2 to image 3 and reverse the search order
+                        self.api_objs[3].add_publisher(self.po2,
+                            search_before=self.po1)
+
+                        # add pub 2 to image 4 as non-sticky
+                        self.api_objs[4].add_publisher(self.po4)
+
+                # setup publisher objects
+                repouri = publisher.RepositoryURI(self.rurl1)
+                repo1 = publisher.Repository(origins=[repouri])
+                self.po1 = publisher.Publisher(self.pub1, repository=repo1)
+
+                repouri = publisher.RepositoryURI(self.rurl2)
+                repo2 = publisher.Repository(origins=[repouri])
+                self.po2 = publisher.Publisher(self.pub2, repository=repo2)
+
+                repouri = publisher.RepositoryURI(self.rurl3)
+                repo3 = publisher.Repository(origins=[repouri])
+                self.po3 = publisher.Publisher(self.pub3, repository=repo3)
+
+                self.po4 = publisher.Publisher(self.pub2, repository=repo2)
+                self.po4.sticky = False
+
+                # create images and update publishers
+                api_objs = self._imgs_create(5)
+                configure_pubs1(self)
+
+                # Attach p2c, 0 -> 1 (sync ok)
+                api_objs[0].attach_linked_child(
+                    lin=self.i_lin[1], li_path=self.i_path[1])
+                api_objs[0].detach_linked_children(li_list=[self.i_lin[1]])
+                api_objs[1].reset()
+
+                # Attach p2c, 0 -> 2 (sync error)
+                (rv, err) = api_objs[0].attach_linked_child(
+                    lin=self.i_lin[2], li_path=self.i_path[2])
+                self.assertEqual(rv, EXIT_OOPS)
+
+                # Attach p2c, 0 -> 3 (sync error)
+                (rv, err) = api_objs[0].attach_linked_child(
+                    lin=self.i_lin[3], li_path=self.i_path[3])
+                self.assertEqual(rv, EXIT_OOPS)
+
+                # Attach p2c, 0 -> 4 (sync error)
+                (rv, err) = api_objs[0].attach_linked_child(
+                    lin=self.i_lin[4], li_path=self.i_path[4])
+                self.assertEqual(rv, EXIT_OOPS)
+
+                # Attach c2p, 1 -> 0 (sync ok)
+                for pd in api_objs[1].gen_plan_attach(
+                    lin=self.i_lin[0], li_path=self.i_path[0],
+                    noexecute=True):
+                        continue
+
+                # Attach c2p, [2, 3, 4] -> 0 (sync error)
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_attach(*args, **kwargs)),
+                                lin=self.i_lin[0], li_path=self.i_path[0],
+                                noexecute=True)
+
+                # create images, attach children (p2c), and update publishers
+                api_objs = self._imgs_create(5)
+                self._children_attach(0, [1, 2, 3, 4])
+                configure_pubs1(self)
+
+                # test recursive parent operations
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "recursive_cmd_fail"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_install(*args, **kwargs)),
+                        [self.p_sync1_name[0]])
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "recursive_cmd_fail"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_update(*args, **kwargs)))
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "recursive_cmd_fail"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_change_varcets(*args, **kwargs)),
+                        variants={"variant.foo": "baz"})
+                assertRaises(
+                    (apx_verify, {
+                        "e_type": apx.LinkedImageException,
+                        "e_member": "recursive_cmd_fail"}),
+                    lambda *args, **kwargs: list(
+                        api_objs[0].gen_plan_uninstall(*args, **kwargs)),
+                        [self.p_sync1_name_gen])
+
+                # test operations on child nodes
+                rvdict = {1: EXIT_NOP, 2: EXIT_OOPS, 3: EXIT_OOPS,
+                    4: EXIT_OOPS}
+                self._children_op(0, [], "sync_linked_children",
+                    rvdict=rvdict)
+                self._children_op(0, [1, 2, 3, 4], "sync_linked_children",
+                    rvdict=rvdict)
+
+                # no pub check during detach
+                self._children_op(0, [], "detach_linked_children")
+
+                # create images, attach children (c2p), and update publishers
+                api_objs = self._imgs_create(5)
+                self._parent_attach(0, [1, 2, 3, 4])
+                configure_pubs1(self)
+
+                # test sync
+                self._api_sync(api_objs[1])
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_sync(*args, **kwargs)))
+
+                # test install
+                self._api_install(api_objs[1], [self.p_foo1_name[1]])
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_install(*args, **kwargs)),
+                                [self.p_foo1_name[1]])
+
+                # test update
+                self._api_image_update(api_objs[1])
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_update(*args, **kwargs)))
+
+                # test change varcets
+                self._api_change_varcets(api_objs[1],
+                    variants={"variant.foo": "baz"})
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_change_varcets(*args,
+                                    **kwargs)),
+                                variants={"variant.foo": "baz"})
+
+                # test uninstall
+                self._api_uninstall(api_objs[1], [self.p_foo1_name_gen])
+                for c in [2, 3, 4]:
+                        assertRaises(
+                            (apx_verify, {
+                                "e_type": apx.PlanCreationException,
+                                "e_member": "linked_pub_error"}),
+                            lambda *args, **kwargs: list(
+                                api_objs[c].gen_plan_uninstall(*args,
+                                    **kwargs)),
+                                [self.p_foo1_name_gen])
+
+                # no pub check during detach
+                for c in [1, 2, 3, 4]:
+                        self._api_detach(api_objs[c])
+
+if __name__ == "__main__":
+        unittest.main()
--- a/src/tests/api/t_p5p.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_p5p.py	Sat May 07 00:25:10 2011 -0700
@@ -104,7 +104,7 @@
         def image_create(self, *args, **kwargs):
                 pkg5unittest.SingleDepotTestCase.image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
 
         def setUp(self):
--- a/src/tests/api/t_pkg_api_install.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/api/t_pkg_api_install.py	Sat May 07 00:25:10 2011 -0700
@@ -205,21 +205,24 @@
         @staticmethod
         def __do_install(api_obj, fmris):
                 api_obj.reset()
-                api_obj.plan_install(fmris)
+                for pd in api_obj.gen_plan_install(fmris):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
         @staticmethod
         def __do_update(api_obj, fmris):
                 api_obj.reset()
-                api_obj.plan_update(fmris)
+                for pd in api_obj.gen_plan_update(fmris):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
         @staticmethod
         def __do_uninstall(api_obj, fmris, recursive_removal=False):
                 api_obj.reset()
-                api_obj.plan_uninstall(fmris, recursive_removal)
+                for pd in api_obj.gen_plan_uninstall(fmris, recursive_removal):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
@@ -366,18 +369,24 @@
 
                 # Attempting to update carriage should result in nothing to do.
                 api_obj.reset()
-                self.assertFalse(api_obj.plan_update(["carriage"]))
+                for pd in api_obj.gen_plan_update(["carriage"]):
+                        continue
+                self.assertTrue(api_obj.planned_nothingtodo())
 
                 # Downgrading to [email protected] would force a downgrade to
                 # [email protected] and so should raise an exception...
                 api_obj.reset()
                 self.assertRaises(api_errors.PlanCreationException,
-                    api_obj.plan_update, ["carriage@1"])
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)),
+                    ["carriage@1"])
 
                 api_obj.reset()
                 self.assertRaises(api_errors.PlanCreationException,
-                    api_obj.plan_update, ["carriage@1", "horse"])
- 
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)),
+                    ["carriage@1", "horse"])
+
                 # ...unless horse is explicitly downgraded as well.
                 api_obj.reset()
                 self.__do_update(api_obj, ["carriage@1", "horse@1"])
@@ -400,14 +409,18 @@
                 # to [email protected] so should be ignored as a possibility by
                 # the solver.
                 api_obj.reset()
-                self.assertFalse(api_obj.plan_update(["carriage"]))
+                for pd in api_obj.gen_plan_update(["carriage"]):
+                        continue
+                self.assertTrue(api_obj.planned_nothingtodo())
 
                 # Upgrading explicitly to [email protected] would force a downgrade
                 # to [email protected] and so should raise an exception...
                 api_obj.reset()
                 self.assertRaises(api_errors.PlanCreationException,
-                    api_obj.plan_update, ["carriage@3", "horse"])
- 
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)),
+                    ["carriage@3", "horse"])
+
                 # ...unless horse is explicitly downgraded as well.
                 api_obj.reset()
                 self.__do_update(api_obj, ["carriage@3", "horse@1"])
@@ -514,7 +527,8 @@
                 self.pkg("list")
                 self.pkg("verify")
                 api_obj.reset()
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
                 self.pkg("verify")
@@ -533,17 +547,14 @@
                     self.corepkgs))
                 self.image_create(self.rurl)
 
-                # We need to pretend that we're running out of the image we just
-                # created, so that the up to date code looks in that image to do
-                # the checking.
-                argv0 = os.path.join(self.get_img_path(), "usr/bin/pkg")
-                api_obj = self.get_img_api_obj(cmd_path=argv0)
+                api_obj = self.get_img_api_obj()
 
                 # Update when it doesn't appear to be an opensolaris image
                 # shouldn't have any issues.
                 self.__do_install(api_obj, ["[email protected]"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # Even though SUNWipkg is on the system, it won't appear as an
                 # opensolaris system.
@@ -552,7 +563,8 @@
                 api_obj.reset()
                 self.__do_install(api_obj, ["[email protected]", "[email protected]"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # Same for package/pkg
                 api_obj.reset()
@@ -560,7 +572,8 @@
                 api_obj.reset()
                 self.__do_install(api_obj, ["[email protected]", "package/[email protected]"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # Same for SUNWcs
                 api_obj.reset()
@@ -568,7 +581,8 @@
                 api_obj.reset()
                 self.__do_install(api_obj, ["[email protected]", "SUNWcs"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # There are still no problems if the packaging system is up to
                 # date.  We can't test with SUNWipkg installed instead, because
@@ -581,7 +595,8 @@
                 api_obj.reset()
                 self.__do_install(api_obj, ["[email protected]", "SUNWcs", "package/[email protected]"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # We should run into a problem if pkg(5) is out of date.
                 api_obj.reset()
@@ -591,7 +606,8 @@
                     ["[email protected]", "SUNWcs", "package/[email protected]"])
                 api_obj.reset()
                 self.assertRaises(api_errors.IpkgOutOfDateException,
-                    api_obj.plan_update_all)
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)))
 
                 # Use the metadata on release/name to determine it's an
                 # opensolaris system.
@@ -602,7 +618,8 @@
                     ["[email protected]", "release/[email protected]", "package/[email protected]"])
                 api_obj.reset()
                 self.assertRaises(api_errors.IpkgOutOfDateException,
-                    api_obj.plan_update_all)
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)))
 
                 # An older release/name which doesn't have the metadata should
                 # cause us to skip the check.
@@ -612,7 +629,8 @@
                 self.__do_install(api_obj,
                     ["[email protected]", "release/[email protected]", "package/[email protected]"])
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
                 # Verify that if the installed version of pkg is from an
                 # unconfigured publisher and is newer than what is available
@@ -642,11 +660,12 @@
                 api_obj.reset()
                 api_obj.remove_publisher(prefix="test")
 
-                # Now verify that plan_update_all succeeds still since the
+                # Now verify that plan_update succeeds still since the
                 # version of pkg installed is newer than the versions that
                 # are offered by the current publishers.
                 api_obj.reset()
-                api_obj.plan_update_all()
+                for pd in api_obj.gen_plan_update():
+                        continue
 
         def test_recursive_uninstall(self):
                 """Install [email protected], dependent on [email protected], uninstall foo
@@ -780,33 +799,54 @@
                         return e.missing_matches
 
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_unfound, api_obj.plan_install, ["foo"])
+                    check_unfound,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_install(*args, **kwargs)),
+                    ["foo"])
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_missing, api_obj.plan_uninstall, ["foo"], False)
+                    check_missing,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["foo"], False)
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_illegal, api_obj.plan_install, ["@/foo"])
+                    check_illegal,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_install(*args, **kwargs)),
+                    ["@/foo"])
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_illegal, api_obj.plan_uninstall, ["_foo"], False)
+                    check_illegal,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["_foo"], False)
 
                 self.pkgsend_bulk(self.rurl, self.foo10)
 
                 api_obj.refresh(False)
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_missing, api_obj.plan_uninstall, ["foo"], False)
+                    check_missing,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["foo"], False)
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_illegal, api_obj.plan_uninstall, ["_foo"], False)
+                    check_illegal,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["_foo"], False)
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_missing, api_obj.plan_update, ["foo"])
+                    check_missing,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_update(*args, **kwargs)),
+                    ["foo"])
 
                 api_obj.reset()
                 api_obj.refresh(True)
@@ -815,13 +855,18 @@
                 # Verify update plan has nothing to do result for installed
                 # package that can't be updated.
                 api_obj.reset()
-                self.assertEqual(api_obj.plan_update(["foo"]), False)
+                for pd in api_obj.gen_plan_update(["foo"]):
+                        continue
+                self.assertTrue(api_obj.planned_nothingtodo())
 
                 self.__do_uninstall(api_obj, ["foo"])
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_missing, api_obj.plan_uninstall, ["foo"], False)
+                    check_missing,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_uninstall(*args, **kwargs)),
+                    ["foo"], False)
 
         def test_bug_4109(self):
 
@@ -832,7 +877,10 @@
 
                 api_obj.reset()
                 pkg5unittest.eval_assert_raises(api_errors.PlanCreationException,
-                    check_illegal, api_obj.plan_install, ["_foo"])
+                    check_illegal,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_install(*args, **kwargs)),
+                    ["_foo"])
 
         def test_catalog_v0(self):
                 """Test install from a publisher's repository that only supports
@@ -1006,7 +1054,8 @@
         def __do_install(api_obj, fmris):
                 fmris = [str(f) for f in fmris]
                 api_obj.reset()
-                api_obj.plan_install(fmris)
+                for pd in api_obj.gen_plan_install(fmris):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
@@ -1024,7 +1073,8 @@
         def __do_uninstall(api_obj, fmris, recursive_removal=False):
                 fmris = [str(f) for f in fmris]
                 api_obj.reset()
-                api_obj.plan_uninstall(fmris, recursive_removal)
+                for pd in api_obj.gen_plan_uninstall(fmris, recursive_removal):
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
--- a/src/tests/cli/t_change_variant.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_change_variant.py	Sat May 07 00:25:10 2011 -0700
@@ -20,7 +20,7 @@
 # CDDL HEADER END
 #
 
-# Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 
 import testutils
 if __name__ == "__main__":
@@ -32,6 +32,7 @@
 import re
 import unittest
 
+from pkg.client.pkgdefs import *
 
 class TestPkgChangeVariant(pkg5unittest.SingleDepotTestCase):
         # Only start/stop the depot once (instead of for every test)
@@ -58,6 +59,7 @@
         pkg_shared = """
         open [email protected],5.11-0
         add set name=variant.arch value=sparc value=i386 value=zos
+        add set name=variant.opensolaris.zone value=global value=nonglobal
         add dir mode=0755 owner=root group=bin path=/shared
         add dir mode=0755 owner=root group=bin path=/unique
         add file tmp/pkg_shared/shared/common mode=0555 owner=root group=bin path=shared/common
@@ -244,7 +246,9 @@
                         self.assert_(False,
                             "unable to determine image arch variant")
                 if ic.variants["variant.arch"] != v_arch:
-                        self.assert_(False, "unexpected arch variant")
+                        self.assert_(False,
+                            "unexpected arch variant: %s != %s" % \
+                            (ic.variants["variant.arch"], v_arch))
 
                 if "variant.opensolaris.zone" not in ic.variants:
                         self.assert_(False,
@@ -283,7 +287,8 @@
                         for p in pl:
                                 self.pkg("search -l %s" % p )
 
-        def cv_test(self, v_arch, v_zone, pl, v_arch2, v_zone2, pl2):
+        def cv_test(self, v_arch, v_zone, pl, v_arch2, v_zone2, pl2,
+            rv=EXIT_OK):
                 """ test if change-variant works """
 
                 assert v_arch == 'i386' or v_arch == 'sparc' or v_arch == 'zos'
@@ -315,7 +320,8 @@
                 cv_args += " -v"
                 cv_args += " variant.arch=%s" % v_arch2
                 cv_args += " variant.opensolaris.zone=%s" % v_zone2
-                self.pkg("change-variant -v" + cv_args)
+
+                self.pkg("change-variant" + cv_args, exit=rv)
                 # verify the updated image
                 self.i_verify(v_arch2, v_zone2, pl2)
 
@@ -325,20 +331,20 @@
                 self.image_destroy()
 
         def test_cv_01_none_1(self):
-                self.cv_test("i386", "global", ["pkg_cluster",],
-                    "i386", "global", ["pkg_cluster"])
+                self.cv_test("i386", "global", ["pkg_cluster"],
+                    "i386", "global", ["pkg_cluster"], rv=EXIT_NOP)
 
         def test_cv_01_none_2(self):
-                self.cv_test("i386", "nonglobal", ["pkg_cluster",],
-                    "i386", "nonglobal", ["pkg_cluster"])
+                self.cv_test("i386", "nonglobal", ["pkg_cluster"],
+                    "i386", "nonglobal", ["pkg_cluster"], rv=EXIT_NOP)
 
         def test_cv_01_none_3(self):
-                self.cv_test("sparc", "global", ["pkg_cluster",],
-                    "sparc", "global", ["pkg_cluster"])
+                self.cv_test("sparc", "global", ["pkg_cluster"],
+                    "sparc", "global", ["pkg_cluster"], rv=EXIT_NOP)
 
         def test_cv_01_none_4(self):
-                self.cv_test("sparc", "nonglobal", ["pkg_cluster",],
-                    "sparc", "nonglobal", ["pkg_cluster"])
+                self.cv_test("sparc", "nonglobal", ["pkg_cluster"],
+                    "sparc", "nonglobal", ["pkg_cluster"], rv=EXIT_NOP)
 
         def test_cv_02_arch_1(self):
                 self.cv_test("i386", "global", ["pkg_shared"],
@@ -349,7 +355,7 @@
                     "i386", "global", ["pkg_shared"])
 
         def test_cv_03_arch_1(self):
-                self.cv_test("i386", "global", ["pkg_inc",],
+                self.cv_test("i386", "global", ["pkg_inc"],
                     "sparc", "global", ["pkg_inc"])
 
         def test_cv_03_arch_2(self):
@@ -375,7 +381,7 @@
                     "i386", "global", ["pkg_shared", "pkg_inc"])
 
         def test_cv_06_arch_1(self):
-                self.cv_test("i386", "global", ["pkg_cluster",],
+                self.cv_test("i386", "global", ["pkg_cluster"],
                     "sparc", "global", ["pkg_cluster"])
 
         def test_cv_06_arch_2(self):
@@ -391,7 +397,7 @@
                     "i386", "global", ["pkg_cluster", "pkg_inc"])
 
         def test_cv_08_zone_1(self):
-                self.cv_test("i386", "global", ["pkg_cluster",],
+                self.cv_test("i386", "global", ["pkg_cluster"],
                     "i386", "nonglobal", ["pkg_cluster"])
 
         def test_cv_08_zone_2(self):
@@ -399,7 +405,7 @@
                     "i386", "global", ["pkg_cluster"])
 
         def test_cv_09_zone_1(self):
-                self.cv_test("sparc", "global", ["pkg_cluster",],
+                self.cv_test("sparc", "global", ["pkg_cluster"],
                     "sparc", "nonglobal", ["pkg_cluster"])
 
         def test_cv_09_zone_2(self):
@@ -407,7 +413,7 @@
                     "sparc", "global", ["pkg_cluster"])
 
         def test_cv_10_arch_and_zone_1(self):
-                self.cv_test("i386", "global", ["pkg_cluster",],
+                self.cv_test("i386", "global", ["pkg_cluster"],
                     "sparc", "nonglobal", ["pkg_cluster"])
 
         def test_cv_10_arch_and_zone_2(self):
@@ -415,7 +421,7 @@
                     "i386", "global", ["pkg_cluster"])
 
         def test_cv_11_arch_and_zone_1(self):
-                self.cv_test("i386", "nonglobal", ["pkg_cluster",],
+                self.cv_test("i386", "nonglobal", ["pkg_cluster"],
                     "sparc", "global", ["pkg_cluster"])
 
         def test_cv_11_arch_and_zone_2(self):
--- a/src/tests/cli/t_https.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_https.py	Sat May 07 00:25:10 2011 -0700
@@ -50,13 +50,13 @@
         def pkg_image_create(self, *args, **kwargs):
                 pkg5unittest.SingleDepotTestCase.pkg_image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
 
         def image_create(self, *args, **kwargs):
                 pkg5unittest.SingleDepotTestCase.image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
 
         def pkg(self, command, *args, **kwargs):
--- a/src/tests/cli/t_lock.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_lock.py	Sat May 07 00:25:10 2011 -0700
@@ -77,7 +77,9 @@
                 self.assertTrue(os.path.exists(lfpath))
 
                 # Verify that an API function will fail the same way.
-                self.assertRaises(nrlock.NRLockException, api_obj.plan_install,
+                self.assertRaises(nrlock.NRLockException,
+                    lambda *args, **kwargs: list(
+                        api_obj.gen_plan_install(*args, **kwargs)),
                     ["foo"])
                 api_obj.reset()
 
@@ -89,7 +91,8 @@
                 self.assertTrue(os.path.exists(lfpath))
                 self.assertEqual(os.stat(lfpath).st_size, 0)
 
-                api_obj.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
                 api_obj.reset()
 
                 # Verify that if a state change occurs at any point after
@@ -98,8 +101,10 @@
                 api_obj2 = self.get_img_api_obj()
 
                 # Both of these should succeed since no state change exists yet.
-                api_obj.plan_install(["foo"])
-                api_obj2.plan_install(["foo"])
+                for pd in api_obj.gen_plan_install(["foo"]):
+                        continue
+                for pd in api_obj2.gen_plan_install(["foo"]):
+                        continue
 
                 # Execute the first plan.
                 api_obj.prepare()
@@ -112,8 +117,10 @@
 
                 # Restart plan process.
                 api_obj2.reset()
-                api_obj2.plan_uninstall(["foo"], False)
-                api_obj.plan_uninstall(["foo"], False)
+                for pd in api_obj2.gen_plan_uninstall(["foo"], False):
+                        continue
+                for pd in api_obj.gen_plan_uninstall(["foo"], False):
+                        continue
 
                 # Prepare second and first plan.
                 api_obj2.prepare()
@@ -149,7 +156,8 @@
 
                 # Now plan an uninstall using the API object.
                 api_obj.reset()
-                api_obj.plan_uninstall(["foo"], False)
+                for pd in api_obj.gen_plan_uninstall(["foo"], False):
+                        continue
                 api_obj.prepare()
 
                 # Execute the client to actually uninstall the package, and then
--- a/src/tests/cli/t_pkg_R_option.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_R_option.py	Sat May 07 00:25:10 2011 -0700
@@ -20,7 +20,7 @@
 # CDDL HEADER END
 #
 
-# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 
 import testutils
 if __name__ == "__main__":
@@ -57,7 +57,7 @@
                 """Ensure that pkg explicit image specification works as
                 expected."""
 
-                imgpath = self.img_path
+                imgpath = self.img_path()
                 badpath = self.test_root
 
                 # Verify that bad paths cause exit and good paths succeed.
@@ -87,14 +87,14 @@
                 """Ensure that pkg implicit image finding works as expected."""
 
                 # Should fail because $PKG_IMAGE is set to test root by default,
-                # and default test behaviour to use -R self.img_path was
+                # and default test behaviour to use -R self.img_path() was
                 # disabled.
                 self.pkg("install foo", exit=1, use_img_root=False)
 
                 # Unset unit testing default bogus image dir.
                 del os.environ["PKG_IMAGE"]
-                os.chdir(self.img_path)
-                self.assertEqual(os.getcwd(), self.img_path)
+                os.chdir(self.img_path())
+                self.assertEqual(os.getcwd(), self.img_path())
 
                 if portable.osname != "sunos":
                         # For other platforms, first install a package uses an
@@ -111,7 +111,7 @@
 
                 # Should succeed because image is found at simulated live root.
                 self.pkg("-D simulate_live_root=%s install foo" %
-                    self.img_path, use_img_root=False)
+                    self.img_path(), use_img_root=False)
 
 
 if __name__ == "__main__":
--- a/src/tests/cli/t_pkg_image_create.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_image_create.py	Sat May 07 00:25:10 2011 -0700
@@ -172,8 +172,12 @@
         def __verify_pub_cfg(self, img_path, prefix, pub_cfg):
                 """Private helper method to verify publisher configuration."""
 
+                # Pretend that the Image object is being allocated by
+                # a pkg command run from within the target image.
+                cmdpath = os.path.join(self.get_img_path(), "pkg")
+
                 img = image.Image(img_path, should_exist=True,
-                    user_provided_dir=True)
+                    user_provided_dir=True, cmdpath=cmdpath)
                 pub = img.get_publisher(prefix=prefix)
                 for section in pub_cfg:
                         for prop, val in pub_cfg[section].iteritems():
--- a/src/tests/cli/t_pkg_install.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_install.py	Sat May 07 00:25:10 2011 -0700
@@ -31,9 +31,6 @@
 
 import errno
 import os
-import pkg.fmri as fmri
-import pkg.manifest as manifest
-import pkg.portable as portable
 import platform
 import re
 import shutil
@@ -41,6 +38,12 @@
 import time
 import unittest
 
+import pkg.actions
+import pkg.fmri as fmri
+import pkg.manifest as manifest
+import pkg.portable as portable
+
+from pkg.client.pkgdefs import EXIT_OOPS
 
 class TestPkgInstallBasics(pkg5unittest.SingleDepotTestCase):
         # Only start/stop the depot once (instead of for every test)
@@ -1688,10 +1691,8 @@
                 self.pkg("verify -v")
 
                 # Make sure all directories are gone save /var in test image.
-                # 'pkg' will also be present as a file because image create
-                # places it there.
                 self.assertEqual(set(os.listdir(self.get_img_path())),
-                    set(["pkg", ".SELF-ASSEMBLY-REQUIRED", "var"]))
+                    set([".SELF-ASSEMBLY-REQUIRED", "var"]))
 
         def test_upgrade2(self):
                 """ test incorporations:
@@ -3374,6 +3375,46 @@
             close
         """
 
+        pkg121 = """
+            open [email protected],5.11-0
+        """
+        pkg121 += "add depend type=parent fmri=%s" % \
+            pkg.actions.depend.DEPEND_SELF
+        pkg121 += """
+            close
+        """
+
+        pkg122 = """
+            open [email protected],5.11-0
+        """
+        pkg122 += "add depend type=parent fmri=%s" % \
+            pkg.actions.depend.DEPEND_SELF
+        pkg122 += """
+            close
+        """
+
+        pkg123 = """
+            open [email protected],5.11-0
+        """
+        pkg123 += "add depend type=parent fmri=%s" % \
+            pkg.actions.depend.DEPEND_SELF
+        pkg123 += """
+            close
+        """
+
+        pkg132 = """
+            open [email protected],5.11-0
+            add depend type=parent [email protected],5.11-0
+            close
+        """
+
+        pkg142 = """
+            open [email protected],5.11-0
+            add depend type=parent [email protected],5.11-0
+            add depend type=parent [email protected],5.11-0
+            close
+        """
+
         pkg_renames = """
             open [email protected],5.11-0
             add depend type=require fmri=pkg_rename
@@ -3392,6 +3433,11 @@
             close
         """
 
+        pkgSUNWcs075 = """
+            open [email protected]
+            close
+        """
+
         leaf_template = """
             open pkg%s%s@%s,5.11-0
             add depend type=require fmri=pkg:/%s_incorp%s
@@ -3558,14 +3604,16 @@
         """
 
         def setUp(self):
-                pkg5unittest.SingleDepotTestCase.setUp(self)
+                pkg5unittest.SingleDepotTestCase.setUp(self, image_count=2)
                 self.pkgsend_bulk(self.rurl, (self.pkg10, self.pkg20,
                     self.pkg11, self.pkg21, self.pkg30, self.pkg40, self.pkg50,
                     self.pkg505, self.pkg51, self.pkg60, self.pkg61,
                     self.pkg70, self.pkg80, self.pkg81, self.pkg90,
                     self.pkg91, self.bug_7394_incorp,
                     self.pkg100, self.pkg101, self.pkg102,
-                    self.pkg110, self.pkg111, self.pkg_renames))
+                    self.pkg110, self.pkg111,
+                    self.pkg121, self.pkg122, self.pkg123, self.pkg132,
+                    self.pkg142, self.pkg_renames, self.pkgSUNWcs075))
 
                 for t in self.leaf_expansion:
                         self.pkgsend_bulk(self.rurl, self.leaf_template % t)
@@ -3774,7 +3822,11 @@
 
         def test_origin_dependencies(self):
                 """Get origin dependencies working"""
+                self.set_image(0)
                 self.image_create(self.rurl)
+                self.set_image(1)
+                self.image_create(self.rurl)
+                self.set_image(0)
                 # check install behavior
                 self.pkg("install [email protected]")
                 self.pkg("install pkg10")
@@ -3800,11 +3852,77 @@
                 self.pkg("install [email protected]")
                 self.pkg("uninstall '*'")
                 # check origin root-image=true dependencies
-                # relies on SUNWcs in root image; may need to change
-                self.pkg("install [email protected]")
-                self.pkg("install [email protected]", exit=1)
+                # relies on SUNWcs in root image; make image 1 the root image
+                self.set_image(1)
+                self.pkg("install [email protected]")
+                self.set_image(0)
+                live_root = self.img_path(1)
+                self.pkg("-D simulate_live_root=%s install [email protected]" % \
+                    live_root)
+                self.pkg("-D simulate_live_root=%s install [email protected]" %
+                    live_root, exit=1)
                 self.pkg("uninstall '*'")
 
+        def test_parent_dependencies(self):
+                self.set_image(0)
+                self.image_create(self.rurl)
+                self.set_image(1)
+                self.image_create(self.rurl)
+
+                # attach c2p 1 -> 0
+                self.pkg("attach-linked -p system:img1 %s" % self.img_path(0))
+
+                # try to install packages that have unmet parent dependencies
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+
+                # install packages in parent
+                self.set_image(0)
+                self.pkg("install [email protected]")
+                self.set_image(1)
+
+                # try to install packages that have unmet parent dependencies
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+
+                # install packages in parent
+                self.set_image(0)
+                self.pkg("install [email protected]")
+                self.set_image(1)
+
+                # try to install packages that have unmet parent dependencies
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+
+                # install packages in parent
+                self.set_image(0)
+                self.pkg("update [email protected]")
+                self.set_image(1)
+
+                # try to install packages that have unmet parent dependencies
+                self.pkg("install [email protected]", exit=EXIT_OOPS)
+
+                # try to install packages that have satisfied parent deps
+                self.pkg("install [email protected]")
+                self.pkg("verify")
+                self.pkg("uninstall [email protected]")
+                self.pkg("install [email protected]")
+                self.pkg("verify")
+                self.pkg("uninstall [email protected]")
+
+                # install packages in parent
+                self.set_image(0)
+                self.pkg("install [email protected]")
+                self.set_image(1)
+
+                # try to install packages that have satisfied parent deps
+                self.pkg("install [email protected]")
+                self.pkg("verify")
+                self.pkg("uninstall [email protected]")
+
 
 class TestMultipleDepots(pkg5unittest.ManyDepotTestCase):
         # Only start/stop the depot once (instead of for every test)
@@ -4422,7 +4540,7 @@
                     set(["cfg_cache_absent"]), ["var/pkg"])
 
                 self.pkg("-D simulate_live_root=%s install [email protected]" %
-                    self.backup_img_path, use_img_root=False)
+                    self.backup_img_path(), use_img_root=False)
 
         def test_var_pkg_missing_index(self):
                 """ Creates bad_dir with only the index dir missing. """
@@ -4524,7 +4642,7 @@
                     set(["cfg_cache_absent"]), [".org.opensolaris,pkg"])
 
                 self.pkg("-D simulate_live_root=%s install [email protected]" %
-                    self.backup_img_path, use_img_root=False)
+                    self.backup_img_path(), use_img_root=False)
 
         def test_ospkg_missing_index(self):
                 """ Creates a corrupted image at bad_dir by creating
@@ -4602,7 +4720,7 @@
                     set(["cfg_cache_absent"]), ["var/pkg"], destroy=False)
 
                 self.pkg("-D simulate_live_root=%s install [email protected]" %
-                    self.backup_img_path, use_img_root=False)
+                    self.backup_img_path(), use_img_root=False)
 
         def test_var_pkg_ospkg_missing_cfg_cache_alongside(self):
                 """ Complete Full image besides a User image missing cfg_cache
@@ -4620,7 +4738,7 @@
 
                 # Found full image before we reached root image.
                 self.pkg("-D simulate_live_root=%s install [email protected]" %
-                    self.backup_img_path, use_img_root=False, exit=1)
+                    self.backup_img_path(), use_img_root=False, exit=1)
 
                 # Only possible if user specifies full image's root since
                 # user image is at the top level.
@@ -4640,7 +4758,7 @@
 
                 # Found user image before we reached root image.
                 self.pkg("-D simulate_live_root=%s install [email protected]" %
-                    self.backup_img_path, use_img_root=False, exit=1)
+                    self.backup_img_path(), use_img_root=False, exit=1)
 
                 # Should succeed and install package in user image since
                 # test suite will add -R self.get_img_path().
@@ -5846,6 +5964,7 @@
 
         pkg_dupfilesv1 = """
             open dupfilesv1@0,5.11-0
+            add set name=variant.arch value=sparc value=i386
             add dir path=dir/pathname mode=0755 owner=root group=bin variant.arch=i386
             close
         """
@@ -5858,12 +5977,14 @@
 
         pkg_dupfilesv3 = """
             open dupfilesv3@0,5.11-0
+            add set name=variant.arch value=sparc value=i386
             add dir path=dir/pathname mode=0777 owner=root group=bin variant.arch=sparc
             close
         """
 
         pkg_dupfilesv4 = """
             open dupfilesv4@0,5.11-0
+            add set name=variant.arch value=sparc value=i386
             add file tmp/file1 path=dir/pathname mode=0777 owner=root group=bin variant.arch=sparc
             add file tmp/file2 path=dir/pathname mode=0777 owner=root group=bin variant.arch=sparc
             add file tmp/file3 path=dir/pathname mode=0777 owner=root group=bin variant.arch=i386
@@ -5872,6 +5993,7 @@
 
         pkg_dupfilesv5 = """
             open dupfilesv5@0,5.11-0
+            add set name=variant.opensolaris.zone value=global value=nonglobal
             add file tmp/file1 path=dir/pathname mode=0777 owner=root group=bin variant.opensolaris.zone=nonglobal
             close
         """
@@ -6511,13 +6633,13 @@
                 self.pkg("uninstall implicitdirs7")
                 # XXX We don't currently fix up anything beneath a directory
                 # that was restored, so we have to do it by hand.
-                os.mkdir("%s/usr/bin" % self.img_path)
+                os.mkdir("%s/usr/bin" % self.img_path())
                 shutil.copy("%s/tmp/file1" % self.test_root,
-                    "%s/usr/bin/something" % self.img_path)
-                owner = portable.get_user_by_name("root", self.img_path, True)
-                group = portable.get_group_by_name("bin", self.img_path, True)
-                os.chown("%s/usr/bin/something" % self.img_path, owner, group)
-                os.chmod("%s/usr/bin/something" % self.img_path, 0755)
+                    "%s/usr/bin/something" % self.img_path())
+                owner = portable.get_user_by_name("root", self.img_path(), True)
+                group = portable.get_group_by_name("bin", self.img_path(), True)
+                os.chown("%s/usr/bin/something" % self.img_path(), owner, group)
+                os.chmod("%s/usr/bin/something" % self.img_path(), 0755)
                 self.pkg("verify")
 
                 # Removing one of more than two offending actions can't do much
@@ -6534,7 +6656,7 @@
                     stat.S_IFLNK: "symbolic link",
                     stat.S_IFSOCK: "socket",
                 }
-                thepath = "%s/dir/pathname" % self.img_path
+                thepath = "%s/dir/pathname" % self.img_path()
                 fmt = stat.S_IFMT(os.lstat(thepath).st_mode)
                 # XXX The checks here rely on verify failing due to action types
                 # not matching what's on the system; they should probably report
@@ -6736,7 +6858,7 @@
                 self.pkg("uninstall '*'")
                 self.pkg("-D broken-conflicting-action-handling=1 install "
                     "duppath-nonidenticallinksp1 duppath-nonidenticallinksp2@0")
-                link = os.readlink("%s/dir/pathname" % self.img_path)
+                link = os.readlink("%s/dir/pathname" % self.img_path())
                 if link == "dir/something":
                         self.pkg("uninstall duppath-nonidenticallinksp2")
                 else:
@@ -6748,7 +6870,7 @@
                 self.pkg("uninstall '*'")
                 self.pkg("-D broken-conflicting-action-handling=1 install "
                     "duppath-nonidenticallinksp1 duppath-nonidenticallinksp2@0")
-                link = os.readlink("%s/dir/pathname" % self.img_path)
+                link = os.readlink("%s/dir/pathname" % self.img_path())
                 if link == "dir/something":
                         self.pkg("uninstall duppath-nonidenticallinksp1")
                 else:
--- a/src/tests/cli/t_pkg_intent.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_intent.py	Sat May 07 00:25:10 2011 -0700
@@ -21,7 +21,7 @@
 #
 
 #
-# Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
 #
 
 import testutils
@@ -131,7 +131,8 @@
         @staticmethod
         def __do_install(api_obj, fmris, noexecute=False):
                 api_obj.reset()
-                api_obj.plan_install(fmris, noexecute=noexecute)
+                for pd in api_obj.gen_plan_install(fmris, noexecute=noexecute):
+                        continue
                 if not noexecute:
                         api_obj.prepare()
                         api_obj.execute_plan()
@@ -140,8 +141,9 @@
         def __do_uninstall(api_obj, fmris, recursive_removal=False,
             noexecute=False):
                 api_obj.reset()
-                api_obj.plan_uninstall(fmris, recursive_removal,
-                    noexecute=noexecute)
+                for pd in api_obj.gen_plan_uninstall(fmris, recursive_removal,
+                    noexecute=noexecute):
+                        continue
                 if not noexecute:
                         api_obj.prepare()
                         api_obj.execute_plan()
@@ -294,7 +296,8 @@
                 api_obj.refresh(immediate=True)
 
                 api_obj.reset()
-                api_obj.plan_update_all(sys.argv[0])
+                for pd in api_obj.gen_plan_update():
+                        continue
                 api_obj.prepare()
                 api_obj.execute_plan()
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/tests/cli/t_pkg_linked.py	Sat May 07 00:25:10 2011 -0700
@@ -0,0 +1,1543 @@
+#!/usr/bin/python
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+
+import testutils
+if __name__ == "__main__":
+	testutils.setup_environment("../../../proto")
+import pkg5unittest
+
+import difflib
+import os
+import re
+import shutil
+import tempfile
+import unittest
+import sys
+
+import pkg.actions
+import pkg.client.image as image
+
+from pkg.client.pkgdefs import *
+
+
+class TestPkgLinked(pkg5unittest.ManyDepotTestCase):
+        # Only start/stop the depot once (instead of for every test)
+        persistent_setup = True
+
+        p_all = []
+        p_vers = [
+            "@1.2,5.11-145:19700101T000001Z",
+            "@1.2,5.11-145:19700101T000000Z", # old time
+            "@1.1,5.11-145:19700101T000000Z", # old ver
+            "@1.1,5.11-144:19700101T000000Z", # old build
+            "@1.0,5.11-144:19700101T000000Z", # oldest
+        ]
+        p_files = [
+            "tmp/bar",
+            "tmp/baz",
+        ]
+
+        # generate packages that don't need to be synced
+        p_foo1_name_gen = "foo1"
+        pkgs = [p_foo1_name_gen + ver for ver in p_vers]
+        p_foo1_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_foo1_name:
+                p_data = "open %s\n" % p_foo1_name[i]
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=foo_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=foo_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        p_foo2_name_gen = "foo2"
+        pkgs = [p_foo2_name_gen + ver for ver in p_vers]
+        p_foo2_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_foo2_name:
+                p_data = "open %s\n" % p_foo2_name[i]
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=foo_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=foo_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        # generate packages that do need to be synced
+        p_sync1_name_gen = "sync1"
+        pkgs = [p_sync1_name_gen + ver for ver in p_vers]
+        p_sync1_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_sync1_name:
+                p_data = "open %s\n" % p_sync1_name[i]
+                p_data += "add depend type=parent fmri=%s" % \
+                    pkg.actions.depend.DEPEND_SELF
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=sync1_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=sync1_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        # generate packages that do need to be synced
+        p_sync2_name_gen = "sync2"
+        pkgs = [p_sync2_name_gen + ver for ver in p_vers]
+        p_sync2_name = dict(zip(range(len(pkgs)), pkgs))
+        for i in p_sync2_name:
+                p_data = "open %s\n" % p_sync2_name[i]
+                p_data += "add depend type=parent fmri=%s" % \
+                    pkg.actions.depend.DEPEND_SELF
+                p_data += """
+                    add set name=variant.foo value=bar value=baz
+                    add file tmp/bar mode=0555 owner=root group=bin path=sync2_bar variant.foo=bar
+                    add file tmp/baz mode=0555 owner=root group=bin path=sync2_baz variant.foo=baz
+                    close\n"""
+                p_all.append(p_data)
+
+        # these packages will be synced indirectly by virtue of being
+        # incorporated by an osnet incorporation
+        p_sync3_name = dict()
+        p_sync3_name[0] = "[email protected],5.11-144:19700101T000001Z"
+        p_sync3_name[1] = "[email protected],5.11-144:19700101T000000Z" # old time
+        for i in p_sync3_name:
+                p_data = "open %s\n" % p_sync3_name[i]
+                p_data += """
+                    close\n"""
+                p_all.append(p_data)
+
+        # create an osnet incorporation
+        p_osnet_name = dict()
+        p_osnet_name[0] = "consolidation/osnet/[email protected],5.11-143:19700101T000001Z"
+        p_osnet_name[1] = "consolidation/osnet/[email protected],5.11-143:19700101T000000Z" # old time
+        p_osnet_dep = dict()
+        p_osnet_dep[0] = p_sync3_name[0]
+        p_osnet_dep[1] = p_sync3_name[1]
+        for i in p_osnet_name:
+                p_data = "open %s\n" % p_osnet_name[i]
+                p_data += "add depend fmri=%s type=incorporate\n" % p_osnet_dep[i]
+                p_data += """
+                    close\n"""
+                p_all.append(p_data)
+
+        def setUp(self):
+                self.i_count = 5
+                pkg5unittest.ManyDepotTestCase.setUp(self, ["test"],
+                    image_count=self.i_count)
+
+                # create files that go in packages
+                self.make_misc_files(self.p_files)
+
+                # get repo url
+                self.rurl1 = self.dcs[1].get_repo_url()
+
+                # populate repository
+                self.pkgsend_bulk(self.rurl1, self.p_all)
+
+                # setup image names and paths
+                self.i_name = []
+                self.i_path = []
+                self.i_api = []
+                self.i_api_reset = []
+                for i in range(self.i_count):
+                        name = "system:img%d" % i
+                        self.i_name.insert(i, name)
+                        self.i_path.insert(i, self.img_path(i))
+
+        def __img_api_reset(self, i):
+                """__img_api_reset() - reset the api object associated with an
+                image if that object has been updated via a pkg(1) cli
+                invocation."""
+
+                if self.i_api_reset[i]:
+                        self.i_api[i].reset()
+                        self.i_api_reset[i] = False
+
+        def __img_children_names(self, i):
+                """__img_children_names() - find the children of an image and
+                return their names"""
+
+                self.__img_api_reset(i)
+                return set([
+                        str(name)
+                        for name, rel, path in self.i_api[i].list_linked()
+                        if rel == "child"
+                ])
+
+        def __img_has_parent(self, i):
+                """__img_has_parent() - check if an image has a parent"""
+
+                self.__img_api_reset(i)
+                return self.i_api[i].ischild()
+
+        # public verification functions for use by test cases.
+        def _v_has_children(self, i, cl):
+                assert i not in cl
+
+                cl_found = self.__img_children_names(i)
+                cl_expected = set([self.i_name[j] for j in cl])
+                self.assertEqual(cl_found, cl_expected,
+                    "error: image has unexpected children\n"
+                    "image: %d, %s, %s\n"
+                    "expected children: %s\n"
+                    "found children: %s\n" %
+                    (i, self.i_name[i], self.i_path[i],
+                    str(cl_expected),
+                    str(cl_found)))
+
+        def _v_no_children(self, il):
+                for i in il:
+                        # make sure the we don't have any children
+                        cl_found = self.__img_children_names(i)
+                        self.assertEqual(set(), cl_found,
+                           "error: image has children\n"
+                           "image: %d, %s, %s\n"
+                           "found children: %s\n" %
+                           (i, self.i_name[i], self.i_path[i],
+                           str(cl_found)))
+
+        def _v_has_parent(self, il):
+                # make sure a child has a parent
+                for i in il:
+                        self.assertEqual(True, self.__img_has_parent(i),
+                           "error: image has no parent\n"
+                           "image: %d, %s, %s\n" %
+                           (i, self.i_name[i], self.i_path[i]))
+
+        def _v_no_parent(self, il):
+                for i in il:
+                        self.assertEqual(False, self.__img_has_parent(i),
+                           "error: image has a parent\n"
+                           "image: %d, %s, %s\n" %
+                           (i, self.i_name[i], self.i_path[i]))
+
+        def _v_not_linked(self, il):
+                self._v_no_parent(il)
+                self._v_no_children(il)
+
+        # utility functions for use by test cases
+        def _imgs_create(self, limit):
+                variants = {
+                    "variant.foo": "bar",
+                    "variant.opensolaris.zone": "nonglobal",
+                }
+
+                for i in range(0, limit):
+                        self.set_image(i)
+                        self.i_api.insert(i, self.image_create(self.rurl1,
+                            variants=variants, destroy=True))
+                        self.i_api_reset.insert(i, False)
+
+                del self.i_api[limit:]
+                del self.i_api_reset[limit:]
+                for i in range(limit, self.i_count):
+                        self.set_image(i)
+                        self.image_destroy()
+
+                self.set_image(0)
+
+        def _cmd(self, args, rv=0):
+                assert type(args) == str
+
+                self.cmdline_run("%s" % args, exit=rv)
+
+        def _pkg(self, il, cmd, args=None, rv=None, rvdict=None):
+                assert type(il) == list
+                assert type(cmd) == str
+                assert args == None or type(args) == str
+                assert rv == None or type(rv) == int
+                assert rvdict == None or type(rvdict) == dict
+                assert rv == None or rvdict == None
+
+                if rv == None:
+                        rv = EXIT_OK
+                if rvdict == None:
+                        rvdict = {}
+                        for i in il:
+                                rvdict[i] = rv
+                assert (set(rvdict) | set(il)) == set(il)
+
+                if args == None:
+                        args = ""
+
+                # we're updating one or more images, so make sure to reset all
+                # our api instances before using them.
+                self.i_api_reset[:] = [True] * len(self.i_api_reset)
+
+                for i in il:
+                        rv = rvdict.get(i, EXIT_OK)
+                        self.pkg("-R %s %s %s" % (self.i_path[i], cmd, args),
+                            exit=rv)
+
+        def _pkg_child(self, i, cl, cmd, args=None, rv=None, rvdict=None):
+                assert type(i) == int
+                assert type(cl) == list
+                assert i not in cl
+                assert type(cmd) == str
+                assert args == None or type(args) == str
+                assert rv == None or type(rv) == int
+                assert rvdict == None or type(rvdict) == dict
+                assert rv == None or rvdict == None
+
+                if rv == None:
+                        rv = EXIT_OK
+                if rvdict == None:
+                        rvdict = {}
+                        for c in cl:
+                                rvdict[c] = rv
+                assert (set(rvdict) | set(cl)) == set(cl)
+
+                if args == None:
+                        args = ""
+
+                # sync each child from parent
+                for c in cl:
+                        rv = rvdict.get(c, EXIT_OK)
+                        self._pkg([i], "%s -l %s" % (cmd, self.i_name[c]),
+                            args=args, rv=rv)
+
+        def _pkg_child_all(self, i, cmd, args=None, rv=EXIT_OK):
+                assert type(i) == int
+                assert type(cmd) == str
+                assert args == None or type(args) == str
+                assert type(rv) == int
+
+                if args == None:
+                        args = ""
+                self._pkg([i], "%s -a %s" % (cmd, args), rv=rv)
+
+        def _attach_parent(self, il, p, args=None, rv=EXIT_OK):
+                assert type(il) == list
+                assert type(p) == int
+                assert p not in il
+                assert args == None or type(args) == str
+                assert type(rv) == int
+
+                if args == None:
+                        args = ""
+
+                for i in il:
+                        self._pkg([i], "attach-linked -p %s %s %s" %
+                            (args, self.i_name[i], self.i_path[p]), rv=rv)
+
+        def _attach_child(self, i, cl, args=None, rv=None, rvdict=None):
+                assert type(i) == int
+                assert type(cl) == list
+                assert i not in cl
+                assert args == None or type(args) == str
+                assert rvdict == None or type(rvdict) == dict
+                assert rv == None or rvdict == None
+
+                if rv == None:
+                        rv = EXIT_OK
+                if rvdict == None:
+                        rvdict = {}
+                        for c in cl:
+                                rvdict[c] = rv
+                assert (set(rvdict) | set(cl)) == set(cl)
+
+                if args == None:
+                        args = ""
+
+                # attach each child to parent
+                for c in cl:
+                        rv = rvdict.get(c, EXIT_OK)
+                        self._pkg([i], "attach-linked -c %s %s %s" %
+                            (args, self.i_name[c], self.i_path[c]),
+                            rv=rv)
+
+        def test_not_linked(self):
+                self._imgs_create(1)
+
+                self._pkg([0], "list-linked")
+
+                # operations that require a parent
+                rv = EXIT_NOPARENT
+                self._pkg([0], "detach-linked", rv=rv)
+                self._pkg([0], "sync-linked", rv=rv)
+                self._pkg([0], "audit-linked", rv=rv)
+
+        def test_opts_1_invalid(self):
+                self._imgs_create(3)
+
+                # parent has one child
+                self._attach_child(0, [1])
+                self._attach_parent([2], 0)
+
+                # invalid options
+                rv = EXIT_BADOPT
+
+                args = "--foobar"
+                self._pkg([0], "attach-linked", args=args, rv=rv)
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+                self._pkg([0], "list-linked", args=args, rv=rv)
+                self._pkg([0], "property-linked", args=args, rv=rv)
+                self._pkg([0], "set-property-linked", args=args, rv=rv)
+
+                # can't combine -a and -l
+                args = "-a -l %s" % self.i_name[1]
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+
+                # can't combine -I and -i
+                args = "-I -i %s" % self.i_name[1]
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+                self._pkg([0], "list-linked", args=args, rv=rv)
+
+                # can't combine -i and -a
+                args = "-a -i %s" % self.i_name[1]
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+
+                # can't combine -I and -a
+                args = "-I -a"
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+
+                # can't combine -I and -l
+                args = "-I -l %s" % self.i_name[1]
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+
+                # can't combine -i and -l with same target
+                args = "-i %s -l %s" % (self.i_name[1], self.i_name[1])
+                self._pkg([0], "detach-linked", args=args, rv=rv)
+                self._pkg([0], "sync-linked", args=args, rv=rv)
+                self._pkg([0], "audit-linked", args=args, rv=rv)
+
+                # doesn't accept -a
+                args = "-a"
+                self._pkg([0], "attach-linked", args=args, rv=rv)
+                self._pkg([0], "list-linked", args=args, rv=rv)
+                self._pkg([0], "property-linked", args=args, rv=rv)
+                self._pkg([0], "set-property-linked", args=args, rv=rv)
+
+                # doesn't accept -l
+                args = "-l %s" % self.i_name[1]
+                self._pkg([0], "attach-linked", args=args, rv=rv)
+                self._pkg([0], "list-linked", args=args, rv=rv)
+
+                # can't combine --no-parent-sync and --linked-md-only
+                args = "--no-parent-sync --linked-md-only"
+                self._pkg([0], "sync-linked -a", args=args, rv=rv)
+                self._pkg([2], "sync-linked", args=args, rv=rv)
+
+                # can't use --no-parent-sync when invoking from parent
+                args = "--no-parent-sync"
+                self._pkg([0], "sync-linked -a", args=args, rv=rv)
+                self._pkg_child(0, [1], "sync-linked", args=args, rv=rv)
+
+                # can't use be options when managing children
+                for arg in ["--deny-new-be", "--require-new-be",
+                    "--be-name=foo"]:
+                        args = "-a %s" % arg
+                        self._pkg([0], "sync-linked", args=args, rv=rv)
+
+                        args = "-l %s %s" % (self.i_name[1], arg)
+                        self._pkg([0], "sync-linked", args=args, rv=rv)
+                        self._pkg([0], "set-property-linked", args=args, rv=rv)
+
+        def test_opts_2_invalid_bad_child(self):
+                self._imgs_create(2)
+
+                rv = EXIT_OOPS
+
+                # try using an invalid child name
+                self._pkg([0], "attach-linked -c foobar %s" % \
+                    self.i_path[1], rv=rv)
+
+                for lin in ["foobar", self.i_name[1]]:
+                        # try using an invalid and unknown child name
+                        args = "-l %s" % lin
+
+                        self._pkg([0], "sync-linked", args=args, rv=rv)
+                        self._pkg([0], "audit-linked", args=args, rv=rv)
+                        self._pkg([0], "property-linked", args=args, rv=rv)
+                        self._pkg([0], "set-property-linked", args=args, rv=rv)
+                        self._pkg([0], "detach-linked", args=args, rv=rv)
+
+                        # try to ignore invalid unknown children
+                        args = "-i %s" % lin
+
+                        # operations on the parent image
+                        self._pkg([0], "sync-linked", args=args, rv=rv)
+                        self._pkg([0], "list-linked", args=args, rv=rv)
+                        self._pkg([0], "update", args=args, rv=rv)
+                        self._pkg([0], "install", args= \
+                            "-i %s %s" % (lin, self.p_foo1_name[1]), rv=rv)
+                        self._pkg([0], "change-variant", args= \
+                            "-i %s -v variant.foo=baz" % lin, rv=rv)
+                        # TODO: test change-facet
+
+        def test_opts_3_all(self):
+                self._imgs_create(1)
+
+                # the -a option is always valid
+                self._pkg([0], "sync-linked -a")
+                self._pkg([0], "audit-linked -a")
+                self._pkg([0], "detach-linked -a")
+
+        def test_opts_4_noop(self):
+                self._imgs_create(4)
+
+                # plan operations
+                self._attach_child(0, [1, 2], args="-vn")
+                self._attach_child(0, [1, 2], args="-vn")
+                self._attach_parent([3], 0, args="-vn")
+                self._attach_parent([3], 0, args="-vn")
+
+                # do operations
+                self._attach_child(0, [1, 2], args="-v")
+                self._attach_parent([3], 0, args="-v")
+
+                # plan operations
+                self._pkg_child(0, [1, 2], "detach-linked", args="-vn")
+                self._pkg_child(0, [1, 2], "detach-linked", args="-vn")
+                self._pkg_child_all(0, "detach-linked", args="-vn")
+                self._pkg_child_all(0, "detach-linked", args="-vn")
+                self._pkg([3], "detach-linked", args="-vn")
+                self._pkg([3], "detach-linked", args="-vn")
+
+                # do operations
+                self._pkg_child(0, [1], "detach-linked", args="-v")
+                self._pkg_child_all(0, "detach-linked", args="-v")
+                self._pkg([3], "detach-linked", args="-v")
+
+        def test_attach_p2c_1(self):
+                self._imgs_create(4)
+                self._v_not_linked([0, 1, 2, 3])
+
+                # link parents to children as follows:
+                #     0 -> 1 -> 2
+                #          1 -> 3
+
+                # attach parent (0) to child (1), (0 -> 1)
+                self._attach_child(0, [1])
+                self._v_has_children(0, [1])
+                self._v_has_parent([1])
+                self._v_not_linked([2, 3])
+
+                # attach parent (1) to child (2), (1 -> 2)
+                self._attach_child(1, [2])
+                self._v_has_children(0, [1])
+                self._v_has_children(1, [2])
+                self._v_has_parent([1, 2])
+                self._v_no_children([2])
+                self._v_not_linked([3])
+
+                # attach parent (1) to child (3), (1 -> 3)
+                self._attach_child(1, [3])
+                self._v_has_children(0, [1])
+                self._v_has_children(1, [2, 3])
+                self._v_has_parent([1, 2, 3])
+                self._v_no_children([2, 3])
+
+        def test_detach_p2c_1(self):
+                self._imgs_create(4)
+
+                # link parents to children as follows:
+                #     0 -> 1 -> 2
+                #          1 -> 3
+                self._attach_child(0, [1])
+                self._attach_child(1, [2, 3])
+
+                # detach child (1) from parent (0)
+                self._pkg_child(0, [1], "detach-linked")
+                self._v_has_children(1, [2, 3])
+                self._v_has_parent([2, 3])
+                self._v_no_children([2, 3])
+                self._v_not_linked([0])
+
+                # detach child (3) from parent (1)
+                self._pkg_child(1, [3], "detach-linked")
+                self._v_has_children(1, [2])
+                self._v_has_parent([2])
+                self._v_no_children([2])
+                self._v_not_linked([0, 3])
+
+                # detach child (2) from parent (1)
+                self._pkg_child(1, [2], "detach-linked")
+                self._v_not_linked([0, 1, 2, 3])
+
+        def test_detach_p2c_2(self):
+                self._imgs_create(4)
+
+                # link parents to children as follows:
+                #     0 -> 1 -> 2
+                #          1 -> 3
+                self._attach_child(0, [1])
+                self._attach_child(1, [2, 3])
+
+                # detach child (1) from parent (0)
+                self._pkg_child_all(0, "detach-linked")
+                self._v_has_children(1, [2, 3])
+                self._v_has_parent([2, 3])
+                self._v_no_children([2, 3])
+                self._v_not_linked([0])
+
+                # detach child (3) and child (2) from parent (1)
+                self._pkg_child_all(1, "detach-linked")
+                self._v_not_linked([0, 1, 2, 3])
+
+                # detach all children (there are none)
+                self._pkg_child_all(0, "detach-linked")
+
+        def test_attach_c2p_1(self):
+                self._imgs_create(4)
+                self._v_not_linked([0, 1, 2, 3])
+
+                # link children to parents as follows:
+                #     2 -> 1 -> 0
+                #     3 -> 1
+
+                # attach child (2) to parent (1), (2 -> 1)
+                self._attach_parent([2], 1)
+                self._v_has_parent([2])
+                self._v_no_children([2])
+                self._v_not_linked([0, 1, 3])
+
+                # attach child (3) to parent (1), (3 -> 1)
+                self._attach_parent([3], 1)
+                self._v_has_parent([2, 3])
+                self._v_no_children([2, 3])
+                self._v_not_linked([0, 1])
+
+                # attach child (1) to parent (0), (1 -> 0)
+                self._attach_parent([1], 0)
+                self._v_has_parent([1, 2, 3])
+                self._v_no_children([1, 2, 3])
+                self._v_not_linked([0])
+
+        def test_detach_c2p_1(self):
+                self._imgs_create(4)
+
+                # link children to parents as follows:
+                #     2 -> 1 -> 0
+                #     3 -> 1
+                self._attach_parent([2, 3], 1)
+                self._attach_parent([1], 0)
+
+                # detach parent (0) from child (1)
+                self._pkg([1], "detach-linked")
+                self._v_has_parent([2, 3])
+                self._v_no_children([2, 3])
+                self._v_not_linked([0, 1])
+
+                # detach parent (1) from child (3)
+                self._pkg([3], "detach-linked")
+                self._v_has_parent([2])
+                self._v_no_children([2])
+                self._v_not_linked([0, 1, 3])
+
+                # detach parent (1) from child (2)
+                self._pkg([2], "detach-linked")
+                self._v_not_linked([0, 1, 2, 3])
+
+        def test_attach_already_linked_1_err(self):
+                self._imgs_create(4)
+                self._attach_child(0, [1])
+                self._attach_parent([2], 0)
+
+                rv = EXIT_OOPS
+
+                # try to link the parent image to a new child with a dup name
+                self._pkg([0], "attach-linked -c %s %s" %
+                    (self.i_name[1], self.i_path[2]), rv=rv)
+
+                # have a new parent try to link to the p2c child
+                self._attach_child(3, [1], rv=rv)
+
+                # have the p2c child try to link to a new parent
+                self._attach_parent([1], 3, rv=rv)
+
+                # have the c2p child try to link to a new parent
+                self._attach_parent([2], 3, rv=rv)
+
+        def test_attach_already_linked_2_relink(self):
+                self._imgs_create(4)
+                self._attach_child(0, [1])
+                self._attach_parent([2], 0)
+
+                # have a new parent try to link to the p2c child
+                self._attach_child(3, [1], args="--allow-relink")
+
+                # have the p2c child try to link to a new parent
+                self._attach_parent([1], 3, args="--allow-relink")
+
+                # have the c2p child try to link to a new parent
+                self._attach_parent([2], 3, args="--allow-relink")
+
+        def test_zone_attach_detach(self):
+                self._imgs_create(2)
+
+                rv = EXIT_OOPS
+
+                # by default we can't attach (p2c) zone image
+                self._pkg([0], "attach-linked -v -c zone:foo %s" %
+                    self.i_path[1], rv=rv)
+                self._v_not_linked([0, 1])
+
+                # force attach (p2c) zone image
+                self._pkg([0], "attach-linked -v -f -c zone:foo %s" %
+                    self.i_path[1])
+                self._v_not_linked([0])
+                self._v_has_parent([1])
+
+                self._imgs_create(2)
+
+                # by default we can't attach (c2p) zone image
+                self._pkg([1], "attach-linked -v -p zone:foo %s" %
+                    self.i_path[0], rv=rv)
+                self._v_not_linked([0, 1])
+
+                # force attach (c2p) zone image
+                self._pkg([1], "attach-linked -v -f -p zone:foo %s" %
+                    self.i_path[0])
+                self._v_not_linked([0])
+                self._v_has_parent([1])
+
+                # by default we can't detach (c2p) zone image
+                self._pkg([1], "detach-linked -v", rv=rv)
+                self._v_not_linked([0])
+                self._v_has_parent([1])
+
+                # force detach (c2p) zone image
+                self._pkg([1], "detach-linked -v -f")
+                self._v_not_linked([0, 1])
+
+        def test_parent_ops_error(self):
+                self._imgs_create(2)
+
+                # attach a child
+                self._attach_child(0, [1])
+
+                rv = EXIT_PARENTOP
+
+                # some operations can't be done from a child when linked to
+                # from a parent
+                self._pkg([1], "detach-linked", rv=rv)
+
+                # TODO: enable this once we support set-property-linked
+                #self._pkg([1], "set-property-linked", rv=EXIT_PARENTOP)
+
+        def test_eaccess_1_parent(self):
+                self._imgs_create(3)
+                self._attach_parent([1], 0)
+
+                rv = EXIT_EACCESS
+
+                for i in [0, 1]:
+                        if i == 0:
+                                # empty the parent image
+                                self.set_image(0)
+                                self.image_destroy()
+                                self.cmdline_run("mkdir -p %s" % self.i_path[0])
+                        if i == 1:
+                                # delete the parent image
+                                self.set_image(0)
+                                self.image_destroy()
+
+                        # operations that need to access the parent should fail
+                        self._pkg([1], "sync-linked", rv=rv)
+                        self._pkg([1], "audit-linked", rv=rv)
+                        self._pkg([1], "install %s" % self.p_foo1_name[1], \
+                            rv=rv)
+                        self._pkg([1], "image-update", rv=rv)
+
+                        # operations that need to access the parent should fail
+                        self._attach_parent([2], 0, rv=rv)
+
+                # detach should still work
+                self._pkg([1], "detach-linked")
+
+        def test_eaccess_1_child(self):
+                self._imgs_create(2)
+                self._attach_child(0, [1])
+
+                outfile = os.path.join(self.test_root, "res")
+                rv = EXIT_EACCESS
+
+                for i in [0, 1, 2]:
+                        if i == 0:
+                                # corrupt the child image
+                                self.cmdline_run("mkdir -p "
+                                    "%s/%s" % (self.i_path[1],
+                                    image.img_user_prefix))
+                                self.cmdline_run("mkdir -p "
+                                    "%s/%s" % (self.i_path[1],
+                                    image.img_root_prefix))
+                        if i == 1:
+                                # delete the child image
+                                self.set_image(1)
+                                self.image_destroy()
+                                self.cmdline_run("mkdir -p %s" % self.i_path[1])
+                        if i == 2:
+                                # delete the child image
+                                self.set_image(1)
+                                self.image_destroy()
+
+
+                        # child should still be listed
+                        self._pkg([0], "list-linked -H > %s" % outfile)
+                        self._cmd("cat %s" % outfile)
+                        self._cmd("egrep '^%s[ 	]' %s" %
+                            (self.i_name[1], outfile))
+
+                        # child should still be listed
+                        self._pkg([0], "property-linked -H -l %s > %s" %
+                            (self.i_name[1], outfile))
+                        self._cmd("cat %s" % outfile)
+                        self._cmd("egrep '^li-' %s" % outfile)
+
+                        # operations that need to access child should fail
+                        self._pkg_child(0, [1], "sync-linked", rv=rv)
+                        self._pkg_child_all(0, "sync-linked", rv=rv)
+
+                        self._pkg_child(0, [1], "audit-linked", rv=rv)
+                        self._pkg_child_all(0, "audit-linked", rv=rv)
+
+                        self._pkg_child(0, [1], "detach-linked", rv=rv)
+                        self._pkg_child_all(0, "detach-linked", rv=rv)
+
+                        # TODO: test more recursive ops here
+                        # image-update, install, uninstall, etc
+
+        def test_ignore_1_no_children(self):
+                self._imgs_create(1)
+                outfile = os.path.join(self.test_root, "res")
+
+                # it's ok to use -I with no children
+                self._pkg([0], "list-linked -H -I > %s" % outfile)
+                self._cmd("cat %s" % outfile)
+                self._cmd("egrep '^$|.' %s" % outfile, rv=EXIT_OOPS)
+
+        def test_ignore_2_ok(self):
+                self._imgs_create(3)
+                self._attach_child(0, [1, 2])
+                outfile = os.path.join(self.test_root, "res")
+
+                # ignore one child
+                self._pkg([0], "list-linked -H -i %s > %s" %
+                    (self.i_name[1], outfile))
+                self._cmd("cat %s" % outfile)
+                self._cmd("egrep '^%s[ 	]' %s" %
+                    (self.i_name[1], outfile), rv=EXIT_OOPS)
+                self._cmd("egrep '^%s[ 	]' %s" %
+                    (self.i_name[2], outfile))
+
+                # manually ignore all children
+                self._pkg([0], "list-linked -H -i %s -i %s > %s" %
+                    (self.i_name[1], self.i_name[2], outfile))
+                self._cmd("cat %s" % outfile)
+                self._cmd("egrep '^$|.' %s" % outfile, rv=EXIT_OOPS)
+
+                # automatically ignore all children
+                self._pkg([0], "list-linked -H -I > %s" % outfile)
+                self._cmd("cat %s" % outfile)
+                self._cmd("egrep '^$|.' %s" % outfile, rv=EXIT_OOPS)
+
+        def test_no_pkg_updates_1_empty_via_attach(self):
+                """test --no-pkg-updates with an empty image."""
+                self._imgs_create(3)
+
+                self._attach_child(0, [1], args="--no-pkg-updates")
+                self._attach_parent([2], 0, args="--no-pkg-updates")
+
+        def test_no_pkg_updates_1_empty_via_sync(self):
+                """test --no-pkg-updates with an empty image."""
+                self._imgs_create(4)
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2], args="--linked-md-only")
+                self._attach_parent([3], 0, args="--linked-md-only")
+
+                self._pkg_child(0, [1], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg_child_all(0, "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg([3], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+
+        def test_no_pkg_updates_1_empty_via_set_property_linked_TODO(self):
+                """test --no-pkg-updates with an empty image."""
+                pass
+
+        def test_no_pkg_updates_2_foo_via_attach(self):
+                """test --no-pkg-updates with a non-empty image."""
+                self._imgs_create(3)
+
+                # install different un-synced packages into each image
+                for i in [0, 1, 2]:
+                        self._pkg([i], "install -v %s" % self.p_foo1_name[i])
+
+                self._attach_child(0, [1], args="--no-pkg-updates")
+                self._attach_parent([2], 0, args="--no-pkg-updates")
+
+                # verify the un-synced packages
+                for i in [0, 1, 2]:
+                        self._pkg([i], "list -v %s" % self.p_foo1_name[i])
+
+        def test_no_pkg_updates_2_foo_via_sync(self):
+                """test --no-pkg-updates with a non-empty image."""
+                self._imgs_create(4)
+
+                # install different un-synced packages into each image
+                for i in range(4):
+                        self._pkg([i], "install -v %s" % self.p_foo1_name[i])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2], args="--linked-md-only")
+                self._attach_parent([3], 0, args="--linked-md-only")
+
+                self._pkg_child(0, [1], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg_child_all(0, "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg([3], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+
+                # verify the un-synced packages
+                for i in range(4):
+                        self._pkg([i], "list -v %s" % self.p_foo1_name[i])
+
+        def test_no_pkg_updates_2_foo_via_set_property_linked_TODO(self):
+                """test --no-pkg-updates with a non-empty image."""
+                pass
+
+        def test_no_pkg_updates_3_sync_via_attach(self):
+                """test --no-pkg-updates with an in sync package"""
+                self._imgs_create(3)
+
+                # install the same synced packages into each image
+                for i in range(3):
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_child(0, [1], args="--no-pkg-updates")
+                self._attach_parent([2], 0, args="--no-pkg-updates")
+
+                # verify the synced packages
+                for i in range(3):
+                        self._pkg([i], "list -v %s" % self.p_sync1_name[1])
+
+        def test_no_pkg_updates_3_sync_via_sync(self):
+                """test --no-pkg-updates with an in sync package"""
+                self._imgs_create(4)
+
+                # install the same synced packages into each image
+                for i in range(4):
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[1])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2], args="--linked-md-only")
+                self._attach_parent([3], 0, args="--linked-md-only")
+
+                # verify the synced packages
+                for i in range(4):
+                        self._pkg([i], "list -v %s" % self.p_sync1_name[1])
+
+                self._pkg_child(0, [1], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg_child_all(0, "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+                self._pkg([3], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_NOP)
+
+        def test_no_pkg_updates_3_sync_via_set_property_linked_TODO(self):
+                """test --no-pkg-updates with an in sync package"""
+                pass
+
+        def test_no_pkg_updates_3_fail_via_attach(self):
+                """test --no-pkg-updates with an out of sync package"""
+                self._imgs_create(3)
+
+                # install different synced packages into each image
+                for i in range(3):
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[i+1])
+
+                self._attach_child(0, [1], args="--no-pkg-updates",
+                    rv=EXIT_OOPS)
+                self._attach_parent([2], 0, args="--no-pkg-updates",
+                    rv=EXIT_OOPS)
+
+                # verify packages
+                for i in range(3):
+                        self._pkg([i], "list -v %s" % self.p_sync1_name[i+1])
+
+        def test_no_pkg_updates_3_fail_via_sync(self):
+                """test --no-pkg-updates with an out of sync package"""
+                self._imgs_create(4)
+
+                # install different synced packages into each image
+                for i in range(4):
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[i+1])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2], args="--linked-md-only")
+                self._attach_parent([3], 0, args="--linked-md-only")
+
+                self._pkg_child(0, [1], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_OOPS)
+                self._pkg_child_all(0, "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_OOPS)
+                self._pkg([3], "sync-linked -v --no-pkg-updates",
+                    rv=EXIT_OOPS)
+
+                # verify packages
+                for i in range(3):
+                        self._pkg([i], "list -v %s" % self.p_sync1_name[i+1])
+
+        def test_no_pkg_updates_3_fail_via_set_property_linked_TODO(self):
+                pass
+
+        def test_audit_synced_1(self):
+                self._imgs_create(4)
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2], args="--linked-md-only")
+                self._attach_parent([3], 0, args="--linked-md-only")
+
+                # audit with empty parent and child
+                self._pkg([1, 2, 3], "audit-linked")
+                self._pkg_child(0, [1, 2], "audit-linked")
+                self._pkg_child_all(0, "audit-linked")
+                self._pkg_child_all(3, "audit-linked")
+
+        def test_audit_synced_2(self):
+                self._imgs_create(4)
+
+                # install different un-synced packages into each image
+                for i in [0, 1, 2, 3]:
+                        self._pkg([i], "install -v %s" % self.p_foo1_name[i])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+
+                self._pkg([1, 2, 3], "audit-linked")
+                self._pkg_child(0, [1, 2, 3], "audit-linked")
+                self._pkg_child_all(0, "audit-linked")
+
+        def test_audit_synced_3(self):
+                self._imgs_create(4)
+
+                # install synced package into parent
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+
+                self._pkg([1, 2, 3], "audit-linked")
+                self._pkg_child(0, [1, 2, 3], "audit-linked")
+                self._pkg_child_all(0, "audit-linked")
+
+        def test_audit_synced_4(self):
+                self._imgs_create(4)
+
+                # install same synced packages into parent and some children
+                for i in [0, 1, 2, 3]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[0])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+
+                self._pkg([1, 2, 3], "audit-linked")
+                self._pkg_child(0, [1, 2, 3], "audit-linked")
+                self._pkg_child_all(0, "audit-linked")
+
+        def test_audit_diverged_1(self):
+                self._imgs_create(4)
+
+                # install different synced package into some child images
+                for i in [1, 3]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[i])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+
+                rvdict = {1: EXIT_DIVERGED, 3: EXIT_DIVERGED}
+                self._pkg([1, 2, 3], "audit-linked", rvdict=rvdict)
+                self._pkg_child(0, [1, 2, 3], "audit-linked", rvdict=rvdict)
+                self._pkg_child_all(0, "audit-linked", rv=EXIT_DIVERGED)
+
+        def test_audit_diverged_2(self):
+                self._imgs_create(4)
+
+                # install different synced package into each image
+                for i in range(4):
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[i])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+
+                rv = EXIT_DIVERGED
+                self._pkg([1, 2, 3], "audit-linked", rv=rv)
+                self._pkg_child(0, [1, 2, 3], "audit-linked", rv=rv)
+                self._pkg_child_all(0, "audit-linked", rv=rv)
+
+        def test_audit_osnet_1_synced(self):
+                self._imgs_create(2)
+
+                # install same version of osnet in parent and child
+                for i in [0, 1]:
+                        self._pkg([i], "install -v %s %s" % \
+                            (self.p_osnet_dep[0], self.p_osnet_name[0]))
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1], args="--linked-md-only")
+
+                self._pkg([1], "audit-linked")
+                self._pkg_child(0, [1], "audit-linked")
+                self._pkg_child_all(0, "audit-linked")
+
+        def test_audit_osnet_2_diverged(self):
+                self._imgs_create(2)
+
+                # install different version of osnet in parent and child
+                for i in [0, 1]:
+                        self._pkg([i], "install -v %s %s" % \
+                            (self.p_osnet_dep[i], self.p_osnet_name[i]))
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1], args="--linked-md-only")
+
+                rv = EXIT_DIVERGED
+                self._pkg([1], "audit-linked", rv=rv)
+                self._pkg_child(0, [1], "audit-linked", rv=rv)
+                self._pkg_child_all(0, "audit-linked", rv=rv)
+
+        def test_sync_fail(self):
+                self._imgs_create(3)
+
+                # install newer sync'ed package into child
+                self._pkg([0], "install -v %s" % self.p_sync1_name[2])
+                self._pkg([1], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([2], "install -v %s" % self.p_sync1_name[1])
+
+                # attach should fail
+                self._attach_child(0, [1], args="-vn", rv=EXIT_OOPS)
+                self._attach_child(0, [1], args="-v", rv=EXIT_OOPS)
+                self._attach_parent([2], 0, args="-vn", rv=EXIT_OOPS)
+                self._attach_parent([2], 0, args="-v", rv=EXIT_OOPS)
+
+                # use --linked-md-only so we don't install constraints package
+                # attach should succeed
+                self._attach_child(0, [1], args="-vn --linked-md-only")
+                self._attach_child(0, [1], args="-v --linked-md-only")
+                self._attach_parent([2], 0, args="-vn --linked-md-only")
+                self._attach_parent([2], 0, args="-v --linked-md-only")
+
+                # trying to sync the child should fail
+                self._pkg([1, 2], "sync-linked -vn", rv=EXIT_OOPS)
+                self._pkg([1, 2], "sync-linked -v", rv=EXIT_OOPS)
+                self._pkg_child(0, [1], "sync-linked -vn", rv=EXIT_OOPS)
+                self._pkg_child(0, [1], "sync-linked -v", rv=EXIT_OOPS)
+
+                # use --linked-md-only so we don't install constraints package
+                # sync should succeed
+                rv = EXIT_NOP
+                self._pkg([1, 2], "sync-linked -vn --linked-md-only", rv=rv)
+                self._pkg([1, 2], "sync-linked -v --linked-md-only", rv=rv)
+                self._pkg_child(0, [1], "sync-linked -vn --linked-md-only",
+                    rv=rv)
+                self._pkg_child(0, [1], "sync-linked -v --linked-md-only",
+                    rv=rv)
+
+                # trying to sync via image-update should fail
+                self._pkg([1, 2], "image-update -vn", rv=EXIT_OOPS)
+                self._pkg([1, 2], "image-update -v", rv=EXIT_OOPS)
+
+                # trying to sync via install should fail
+                self._pkg([1, 2], "install -vn %s" % self.p_sync1_name[0],
+                    rv=EXIT_OOPS)
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[0],
+                    rv=EXIT_OOPS)
+
+                # verify the child is still diverged
+                rv = EXIT_DIVERGED
+                self._pkg([1, 2], "audit-linked", rv=rv)
+
+        def test_sync_1(self):
+                self._imgs_create(5)
+
+                # install different synced package into each image
+                for i in [0, 1, 2, 3, 4]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[i])
+
+                # install unsynced packages to make sure they aren't molested
+                self._pkg([0], "install -v %s" % self.p_foo1_name[1])
+                self._pkg([1, 2, 3, 4], "install -v %s" % self.p_foo1_name[2])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1, 2, 3], args="--linked-md-only")
+                self._attach_parent([4], 0, args="--linked-md-only")
+
+                # everyone should be diverged
+                self._pkg([1, 2, 3, 4], "audit-linked", rv=EXIT_DIVERGED)
+
+                # plan sync (direct)
+                self._pkg([1, 4], "sync-linked -vn")
+                self._pkg([1, 2, 3, 4], "audit-linked", rv=EXIT_DIVERGED)
+
+                # sync child (direct)
+                self._pkg([1, 4], "sync-linked -v")
+                rvdict = {2: EXIT_DIVERGED, 3: EXIT_DIVERGED}
+                self._pkg([1, 2, 3, 4], "audit-linked", rvdict=rvdict)
+                self._pkg([1, 4], "sync-linked -v", rv=EXIT_NOP)
+
+                # plan sync (indirectly via -l)
+                self._pkg_child(0, [2], "sync-linked -vn")
+                self._pkg([1, 2, 3], "audit-linked", rvdict=rvdict)
+
+                # sync child (indirectly via -l)
+                self._pkg_child(0, [2], "sync-linked -v")
+                rvdict = {3: EXIT_DIVERGED}
+                self._pkg([1, 2, 3], "audit-linked", rvdict=rvdict)
+                self._pkg_child(0, [2], "sync-linked -vn", rv=EXIT_NOP)
+
+                # plan sync (indirectly via -a)
+                self._pkg_child_all(0, "sync-linked -vn")
+                self._pkg([1, 2, 3], "audit-linked", rvdict=rvdict)
+
+                # sync child (indirectly via -a)
+                self._pkg_child_all(0, "sync-linked -v")
+                self._pkg([1, 2, 3], "audit-linked")
+                self._pkg_child_all(0, "sync-linked -v", rv=EXIT_NOP)
+
+                # check unsynced packages
+                self._pkg([1, 2, 3, 4], "list -v %s" % self.p_foo1_name[2])
+
+        def test_sync_2_via_attach(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[2])
+
+                # install unsynced packages to make sure they aren't molested
+                self._pkg([0], "install -v %s" % self.p_foo1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_foo1_name[2])
+
+                # attach children
+                self._attach_child(0, [1])
+                self._attach_parent([2], 0)
+
+                # check synced and unsynced packages
+                self._pkg([1, 2], "list -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "list -v %s" % self.p_foo1_name[2])
+
+        def test_sync_2_via_image_update(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[2])
+
+                # install unsynced packages to make sure they are updated
+                self._pkg([0], "install -v %s" % self.p_foo1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_foo1_name[2])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1], args="--linked-md-only")
+                self._attach_parent([2], 0, args="--linked-md-only")
+
+                # plan sync
+                self._pkg([1, 2], "image-update -vn")
+                self._pkg([1, 2], "audit-linked", rv=EXIT_DIVERGED)
+
+                # sync child
+                self._pkg([1, 2], "image-update -v")
+                self._pkg([1, 2], "audit-linked")
+                self._pkg([1, 2], "image-update -v", rv=EXIT_NOP)
+                self._pkg([1, 2], "sync-linked -v", rv=EXIT_NOP)
+
+                # check unsynced packages
+                self._pkg([1, 2], "list -v %s" % self.p_foo1_name[0])
+
+        def test_sync_2_via_install(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[2])
+
+                # install unsynced packages to make sure they aren't molested
+                self._pkg([0], "install -v %s" % self.p_foo1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_foo1_name[2])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1], args="--linked-md-only")
+                self._attach_parent([2], 0, args="--linked-md-only")
+
+                # plan sync
+                self._pkg([1, 2], "install -vn %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "audit-linked", rv=EXIT_DIVERGED)
+
+                # sync child
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "audit-linked")
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[1],
+                    rv=EXIT_NOP)
+                self._pkg([1, 2], "sync-linked -v", rv=EXIT_NOP)
+
+                # check unsynced packages
+                self._pkg([1, 2], "list -v %s" % self.p_foo1_name[2])
+
+        def test_sync_2_via_change_variant(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[2])
+
+                # install unsynced packages to make sure they aren't molested
+                self._pkg([0], "install -v %s" % self.p_foo1_name[1])
+                self._pkg([1, 2], "install -v %s" % self.p_foo1_name[2])
+
+                # use --linked-md-only so we don't install constraints package
+                self._attach_child(0, [1], args="--linked-md-only")
+                self._attach_parent([2], 0, args="--linked-md-only")
+
+                # plan sync
+                self._pkg([1, 2], "change-variant -vn variant.foo=baz")
+                self._pkg([1, 2], "audit-linked", rv=EXIT_DIVERGED)
+
+                # sync child
+                self._pkg([1, 2], "change-variant -v variant.foo=baz")
+                self._pkg([1, 2], "audit-linked")
+                self._pkg([1, 2], "change-variant -v variant.foo=baz",
+                    rv=EXIT_NOP)
+                self._pkg([1, 2], "sync-linked -v", rv=EXIT_NOP)
+
+                # check unsynced packages
+                self._pkg([1, 2], "list -v %s" % self.p_foo1_name[2])
+
+        def test_sync_2_via_set_property_linked_TODO(self):
+                pass
+
+        def test_parent_sync_1_nosync(self):
+                self._imgs_create(2)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_parent([1], 0)
+
+                # update parent image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # there should be no updates with --no-parent-sync
+                self._pkg([1], "sync-linked -v --no-parent-sync", rv=EXIT_NOP)
+                self._pkg([1], "image-update -v --no-parent-sync", rv=EXIT_NOP)
+                self._pkg([1], "install -v --no-parent-sync %s" % \
+                    self.p_sync1_name[1], rv=EXIT_NOP)
+                self._pkg([1], "change-variant -v --no-parent-sync "
+                    "variant.foo=bar", rv=EXIT_NOP)
+                # TODO: test set-property-linked
+
+                # an audit without a parent sync should think we're in sync
+                self._pkg([1], "audit-linked --no-parent-sync")
+
+                # a full audit should realize we're not in sync
+                self._pkg([1], "audit-linked", rv=EXIT_DIVERGED)
+
+                # the audit above should not have updated our image, so we
+                # should still be out of sync.
+                self._pkg([1], "audit-linked", rv=EXIT_DIVERGED)
+
+        def test_parent_sync_2_via_sync(self):
+                self._imgs_create(2)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_parent([1], 0)
+
+                # update parent image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # verify that pkg operations sync parent metadata
+                self._pkg([1], "sync-linked -v -n")
+                self._pkg([1], "sync-linked -v")
+                self._pkg([1], "sync-linked -v", rv=EXIT_NOP)
+                self._pkg([1], "audit-linked")
+
+        def test_parent_sync_2_via_image_update(self):
+                self._imgs_create(2)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_parent([1], 0)
+
+                # update parent image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # verify that pkg operations sync parent metadata
+                self._pkg([1], "image-update -v -n")
+                self._pkg([1], "image-update -v")
+                self._pkg([1], "image-update -v", rv=EXIT_NOP)
+                self._pkg([1], "audit-linked")
+
+        def test_parent_sync_2_via_install(self):
+                self._imgs_create(2)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_parent([1], 0)
+
+                # update parent image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # verify that pkg operations sync parent metadata
+                self._pkg([1], "install -v -n %s" % self.p_sync1_name[0])
+                self._pkg([1], "install -v %s" % self.p_sync1_name[0])
+                self._pkg([1], "install -v %s" % self.p_sync1_name[0],
+                    rv=EXIT_NOP)
+                self._pkg([1], "audit-linked")
+
+        def test_parent_sync_2_via_change_variant(self):
+                self._imgs_create(2)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                self._attach_parent([1], 0)
+
+                # update parent image
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+
+                # verify that pkg operations sync parent metadata
+                self._pkg([1], "change-variant -v -n variant.foo=baz")
+                self._pkg([1], "change-variant -v variant.foo=baz")
+                self._pkg([1], "change-variant -v variant.foo=baz", rv=EXIT_NOP)
+                self._pkg([1], "audit-linked")
+
+        def test_parent_sync_2_via_set_property_linked_TODO(self):
+                pass
+
+        def test_install_constrainted(self):
+                self._imgs_create(3)
+
+                # install synced package into parent
+                self._pkg([0], "install -v %s" % self.p_sync1_name[1])
+
+                # attach children
+                self._attach_child(0, [1])
+                self._attach_parent([2], 0)
+
+                # try to install a different vers of synced package
+                for i in [0, 2, 3, 4]:
+                        self._pkg([1, 2], "install -v %s" % \
+                            self.p_sync1_name[i], rv=EXIT_OOPS)
+
+                # try to install a different synced package
+                for i in [0, 1, 2, 3, 4]:
+                        self._pkg([1, 2], "install -v %s" % \
+                            self.p_sync2_name[i], rv=EXIT_OOPS)
+
+                # install random un-synced package
+                self._pkg([1, 2], "install -v %s" % self.p_foo1_name[0])
+
+                # install the same ver of a synced package in the child
+                self._pkg([1, 2], "install -v %s" % self.p_sync1_name[1])
+
+        def test_p2c_recurse_1_image_update(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                for i in [0, 1]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[1])
+                for i in [2]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[2])
+
+                # attach --linked-md-only doesn't install constraints package
+                self._attach_child(0, [1])
+                self._attach_child(0, [2], args="--linked-md-only")
+
+                self._pkg([0], "image-update -v -n")
+                self._pkg([0], "image-update -v")
+                self._pkg([0], "image-update -v", rv=EXIT_NOP)
+
+                # make sure the latest synced package is in every image
+                for i in [0, 1, 2]:
+                        self._pkg([i], "list -v %s " % self.p_sync1_name[0])
+
+                # children should be synced
+                self._pkg([1, 2], "audit-linked")
+
+        def test_p2c_recurse_1_install_1(self):
+                self._imgs_create(3)
+
+                # install different synced package into each image
+                for i in [0, 1]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[1])
+                for i in [2]:
+                        self._pkg([i], "install -v %s" % self.p_sync1_name[2])
+
+                # attach --linked-md-only doesn't install constraints package
+                self._attach_child(0, [1])
+                self._attach_child(0, [2], args="--linked-md-only")
+
+                self._pkg([0], "install -v -n %s" % self.p_sync1_name[0])
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0])
+                self._pkg([0], "install -v %s" % self.p_sync1_name[0],
+                    rv=EXIT_NOP)
+
+                # make sure the latest synced package is in every image
+                for i in [0, 1, 2]:
+                        self._pkg([i], "list -v %s " % self.p_sync1_name[0])
+
+                # children should be synced
+                self._pkg([1, 2], "audit-linked")
+
+        def test_verify(self):
+                self._imgs_create(5)
+
+                # install synced package into each image
+                self._pkg([0, 1], "install -v %s" % self.p_sync1_name[1])
+
+                # test with a newer synced package
+                self._pkg([2], "install -v %s" % self.p_sync1_name[0])
+
+                # test with an older synced package
+                self._pkg([3], "install -v %s" % self.p_sync1_name[2])
+
+                # test with a different synced package
+                self._pkg([4], "install -v %s" % self.p_sync2_name[2])
+
+                self._attach_parent([1], 0)
+                self._attach_parent([2, 3, 4], 0, args="--linked-md-only")
+
+                self._pkg([1], "verify")
+                self._pkg([2, 3, 4], "verify", rv=EXIT_OOPS)
+
+if __name__ == "__main__":
+        unittest.main()
--- a/src/tests/cli/t_pkg_publisher.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_publisher.py	Sat May 07 00:25:10 2011 -0700
@@ -311,9 +311,10 @@
                 base_string = ("test\ttrue\tfalse\ttrue\torigin\tonline\t"
                     "%s/\n"
                     "test1\ttrue\tfalse\ttrue\torigin\tonline\t"
-                    "https://test.invalid1/\n"
+                    "https://%s1/\n"
                     "test2\ttrue\tfalse\ttrue\torigin\tonline\t"
-                    "http://test.invalid2/\n" % self.rurl)
+                    "http://%s2/\n" % (self.rurl, self.bogus_url,
+                    self.bogus_url))
                 # With headers
                 self.pkg("publisher -F tsv")
                 expected = "PUBLISHER\tSTICKY\tSYSPUB\tENABLED" \
@@ -521,8 +522,12 @@
         def __verify_pub_cfg(self, prefix, pub_cfg):
                 """Private helper method to verify publisher configuration."""
 
+                # pretend like the Image object is being allocated from
+                # a pkg command run from within the target image.
+                cmdpath = os.path.join(self.get_img_path(), "pkg")
+
                 img = image.Image(self.get_img_path(), should_exist=True,
-                    user_provided_dir=True)
+                    user_provided_dir=True, cmdpath=cmdpath)
                 pub = img.get_publisher(prefix=prefix)
                 for section in pub_cfg:
                         for prop, val in pub_cfg[section].iteritems():
--- a/src/tests/cli/t_pkg_sysrepo.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_sysrepo.py	Sat May 07 00:25:10 2011 -0700
@@ -382,7 +382,7 @@
                 self.assertEqualDiff(expected, output)
 
         def __check_publisher_dirs(self, pubs):
-                pub_dir = os.path.join(self.img_path, "var/pkg/publisher")
+                pub_dir = os.path.join(self.img_path(), "var/pkg/publisher")
                 for p in pubs:
                         if not os.path.isdir(os.path.join(pub_dir, p)):
                                 raise RuntimeError("Publisher %s was expected "
@@ -407,7 +407,7 @@
                 api_obj = self.image_create(props={"use-system-repo": True})
                 # Make sure that the publisher catalogs were created.
                 for n in ("test1", "test12", "test3"):
-                        self.assert_(os.path.isdir(os.path.join(self.img_path,
+                        self.assert_(os.path.isdir(os.path.join(self.img_path(),
                             "var/pkg/publisher/%s" % n)))
                 expected = """\
 PUBLISHER\tSTICKY\tSYSPUB\tENABLED\tTYPE\tSTATUS\tURI
--- a/src/tests/cli/t_pkg_temp_sources.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkg_temp_sources.py	Sat May 07 00:25:10 2011 -0700
@@ -48,6 +48,7 @@
         foo_pkg = """
             open pkg://test/[email protected]
             add set name=pkg.summary value="Example package foo."
+            add set name=variant.debug.foo value=true value=false
             add dir mode=0755 owner=root group=bin path=lib
             add dir mode=0755 owner=root group=bin path=usr
             add dir mode=0755 owner=root group=bin path=usr/bin
@@ -122,7 +123,7 @@
         def image_create(self, *args, **kwargs):
                 pkg5unittest.ManyDepotTestCase.image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
 
         def __publish_packages(self, rurl):
@@ -613,6 +614,7 @@
                 expected = """\
 set name=pkg.fmri value=%s
 set name=pkg.summary value="Example package foo."
+set name=variant.debug.foo value=true value=false
 dir group=bin mode=0755 owner=root path=lib
 dir group=bin mode=0755 owner=root path=usr
 dir group=bin mode=0755 owner=root path=usr/bin
@@ -795,7 +797,8 @@
                 #
                 # Verify change-facet can use temporary origins.
                 #
-                fpath = os.path.join(self.img_path, "usr/share/man/man1/foo.1")
+                fpath = os.path.join(self.img_path(),
+                    "usr/share/man/man1/foo.1")
                 assert os.path.exists(fpath)
 
                 # Now set facet.doc.man to false and verify faceted item is
@@ -824,7 +827,8 @@
                 #
                 # Verify change-variant can use temporary origins.
                 #
-                vpath = os.path.join(self.img_path, "lib/libfoo.so.1")
+                vpath = os.path.join(self.img_path(),
+                    "lib/libfoo.so.1")
                 assert os.path.exists(vpath)
                 self.assertEqual(os.stat(vpath).st_size, 15)
 
--- a/src/tests/cli/t_pkgrepo.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkgrepo.py	Sat May 07 00:25:10 2011 -0700
@@ -108,7 +108,7 @@
 
                 # --help, -? should exit with 0.
                 self.pkgrepo("--help", exit=0)
-                self.pkgrepo("-?", exit=0)
+                self.pkgrepo("'-?'", exit=0)
 
                 # unknown options should exit with 2.
                 self.pkgrepo("-U", exit=2)
--- a/src/tests/cli/t_pkgsend.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkgsend.py	Sat May 07 00:25:10 2011 -0700
@@ -792,7 +792,7 @@
                 self.pkg("info --license %s" % pkgname)
 
                 for entry in contents_dict:
-                        name = os.path.join(self.img_path, entry)
+                        name = os.path.join(self.img_path(), entry)
                         ftype, mode, user, group, digest = contents_dict[entry]
 
                         if ftype in "fl":
@@ -810,13 +810,13 @@
                                 pkg5_digest = misc.get_data_digest(name)[0]
                                 self.assertEqual(digest, pkg5_digest)
 
-                        st = os.stat(os.path.join(self.img_path, name))
+                        st = os.stat(os.path.join(self.img_path(), name))
                         if mode is not None:
                                 portable.assert_mode(name, stat.S_IMODE(mode))
                         self.assertEqual(portable.get_user_by_name(user,
-                            self.img_path, use_file=True), st.st_uid)
+                            self.img_path(), use_file=True), st.st_uid)
                         self.assertEqual(portable.get_group_by_name(group,
-                            self.img_path, use_file=True), st.st_gid)
+                            self.img_path(), use_file=True), st.st_gid)
     
         def test_13_pkgsend_indexcontrol(self):
                 """Verify that "pkgsend refresh-index" triggers indexing."""
--- a/src/tests/cli/t_pkgsign.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_pkgsign.py	Sat May 07 00:25:10 2011 -0700
@@ -115,19 +115,19 @@
         def pkg_image_create(self, *args, **kwargs):
                 pkg5unittest.SingleDepotTestCase.pkg_image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
                 for f in self.image_files:
-                        with open(os.path.join(self.img_path, f), "wb") as fh:
+                        with open(os.path.join(self.img_path(), f), "wb") as fh:
                                 fh.close()
 
         def image_create(self, *args, **kwargs):
                 pkg5unittest.SingleDepotTestCase.image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
                 for f in self.image_files:
-                        with open(os.path.join(self.img_path, f), "wb") as fh:
+                        with open(os.path.join(self.img_path(), f), "wb") as fh:
                                 fh.close()
 
         def pkg(self, command, *args, **kwargs):
@@ -140,7 +140,7 @@
                     *args, **kwargs)
 
         def setUp(self):
-                pkg5unittest.SingleDepotTestCase.setUp(self)
+                pkg5unittest.SingleDepotTestCase.setUp(self, image_count=2)
                 self.make_misc_files(self.misc_files)
                 self.durl1 = self.dcs[1].get_depot_url()
                 self.rurl1 = self.dcs[1].get_repo_url()
@@ -319,7 +319,7 @@
                 self._api_install(api_obj, ["example_pkg"])
                 self._api_uninstall(api_obj, ["example_pkg"])
 
-                emptyCA = os.path.join(self.img_path, "emptyCA")
+                emptyCA = os.path.join(self.img_path(), "emptyCA")
                 os.makedirs(emptyCA)
                 self.pkg("set-property trust-anchor-directory emptyCA")
                 # This should fail because the chain is rooted in an untrusted
@@ -510,7 +510,7 @@
                           "ch1_ta3_cert.pem"),
                       "pkg": plist[0]
                     }
-                
+
                 self.pkgsign(self.rurl1, sign_args)
                 self.pkg_image_create(self.rurl1)
                 self.seed_ta_dir("ta1")
@@ -556,7 +556,7 @@
                     "cert": os.path.join(self.cs_dir, "cs1_ta2_cert.pem")
                 }
                 self.pkgsign(self.rurl1, sign_args)
-                
+
                 self.pkg_image_create(self.rurl1)
                 self.seed_ta_dir(["ta1", "ta2"])
                 self.pkg("set-property signature-policy verify")
@@ -648,7 +648,7 @@
                       "ta": ta_path,
                       "pkg": plist[0]
                     }
-                
+
                 self.pkgsign(self.rurl1, sign_args)
                 self.pkg_image_create(self.rurl1)
 
@@ -757,7 +757,7 @@
                 # Test that passing neither sign-all nor a fmri results in an
                 # error.
                 self.pkgsign(self.durl1, "", exit=2)
-                
+
                 # Test bad sig.alg setting.
                 self.pkgsign(self.durl1, "-a foo -k %(key)s -c %(cert)s "
                     "%(name)s" % {
@@ -991,13 +991,15 @@
 
                 # Make sure the manifest is locally stored.
                 api_obj = self.get_img_api_obj()
-                api_obj.plan_install(["example_pkg"], noexecute=True)
+                for pd in api_obj.gen_plan_install(["example_pkg"],
+                    noexecute=True):
+                        continue
                 # Change the signature action.
                 pfmri = fmri.PkgFmri(plist[0])
                 s = self.get_img_manifest(pfmri)
                 s = s.replace("rsa-sha256", "rsa-foobar")
                 self.write_img_manifest(pfmri, s)
-                
+
                 self.pkg("set-property signature-policy require-signatures")
                 api_obj = self.get_img_api_obj()
                 self.assertRaises(apx.RequiredSignaturePolicyException,
@@ -1380,7 +1382,7 @@
 
                 # Replace the client CS cert.
                 hsh = self.calc_file_hash(cs_path)
-                pth = os.path.join(self.img_path, "var", "pkg", "publisher",
+                pth = os.path.join(self.img_path(), "var", "pkg", "publisher",
                     "test", "certs", hsh)
                 portable.copyfile(cs2_path, pth)
                 api_obj = self.get_img_api_obj()
@@ -1402,7 +1404,7 @@
 
                 # Replace the client chain cert.
                 hsh = self.calc_file_hash(chain_cert_path)
-                pth = os.path.join(self.img_path, "var", "pkg", "publisher",
+                pth = os.path.join(self.img_path(), "var", "pkg", "publisher",
                     "test", "certs", hsh)
                 portable.copyfile(cs2_path, pth)
                 api_obj = self.get_img_api_obj()
@@ -1495,7 +1497,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta4")
 
@@ -1516,7 +1518,7 @@
                 portable.copyfile(os.path.join(self.crl_dir,
                     "ta5_crl.pem"),
                     os.path.join(rstore.file_root, "ta", "ta5_crl.pem"))
-                
+
                 plist = self.pkgsend_bulk(self.rurl1, self.example_pkg10)
 
                 sign_args = "-k %(key)s -c %(cert)s -i %(i1)s %(name)s" % {
@@ -1531,7 +1533,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta5")
 
@@ -1550,7 +1552,7 @@
                 portable.copyfile(os.path.join(self.test_root,
                     "tmp/example_file"),
                     os.path.join(rstore.file_root, "ex", "example_file"))
-                
+
                 plist = self.pkgsend_bulk(self.rurl1, self.example_pkg10)
 
                 sign_args = "-k %(key)s -c %(cert)s -i %(i1)s %(name)s" % {
@@ -1565,7 +1567,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta4")
 
@@ -1591,7 +1593,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta4")
 
@@ -1611,7 +1613,7 @@
                     os.path.join(rstore.file_root, "ch", "ch5_ta1_crl.pem"))
 
                 self.dcs[1].start()
-                
+
                 plist = self.pkgsend_bulk(self.durl1, self.example_pkg10)
                 sign_args = "-k %(key)s -c %(cert)s -i %(i1)s -i %(i2)s " \
                     "-i %(i3)s -i %(i4)s -i %(i5)s %(pkg)s" % {
@@ -1629,7 +1631,7 @@
                           "ch5_ta1_cert.pem"),
                       "pkg": plist[0]
                     }
-                
+
                 self.pkgsign(self.durl1, sign_args)
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta1")
@@ -1651,7 +1653,7 @@
                     os.path.join(rstore.file_root, "ch", "ch5_ta1_crl.pem"))
 
                 self.dcs[1].start()
-                
+
                 plist = self.pkgsend_bulk(self.durl1, self.example_pkg10)
                 sign_args = "-k %(key)s -c %(cert)s -i %(i1)s -i %(i2)s " \
                     "-i %(i3)s -i %(i4)s -i %(i5)s %(pkg)s" % {
@@ -1669,7 +1671,7 @@
                           "ch5_ta1_cert.pem"),
                       "pkg": plist[0]
                     }
-                
+
                 self.pkgsign(self.durl1, sign_args)
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta1")
@@ -1698,7 +1700,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta4")
 
@@ -1739,9 +1741,9 @@
                 self.pkg("set-property signature-policy require-signatures")
                 api_obj = self.get_img_api_obj()
                 self._api_install(api_obj, ["var_pkg"])
-                self.assert_(os.path.exists(os.path.join(self.img_path, "baz")))
+                self.assert_(os.path.exists(os.path.join(self.img_path(), "baz")))
                 self.assert_(not os.path.exists(
-                    os.path.join(self.img_path, "bin")))
+                    os.path.join(self.img_path(), "bin")))
 
                 self.pkg("verify")
 
@@ -1751,9 +1753,9 @@
 
                 self.dcs[1].set_disable_ops(["append"])
                 self.dcs[1].start()
-                
+
                 plist = self.pkgsend_bulk(self.durl1, self.example_pkg10)
-                
+
                 sign_args = "-k %(key)s -c %(cert)s %(pkg)s" % {
                         "key": os.path.join(self.keys_dir,
                             "cs1_ch1_ta3_key.pem"),
@@ -1771,7 +1773,7 @@
 
                 self.dcs[1].set_disable_ops(["add"])
                 self.dcs[1].start()
-                
+
                 sign_args = "-k %(key)s -c %(cert)s %(pkg)s" % {
                         "key": os.path.join(self.keys_dir,
                             "cs1_ch1_ta3_key.pem"),
@@ -1789,7 +1791,7 @@
                 self.dcs[1].start()
 
                 plist = self.pkgsend_bulk(self.durl1, self.example_pkg10)
-                
+
                 sign_args = "-k %(key)s -c %(cert)s -i %(i1)s %(pkg)s" % {
                     "key": os.path.join(self.keys_dir, "cs1_ch1_ta3_key.pem"),
                     "cert": os.path.join(self.cs_dir, "cs1_ch1_ta3_cert.pem"),
@@ -1914,7 +1916,7 @@
                 self.pkgsign(self.rurl1, sign_args)
 
                 self.dcs[1].start()
-                
+
                 self.pkg_image_create(self.durl1)
                 self.seed_ta_dir("ta3")
 
@@ -1968,7 +1970,7 @@
                         "i1": os.path.join(self.chain_certs_dir,
                             "ch1_ta3_cert.pem")
                 }
-                
+
                 self.pkgsign(self.rurl1, sign_args)
                 mp = r.manifest(plist[0])
                 with open(mp, "r") as fh:
@@ -2021,7 +2023,7 @@
                             "ch1_ta3_cert.pem")
                 }
                 self.pkgsign(self.rurl1, sign_args)
-                
+
                 self.pkg_image_create(self.rurl1,
                     additional_args="--set-property signature-policy=require-signatures")
                 self.seed_ta_dir("ta3")
@@ -2044,15 +2046,13 @@
                             "ch1_ta3_cert.pem")
                 }
                 self.pkgsign(self.rurl1, sign_args)
-                
+
                 self.pkg_image_create(self.rurl1)
                 self.seed_ta_dir("ta3")
 
-                orig_img_path = self.img_path
-                
-                # This changes self.img_path to point to the newly created
-                # sub image.
-                self.create_sub_image(self.rurl1)
+                # This changes the default image we're operating on.
+                self.set_image(1)
+                self.image_create(self.rurl1, destroy=False)
                 self.pkg("set-property signature-policy require-signatures")
                 api_obj = self.get_img_api_obj()
                 # This raises an exception because the command is run from
@@ -2062,20 +2062,20 @@
                 # This should work because the command is run from within the
                 # original image which contains the trust anchors needed to
                 # validate the chain.
-                api_obj = self.get_img_api_obj(
-                    cmd_path=os.path.join(orig_img_path, "pkg"))
+                cmd_path = os.path.join(self.img_path(0), "pkg")
+                api_obj = self.get_img_api_obj(cmd_path=cmd_path)
                 self._api_install(api_obj, ["example_pkg"])
                 # Check that the package is installed into the correct image.
                 self.pkg("list example_pkg")
-                self.pkg("-R %s list example_pkg" % orig_img_path, exit=1)
+                self.pkg("-R %s list example_pkg" % self.img_path(0), exit=1)
                 api_obj = self.get_img_api_obj()
                 self._api_uninstall(api_obj, ["example_pkg"])
                 # Repeat the test using the pkg command interface instead of the
                 # api.
-                self.pkg("-R %s install example_pkg" % self.img_path,
-                    alt_img_path=orig_img_path)
+                self.pkg("-D simulate_cmdpath=%s -R %s install example_pkg" % \
+                    (cmd_path, self.img_path()))
                 self.pkg("list example_pkg")
-                self.pkg("-R %s list example_pkg" % orig_img_path, exit=1)
+                self.pkg("-R %s list example_pkg" % self.img_path(0), exit=1)
 
         def test_big_pathlen(self):
                 """Test that a chain cert which has a larger pathlen value than
@@ -2361,19 +2361,19 @@
         def pkg_image_create(self, *args, **kwargs):
                 pkg5unittest.ManyDepotTestCase.pkg_image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
                 for f in self.image_files:
-                        with open(os.path.join(self.img_path, f), "wb") as fh:
+                        with open(os.path.join(self.img_path(), f), "wb") as fh:
                                 fh.close()
 
         def image_create(self, *args, **kwargs):
                 pkg5unittest.ManyDepotTestCase.image_create(self,
                     *args, **kwargs)
-                self.ta_dir = os.path.join(self.img_path, "etc/certs/CA")
+                self.ta_dir = os.path.join(self.img_path(), "etc/certs/CA")
                 os.makedirs(self.ta_dir)
                 for f in self.image_files:
-                        with open(os.path.join(self.img_path, f), "wb") as fh:
+                        with open(os.path.join(self.img_path(), f), "wb") as fh:
                                 fh.close()
 
         def setUp(self):
--- a/src/tests/cli/t_sysrepo.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/cli/t_sysrepo.py	Sat May 07 00:25:10 2011 -0700
@@ -352,7 +352,8 @@
                         url = "http://localhost:%(port)s/%(pub)s/%(hash)s/" % \
                             {"port": self.sysrepo_port, "hash": hash,
                             "pub": pub}
-                        self.img_path = os.path.join(self.test_root, "sysrepo_image")
+                        self.set_img_path(os.path.join(self.test_root,
+                            "sysrepo_image"))
                         self.pkg_image_create(prefix=pub, repourl=url)
                         self.pkg("-R %s install sample" % self.get_img_path())
 
--- a/src/tests/pkg5unittest.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/pkg5unittest.py	Sat May 07 00:25:10 2011 -0700
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
 # CDDL HEADER START
 #
 # The contents of this file are subject to the terms of the
@@ -22,6 +20,17 @@
 
 # Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 
+#
+# Define the basic classes that all test cases inherit from.
+# The currently defined test case classes are:
+#
+# CliTestCase
+# ManyDepotTestCase
+# Pkg5TestCase
+# SingleDepotTestCase
+# SingleDepotTestCaseCorruptImage
+#
+
 import baseline
 import ConfigParser
 import copy
@@ -49,6 +58,12 @@
 import ssl
 import textwrap
 
+import pkg.client.api_errors as apx
+import pkg.client.publisher as publisher
+import pkg.server.repository as sr
+
+from pkg.client.debugvalues import DebugValues
+
 EmptyI = tuple()
 EmptyDict = dict()
 
@@ -62,6 +77,9 @@
 g_proto_area = "TOXIC"
 # User's value for TEMPDIR
 g_tempdir = "/tmp"
+g_fakeroot = "TOXIC"
+g_fakeroot_repo = "TOXIC"
+g_pkg_cmdpath = "TOXIC"
 
 g_debug_output = False
 if "DEBUG" in os.environ:
@@ -108,7 +126,7 @@
 
 # Version test suite is known to work with.
 PKG_CLIENT_NAME = "pkg"
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 
 ELIDABLE_ERRORS = [ TestSkippedException, depotcontroller.DepotStateException ]
 
@@ -200,10 +218,16 @@
 
 class Pkg5TestCase(unittest.TestCase):
 
-        # Needed for compatability
+        # Needed for compatibility
         failureException = AssertionError
 
-        bogus_url = "test.invalid"
+        #
+        # Some dns servers return results for unknown dns names to redirect
+        # callers to a common landing page.  To avoid getting tripped up by
+        # these stupid servers, make sure that bogus_url actually contains a
+        # syntactically invalid dns name so we'll never succeed at the lookup.
+        #
+        bogus_url = "test.0.invalid"
         __debug_buf = ""
 
         smf_cmds = { \
@@ -256,6 +280,9 @@
 
         ro_data_root = property(fget=__get_ro_data_root)
 
+        def persistent_setup_copy(self, orig):
+                pass
+
         def cmdline_run(self, cmdline, comment="", coverage=True, exit=0,
             handle=False, out=False, prefix="", raise_error=True, su_wrap=None,
             stderr=False, env_arg=None):
@@ -429,9 +456,20 @@
                 # We have some sloppy subclasses which don't call the superclass
                 # setUp-- in which case the dir might not exist.  Tolerate it.
                 #
+                # Also, avoid deleting our fakeroot since then we'd have to
+                # keep re-creating it.
+                #
                 if self.__test_root is not None and \
                     os.path.exists(self.__test_root):
-                        shutil.rmtree(self.__test_root)
+                        for d in os.listdir(self.__test_root):
+                                path = os.path.join(self.__test_root, d)
+                                if path in [g_fakeroot, g_fakeroot_repo]:
+                                        continue
+                                self.debug("removing: %s" % path)
+                                if os.path.isdir(path):
+                                        shutil.rmtree(path)
+                                else:
+                                        os.remove(path)
 
         def tearDown(self):
                 # In reality this call does nothing.
@@ -590,6 +628,16 @@
                             "Tried: %s.  Try setting $CC to a valid"
                             "compiler." % compilers)
 
+        def make_file(self, path, content, mode=0644):
+                if not os.path.exists(os.path.dirname(path)):
+                        os.makedirs(os.path.dirname(path), 0777)
+                self.debugfilecreate(content, path)
+                fh = open(path, 'wb')
+                if isinstance(content, unicode):
+                        content = content.encode("utf-8")
+                fh.write(content)
+                fh.close()
+                os.chmod(path, mode)
 
         def make_misc_files(self, files, prefix=None, mode=0644):
                 """ Make miscellaneous text files.  Files can be a
@@ -623,15 +671,7 @@
                         assert not f.startswith("/"), \
                             ("%s: misc file paths must be relative!" % f)
                         path = os.path.join(prefix, f)
-                        if not os.path.exists(os.path.dirname(path)):
-                                os.makedirs(os.path.dirname(path))
-                        self.debugfilecreate(content, path)
-                        file_handle = open(path, 'wb')
-                        if isinstance(content, unicode):
-                                content = content.encode("utf-8")
-                        file_handle.write(content)
-                        file_handle.close()
-                        os.chmod(path, mode)
+                        self.make_file(path, content, mode)
                         outpaths.append(path)
                 return outpaths
 
@@ -1213,6 +1253,7 @@
         def __init__(self, tests=()):
                 unittest.TestSuite.__init__(self, tests)
                 self.timing = {}
+                self.__pid = os.getpid()
 
                 # The site module deletes the function to change the
                 # default encoding so a forced reload of sys has to
@@ -1229,6 +1270,45 @@
                 print >> sys.stderr, "Stopping tests..."
                 raise TestStopException()
 
+        def env_sanitize(self):
+                # save some DebugValues settings
+                smf_cmds_dir = DebugValues["smf_cmds_dir"]
+
+                # clear any existing DebugValues settings
+                DebugValues.clear()
+
+                # clear misc environment variables
+                for e in ["PKG_CMDPATH"]:
+                        if e in os.environ:
+                                del os.environ[e]
+
+                # Set image path to a path that's not actually an
+                # image to force failure of tests that don't
+                # explicitly provide an image root either through the
+                # default behaviour of the pkg() helper routine or
+                # another method.
+                os.environ["PKG_IMAGE"] = g_tempdir
+
+                # Test suite should never attempt to access the
+                # live root image.
+                os.environ["PKG_NO_LIVE_ROOT"] = "1"
+
+                # Pkg interfaces should never know they are being
+                # run from within the test suite.
+                os.environ["PKG_NO_RUNPY_CMDPATH"] = "1"
+
+                # always print out recursive linked image commands
+                os.environ["PKG_DISP_LINKED_CMDS"] = "1"
+
+                # Pretend that we're being run from the fakeroot image.
+                DebugValues["simulate_cmdpath"] = g_pkg_cmdpath
+
+                # Update the path to smf commands
+                DebugValues["smf_cmds_dir"] = smf_cmds_dir
+
+                # always get detailed data from the solver
+                DebugValues["plan"] = True
+
         def run(self, result):
                 self.timing = {}
                 inst = None
@@ -1263,6 +1343,11 @@
                         raise TestSkippedException(
                             "Persistent setUp Failed, skipping test.")
 
+                # test case setUp() may require running pkg commands
+                # so setup a fakeroot to run them from.
+                fakeroot_init()
+                self.env_sanitize()
+
                 if persistent_setup:
                         setUpFailed = False
 
@@ -1300,6 +1385,14 @@
                         suite_name = test._Pkg5TestCase__suite_name
                         cname = test.__class__.__name__
 
+                        #
+                        # Update test environment settings. We redo this
+                        # before running each test case since previously
+                        # executed test cases may have messed with these
+                        # environment settings.
+                        #
+                        self.env_sanitize()
+
                         # Populate test with the data from the instance
                         # already constructed, but update the method name.
                         # We need to do this so that we have all the state
@@ -1311,12 +1404,14 @@
                                 test = copy.copy(inst)
                                 test._testMethodName = name
                                 test._testMethodDoc = doc
+                                test.persistent_setup_copy(inst)
 
                         test_start = time.time()
                         test(result)
                         test_end = time.time()
                         self.timing[suite_name, cname, real_test_name] = \
                             test_end - test_start
+
                 if persistent_setup:
                         try:
                                 inst.reallytearDown()
@@ -1406,63 +1501,77 @@
 class CliTestCase(Pkg5TestCase):
         bail_on_fail = False
 
-        def setUp(self):
+        def setUp(self, image_count=1):
                 Pkg5TestCase.setUp(self)
 
-                self.image_dir = None
-                self.img_path = os.path.join(self.test_root, "image")
-                self.image_created = False
-
-                # Set image path to a path that's not actually an image to
-                # force failure of tests that don't explicitly provide an
-                # image root either through the default behaviour of the
-                # pkg() helper routine or another method.
-                os.environ["PKG_IMAGE"] = self.test_root
+                self.__imgs_path = {}
+                self.__imgs_index = -1
+
+                for i in range(0, image_count):
+                        path = os.path.join(self.test_root, "image%d" % i)
+                        self.__imgs_path[i] = path
+
+                self.set_image(0)
 
         def tearDown(self):
                 Pkg5TestCase.tearDown(self)
 
+        def persistent_setup_copy(self, orig):
+                Pkg5TestCase.persistent_setup_copy(self, orig)
+                self.__imgs_path = copy.copy(orig.__imgs_path)
+
+        def set_image(self, ii):
+                # ii is the image index
+                if self.__imgs_index == ii:
+                        return
+
+                self.__imgs_index = ii
+                path = self.__imgs_path[self.__imgs_index]
+                assert path and path != "/"
+
+                self.debug("image %d selected: %s" % (ii, path))
+
+        def set_img_path(self, path):
+                self.__imgs_path[self.__imgs_index] = path
+
+        def img_index(self):
+                return self.__imgs_index
+
+        def img_path(self, ii=None):
+                if ii != None:
+                        return self.__imgs_path[ii]
+                return self.__imgs_path[self.__imgs_index]
+
         def get_img_path(self):
-                return self.img_path
-
-        def get_img_api_obj(self, cmd_path=None, img_path=None):
-                if not img_path:
-                        img_path = self.img_path
-                from pkg.client import global_settings
+                # for backward compatibility
+                return self.img_path()
+
+        def get_img_api_obj(self, cmd_path=None, ii=None):
                 progresstracker = pkg.client.progress.NullProgressTracker()
                 if not cmd_path:
-                        cmd_path = os.path.join(img_path, "pkg")
-                old_val = global_settings.client_args
-                global_settings.client_args[0] = cmd_path
-                res = pkg.client.api.ImageInterface(img_path,
+                        cmd_path = os.path.join(self.img_path(), "pkg")
+                res = pkg.client.api.ImageInterface(self.img_path(ii=ii),
                     CLIENT_API_VERSION, progresstracker, lambda x: False,
-                    PKG_CLIENT_NAME)
-                global_settings.client_args = old_val
+                    PKG_CLIENT_NAME, cmdpath=cmd_path)
                 return res
 
-        def image_create(self, repourl=None, prefix=None, variants=EmptyDict,
-            destroy=True, ssl_cert=None, ssl_key=None, props=EmptyDict):
+        def image_create(self, repourl=None, destroy=True, **kwargs):
                 """A convenience wrapper for callers that only need basic image
                 creation functionality.  This wrapper creates a full (as opposed
                 to user) image using the pkg.client.api and returns the related
                 API object."""
 
-                assert self.img_path
-                assert self.img_path != "/"
-
                 if destroy:
                         self.image_destroy()
-                os.mkdir(self.img_path)
-
+                os.mkdir(self.img_path())
+
+                self.debug("image_create %s" % self.img_path())
                 progtrack = pkg.client.progress.NullProgressTracker()
                 api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME,
-                    CLIENT_API_VERSION, self.img_path,
+                    CLIENT_API_VERSION, self.img_path(),
                     pkg.client.api.IMG_TYPE_ENTIRE, False, repo_uri=repourl,
-                    prefix=prefix, progtrack=progtrack, variants=variants,
-                    ssl_cert=ssl_cert, ssl_key=ssl_key, props=props)
-                shutil.copy("%s/usr/bin/pkg" % g_proto_area,
-                    os.path.join(self.img_path, "pkg"))
-                self.image_created = True
+                    progtrack=progtrack,
+                    **kwargs)
                 return api_inst
 
         def pkg_image_create(self, repourl=None, prefix=None,
@@ -1471,19 +1580,17 @@
                 image; returns exit code of client or raises an exception if
                 exit code doesn't match 'exit' or equals 99.."""
 
-                assert self.img_path
-                assert self.img_path != "/"
-
                 if repourl and prefix is None:
                         prefix = "test"
 
                 self.image_destroy()
-                os.mkdir(self.img_path)
-                cmdline = "pkg image-create -F "
+                os.mkdir(self.img_path())
+                self.debug("pkg_image_create %s" % self.img_path())
+                cmdline = "%s image-create -F " % g_pkg_cmdpath
                 if repourl:
                         cmdline = "%s -p %s=%s " % (cmdline, prefix, repourl)
                 cmdline += additional_args
-                cmdline = "%s %s" % (cmdline, self.img_path)
+                cmdline = "%s %s" % (cmdline, self.img_path())
                 self.debugcmd(cmdline)
 
                 p = subprocess.Popen(cmdline, shell=True,
@@ -1498,38 +1605,27 @@
                 if retcode != exit:
                         raise UnexpectedExitCodeException(cmdline, 0,
                             retcode, output)
-                shutil.copy("%s/usr/bin/pkg" % g_proto_area,
-                    os.path.join(self.img_path, "pkg"))
-                self.image_created = True
                 return retcode
 
-        def image_set(self, imgdir):
-                self.debug("image_set: %s" % imgdir)
-                self.img_path = imgdir
-                os.environ["PKG_IMAGE"] = self.img_path
-
         def image_destroy(self):
-                self.debug("image_destroy %s" % self.img_path)
-                # Make sure we're not in the image.
-                if os.path.exists(self.img_path):
+                if os.path.exists(self.img_path()):
+                        self.debug("image_destroy %s" % self.img_path())
+                        # Make sure we're not in the image.
                         os.chdir(self.test_root)
-                        shutil.rmtree(self.img_path)
+                        shutil.rmtree(self.img_path())
 
         def pkg(self, command, exit=0, comment="", prefix="", su_wrap=None,
-            out=False, stderr=False, alt_img_path=None, use_img_root=True,
+            out=False, stderr=False, cmd_path=None, use_img_root=True,
             debug_smf=True):
-                pth = self.img_path
-                if alt_img_path:
-                        pth = alt_img_path
-                elif not self.image_created:
-                        pth = "%s/usr/bin" % g_proto_area
                 if debug_smf and "smf_cmds_dir" not in command:
                         command = "--debug smf_cmds_dir=%s %s" % \
                             (DebugValues["smf_cmds_dir"], command)
                 if use_img_root and "-R" not in command and \
                     "image-create" not in command and "version" not in command:
                         command = "-R %s %s" % (self.get_img_path(), command)
-                cmdline = "%s/pkg %s" % (pth, command)
+                if not cmd_path:
+                        cmd_path = g_pkg_cmdpath
+                cmdline = "%s %s" % (cmd_path, command)
                 return self.cmdline_run(cmdline, exit=exit, comment=comment,
                     prefix=prefix, su_wrap=su_wrap, out=out, stderr=stderr)
 
@@ -1834,10 +1930,10 @@
                                                 continue
                                         copy_manifests(src_root, dest_root)
 
-        def get_img_manifest_cache_dir(self, pfmri, img_path=None):
+        def get_img_manifest_cache_dir(self, pfmri, ii=None):
                 """Returns the path to the manifest for the given fmri."""
 
-                img = self.get_img_api_obj(img_path=img_path).img
+                img = self.get_img_api_obj(ii=ii).img
 
                 if not pfmri.publisher:
                         # Allow callers to not specify a fully-qualified FMRI
@@ -1851,10 +1947,10 @@
                         pfmri.publisher = pubs[0]
                 return img.get_manifest_dir(pfmri)
 
-        def get_img_manifest_path(self, pfmri, img_path=None):
+        def get_img_manifest_path(self, pfmri):
                 """Returns the path to the manifest for the given fmri."""
 
-                img = self.get_img_api_obj(img_path=img_path).img
+                img = self.get_img_api_obj().img
 
                 if not pfmri.publisher:
                         # Allow callers to not specify a fully-qualified FMRI
@@ -1868,25 +1964,22 @@
                         pfmri.publisher = pubs[0]
                 return img.get_manifest_path(pfmri)
 
-        def get_img_manifest(self, pfmri, img_path=None):
+        def get_img_manifest(self, pfmri):
                 """Retrieves the client's cached copy of the manifest for the
                 given package FMRI and returns it as a string.  Callers are
                 responsible for all error handling."""
 
-                mpath = self.get_img_manifest_path(pfmri, img_path=img_path)
+                mpath = self.get_img_manifest_path(pfmri)
                 with open(mpath, "rb") as f:
                         return f.read()
 
-        def write_img_manifest(self, pfmri, mdata, img_path=None):
+        def write_img_manifest(self, pfmri, mdata):
                 """Overwrites the client's cached copy of the manifest for the
                 given package FMRI using the provided string.  Callers are
                 responsible for all error handling."""
 
-                if not img_path:
-                        img_path = self.get_img_path()
-
-                mpath = self.get_img_manifest_path(pfmri, img_path=img_path)
-                mdir = self.get_img_manifest_cache_dir(pfmri, img_path=img_path)
+                mpath = self.get_img_manifest_path(pfmri)
+                mdir = self.get_img_manifest_cache_dir(pfmri)
 
                 # Dump the manifest directory for the package to ensure any
                 # cached information related to it is gone.
@@ -1898,15 +1991,14 @@
                 with open(mpath, "wb") as f:
                         f.write(mdata)
 
-        def validate_fsobj_attrs(self, act, target=None, img_path=None):
+        def validate_fsobj_attrs(self, act, target=None):
                 """Used to verify that the target item's mode, attrs, timestamp,
                 etc. match as expected.  The actual"""
 
                 if act.name not in ("file", "dir"):
                         return
 
-                if not img_path:
-                        img_path = self.get_img_path()
+                img_path = self.img_path()
                 if not target:
                         target = act.attrs["path"]
 
@@ -2045,24 +2137,55 @@
                 cmd = "%s %s" % (prog, " ".join(args))
                 return self.cmdline_run(cmd, out=out, stderr=stderr, exit=exit)
 
-        def _api_install(self, api_obj, pkg_list, **kwargs):
+        def _api_attach(self, api_obj, catch_wsie=True, **kwargs):
+                self.debug("attach: %s" % str(kwargs))
+                for pd in api_obj.gen_plan_attach(**kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_detach(self, api_obj, catch_wsie=True, **kwargs):
+                self.debug("detach: %s" % str(kwargs))
+                for pd in api_obj.gen_plan_detach(**kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_sync(self, api_obj, catch_wsie=True, **kwargs):
+                self.debug("sync: %s" % str(kwargs))
+                for pd in api_obj.gen_plan_sync(**kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_install(self, api_obj, pkg_list, catch_wsie=True, **kwargs):
                 self.debug("install %s" % " ".join(pkg_list))
-                api_obj.plan_install(pkg_list, **kwargs)
-                self._api_finish(api_obj)
-
-        def _api_uninstall(self, api_obj, pkg_list, **kwargs):
+                for pd in api_obj.gen_plan_install(pkg_list, **kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_uninstall(self, api_obj, pkg_list, catch_wsie=True, **kwargs):
                 self.debug("uninstall %s" % " ".join(pkg_list))
-                api_obj.plan_uninstall(pkg_list, False, **kwargs)
-                self._api_finish(api_obj)
-
-        def _api_image_update(self, api_obj, **kwargs):
+                for pd in api_obj.gen_plan_uninstall(pkg_list, False, **kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_image_update(self, api_obj, catch_wsie=True, **kwargs):
                 self.debug("planning update")
-                api_obj.plan_update_all(**kwargs)
-                self._api_finish(api_obj)
-
-        def _api_finish(self, api_obj):
+                for pd in api_obj.gen_plan_update(**kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_change_varcets(self, api_obj, catch_wsie=True, **kwargs):
+                self.debug("change varcets: %s" % str(kwargs))
+                for pd in api_obj.gen_plan_change_varcets(**kwargs):
+                        continue
+                self._api_finish(api_obj, catch_wsie=catch_wsie)
+
+        def _api_finish(self, api_obj, catch_wsie=True):
                 api_obj.prepare()
-                api_obj.execute_plan()
+                try:
+                        api_obj.execute_plan()
+                except apx.WrapSuccessfulIndexingException:
+                        if not catch_wsie:
+                                raise
                 api_obj.reset()
 
         def file_inode(self, path):
@@ -2153,8 +2276,9 @@
                 super(ManyDepotTestCase, self).__init__(methodName)
                 self.dcs = {}
 
-        def setUp(self, publishers, debug_features=EmptyI, start_depots=False):
-                CliTestCase.setUp(self)
+        def setUp(self, publishers, debug_features=EmptyI, start_depots=False,
+            image_count=1):
+                CliTestCase.setUp(self, image_count=image_count)
 
                 self.debug("setup: %s" % self.id())
                 self.debug("creating %d repo(s)" % len(publishers))
@@ -2265,10 +2389,10 @@
 class SingleDepotTestCase(ManyDepotTestCase):
 
         def setUp(self, debug_features=EmptyI, publisher="test",
-            start_depot=False):
+            start_depot=False, image_count=1):
                 ManyDepotTestCase.setUp(self, [publisher],
-                    debug_features=debug_features, start_depots=start_depot)
-                self.backup_img_path = None
+                    debug_features=debug_features, start_depots=start_depot,
+                    image_count=image_count)
 
         def __get_dc(self):
                 if self.dcs:
@@ -2288,13 +2412,6 @@
         # for convenience of writing test cases.
         dc = property(fget=__get_dc)
 
-        def create_sub_image(self, repourl, prefix="test", variants=EmptyDict):
-                if not self.backup_img_path:
-                        self.backup_img_path = self.img_path
-                self.image_set(os.path.join(self.img_path, "sub"))
-                self.image_create(repourl, prefix, variants, destroy=False)
-
-
 class SingleDepotTestCaseCorruptImage(SingleDepotTestCase):
         """ A class which allows manipulation of the image directory that
         SingleDepotTestCase creates. Specifically, it supports removing one
@@ -2312,15 +2429,15 @@
                 SingleDepotTestCase.setUp(self, debug_features=debug_features,
                     publisher=publisher, start_depot=start_depot)
 
+                self.__imgs_path_backup = {}
+
         def tearDown(self):
-                self.__uncorrupt_img_path()
                 SingleDepotTestCase.tearDown(self)
 
-        def __uncorrupt_img_path(self):
-                """ Function which restores the img_path back to the original
-                level. """
-                if self.backup_img_path:
-                        self.img_path = self.backup_img_path
+        def backup_img_path(self, ii=None):
+                if ii != None:
+                        return self.__imgs_path_backup[ii]
+                return self.__imgs_path_backup[self.img_index()]
 
         def corrupt_image_create(self, repourl, config, subdirs, prefix="test",
             destroy = True):
@@ -2337,10 +2454,12 @@
                 was made optional to allow testing of two images installed next
                 to each other (a user and full image created in the same
                 directory for example). """
-                if not self.backup_img_path:
-                        self.backup_img_path = self.img_path
-                self.img_path = os.path.join(self.img_path, "bad")
-                assert self.img_path and self.img_path != "/"
+
+                ii = self.img_index()
+                if ii not in self.__imgs_path_backup:
+                        self.__imgs_path_backup[ii] = self.img_path()
+
+                self.set_img_path(os.path.join(self.img_path(), "bad"))
 
                 if destroy:
                         self.image_destroy()
@@ -2348,10 +2467,10 @@
                 for s in subdirs:
                         if s == "var/pkg":
                                 cmdline = "pkg image-create -F -p %s=%s %s" % \
-                                    (prefix, repourl, self.img_path)
+                                    (prefix, repourl, self.img_path())
                         elif s == ".org.opensolaris,pkg":
                                 cmdline = "pkg image-create -U -p %s=%s %s" % \
-                                    (prefix, repourl, self.img_path)
+                                    (prefix, repourl, self.img_path())
                         else:
                                 raise RuntimeError("Got unknown subdir option:"
                                     "%s\n" % s)
@@ -2372,7 +2491,7 @@
                                 raise UnexpectedExitCodeException(cmdline, 0,
                                     retcode, output)
 
-                        tmpDir = os.path.join(self.img_path, s)
+                        tmpDir = os.path.join(self.img_path(), s)
 
                         # This is where the actual corruption of the
                         # image takes place. A normal image was created
@@ -2394,16 +2513,88 @@
                         if "index_absent" in config:
                                 shutil.rmtree(os.path.join(tmpDir, "cache",
                                     "index"))
-                shutil.copy("%s/usr/bin/pkg" % g_proto_area,
-                    os.path.join(self.img_path, "pkg"))
 
                 # Make find root start at final. (See the doc string for
                 # more explanation.)
-                cmd_path = os.path.join(self.img_path, "final")
+                cmd_path = os.path.join(self.img_path(), "final")
 
                 os.mkdir(cmd_path)
                 return cmd_path
 
+def debug(s):
+        s = str(s)
+        for x in s.splitlines():
+                if g_debug_output:
+                        print >> sys.stderr, "# %s" % x
+
+def mkdir_eexist_ok(p):
+        try:
+                os.mkdir(p)
+        except OSError, e:
+                if e.errno != errno.EEXIST:
+                        raise e
+
+def fakeroot_init():
+        global g_fakeroot
+        global g_fakeroot_repo
+        global g_pkg_cmdpath
+
+        try:
+                os.stat(g_pkg_cmdpath)
+        except OSError, e:
+                pass
+        else:
+                # fakeroot already exists
+                return
+
+        #
+        # When accessing images via the pkg apis those apis will try
+        # to access the image containing the command from which the
+        # apis were invoked.  Normally when running the test suite the
+        # command is run.py in a developer's workspace, and that
+        # workspace lives in the root image.  But accessing the root
+        # image during a test suite run is verboten.  Hence, here we
+        # create a temporary image from which we can run the pkg
+        # command.
+        #
+        test_root = os.path.join(g_tempdir, "ips.test.%d" % os.getpid())
+        fakeroot_repo = os.path.join(test_root, "fakeroot_repo")
+        fakeroot = os.path.join(test_root, "fakeroot")
+
+        # create directories
+        mkdir_eexist_ok(test_root)
+        mkdir_eexist_ok(fakeroot_repo)
+        mkdir_eexist_ok(fakeroot)
+
+        debug("fakeroot repo create %s" % fakeroot_repo)
+        repo = sr.repository_create(fakeroot_repo)
+        repo.add_publisher(publisher.Publisher("bobcat"))
+
+        debug("fakeroot image create %s" % fakeroot)
+        cmd_path = os.path.join(fakeroot, "pkg")
+        progtrack = pkg.client.progress.NullProgressTracker()
+        api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME,
+            CLIENT_API_VERSION, fakeroot,
+            pkg.client.api.IMG_TYPE_ENTIRE, False,
+            repo_uri="file://" + fakeroot_repo,
+            prefix="bobcat", progtrack=progtrack, cmdpath=cmd_path)
+
+        #
+        # put a copy of the pkg command in our fake root directory.
+        # we do this because when recursive linked image commands are
+        # run, the pkg interfaces may fork and exec additional copies
+        # of pkg(1), and in this case we want them to run the copy of
+        # pkg from the fake root.
+        #
+        fakeroot_cmdpath = os.path.join(fakeroot, "pkg")
+        shutil.copy(os.path.join(g_proto_area, "usr", "bin", "pkg"),
+            fakeroot_cmdpath)
+
+        g_fakeroot = fakeroot
+        g_fakeroot_repo = fakeroot_repo
+        g_pkg_cmdpath = fakeroot_cmdpath
+
+
 def eval_assert_raises(ex_type, eval_ex_func, func, *args):
         try:
                 func(*args)
--- a/src/tests/pylintrc	Fri May 06 17:24:48 2011 -0700
+++ b/src/tests/pylintrc	Sat May 07 00:25:10 2011 -0700
@@ -18,8 +18,9 @@
 #
 # CDDL HEADER END
 
-# Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
-# Use is subject to license terms.
+#
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+#
 
 # This file is used to control pylint when checking python source
 #
@@ -48,11 +49,7 @@
 
 # Disable the message(s) with the given id(s).
 # C0103 Invalid name "%s" Used when the const/var/class name doesn't match regex
-# C0111 Missing Docstring
-# W0311 Bad Indentation
-# W0311 Mixed (Space/Tab) Indentation
-# W0511 "XXX" / "FIXME" Comments
-disable-msg=C0103,C0111,W0311,W0312,W0511
+disable-msg=C0103
 
 [REPORTS]
 # set the output format. Available formats are text, parseable, colorized, msvs
--- a/src/util/distro-import/importer.py	Fri May 06 17:24:48 2011 -0700
+++ b/src/util/distro-import/importer.py	Sat May 07 00:25:10 2011 -0700
@@ -56,7 +56,7 @@
 from pkg.misc import emsg
 from pkg.portable import PD_LOCAL_PATH, PD_PROTO_DIR, PD_PROTO_DIR_LIST
 
-CLIENT_API_VERSION = 58
+CLIENT_API_VERSION = 59
 PKG_CLIENT_NAME = "importer.py"
 pkg.client.global_settings.client_name = PKG_CLIENT_NAME