# 7490 ipkg brand attach needs similar logic to pkgcreatezone for 'entire' incorporation
# 8500 ipkg brand should mimic live cd, not create search index until user asks for it
# 8501 Chaff on the screen in zone install due to image-create's refresh progress display
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Localized progress/status messages.
m_installing=$(gettext "Installing...")
m_usage=$(gettext "ipkg brand attach arguments [-a archive] [-d dataset] [-n] [-r zfs-recv] [-u]\n\tThe -a archive option specifies a tar file or cpio archive.\n\tThe -d dataset option specifies an existing dataset.\n\tThe -r zfs-recv option receives the output of a 'zfs send' command\n\tof an existing zone root dataset.\n\tThe -u option indicates that the software should be updated to match\n\tthe current host.")
m_gzinc=$( gettext " Global zone version: %s")
m_zinc=$( gettext " Non-Global zone version: %s")
m_insync=$(gettext " Evaluation: Packages in %s are in sync with global zone.")
m_uprev=$(gettext " Evaluation: %s is newer than the global zone.")
m_dnrev=$(gettext " Evaluation: %s is downrev of the global zone.")
m_resetpub=$(gettext " Publisher Check: Zone preferred publisher does not contain")
m_resetpub2=$(gettext " %s.")
m_resetpub3=$(gettext " Publisher Reset: Copying preferred publisher from global zone.")
m_pubfine=$(gettext " Publisher Check: Looks good.")
m_cache=$(gettext " Cache: Using %s.")
m_updating=$(gettext " Updating non-global zone: (Stage 1). Output follows")
m_updating2=$(gettext " Updating non-global zone: (Stage 2). Output follows")
m_sync_done=$(gettext " Updating non-global zone: Zone updated to %s")
m_complete=$(gettext "Attach complete.")

# Localized failure messages.
# (f_nogzinc/f_nozinc were removed: they were never referenced and merely
# duplicated f_gz_entire/f_zone_entire below.)
f_option_no_ds=$(gettext "The -a, -d or -r option is required when there is no active root dataset.")
f_option_ds=$(gettext "The -a, -d or -r option is invalid when there is already an active root dataset.")
f_bad_archive=$(gettext "Unknown archive format.")
f_unpack=$(gettext "Unable to install the archive.")
f_bad_dataset=$(gettext "Non-existent or invalid zone root dataset.")
f_downrev=$(gettext "Zone is downrev of global zone. Specify -u to update it.")
f_uprev=$(gettext "Zone is uprev of global zone. Global zone will need to be updated before attach can proceed.")
f_update=$(gettext "Could not update attaching zone")
f_bad_publisher=$(gettext "Syntax error in publisher information.\n")
f_gz_entire=$(gettext "Could not find 'entire' incorporation for global zone.")
f_zone_entire=$(gettext "Could not find 'entire' incorporation for non-global zone.")
f_fmri_compare=$(gettext "Failed to compare 'entire' FMRIs")
f_reset_pub=$(gettext "Failed to reset publisher to %s %s")
f_no_pref_publisher=$(gettext "Unable to get preferred publisher information for zone '%s'.")
f_pkg_list=$(gettext "Unable to get zone package list.")
#set -o xtrace

# Pull in shared ipkg-brand helpers (fail_usage, fail_fatal, get_current_gzbe,
# get_zonepath_ds, get_entire_incorp, ...) and property-name constants
# ($PROP_ACTIVE, $PROP_PARENT, $f_multiple_ds, ...).
. /usr/lib/brand/ipkg/common.ksh

# Setup i18n output
TEXTDOMAIN="SUNW_OST_OSCMD"
export TEXTDOMAIN

# Run pkg(1) in the C locale so its output can be parsed reliably.
# NOTE(review): expanding $PKG as a command relies on the shell accepting the
# expanded "LC_ALL=C" word as an environment assignment -- confirm under ksh.
PKG="LC_ALL=C /usr/bin/pkg"

# Option flags: -a (archive), -d (dataset), -n (dry run), -r (zfs recv).
# NOTE(review): allow_update (-u) is left unset here on purpose; the test at
# the bottom of the script uses [ -z $allow_update ], so initializing it to 0
# would change the -u handling.
aarg=0
darg=0
noexecute=0
rarg=0
# Parse the attach options; any option not understood by this brand is a
# usage error.  -a, -d and -r are mutually exclusive ways of supplying the
# zone root contents.
while getopts "a:d:nR:r:uz:" opt; do
	case $opt in
	a)	# Populate the zone root from a tar/cpio/pax archive.
		ARCHIVE="$OPTARG"
		[[ $darg -eq 1 || $rarg -eq 1 ]] && fail_usage "$m_usage"
		aarg=1
		;;
	d)	# Use an existing dataset as the zone root.
		DATASET="$OPTARG"
		[[ $aarg -eq 1 || $rarg -eq 1 ]] && fail_usage "$m_usage"
		darg=1
		;;
	n)	# Dry run: validate only.
		noexecute=1
		;;
	R)	# Zone path, passed in by zoneadm.
		zonepath="$OPTARG"
		;;
	r)	# Populate the zone root from 'zfs send' output.
		ZFSRCV="$OPTARG"
		[[ $aarg -eq 1 || $darg -eq 1 ]] && fail_usage "$m_usage"
		rarg=1
		;;
	u)	# Allow the zone's software to be updated to match the host.
		allow_update=1
		;;
	z)	# Zone name, passed in by zoneadm.
		zonename="$OPTARG"
		;;
	*)	fail_usage "$m_usage"
		;;
	esac
done
shift $((OPTIND-1))
zoneroot=$zonepath/root

if [ $noexecute -eq 1 ]; then
	#
	# The zone doesn't have to exist when the -n option is used, so do
	# this work early.
	#

	# -n is a dry run, so it cannot be combined with the options that
	# would modify the zone (-a, -r, -u).  Compare allow_update as a
	# string: it is left unset unless -u was given, and an unquoted
	# numeric comparison ("-o $allow_update -eq 1") is malformed in that
	# case, so the original test errored instead of catching -n with -u.
	if [ $aarg -eq 1 -o $rarg -eq 1 -o "$allow_update" = "1" ]; then
		fail_usage "$m_usage"
	fi

	# XXX There is no sw validation for IPS right now, so just pretend
	# everything will be ok.
	exit 0
fi
# Determine the currently-active global zone BE; sets $CURRENT_GZBE.
get_current_gzbe

#
# XXX we can't do the following since 'zoneadm verify' depends on the zonepath
# already existing - maybe we can fix this in zoneadm in the future.
#
# First make the top-level zonepath dataset and be sure to tolerate errors
# since this dataset could already exist from a different BE.
#
#pdir=`/usr/bin/dirname $zonepath`
#zpname=`/usr/bin/basename $zonepath`
#
#get_zonepath_ds $pdir
#zpds=$ZONEPATH_DS
#
# Note, this dataset might already exist so tolerate an error.
#/usr/sbin/zfs create $zpds/$zpname
#

# NOTE(review): $zpds is never assigned -- the line that set it is commented
# out above -- so fail_zonepath_in_rootds is invoked with an empty argument
# list.  Confirm the helper tolerates this, or restore the zpds computation.
fail_zonepath_in_rootds $zpds

# Top-level zonepath dataset should now exist.
# Sets $ZONEPATH_DS to the dataset backing $zonepath.
get_zonepath_ds $zonepath
#
# The root dataset may already exist and be populated (SNAP clone, or an
# earlier attach that stopped because the software was out of sync and -u was
# not given, with the user now re-running attach with -u).  Handle that
# gracefully.
#
# Look for a pre-existing active root dataset for the current global BE.
# get_active_ds() can't be used here because it treats "no active dataset"
# as a fatal error; instead scan the ROOT container ourselves.  The nawk
# program prints the single active zone BE and exits non-zero if it finds
# more than one.
#
ACTIVE_DS=$(/usr/sbin/zfs list -H -r -t filesystem \
    -o name,$PROP_PARENT,$PROP_ACTIVE $ZONEPATH_DS/ROOT | \
    /usr/bin/nawk -v gzbe=$CURRENT_GZBE ' {
	if ($1 ~ /ROOT\/[^\/]+$/ && $2 == gzbe && $3 == "on") {
		print $1
		if (found == 1)
			exit 1
		found = 1
	}
}')
[ $? -ne 0 ] && fail_fatal "$f_multiple_ds"

# Make sure the zone root directory exists and the zonepath is private.
if [ ! -d $zoneroot ]; then
	/usr/bin/mkdir -p $zoneroot
	/usr/bin/chmod 700 $zonepath
fi
if [ -z "$ACTIVE_DS" ]; then
	#
	# There is no pre-existing active dataset. In this case we need either
	# the -a or -r option to populate a newly created dataset or the -d
	# option to use a pre-existing dataset.
	#
	if [ $aarg -eq 1 -o $rarg -eq 1 ]; then
		# Create the ROOT container dataset, then a zone BE dataset
		# under it, marked active and tied to the current global BE.
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT

		BENAME=zbe
		BENUM=0
		# Try 100 different names before giving up.
		while [ $BENUM -lt 100 ]; do
			/usr/sbin/zfs create -o $PROP_ACTIVE=on \
			    -o $PROP_PARENT=$CURRENT_GZBE \
			    -o canmount=noauto $ZONEPATH_DS/ROOT/$BENAME
			if [ $? = 0 ]; then
				break
			fi
			# Name collision (e.g. a BE left over from another
			# global BE); try zbe-1, zbe-2, ...
			BENUM=`expr $BENUM + 1`
			BENAME="zbe-$BENUM"
		done
		if [ $BENUM -ge 100 ]; then
			fail_fatal "$f_zfs_create"
		fi
		ACTIVE_DS=$ZONEPATH_DS/ROOT/$BENAME
	elif [ $darg -eq 1 ]; then
		#
		# Verify that the dataset exists
		#
		/usr/sbin/zfs list -H -o name $DATASET >/dev/null 2>&1
		if [ $? -ne 0 ]; then
			fail_fatal "$f_bad_dataset"
		fi
		#
		# Verify that the dataset looks like it contains a zone root.
		#
		# Remember the dataset's current mountpoint so it can be
		# restored if validation fails.  If it is a legacy mount,
		# find and unmount its current mount point; otherwise force
		# it to legacy so we can mount it at the zone root below.
		oldmnt=`/usr/sbin/zfs list -H -o mountpoint $DATASET`
		if [ "$oldmnt" = "legacy" ]; then
			mntpnt=`/usr/sbin/mount -p | \
			    /usr/bin/nawk -v fs=$DATASET \
			    '{if ($1 == fs) print $3}'`
			if [ -n "$mntpnt" ]; then
				/usr/sbin/umount $mntpnt
			fi
		else
			/usr/sbin/zfs set mountpoint=legacy $DATASET || \
			    fail_fatal "$f_zfs_create"
		fi

		/usr/sbin/mount -F zfs $DATASET $zoneroot
		if [ $? -ne 0 ]; then
			# Mount failed: restore the previous mountpoint and
			# remount at the old location if it had been mounted.
			/usr/sbin/zfs set mountpoint=$oldmnt $DATASET
			if [ "$oldmnt" = "legacy" -a -n "$mntpnt" ]; then
				/usr/sbin/mount -F zfs $DATASET $mntpnt
			fi
			fail_fatal "$f_zfs_mount"
		fi

		# Sanity check: a zone root should have at least /etc and /usr.
		if [ ! -d $zoneroot/etc -o ! -d $zoneroot/usr ]; then
			# Undo the mount and restore the dataset's previous
			# state before failing.
			/usr/sbin/umount $zoneroot
			/usr/sbin/zfs set mountpoint=$oldmnt $DATASET
			if [ "$oldmnt" = "legacy" -a -n "$mntpnt" ]; then
				/usr/sbin/mount -F zfs $DATASET $mntpnt
			fi
			fail_fatal "$f_bad_dataset"
		fi

		#
		# The dataset seems reasonable, make it the active dataset.
		#
		/usr/sbin/umount $zoneroot
		/usr/sbin/zfs set zoned=on $DATASET || \
		    fail_fatal "$f_zfs_create"
		/usr/sbin/zfs set canmount=noauto $DATASET || \
		    fail_fatal "$f_zfs_create"
		/usr/sbin/zfs set $PROP_ACTIVE=on $DATASET || \
		    fail_fatal "$f_zfs_create"
		/usr/sbin/zfs set $PROP_PARENT=$CURRENT_GZBE $DATASET || \
		    fail_fatal "$f_zfs_create"

		ACTIVE_DS=$DATASET
	else
		#
		# There is no pre-existing active dataset and no
		# -a, -d or -r option so this is an error.
		#
		fail_fatal "$f_option_no_ds"
	fi
elif [ $aarg -eq 1 -o $darg -eq 1 -o $rarg -eq 1 ]; then
	#
	# We already have an active root dataset. In this case the -a, -d or -r
	# option is invalid (XXX unless we want to overwrite the contents of
	# the pre-existing dataset).
	#
	fail_fatal "$f_option_ds"
fi
if [ $aarg -eq 1 ]; then
	#
	# Given a tar or cpio archive, unpack the archive into the dataset.
	#
	# Classify the archive with file(1).  Compressed archives are assumed
	# to be compressed cpio, with $stage1 as the decompression filter.
	# NOTE(review): a gzipped/bzipped *tar* archive would be misclassified
	# as cpio here -- confirm this is the intended set of formats.
	ftype="`LC_ALL=C file $ARCHIVE | cut -d: -f 2`"

	case "$ftype" in
	*cpio*)		filetype="cpio"
			stage1="cat"
			filetypename="cpio archive"
			;;
	*bzip2*)	filetype="cpio"
			stage1="bzcat"
			filetypename="bzipped cpio archive"
			;;
	*gzip*)		filetype="cpio"
			stage1="gzcat"
			filetypename="gzipped cpio archive"
			;;
	*USTAR\ tar\ archive)
			filetype="tar"
			filetypename="tar archive"
			;;
	*USTAR\ tar\ archive\ extended\ format*)
			filetype="xustar"
			filetypename="pax (xustar) archive"
			;;
	*)		fail_fatal "$f_bad_archive"
			;;
	esac

	# The zone root may not exist yet on this path; create it privately.
	if [ ! -d $zoneroot ]; then
		/usr/bin/mkdir -p $zoneroot
		/usr/bin/chmod 700 $zonepath
	fi

	# Temporarily mount the active dataset so we can unpack into it.
	/usr/sbin/mount -F zfs $ACTIVE_DS $zoneroot || fail_fatal "$f_zfs_mount"

	echo $m_installing
	echo $filetypename

	#
	# XXX What does the archive contain? Do we just get stuff under
	# zonepath/root or do we get zonepath? For zfs send, we send the
	# zonepath/root dataset, so maybe we should assume only zonepath/root
	# here as well?
	#
	# Unpack with $zonepath (not $zoneroot) as the working directory --
	# presumably the archive carries a root/ component; see XXX above.
	unpack_result=0
	if [[ "$filetype" = "cpio" ]]; then
		cpioopts="-idm"
		(cd "$zonepath" && $stage1 "$ARCHIVE" | cpio $cpioopts)
		unpack_result=$?
	elif [[ "$filetype" = "tar" ]]; then
		(cd "$zonepath" && tar -xf "$ARCHIVE")
		unpack_result=$?
	elif [[ "$filetype" = "xustar" ]]; then
		(cd "$zonepath" && pax -r -f "$ARCHIVE")
		unpack_result=$?
	fi

	/usr/sbin/umount $zoneroot

	if [[ $unpack_result -ne 0 ]]; then
		fail_fatal "$f_unpack"
	fi
elif [ $rarg -eq 1 ]; then
	#
	# Given 'zfs send' output, receive the snapshot into the new dataset.
	# XXX handle piped input
	#
	/usr/sbin/zfs receive -F $ACTIVE_DS < $ZFSRCV || \
	    fail_fatal "$f_zfs_create"
else
	#
	# If neither of the -a or -r options are provided, assume
	# detach/attach behavior with an existing SNAP clone dataset already
	# in existence, or we're using one from an earlier detach that was
	# specified with -d, or possibly the dataset exists from a previous
	# attach run that did not finish successfully because the sw was not
	# updated (due to missing -u option).
	#
	#
	# We know we already have a valid dataset from earlier validation.
	#
	#
	# If we are attaching a zone that was previously detached on this
	# system, then the dataset is not zoned and is mounted at this point.
	# Look to see if the dataset is in this state.
	#
	zoned=`/usr/sbin/zfs list -H -o zoned $ACTIVE_DS`
	if [ "$zoned" = "off" ]; then
		#
		# When we detached the SNAP clone zone, we changed some of the
		# properties and then left it mounted. Undo that work now.
		#
		/usr/sbin/umount $zonepath/root >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ACTIVE_DS || \
		    fail_fatal "$f_zfs_create"
		# NOTE(review): this one uses fail_incomplete while its
		# neighbors use fail_fatal -- confirm that is intentional.
		/usr/sbin/zfs set mountpoint=legacy $ACTIVE_DS || \
		    fail_incomplete "$f_zfs_create"
		/usr/sbin/zfs set canmount=noauto $ACTIVE_DS || \
		    fail_fatal "$f_zfs_create"
	fi
fi
# Mount the (now-populated) active dataset at the zone root for the software
# synchronization work below.
/usr/sbin/mount -F zfs $ACTIVE_DS $zoneroot || fail_fatal "$f_zfs_mount"

#
# unmount the zoneroot and clean up our temp files if anything goes wrong
#
trap "/usr/sbin/umount $zoneroot > /dev/null 2>&1" EXIT

#
# Look for the 'entire' incorporation's FMRI in the current image; due to users
# doing weird machinations with their publishers, we strip off the publisher
# from the FMRI if it is present.
#
# PKG_IMAGE is not yet set, so this queries the global zone's image.
gz_entire_fmri=$(get_entire_incorp) || fail_fatal "$f_gz_entire"

#
# Get publisher information for global zone.
# If we were not able to get the zone's preferred publisher, complain.
#
# NOTE: piping into 'read' relies on ksh running the last pipeline stage in
# the current shell; this idiom would not set the variables under bash.
get_preferred_publisher | IFS=" " read gz_publisher gz_publisher_url
[[ -z $gz_publisher ]] && fail_usage "$f_no_pref_publisher" "global"
[[ -z $gz_publisher_url ]] && fail_usage "$f_no_pref_publisher" "global"

# From here on, point pkg(1) at the non-global zone's image.
PKG_IMAGE="$zoneroot"
export PKG_IMAGE
#
# Get publisher information for non global zone.
# (PKG_IMAGE now points at the zone root, so this queries the zone's image.)
# If we were not able to get the zone's preferred publisher, complain.
#
get_preferred_publisher | IFS=" " read zone_publisher zone_publisher_url
[[ -z $zone_publisher ]] && fail_usage "$f_no_pref_publisher" $zonename
[[ -z $zone_publisher_url ]] && fail_usage "$f_no_pref_publisher" $zonename

#
# Get entire incorp for non-global zone
#
zone_entire_fmri=$(get_entire_incorp) || fail_fatal "$f_zone_entire"

printf "$m_gzinc\n" $gz_entire_fmri
printf "$m_zinc\n" $zone_entire_fmri

#
# if the zone entire and the gz entire match, we're good.
#
# fmri_compare emits a comparison token; the code below tests for "=" (in
# sync) and ">" (zone newer) -- presumably anything else means downrev.
comp=$(/usr/lib/brand/ipkg/fmri_compare $zone_entire_fmri $gz_entire_fmri)
if [[ $? -ne 0 ]]; then
	fail_fatal "$f_fmri_compare"
fi

# In sync: nothing to update; the attach is complete.
if [[ $comp = "=" ]]; then
	printf "$m_insync\n" $zonename
	echo $m_complete
	exit $ZONE_SUBPROC_OK
fi

# Zone is newer than the global zone: the GZ must be updated first.
if [[ $comp = ">" ]]; then
	printf "$m_uprev\n" $zonename
	fail_fatal "$f_uprev"
fi
#
# If we're here, the zone is downrev of the global zone
#
# allow_update is set only when -u was given; quote it so the test stays
# well-formed when it is unset.
if [ -z "$allow_update" ]; then
	# zone is downrev
	printf "$m_dnrev\n" $zonename
	fail_fatal "$f_downrev"
fi

#
# See if the zone knows about the gz entire fmri in question. If yes,
# we'll try using that.
#
$PKG list --no-refresh -a $gz_entire_fmri > /dev/null 2>&1

#
# If this doesn't exist, then we reset the preferred pub for
# the zone to that of the global zone on attach, and try again.
#
if [[ $? -ne 0 ]]; then
	printf "$m_resetpub\n"
	printf "$m_resetpub2\n" $gz_entire_fmri
	printf "$m_resetpub3\n"

	# Note that we do cause a refresh here-- at some point we need the
	# catalog to be updated.
	# Use $PKG (C locale) for consistency with the other pkg invocations.
	$PKG -R $zoneroot set-publisher -P -O $gz_publisher_url $gz_publisher
	if [[ $? -ne 0 ]]; then
		# f_reset_pub carries two %s conversions; supply the
		# publisher name and URL so the message is fully formed.
		fail_fatal "$f_reset_pub" $gz_publisher $gz_publisher_url
	fi
	zone_publisher=$gz_publisher
	zone_publisher_url=$gz_publisher_url
else
	printf "$m_pubfine\n"
fi
# Reuse the global zone's download cache when present, so packages already
# fetched for the GZ are not downloaded again for the zone.
if [ -d /var/pkg/download ]; then
	PKG_CACHEDIR=/var/pkg/download
	export PKG_CACHEDIR
	printf "$m_cache\n" $PKG_CACHEDIR
fi

printf "$m_updating\n"

#
# Stage 1: bring the zone to the global zone's 'entire' incorporation.
# This does most of the work, but can miss some updates -- in particular on
# a respin of a package where only the datestamps change.
#
$PKG install $gz_entire_fmri || fail_fatal "$f_update"
printf "$m_updating2\n"

#
# So to get around the aforementioned problem, a hack we can apply is
# to force all packages to the latest available against the incorporation.
# This could allow the zone to get slightly ahead of the global zone.
#
# First, list all the packages for the preferred publisher, then
# do an 'install' on those.
#
# Capture the raw listing first and check pkg(1)'s own exit status.  The
# previous one-pipeline form tested only the status of the trailing egrep
# (no pipefail), which both masked pkg failures and spuriously failed when
# 'entire' was the only listed package.
zone_pkglist=$($PKG list --no-refresh -H "pkg://$zone_publisher/*")
if [ $? -ne 0 ]; then
	fail_fatal "$f_pkg_list"
fi
zone_pkgs=$(echo "$zone_pkglist" | awk '{print $1}' | egrep -v '^entire$')

# Nothing beyond 'entire' to force-update; stage 1 already did the work.
if [ -n "$zone_pkgs" ]; then
	$PKG install $zone_pkgs
	if [ $? -ne 0 ]; then
		fail_fatal "$f_update"
	fi
fi

printf "$m_sync_done\n" $gz_entire_fmri
printf "$m_complete\n"

exit $ZONE_SUBPROC_OK