18686478 kstat warning every minute in nova-compute log on SPARC
author david.comay@oracle.com
Thu, 10 Jul 2014 13:27:03 -0700
branch s11-update
changeset 3200 16d08ab96b7f
parent 3199 dfef830f0d5d
child 3204 c386cd8bfed4
18686478 kstat warning every minute in nova-compute log on SPARC
19061438 checkboxes related with 'create volume from Image' in the Horizon don't work
19064962 power state mapping for incomplete zone is incorrect
19065445 Horizon doesn't deal well with shutdown zones
19130253 pausing/suspension "successful" in horizon, though nothing happens
19136473 ZFSSA iSCSI Cinder Attach Volume Failed
19146728 missing an upstream utility 'dhcp_release' needed by instance termination
19148389 jsonpointer package missing as a dependency for jsonpatch
19158668 associating a new floating ip removes existing ipnat rules and re-adds them
19161623 problem in SERVICE/KEYSTONE
19166348 cinder & nova should default signing_dir explicitly for consistency
19166359 minor pkgfmt(1) issues with OpenStack manifests
19168609 System Info page doesn't work properly
19173435 problem in SERVICE/HORIZON
19181971 OpenStack pkg.summaries should include service type
components/dnsmasq/dnsmasq.p5m
components/dnsmasq/patches/02_compile_dhcp_release.patch
components/openstack/cinder/cinder.p5m
components/openstack/cinder/files/api-paste.ini
components/openstack/cinder/files/zfssa/zfssaiscsi.py
components/openstack/glance/glance.p5m
components/openstack/horizon/files/overrides.py
components/openstack/horizon/horizon.p5m
components/openstack/horizon/patches/09-disable-unsupported-bootsource.patch
components/openstack/horizon/patches/10-network-agents.patch
components/openstack/horizon/patches/11-CVE-2014-3473-3474-3475.patch
components/openstack/keystone/keystone.p5m
components/openstack/keystone/patches/07-CVE-2014-3520.patch
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/neutron.p5m
components/openstack/nova/files/api-paste.ini
components/openstack/nova/files/solariszones/driver.py
components/openstack/nova/nova.p5m
components/openstack/swift/swift.p5m
components/python/jsonpatch/jsonpatch-PYVER.p5m
--- a/components/dnsmasq/dnsmasq.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/dnsmasq/dnsmasq.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -42,7 +42,8 @@
 file files/dnsmasq.prof_attr \
     path=etc/security/prof_attr.d/service:network:dnsmasq group=sys
 file files/dnsmasq.xml path=lib/svc/manifest/network/dnsmasq.xml
-file /usr/sbin/dnsmasq path=usr/lib/inet/dnsmasq
+file path=usr/lib/inet/dhcp_release
+file usr/sbin/dnsmasq path=usr/lib/inet/dnsmasq
 file path=usr/share/locale/de/LC_MESSAGES/dnsmasq.mo
 file path=usr/share/locale/es/LC_MESSAGES/dnsmasq.mo
 file path=usr/share/locale/fi/LC_MESSAGES/dnsmasq.mo
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/dnsmasq/patches/02_compile_dhcp_release.patch	Thu Jul 10 13:27:03 2014 -0700
@@ -0,0 +1,221 @@
+Add hooks to compile contrib/wrt/dhcp_release.c. This utility is used to
+send a DHCPRELEASE message to tell the local DHCP server to delete a
+particular lease.
+
+Solaris doesn't support AF_NETLINK, so use getifaddrs() to determine the
+IP address of an interface on which the dnsmasq process is listening.
+
+This patch was developed in-house. Since it is Solaris-specific it is not
+suitable for upstream.
+
+--- dnsmasq-2.68/Makefile	2013-12-08 07:58:29.000000000 -0800
++++ NEW/Makefile	2014-07-06 20:06:34.886232993 -0700
+@@ -19,6 +19,7 @@
+ # Variables you may well want to override.
+ 
+ PREFIX        = /usr/local
++LIBDIR        = $(PREFIX)/lib/inet
+ BINDIR        = $(PREFIX)/sbin
+ MANDIR        = $(PREFIX)/share/man
+ LOCALEDIR     = $(PREFIX)/share/locale
+@@ -43,6 +44,7 @@
+ SRC = src
+ PO  = po
+ MAN = man
++WRT = contrib/wrt
+ 
+ #################################################################
+ 
+@@ -67,6 +69,8 @@
+        helper.o tftp.o log.o conntrack.o dhcp6.o rfc3315.o \
+        dhcp-common.o outpacket.o radv.o slaac.o auth.o ipset.o domain.o
+ 
++dhcp_release_objs = dhcp_release.o
++
+ hdrs = dnsmasq.h config.h dhcp-protocol.h dhcp6-protocol.h \
+        dns-protocol.h radv-protocol.h
+ 
+@@ -75,11 +79,12 @@
+  top="$(top)" \
+  build_cflags="$(version) $(dbus_cflags) $(idn_cflags) $(ct_cflags) $(lua_cflags)" \
+  build_libs="$(dbus_libs) $(idn_libs) $(ct_libs) $(lua_libs) $(sunos_libs)" \
+- -f $(top)/Makefile dnsmasq 
++ -f $(top)/Makefile dnsmasq dhcp_release
+ 
+ mostly_clean :
+ 	rm -f $(BUILDDIR)/*.mo $(BUILDDIR)/*.pot 
+-	rm -f $(BUILDDIR)/.configured $(BUILDDIR)/*.o $(BUILDDIR)/dnsmasq.a $(BUILDDIR)/dnsmasq 
++	rm -f $(BUILDDIR)/.configured $(BUILDDIR)/*.o $(BUILDDIR)/dnsmasq.a \
++		$(BUILDDIR)/dnsmasq $(BUILDDIR)/dhcp_release
+ 
+ clean : mostly_clean
+ 	rm -f $(BUILDDIR)/dnsmasq_baseline
+@@ -89,9 +94,11 @@
+ install : all install-common
+ 
+ install-common :
+-	$(INSTALL) -d $(DESTDIR)$(BINDIR) -d $(DESTDIR)$(MANDIR)/man8
++	$(INSTALL) -d $(DESTDIR)$(BINDIR) -d $(DESTDIR)$(MANDIR)/man8 \
++		-d $(DESTDIR)/$(LIBDIR)
+ 	$(INSTALL) -m 644 $(MAN)/dnsmasq.8 $(DESTDIR)$(MANDIR)/man8 
+ 	$(INSTALL) -m 755 $(BUILDDIR)/dnsmasq $(DESTDIR)$(BINDIR)
++	$(INSTALL) -m 755 $(BUILDDIR)/dhcp_release $(DESTDIR)$(LIBDIR)
+ 
+ all-i18n : $(BUILDDIR)
+ 	@cd $(BUILDDIR) && $(MAKE) \
+@@ -99,7 +106,7 @@
+  i18n=-DLOCALEDIR=\'\"$(LOCALEDIR)\"\' \
+  build_cflags="$(version) $(dbus_cflags) $(ct_cflags) $(lua_cflags) `$(PKG_CONFIG) --cflags libidn`" \
+  build_libs="$(dbus_libs) $(ct_libs) $(lua_libs) $(sunos_libs) `$(PKG_CONFIG) --libs libidn`"  \
+- -f $(top)/Makefile dnsmasq
++ -f $(top)/Makefile dnsmasq dhcp_release
+ 	for f in `cd $(PO); echo *.po`; do \
+ 		cd $(top) && cd $(BUILDDIR) && $(MAKE) top="$(top)" -f $(top)/Makefile $${f%.po}.mo; \
+ 	done
+@@ -142,12 +149,18 @@
+ $(objs:.o=.c) $(hdrs):
+ 	ln -s $(top)/$(SRC)/$@ .
+ 
++$(dhcp_release_objs:.o=.c):
++	ln -s $(top)/$(WRT)/$@ .
++
+ .c.o:
+ 	$(CC) $(CFLAGS) $(COPTS) $(i18n) $(build_cflags) $(RPM_OPT_FLAGS) -c $<	
+ 
+ dnsmasq : .configured $(hdrs) $(objs)
+ 	$(CC) $(LDFLAGS) -o $@ $(objs) $(build_libs) $(LIBS) 
+ 
++dhcp_release : .configured $(hdrs) $(dhcp_release_objs)
++	$(CC) $(LDFLAGS) -o $@ $(dhcp_release_objs) $(build_libs) $(LIBS) 
++
+ dnsmasq.pot : $(objs:.o=.c) $(hdrs)
+ 	$(XGETTEXT) -d dnsmasq --foreign-user --omit-header --keyword=_ -o $@ -i $(objs:.o=.c)
+ 
+--- dnsmasq-2.68/contrib/wrt/dhcp_release.c	2013-12-08 07:58:29.000000000 -0800
++++ NEW/contrib/wrt/dhcp_release.c	2014-07-04 14:57:37.992103839 -0700
+@@ -33,6 +33,10 @@
+    The client-id is optional. If it is "*" then it treated as being missing.
+ */
+ 
++#if defined(__sun) || defined(__sun__)
++#define HAVE_SOLARIS_NETWORK
++#endif
++
+ #include <sys/types.h> 
+ #include <netinet/in.h>
+ #include <net/if.h>
+@@ -44,9 +48,13 @@
+ #include <stdlib.h>
+ #include <net/if_arp.h>
+ #include <sys/ioctl.h>
++#ifdef HAVE_SOLARIS_NETWORK
++#include <ifaddrs.h>
++#else
+ #include <linux/types.h>
+ #include <linux/netlink.h>
+ #include <linux/rtnetlink.h>
++#endif
+ #include <errno.h>
+ 
+ #define DHCP_CHADDR_MAX          16
+@@ -73,6 +81,7 @@
+   unsigned char options[308];
+ };
+ 
++#if !defined(HAVE_SOLARIS_NETWORK)
+ static struct iovec iov;
+ 
+ static int expand_buf(struct iovec *iov, size_t size)
+@@ -139,6 +148,8 @@
+   return rc;
+ }
+ 
++#endif
++
+ static int parse_hex(char *in, unsigned char *out, int maxlen, int *mac_type)
+ {
+   int i = 0;
+@@ -178,6 +189,7 @@
+   return (a.s_addr & mask.s_addr) == (b.s_addr & mask.s_addr);
+ }
+ 
++#if !defined(HAVE_SOLARIS_NETWORK)
+ static struct in_addr find_interface(struct in_addr client, int fd, unsigned int index)
+ {
+   struct sockaddr_nl addr;
+@@ -244,6 +256,7 @@
+  
+   exit(0);
+ }
++#endif
+ 
+ int main(int argc, char **argv)
+ { 
+@@ -254,7 +267,11 @@
+   struct sockaddr_in dest;
+   struct ifreq ifr;
+   int fd = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
++#ifdef HAVE_SOLARIS_NETWORK
++  int nl = 0;
++#else
+   int nl = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
++#endif
+ 
+   if (argc < 4 || argc > 5)
+     { 
+@@ -267,7 +284,8 @@
+       perror("cannot create socket");
+       exit(1);
+     }
+-  
++
++#if !defined(HAVE_SOLARIS_NETWORK)
+   /* This voodoo fakes up a packet coming from the correct interface, which really matters for 
+      a DHCP server */
+   strcpy(ifr.ifr_name, argv[1]);
+@@ -276,11 +294,43 @@
+       perror("cannot setup interface");
+       exit(1);
+     }
+-  
++#endif
+   
+   lease.s_addr = inet_addr(argv[2]);
++#ifdef HAVE_SOLARIS_NETWORK
++  struct ifaddrs *ifp_head, *ifp;
++
++  if (getifaddrs(&ifp_head) < 0)
++    {
++      perror("could not retrieve IP addresses");
++      exit(1);
++    }
++  for (ifp = ifp_head; ifp != NULL; ifp = ifp->ifa_next) { 
++    if (ifp->ifa_addr->sa_family != AF_INET)
++      continue;
++    if (strcmp(ifp->ifa_name, argv[1]) == 0 &&
++        is_same_net(lease, ((struct sockaddr_in *)ifp->ifa_addr)->sin_addr,
++	((struct sockaddr_in *)ifp->ifa_netmask)->sin_addr))
++      break;
++    }
++  if (ifp == NULL) {
++    freeifaddrs(ifp_head);
++    exit(0);
++  }
++  memcpy(&server, &((struct sockaddr_in *)ifp->ifa_addr)->sin_addr,
++	sizeof(server));
++  /* bind to the socket */
++  if (bind(fd, ifp->ifa_addr, sizeof(struct sockaddr_in)) == -1)
++    {
++      freeifaddrs(ifp_head);
++      perror("cannot bind to socket");
++      exit(1);
++    }
++  freeifaddrs(ifp_head);
++#else
+   server = find_interface(lease, nl, if_nametoindex(argv[1]));
+-  
++#endif
++
+   memset(&packet, 0, sizeof(packet));
+  
+   packet.hlen = parse_hex(argv[3], packet.chaddr, DHCP_CHADDR_MAX, &mac_type);
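The Solaris branch of dhcp_release above walks the getifaddrs() list and keeps the first AF_INET address that both belongs to the named interface and shares a subnet with the lease, using the same masked comparison as is_same_net() in dhcp_release.c. The following is a minimal Python sketch of that selection logic only; it assumes Python 3's ipaddress module, and the interface names and addresses are invented for illustration.

    import ipaddress

    def is_same_net(a, b, mask):
        # Same test as is_same_net() in dhcp_release.c: (a & mask) == (b & mask)
        to_int = lambda s: int(ipaddress.IPv4Address(s))
        return (to_int(a) & to_int(mask)) == (to_int(b) & to_int(mask))

    # Stand-in for the getifaddrs() walk: (name, address, netmask) per interface.
    ifaddrs = [
        ("net0", "10.6.15.1", "255.255.255.0"),
        ("dhnet0_1", "192.168.77.2", "255.255.255.0"),
    ]

    lease = "192.168.77.50"
    ifname = "dhnet0_1"

    server = next((addr for name, addr, mask in ifaddrs
                   if name == ifname and is_same_net(lease, addr, mask)), None)
    print(server)   # -> 192.168.77.2; dhcp_release exits quietly when nothing matches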
--- a/components/openstack/cinder/cinder.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/cinder/cinder.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -25,7 +25,7 @@
 
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/cinder@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Cinder"
+set name=pkg.summary value="OpenStack Cinder (Block Storage Service)"
 set name=pkg.description \
     value="OpenStack Cinder provides an infrastructure for managing block storage volumes in OpenStack. It allows block devices to be exposed and connected to compute instances for expanded storage, better performance and integration with enterprise storage platforms."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- a/components/openstack/cinder/files/api-paste.ini	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/cinder/files/api-paste.ini	Thu Jul 10 13:27:03 2014 -0700
@@ -58,4 +58,4 @@
 # signing_dir is configurable, but the default behavior of the authtoken
 # middleware should be sufficient.  It will create a temporary directory
 # in the home directory for the user the cinder process is running as.
-#signing_dir = /var/lib/cinder/keystone-signing
+signing_dir = /var/lib/cinder/keystone-signing
--- a/components/openstack/cinder/files/zfssa/zfssaiscsi.py	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/cinder/files/zfssa/zfssaiscsi.py	Thu Jul 10 13:27:03 2014 -0700
@@ -342,19 +342,6 @@
                                 volume['name'],
                                 new_size * SIZE_GB)
 
-    def _get_iscsi_properties(self, volume):
-        lcfg = self.configuration
-        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
-                                 lcfg.zfssa_project,
-                                 volume['name'])
-        iqn = self.zfssa.get_target(self._get_target_alias())
-
-        return {'target_discovered': True,
-                'target_iqn': iqn,
-                'target_portal': lcfg.zfssa_target_portal,
-                'volume_id': lun['number'],
-                'access_mode': 'rw'}
-
     def create_cloned_volume(self, volume, src_vref):
         """Create a clone of the specified volume."""
         zfssa_snapshot = {'volume_name': src_vref['name'],
--- a/components/openstack/glance/glance.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/glance/glance.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -25,7 +25,7 @@
 
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/glance@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Glance"
+set name=pkg.summary value="OpenStack Glance (Image Service)"
 set name=pkg.description \
     value="OpenStack Glance provides services for discovering, registering, and retrieving virtual machine images. Glance has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. VM images made available through Glance can be stored in a variety of locations from simple file systems to object-storage systems like OpenStack Swift."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- a/components/openstack/horizon/files/overrides.py	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/horizon/files/overrides.py	Thu Jul 10 13:27:03 2014 -0700
@@ -29,12 +29,17 @@
     DeleteNetwork, NetworksTable
 from openstack_dashboard.dashboards.project.access_and_security.tabs import \
     AccessAndSecurityTabs, APIAccessTab, FloatingIPsTab, KeypairsTab
+from \
+    openstack_dashboard.dashboards.project.images_and_snapshots.images.tables \
+    import EditImage, DeleteImage, ImagesTable, LaunchImage
 from openstack_dashboard.dashboards.project.instances import tables \
     as project_tables
 from openstack_dashboard.dashboards.project.instances.tabs import \
     InstanceDetailTabs, LogTab, OverviewTab
 from openstack_dashboard.dashboards.project.instances.workflows import \
     create_instance
+from openstack_dashboard.dashboards.project.instances.workflows import \
+    update_instance
 from openstack_dashboard.dashboards.project.networks.ports.tables import \
     PortsTable as projectPortsTable
 from openstack_dashboard.dashboards.project.networks.subnets.tables import \
@@ -50,7 +55,7 @@
 from openstack_dashboard.dashboards.project.networks.workflows import \
     CreateNetworkInfoAction, CreateSubnetDetailAction, CreateSubnetInfoAction
 
-# remove VolumeOptions and PostCreationStep from LaunchInstance
+# Remove 'PostCreationStep' from Project/Instances/Launch Instance
 create_instance.LaunchInstance.default_steps = (
     create_instance.SelectProjectUser,
     create_instance.SetInstanceDetails,
@@ -58,63 +63,89 @@
     create_instance.SetNetwork
 )
 
-# Remove the Security Groups tab from Project/Access and Security
+# Remove 'UpdateInstanceSecurityGroups' from
+# Project/Instances/Actions/Edit Instance
+update_instance.UpdateInstance.default_steps = (
+    update_instance.UpdateInstanceInfo,
+)
+
+# Remove 'SecurityGroupsTab' tab from Project/Access & Security
 AccessAndSecurityTabs.tabs = (KeypairsTab, FloatingIPsTab, APIAccessTab)
 
-# remove the 'Console' tab from Instance Detail
+# Remove 'ConsoleTab' tab from Project/Instances/Instance Name
 InstanceDetailTabs.tabs = (OverviewTab, LogTab)
 
-# remove the 'Console' option in the Admin Instances pulldown by removing the
-# action for project_tables.ConsoleLink
+# Remove 'ConfirmResize', 'RevertResize', 'ConsoleLink', 'TogglePause',
+# 'ToggleSuspend', 'MigrateInstance' actions from Admin/Instances/Actions
 admin_tables.AdminInstancesTable._meta.row_actions = (
-    project_tables.ConfirmResize,
-    project_tables.RevertResize,
     admin_tables.AdminEditInstance,
     project_tables.LogLink,
     project_tables.CreateSnapshot,
-    project_tables.TogglePause,
-    project_tables.ToggleSuspend,
-    admin_tables.MigrateInstance,
     project_tables.SoftRebootInstance,
     project_tables.RebootInstance,
     project_tables.TerminateInstance
 )
 
-# Disable 'admin_state' and 'shared' checkboxes on Create Networks
+# Remove 'ConfirmResize', 'RevertResize', 'EditInstanceSecurityGroups',
+# 'ConsoleLink', 'TogglePause', 'ToggleSuspend', 'ResizeLink',
+# 'RebuildInstance' actions from Project/Instances/Actions
+project_tables.InstancesTable._meta.row_actions = (
+    project_tables.StartInstance,
+    project_tables.CreateSnapshot,
+    project_tables.SimpleAssociateIP,
+    project_tables.AssociateIP,
+    project_tables.SimpleDisassociateIP,
+    project_tables.EditInstance,
+    project_tables.LogLink,
+    project_tables.SoftRebootInstance,
+    project_tables.RebootInstance,
+    project_tables.StopInstance,
+    project_tables.TerminateInstance
+)
+
+# Disable 'admin_state' and 'shared' checkboxes in
+# Admin/Networks/Create Network
 CreateNetwork.base_fields['admin_state'].widget.attrs['disabled'] = True
 CreateNetwork.base_fields['shared'].widget.attrs['disabled'] = True
 
-# Disable 'admin_state' checkbox on Create Port
+# Disable 'admin_state' checkbox in
+# Admin/Networks/Network Name/Create Port
 CreatePort.base_fields['admin_state'].widget.attrs['disabled'] = True
 
-# Remove the ability to Update Ports
+# Remove 'UpdatePort' action from Admin/Networks/Network Name/Actions
 PortsTable._meta.row_actions = (DeletePort,)
 
-# Remove the ability to Update Subnets
+# Remove 'UpdateSubnet' action from Admin/Networks/Network Name/Actions
 SubnetsTable._meta.row_actions = (DeleteSubnet,)
 
-# Remove the ability to Edit Networks
+# Remove the 'EditNetwork' action from Admin/Networks/Actions
 NetworksTable._meta.row_actions = (DeleteNetwork,)
 
-# Remove the ability to Update project Ports
+# Remove the 'UpdatePort' action from Project/Networks/Name/Ports/Actions
 projectPortsTable._meta.row_actions = ()
 
-# Remove the ability to Update project Subnets
+# Remove the 'UpdateSubnet' action from
+# Project/Networks/Name/Subnets/Actions
 projectSubnetsTable._meta.row_actions = (projectDeleteSubnet,)
 
-# Remove the ability to Edit project Networks
+# Remove the 'EditNetwork' action from Project/Networks/Actions
 projectNetworksTable._meta.row_actions = (projectCreateSubnet,
                                           projectDeleteNetwork)
 
-# Disable 'admin_state' checkbox on Create project Networks
+# Disable 'admin_state' checkboxes in Project/Networks/Create Network/Network
 CreateNetworkInfoAction.base_fields['admin_state'].widget.attrs['disabled'] = \
     True
 
-# Disable 'no_gateway' checkbox on Create project Networks
+# Disable 'no_gateway' checkboxes in Project/Networks/Create Network/Subnet
 CreateSubnetInfoAction.base_fields['no_gateway'].widget.attrs['disabled'] = \
     True
 
-# change 'allocation_pools' and 'host_routes' to readonly
+# Remove 'CreateVolumeFromImage' checkbox from
+# Project/Images & Snapshots/Actions
+ImagesTable._meta.row_actions = (LaunchImage, EditImage, DeleteImage,)
+
+# Change 'allocation_pools' and 'host_routes' fields to read-only in
+# Project/Networks/Create Network/Subnet Detail
 base_fields = CreateSubnetDetailAction.base_fields
 base_fields['allocation_pools'].widget.attrs['readonly'] = 'readonly'
 base_fields['host_routes'].widget.attrs['readonly'] = 'readonly'
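Every override above follows the same pattern: import the stock Horizon table or workflow class and reassign its class-level attributes at import time, before any view renders. A minimal sketch of that pattern is below; it assumes an installed openstack_dashboard, and the usual way Horizon picks such a module up is the HORIZON_CONFIG['customization_module'] setting, which is stated here as background rather than something this changeset configures.

    # Hypothetical stand-alone override module; requires openstack_dashboard on
    # the Python path. Horizon would typically load it via
    # HORIZON_CONFIG['customization_module'] in local_settings.py (assumption).
    from openstack_dashboard.dashboards.project.instances import tables as project_tables

    # Trim the row-level pulldown to the actions that are actually supported;
    # anything omitted from the tuple simply disappears from the UI.
    project_tables.InstancesTable._meta.row_actions = (
        project_tables.SoftRebootInstance,
        project_tables.RebootInstance,
        project_tables.TerminateInstance,
    )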
--- a/components/openstack/horizon/horizon.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/horizon/horizon.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -42,7 +42,7 @@
 #
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/horizon@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Horizon"
+set name=pkg.summary value="OpenStack Horizon (Dashboard)"
 set name=pkg.description \
     value="OpenStack Horizon is the canonical implementation of Openstack's Dashboard, which provides a web based user interface to OpenStack services including Nova, Swift, Keystone, etc."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/09-disable-unsupported-bootsource.patch	Thu Jul 10 13:27:03 2014 -0700
@@ -0,0 +1,28 @@
+Internal patch to enable only the currently supported boot source
+(Boot from Image). This patch will not be committed upstream.
+
+--- horizon-2013.2.3/openstack_dashboard/dashboards/project/instances/workflows/create_instance.py.orig	2014-04-03 11:45:53.000000000 -0700
++++ horizon-2013.2.3/openstack_dashboard/dashboards/project/instances/workflows/create_instance.py	2014-07-07 14:53:44.291365109 -0700
+@@ -137,22 +137,7 @@
+         source_type_choices = [
+             ('', _("--- Select source ---")),
+             ("image_id", _("Boot from image")),
+-            ("instance_snapshot_id", _("Boot from snapshot")),
+         ]
+-        if base.is_service_enabled(request, 'volume'):
+-            source_type_choices.append(("volume_id", _("Boot from volume")))
+-
+-            try:
+-                if api.nova.extension_supported("BlockDeviceMappingV2Boot",
+-                                                request):
+-                    source_type_choices.append(("volume_image_id",
+-                            _("Boot from image (creates a new volume).")))
+-            except Exception:
+-                exceptions.handle(request, _('Unable to retrieve extensions '
+-                                            'information.'))
+-
+-            source_type_choices.append(("volume_snapshot_id",
+-                    _("Boot from volume snapshot (creates a new volume).")))
+         self.fields['source_type'].choices = source_type_choices
+ 
+     def clean(self):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/10-network-agents.patch	Thu Jul 10 13:27:03 2014 -0700
@@ -0,0 +1,27 @@
+In-house patch to handle the Resource Not Found exception raised while
+trying to display the Network Agents panel. This is needed because our
+Neutron doesn't support the Agents extension.
+
+--- horizon-2013.2.3/openstack_dashboard/api/neutron.py	2014-04-03 11:45:53.000000000 -0700
++++ NEW/openstack_dashboard/api/neutron.py	2014-07-08 09:35:42.219185826 -0700
+@@ -33,6 +33,7 @@
+ from openstack_dashboard.api import network_base
+ from openstack_dashboard.api import nova
+ 
++from neutronclient.common.exceptions import NeutronClientException
+ from neutronclient.v2_0 import client as neutron_client
+ 
+ LOG = logging.getLogger(__name__)
+@@ -718,7 +719,11 @@
+ 
+ 
+ def agent_list(request):
+-    agents = neutronclient(request).list_agents()
++    try:
++        agents = neutronclient(request).list_agents()
++    except NeutronClientException as nce:
++        if nce.status_code == 404:
++            return []
+     return [Agent(a) for a in agents['agents']]
+ 
+ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/11-CVE-2014-3473-3474-3475.patch	Thu Jul 10 13:27:03 2014 -0700
@@ -0,0 +1,175 @@
+Upstream patch fixed in Havana 2013.2.4 and Icehouse 2014.1.2
+
+From c844bd692894353c60b320005b804970605e910f Mon Sep 17 00:00:00 2001
+From: Julie Pichon <[email protected]>
+Date: Thu, 22 May 2014 16:45:03 +0100
+Subject: [PATCH] Fix multiple Cross-Site Scripting (XSS) vulnerabilities
+
+ * Ensure user emails are properly escaped
+
+User emails in the Users and Groups panel are being passed through the
+urlize filter to transform them into clickable links. However, urlize
+expects input to be already escaped and safe. We should make sure to
+escape the strings first as email addresses are not validated and can
+contain any type of string.
+
+Closes-Bug: #1320235
+
+ * Ensure network names are properly escaped in the Launch Instance menu
+
+Closes-Bug: #1322197
+
+ * Escape the URLs generated for the Horizon tables
+
+When generating the Horizon tables, there was an assumption that only
+the anchor text needed to be escaped. However some URLs are generated
+based on user-provided data and should be escaped as well. Also escape
+the link attributes for good measure.
+
+ * Use 'reverse' to generate the Resource URLs in the stacks tables
+
+Closes-Bug: #1308727
+
+Conflicts:
+	horizon/tables/base.py
+	openstack_dashboard/dashboards/admin/users/tables.py
+
+Change-Id: Ic8a92e69f66c2d265a802f350e30f091181aa42e
+---
+ horizon/static/horizon/js/horizon.instances.js     |    9 ++++++++-
+ horizon/tables/base.py                             |    4 +++-
+ .../dashboards/admin/groups/tables.py              |    3 ++-
+ .../dashboards/admin/users/tables.py               |    3 ++-
+ .../dashboards/project/stacks/tables.py            |   10 ++++++++--
+ .../dashboards/project/stacks/tabs.py              |    6 ++++++
+ 6 files changed, 29 insertions(+), 6 deletions(-)
+
+diff --git a/horizon/static/horizon/js/horizon.instances.js b/horizon/static/horizon/js/horizon.instances.js
+index c901180..c6ff323 100644
+--- a/horizon/static/horizon/js/horizon.instances.js
++++ b/horizon/static/horizon/js/horizon.instances.js
+@@ -51,8 +51,15 @@ horizon.instances = {
+     $(this.get_network_element("")).each(function(){
+       var $this = $(this);
+       var $input = $this.children("input");
++      var name = $this.text().replace(/^\s+/,"")
++                             .replace(/&/g, '&amp;')
++                             .replace(/</g, '&lt;')
++                             .replace(/>/g, '&gt;')
++                             .replace(/"/g, '&quot;')
++                             .replace(/'/g, '&#x27;')
++                             .replace(/\//g, '&#x2F;');
+       var network_property = {
+-        name:$this.text().replace(/^\s+/,""),
++        name:name,
+         id:$input.attr("id"),
+         value:$input.attr("value")
+       };
+diff --git a/horizon/tables/base.py b/horizon/tables/base.py
+index adc284c..9011b77 100644
+--- a/horizon/tables/base.py
++++ b/horizon/tables/base.py
+@@ -585,7 +585,9 @@ class Cell(html.HTMLElement):
+             link_classes = ' '.join(self.column.link_classes)
+             # Escape the data inside while allowing our HTML to render
+             data = mark_safe('<a href="%s" class="%s">%s</a>' %
+-                             (self.url, link_classes, escape(data)))
++                             (escape(self.url),
++                              escape(link_classes),
++                              escape(data)))
+         return data
+ 
+     @property
+diff --git a/openstack_dashboard/dashboards/admin/groups/tables.py b/openstack_dashboard/dashboards/admin/groups/tables.py
+index bce8f50..ff8103b 100644
+--- a/openstack_dashboard/dashboards/admin/groups/tables.py
++++ b/openstack_dashboard/dashboards/admin/groups/tables.py
+@@ -161,7 +161,8 @@ class AddMembersLink(tables.LinkAction):
+ class UsersTable(tables.DataTable):
+     name = tables.Column('name', verbose_name=_('User Name'))
+     email = tables.Column('email', verbose_name=_('Email'),
+-                          filters=[defaultfilters.urlize])
++                          filters=[defaultfilters.escape,
++                                   defaultfilters.urlize])
+     id = tables.Column('id', verbose_name=_('User ID'))
+     enabled = tables.Column('enabled', verbose_name=_('Enabled'),
+                             status=True,
+diff --git a/openstack_dashboard/dashboards/admin/users/tables.py b/openstack_dashboard/dashboards/admin/users/tables.py
+index d47d68d..c0b0ea5 100644
+--- a/openstack_dashboard/dashboards/admin/users/tables.py
++++ b/openstack_dashboard/dashboards/admin/users/tables.py
+@@ -117,7 +117,8 @@ class UsersTable(tables.DataTable):
+     )
+     name = tables.Column('name', verbose_name=_('User Name'))
+     email = tables.Column('email', verbose_name=_('Email'),
+-                          filters=[defaultfilters.urlize])
++                          filters=[defaultfilters.escape,
++                                   defaultfilters.urlize])
+     # Default tenant is not returned from Keystone currently.
+     #default_tenant = tables.Column('default_tenant',
+     #                               verbose_name=_('Default Project'))
+diff --git a/openstack_dashboard/dashboards/project/stacks/tables.py b/openstack_dashboard/dashboards/project/stacks/tables.py
+index f0bc731..822726b 100644
+--- a/openstack_dashboard/dashboards/project/stacks/tables.py
++++ b/openstack_dashboard/dashboards/project/stacks/tables.py
+@@ -12,6 +12,7 @@
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ 
++from django.core import urlresolvers
+ from django.http import Http404  # noqa
+ from django.template.defaultfilters import timesince  # noqa
+ from django.template.defaultfilters import title  # noqa
+@@ -94,11 +95,16 @@ class StacksTable(tables.DataTable):
+         row_actions = (DeleteStack, )
+ 
+ 
++def get_resource_url(obj):
++    return urlresolvers.reverse('horizon:project:stacks:resource',
++                                args=(obj.stack_id, obj.resource_name))
++
++
+ class EventsTable(tables.DataTable):
+ 
+     logical_resource = tables.Column('resource_name',
+                                      verbose_name=_("Stack Resource"),
+-                                     link=lambda d: d.resource_name,)
++                                     link=get_resource_url)
+     physical_resource = tables.Column('physical_resource_id',
+                                       verbose_name=_("Resource"),
+                                       link=mappings.resource_to_url)
+@@ -142,7 +148,7 @@ class ResourcesTable(tables.DataTable):
+ 
+     logical_resource = tables.Column('resource_name',
+                                      verbose_name=_("Stack Resource"),
+-                                     link=lambda d: d.resource_name)
++                                     link=get_resource_url)
+     physical_resource = tables.Column('physical_resource_id',
+                                      verbose_name=_("Resource"),
+                                      link=mappings.resource_to_url)
+diff --git a/openstack_dashboard/dashboards/project/stacks/tabs.py b/openstack_dashboard/dashboards/project/stacks/tabs.py
+index 15ef833..b5886f3 100644
+--- a/openstack_dashboard/dashboards/project/stacks/tabs.py
++++ b/openstack_dashboard/dashboards/project/stacks/tabs.py
+@@ -75,6 +75,9 @@ class StackEventsTab(tabs.Tab):
+             stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
+             events = api.heat.events_list(self.request, stack_identifier)
+             LOG.debug('got events %s' % events)
++            # The stack id is needed to generate the resource URL.
++            for event in events:
++                event.stack_id = stack.id
+         except Exception:
+             events = []
+             messages.error(request, _(
+@@ -95,6 +98,9 @@ class StackResourcesTab(tabs.Tab):
+             stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
+             resources = api.heat.resources_list(self.request, stack_identifier)
+             LOG.debug('got resources %s' % resources)
++            # The stack id is needed to generate the resource URL.
++            for r in resources:
++                r.stack_id = stack.id
+         except Exception:
+             resources = []
+             messages.error(request, _(
+-- 
+1.7.9.5
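The common thread in these hunks is that user-controlled strings (emails, network names, URLs) must be escaped before they are interpolated into markup or handed to urlize; escaping afterwards is too late because the markup is already assembled. Below is a small, Django-free sketch of the principle: Python 3's html.escape stands in for Django's escape filter, and the payload string is invented.

    from html import escape   # stand-in for django.utils.html.escape

    def render_link(url, text):
        # Escape both the href and the anchor text, as the base.py hunk now does.
        return '<a href="%s">%s</a>' % (escape(url, quote=True), escape(text))

    payload = '"><script>alert(1)</script>'
    print(render_link(payload, payload))
    # Both copies of the payload come out as inert &quot;&gt;&lt;script&gt;... text.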
--- a/components/openstack/keystone/keystone.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/keystone/keystone.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -25,7 +25,7 @@
 
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/keystone@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Keystone"
+set name=pkg.summary value="OpenStack Keystone (Identity Service)"
 set name=pkg.description \
     value="OpenStack Keystone is a service that provides Identity, Token, Catalog, and Policy services for use specifically by projects in the OpenStack family."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/07-CVE-2014-3520.patch	Thu Jul 10 13:27:03 2014 -0700
@@ -0,0 +1,91 @@
+This upstream patch addresses CVE-2014-3520 and is tracked under
+Launchpad bug 1331912. The fix is included in Icehouse 2014.1.2 and Havana
+2013.2.4.
+
+commit 96d9bcf230a74d6122a2b14e00ef10915c8f76e3
+Author: Jamie Lennox <[email protected]>
+Date:   Thu Jun 19 14:41:22 2014 +1000
+
+    Ensure that in v2 auth tenant_id matches trust
+    
+    Previously if a trustee requests a trust scoped token for a project that
+    is different to the one in the trust, however the trustor has the
+    appropriate roles then a token would be issued.
+    
+    Ensure that the trust that was given matches the project that was
+    specified in the scope.
+    
+    (cherry picked from commit 1556faec2f65dba60584f0a9657d5b717a6ede3a)
+    
+    Closes-Bug: #1331912
+    Change-Id: I00ad783bcb93cea9e5622965f81b91c80f4570cc
+
+diff --git a/keystone/tests/test_auth.py b/keystone/tests/test_auth.py
+index 6371caf..0d97f44 100644
+--- a/keystone/tests/test_auth.py
++++ b/keystone/tests/test_auth.py
+@@ -624,13 +624,15 @@ class AuthWithTrust(AuthTest):
+         self.new_trust = self.trust_controller.create_trust(
+             context, trust=trust_data)['trust']
+ 
+-    def build_v2_token_request(self, username, password):
++    def build_v2_token_request(self, username, password, tenant_id=None):
++        if not tenant_id:
++            tenant_id = self.tenant_bar['id']
+         body_dict = _build_user_auth(username=username, password=password)
+         self.unscoped_token = self.controller.authenticate({}, body_dict)
+         unscoped_token_id = self.unscoped_token['access']['token']['id']
+         request_body = _build_user_auth(token={'id': unscoped_token_id},
+                                         trust_id=self.new_trust['id'],
+-                                        tenant_id=self.tenant_bar['id'])
++                                        tenant_id=tenant_id)
+         return request_body
+ 
+     def test_create_trust_bad_data_fails(self):
+@@ -704,6 +706,15 @@ class AuthWithTrust(AuthTest):
+             exception.Forbidden,
+             self.controller.authenticate, {}, request_body)
+ 
++    def test_token_from_trust_wrong_project_fails(self):
++        for assigned_role in self.assigned_roles:
++            self.assignment_api.add_role_to_user_and_project(
++                self.trustor['id'], self.tenant_baz['id'], assigned_role)
++        request_body = self.build_v2_token_request('TWO', 'two2',
++                                                   self.tenant_baz['id'])
++        self.assertRaises(exception.Forbidden, self.controller.authenticate,
++                          {}, request_body)
++
+     def fetch_v2_token_from_trust(self):
+         request_body = self.build_v2_token_request('TWO', 'two2')
+         auth_response = self.controller.authenticate({}, request_body)
+diff --git a/keystone/token/controllers.py b/keystone/token/controllers.py
+index 72486a1..de7e473 100644
+--- a/keystone/token/controllers.py
++++ b/keystone/token/controllers.py
+@@ -160,6 +160,8 @@ class Auth(controller.V2Controller):
+ 
+         user_ref = old_token_ref['user']
+         user_id = user_ref['id']
++        tenant_id = self._get_project_id_from_auth(auth)
++
+         if not CONF.trust.enabled and 'trust_id' in auth:
+             raise exception.Forbidden('Trusts are disabled.')
+         elif CONF.trust.enabled and 'trust_id' in auth:
+@@ -168,6 +170,9 @@ class Auth(controller.V2Controller):
+                 raise exception.Forbidden()
+             if user_id != trust_ref['trustee_user_id']:
+                 raise exception.Forbidden()
++            if (trust_ref['project_id'] and
++                    tenant_id != trust_ref['project_id']):
++                raise exception.Forbidden()
+             if ('expires' in trust_ref) and (trust_ref['expires']):
+                 expiry = trust_ref['expires']
+                 if expiry < timeutils.parse_isotime(timeutils.isotime()):
+@@ -190,7 +195,6 @@ class Auth(controller.V2Controller):
+             current_user_ref = self.identity_api.get_user(user_id)
+ 
+         metadata_ref = {}
+-        tenant_id = self._get_project_id_from_auth(auth)
+         tenant_ref, metadata_ref['roles'] = self._get_project_roles_and_ref(
+             user_id, tenant_id)
+ 
--- a/components/openstack/neutron/files/agent/evs_l3_agent.py	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/neutron/files/agent/evs_l3_agent.py	Thu Jul 10 13:27:03 2014 -0700
@@ -180,26 +180,29 @@
         ipintf = net_lib.IPInterface(ifname)
         ipaddr_list = ipintf.ipaddr_list()['static']
 
-        # Clear out all ipnat rules for floating ips
-        ri.ipfilters_manager.remove_nat_rules(ri.ipfilters_manager.ipv4['nat'])
-
         existing_cidrs = set([addr for addr in ipaddr_list])
         new_cidrs = set()
 
+        existing_nat_rules = [nat_rule for nat_rule in
+                              ri.ipfilters_manager.ipv4['nat']]
+        new_nat_rules = []
+
         # Loop once to ensure that floating ips are configured.
         for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
             fip_ip = fip['floating_ip_address']
             fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
-
             new_cidrs.add(fip_cidr)
+            fixed_cidr = str(fip['fixed_ip_address']) + '/32'
+            nat_rule = 'bimap %s %s -> %s' % (ifname, fixed_cidr, fip_cidr)
 
             if fip_cidr not in existing_cidrs:
                 ipintf.create_address(fip_cidr)
+                ri.ipfilters_manager.add_nat_rules([nat_rule])
+            new_nat_rules.append(nat_rule)
 
-            # Rebuild iptables rules for the floating ip.
-            fixed_cidr = str(fip['fixed_ip_address']) + '/32'
-            nat_rules = ['bimap %s %s -> %s' % (ifname, fixed_cidr, fip_cidr)]
-            ri.ipfilters_manager.add_nat_rules(nat_rules)
+        # remove all the old NAT rules
+        ri.ipfilters_manager.remove_nat_rules(list(set(existing_nat_rules) -
+                                              set(new_nat_rules)))
 
         # Clean up addresses that no longer belong on the gateway interface.
         for ip_cidr in existing_cidrs - new_cidrs:
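Instead of flushing every floating-IP NAT rule and re-adding it on each sync (the behaviour behind bug 19158668), the agent now reconciles the two sets and removes only the rules that no longer correspond to a floating IP. A self-contained sketch of that set-difference pattern follows, using the same 'bimap' rule format as the driver; the interface name and addresses are made up.

    existing_nat_rules = {
        'bimap l3e0_1 10.0.0.5/32 -> 192.168.77.10/32',
        'bimap l3e0_1 10.0.0.6/32 -> 192.168.77.11/32',   # floating IP since removed
    }
    new_nat_rules = {
        'bimap l3e0_1 10.0.0.5/32 -> 192.168.77.10/32',   # unchanged, left alone
        'bimap l3e0_1 10.0.0.7/32 -> 192.168.77.12/32',   # newly associated
    }

    stale = existing_nat_rules - new_nat_rules   # only these get removed
    added = new_nat_rules - existing_nat_rules   # only these get installed

    print(sorted(stale))
    print(sorted(added))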
--- a/components/openstack/neutron/files/agent/solaris/dhcp.py	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/dhcp.py	Thu Jul 10 13:27:03 2014 -0700
@@ -372,8 +372,11 @@
         utils.execute(cmd, self.root_helper)
 
     def release_lease(self, mac_address, removed_ips):
-        # TODO(gmoodalb): we need to support dnsmasq's dhcp_release
-        pass
+        """Release a DHCP lease."""
+        for ip in removed_ips or []:
+            cmd = ['/usr/lib/inet/dhcp_release', self.interface_name,
+                   ip, mac_address]
+            utils.execute(cmd, self.root_helper)
 
     def reload_allocations(self):
         """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
--- a/components/openstack/neutron/neutron.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/neutron/neutron.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -25,7 +25,7 @@
 
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/neutron@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Neutron"
+set name=pkg.summary value="OpenStack Neutron (Networking Service)"
 set name=pkg.description \
     value="Neutron provides an API to dynamically request and configure virtual networks. These networks connect 'interfaces' from other OpenStack services (e.g., VNICs from Nova VMs). The Neutron API supports extensions to provide advanced network capabilities (e.g., QoS, ACLs, network monitoring, etc)."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- a/components/openstack/nova/files/api-paste.ini	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/nova/files/api-paste.ini	Thu Jul 10 13:27:03 2014 -0700
@@ -122,6 +122,6 @@
 # signing_dir is configurable, but the default behavior of the authtoken
 # middleware should be sufficient.  It will create a temporary directory
 # in the home directory for the user the nova process is running as.
-#signing_dir = /var/lib/nova/keystone-signing
+signing_dir = /var/lib/nova/keystone-signing
 # Workaround for https://bugs.launchpad.net/nova/+bug/1154809
 auth_version = v2.0
--- a/components/openstack/nova/files/solariszones/driver.py	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/nova/files/solariszones/driver.py	Thu Jul 10 13:27:03 2014 -0700
@@ -95,7 +95,7 @@
 # Mapping between zone state and Nova power_state.
 SOLARISZONES_POWER_STATE = {
     ZONE_STATE_CONFIGURED:      power_state.NOSTATE,
-    ZONE_STATE_INCOMPLETE:      power_state.BUILDING,
+    ZONE_STATE_INCOMPLETE:      power_state.NOSTATE,
     ZONE_STATE_UNAVAILABLE:     power_state.NOSTATE,
     ZONE_STATE_INSTALLED:       power_state.SHUTDOWN,
     ZONE_STATE_READY:           power_state.RUNNING,
@@ -498,9 +498,9 @@
             kstat_object = self._rad_instance.get_object(
                 kstat.Kstat(), rad.client.ADRGlobPattern(pattern))
         except Exception as reason:
-            LOG.warning(_("Unable to retrieve kstat object '%s:%s:%s' of "
-                          "class '%s' via kstat(3RAD): %s")
-                        % (module, instance, name, kstat_class, reason))
+            LOG.info(_("Unable to retrieve kstat object '%s:%s:%s' of class "
+                       "'%s' via kstat(3RAD): %s")
+                     % (module, instance, name, kstat_class, reason))
             return None
 
         kstat_data = {}
@@ -727,7 +727,8 @@
                 LOG.error(_("Cannot attach Fibre Channel volume '%s' because "
                           "no Fibre Channel HBA initiators were found")
                           % (target_wwn))
-                raise exception.InvalidVolume(reason="No host FC initiator")
+                raise exception.InvalidVolume(
+                    reason="No host Fibre Channel initiator found")
 
             target_lun = data['target_lun']
             # If the volume was exported just a few seconds previously then
@@ -1404,6 +1405,10 @@
         if zone is None:
             raise exception.InstanceNotFound(instance_id=name)
 
+        if self._get_state(zone) == power_state.SHUTDOWN:
+            self._power_on(instance)
+            return
+
         try:
             if reboot_type == 'SOFT':
                 zone.shutdown(['-r'])
--- a/components/openstack/nova/nova.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/nova/nova.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -25,7 +25,7 @@
 
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/nova@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Nova"
+set name=pkg.summary value="OpenStack Nova (Compute Service)"
 set name=pkg.description \
     value="OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs."
 set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
--- a/components/openstack/swift/swift.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/openstack/swift/swift.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -24,10 +24,9 @@
 #
 
 <transform file path=usr.*/man/.+ -> default mangler.man.stability uncommitted>
-
 set name=pkg.fmri \
     value=pkg:/cloud/openstack/swift@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
-set name=pkg.summary value="OpenStack Swift"
+set name=pkg.summary value="OpenStack Swift (Object Storage Service)"
 set name=pkg.description \
     value="The OpenStack Object Store project, known as Swift, offers cloud storage software so that you can store and retrieve data in virtual containers"
 set name=pkg.human-version value="Havana 2013.2.3"
@@ -71,7 +70,6 @@
 file path=etc/swift/swift-bench.conf
 file path=etc/swift/swift.conf
 <transform file path=lib/svc/.*/([^/]*) -> set action.hash files/%<1>>
-<transform file path=lib/svc/method/swift-proxy-server -> set action.hash files/swift-smf-method>
 file path=lib/svc/manifest/application/openstack/swift-account-auditor.xml
 file path=lib/svc/manifest/application/openstack/swift-account-reaper.xml
 file path=lib/svc/manifest/application/openstack/swift-account-replicator.xml
@@ -89,6 +87,7 @@
 file path=lib/svc/manifest/application/openstack/swift-proxy-server.xml
 file path=lib/svc/manifest/application/openstack/swift-replicator-rsync.xml
 <transform hardlink path=lib/svc/method -> default target swift-proxy-server>
+<transform file path=lib/svc/method/swift-proxy-server -> set action.hash files/swift-smf-method>
 hardlink path=lib/svc/method/swift-account-auditor
 hardlink path=lib/svc/method/swift-account-reaper
 hardlink path=lib/svc/method/swift-account-replicator
--- a/components/python/jsonpatch/jsonpatch-PYVER.p5m	Thu Jul 10 05:21:16 2014 -0700
+++ b/components/python/jsonpatch/jsonpatch-PYVER.p5m	Thu Jul 10 13:27:03 2014 -0700
@@ -18,7 +18,9 @@
 #
 # CDDL HEADER END
 #
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+
+#
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #
 
 set name=pkg.fmri \
@@ -54,3 +56,6 @@
 # force a dependency on the jsonpatch package
 depend type=require \
     fmri=library/python-2/jsonpatch@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
+
+# force a dependency on jsonpointer; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python-2/jsonpointer-$(PYV)