19073547 editing built-in flavors fails with name too long s11u2-sru
authorDrew Fisher <drew.fisher@oracle.com>
Wed, 02 Jul 2014 06:53:43 -0700
branchs11u2-sru
changeset 3198 46289f36c1ca
parent 3192 b9a078e64012
child 3201 6839f7d1f036
19073547 editing built-in flavors fails with name too long 19056291 A couple of links to zone consoles still exist 19049098 unable to set gateway interface or gateway port for neutron router 19034270 IPv6 filter rules are not properly handled 19031678 The Instances pop-up in Network Topology should remove 'open console' 19031443 nova package needs a require dependency on library/python-2/sqlalchemy 19026639 solaris.css doesn't display network topology pop-ups correctly 19026240 Extract tenantid using the framework provided function 19026100 every tenant should be able to allocate floating IPs for themselves 19022958 Unable to specify solaris device names when attaching volumes to instances 18646500 Unable to attach a volume to a KZ instance using Horizon
components/openstack/cinder/files/cinder-volume-setup
components/openstack/horizon/files/branding/css/solaris.css
components/openstack/horizon/files/local_settings.py
components/openstack/horizon/files/overrides.py
components/openstack/horizon/patches/06-launchpad-1255136.patch
components/openstack/horizon/patches/07-remove-console.patch
components/openstack/horizon/patches/08-flavor-names.patch
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/neutron-l3-agent
components/openstack/nova/nova.p5m
components/openstack/nova/resolve.deps
components/openstack/swift/files/swift-replicator-rsync
--- a/components/openstack/cinder/files/cinder-volume-setup	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/cinder/files/cinder-volume-setup	Wed Jul 02 06:53:43 2014 -0700
@@ -85,7 +85,7 @@
         print "delegation of %s to 'cinder' failed:  %s" % (top_ds, err)
         return smf_include.SMF_EXIT_ERR_CONFIG
 
-    return int(smf_include.SMF_EXIT_OK)
+    return smf_include.SMF_EXIT_OK
 
 if __name__ == "__main__":
     os.putenv("LC_ALL", "C")
--- a/components/openstack/horizon/files/branding/css/solaris.css	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/horizon/files/branding/css/solaris.css	Wed Jul 02 06:53:43 2014 -0700
@@ -2037,20 +2037,9 @@
 }
 .btn-danger {
   background: #0074CD;
-  font-weight: bold;
-  /*background-image: -moz-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: -ms-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#ee5f5b), to(#bd362f));
-  background-image: -webkit-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: -o-linear-gradient(top, #ee5f5b, #bd362f);
-  background-image: linear-gradient(top, #ee5f5b, #bd362f);
-  background-repeat: repeat-x;
-  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ee5f5b', endColorstr='#bd362f', GradientType=0);
-  border-color: #bd362f #bd362f #802420;
-  border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25);
-  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);*/
- border: 1px solid #0074CD;
- color: #FFFFFF
+  /*font-weight: bold;*/
+  border: 1px solid #0074CD;
+  color: #FFFFFF
 }
 
 .btn-danger.active,
@@ -2183,7 +2172,8 @@
 }
 .topologyNavi .btn-group {
 	margin-left: 5px;
-	float: right;
+	margin-top: 5px;
+	float: left;
 }
 .btn-group:before,
 .btn-group:after {
@@ -6037,6 +6027,185 @@
 .topologyNavi {
   height: 80px;
 }
+
+.topologyBalloon {
+  display: none;
+  background: #ffffff;
+  position: absolute;
+  left: 100px;
+  top: 20px;
+  z-index: 600;
+  border-radius: 5px;
+  color: #333333;
+  min-width: 200px;
+  line-height: 1.2;
+  font-size: 11px;
+  -webkit-box-shadow: 0px 1px 6px #777777;
+  -moz-box-shadow: 0px 1px 6px #777777;
+  box-shadow: 0px 1px 6px #777777;
+}
+.topologyBalloon.on {
+  display: block;
+}
+.topologyBalloon .vnc_window {
+  margin-left: 10px;
+}
+.topologyBalloon .closeTopologyBalloon {
+  font-size: 16px;
+  line-height: 1;
+  display: block;
+  position: absolute;
+  font-weight: bold;
+  right: 6px;
+  top: 0px;
+  cursor: pointer;
+  padding: 3px;
+  color: #aaaaaa;
+}
+.topologyBalloon .closeTopologyBalloon:hover {
+  color: #777777;
+  text-decoration: none;
+}
+.topologyBalloon .contentBody {
+  padding: 8px 8px 0;
+}
+.topologyBalloon span.active:before, .topologyBalloon span.down:before {
+  content: "";
+  width: 9px;
+  height: 9px;
+  display: inline-block;
+  background: #0d925b;
+  margin-right: 3px;
+  border-radius: 10px;
+  vertical-align: middle;
+}
+.topologyBalloon span.down:before {
+  background: #e64b41;
+}
+.topologyBalloon .footer {
+  background: #efefef;
+  border-top: 1px solid #d9d9d9;
+  padding: 8px;
+  border-radius: 0px 0px 7px 7px;
+}
+.topologyBalloon .footer .footerInner {
+  display: table;
+  width: 100%;
+}
+.topologyBalloon .footer .cell {
+  display: table-cell;
+  padding-right: 10px;
+}
+.topologyBalloon .footer .link {
+  font-size: 12px;
+}
+.topologyBalloon .footer .delete {
+  padding-right: 0;
+  text-align: right;
+}
+.topologyBalloon .footer .delete .btn:before {
+  content: "Delete ";
+}
+.topologyBalloon .footer .delete .btn.deleting:before {
+  content: "Deleting ";
+}
+.topologyBalloon .footer .delete .btn.instance:before {
+  content: "Terminate ";
+}
+.topologyBalloon .footer .delete .btn.instance.deleting:before {
+  content: "Terminating ";
+}
+.topologyBalloon table.detaiInfoTable {
+  margin: 0px 0px 8px 0px;
+
+}
+.topologyBalloon table.detaiInfoTable caption {
+  text-align: left;
+  font-size: 16px;
+  font-weight: bold;
+  margin-bottom: 3px;
+}
+.topologyBalloon table.detaiInfoTable th, .topologyBalloon table.detaiInfoTable td {
+  text-align: left;
+  vertical-align: middle;
+  padding-bottom: 3px;
+  background: transparent;
+}
+.topologyBalloon table.detaiInfoTable th {
+  color: #999999;
+  padding-right: 8px;
+  width: 80px;
+}
+.topologyBalloon table.detaiInfoTable th span {
+  vertical-align: middle;
+  width: 80px;
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+  display: inline-block;
+}
+.topologyBalloon table.detaiInfoTable th.device {
+  text-align: right;
+  font-size: 12px;
+  color: #333333;  
+  padding-left: 12px;
+}
+.topologyBalloon table.detaiInfoTable td {
+  padding-right: 5px;
+  white-space: nowrap;
+}
+.topologyBalloon table.detaiInfoTable td.delete {
+  padding-right: 0;
+  text-align: right;
+}
+.topologyBalloon table.detaiInfoTable .btn {
+  line-height: 1.4;
+}
+.topologyBalloon table.detaiInfoTable .btn:before {
+  content: "Delete ";
+}
+.topologyBalloon table.detaiInfoTable .btn.deleting:before {
+  content: "Deleting ";
+}
+.topologyBalloon:before {
+  border-top: 7px solid transparent;
+  border-bottom: 7px solid transparent;
+  border-right: 9px solid #bbbbbb;
+  display: block;
+  position: absolute;
+  top: 30px;
+  left: -9px;
+  width: 0;
+  height: 0;
+  content: "";
+}
+.topologyBalloon:after {
+  border-top: 6px solid transparent;
+  border-bottom: 6px solid transparent;
+  border-right: 8px solid #ffffff;
+  display: block;
+  position: absolute;
+  top: 31px;
+  left: -8px;
+  width: 0;
+  height: 0;
+  content: "";
+}
+.topologyBalloon.leftPosition:before {
+  border-right: none;
+  border-left: 9px solid #bbbbbb;
+  right: -9px;
+  top: 30px;
+  left: auto;
+}
+.topologyBalloon.leftPosition:after {
+  border-right: none;
+  border-left: 8px solid #ffffff;
+  right: -8px;
+  top: 31px;
+  left: auto;
+}
+
 .quota-dynamic {
   height: 200px; 
   min-width: 840px;
--- a/components/openstack/horizon/files/local_settings.py	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/horizon/files/local_settings.py	Wed Jul 02 06:53:43 2014 -0700
@@ -161,8 +161,12 @@
     'can_edit_role': True
 }
 
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
 OPENSTACK_HYPERVISOR_FEATURES = {
-    'can_set_mount_point': True,
+    'can_set_mount_point': False,
 }
 
 # The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
--- a/components/openstack/horizon/files/overrides.py	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/horizon/files/overrides.py	Wed Jul 02 06:53:43 2014 -0700
@@ -16,6 +16,8 @@
 Solaris-specific customizations for Horizon
 """
 
+from openstack_dashboard.dashboards.admin.instances import tables \
+    as admin_tables
 from openstack_dashboard.dashboards.admin.networks.forms import CreateNetwork
 from openstack_dashboard.dashboards.admin.networks.ports.forms import \
     CreatePort
@@ -27,6 +29,8 @@
     DeleteNetwork, NetworksTable
 from openstack_dashboard.dashboards.project.access_and_security.tabs import \
     AccessAndSecurityTabs, APIAccessTab, FloatingIPsTab, KeypairsTab
+from openstack_dashboard.dashboards.project.instances import tables \
+    as project_tables
 from openstack_dashboard.dashboards.project.instances.tabs import \
     InstanceDetailTabs, LogTab, OverviewTab
 from openstack_dashboard.dashboards.project.instances.workflows import \
@@ -47,11 +51,12 @@
     CreateNetworkInfoAction, CreateSubnetDetailAction, CreateSubnetInfoAction
 
 # remove VolumeOptions and PostCreationStep from LaunchInstance
-create_instance.LaunchInstance.default_steps = \
-    (create_instance.SelectProjectUser,
-     create_instance.SetInstanceDetails,
-     create_instance.SetAccessControls,
-     create_instance.SetNetwork)
+create_instance.LaunchInstance.default_steps = (
+    create_instance.SelectProjectUser,
+    create_instance.SetInstanceDetails,
+    create_instance.SetAccessControls,
+    create_instance.SetNetwork
+)
 
 # Remove the Security Groups tab from Project/Access and Security
 AccessAndSecurityTabs.tabs = (KeypairsTab, FloatingIPsTab, APIAccessTab)
@@ -59,6 +64,22 @@
 # remove the 'Console' tab from Instance Detail
 InstanceDetailTabs.tabs = (OverviewTab, LogTab)
 
+# remove the 'Console' option in the Admin Instances pulldown by removing the
+# action for project_tables.ConsoleLink
+admin_tables.AdminInstancesTable._meta.row_actions = (
+    project_tables.ConfirmResize,
+    project_tables.RevertResize,
+    admin_tables.AdminEditInstance,
+    project_tables.LogLink,
+    project_tables.CreateSnapshot,
+    project_tables.TogglePause,
+    project_tables.ToggleSuspend,
+    admin_tables.MigrateInstance,
+    project_tables.SoftRebootInstance,
+    project_tables.RebootInstance,
+    project_tables.TerminateInstance
+)
+
 # Disable 'admin_state' and 'shared' checkboxes on Create Networks
 CreateNetwork.base_fields['admin_state'].widget.attrs['disabled'] = True
 CreateNetwork.base_fields['shared'].widget.attrs['disabled'] = True
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/06-launchpad-1255136.patch	Wed Jul 02 06:53:43 2014 -0700
@@ -0,0 +1,107 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+has not yet been released for Havana.  It has been modified to
+apply cleanly to our current Havana implementation.
+
+From 1b13a42b9edbd34e8bb4339a962cd2c30bda7d81 Mon Sep 17 00:00:00 2001
+From: Zhenguo Niu <[email protected]>
+Date: Wed, 4 Dec 2013 09:45:56 +0800
+Subject: [PATCH] Set can_set_mount_point default to False
+
+Patch https://review.openstack.org/#/c/59229/ only changed the default
+value in the local_settings.py example file. If the setting isn't defined,
+it still defaults to True
+Change-Id: Ia3525786e5eb9ec83b8057c1cb6158ab153adc59
+Closes-Bug: #1255136
+
+--- horizon-2013.2.3/doc/source/topics/settings.rst.orig	2014-06-20 05:00:21.858630069 -0700
++++ horizon-2013.2.3/doc/source/topics/settings.rst	2014-06-20 06:27:39.151996410 -0700
[email protected]@ -204,14 +204,15 @@
+ ``OPENSTACK_HYPERVISOR_FEATURES``
+ ---------------------------------
+ 
+-Default: ``{'can_set_mount_point': True, 'can_encrypt_volumes': False}``
++Default: ``{'can_set_mount_point': False, 'can_encrypt_volumes': False}``
+ 
+ A dictionary containing settings which can be used to identify the
+ capabilities of the hypervisor for Nova.
+ 
+-Some hypervisors have the ability to set the mount point for volumes attached
+-to instances (KVM does not). Setting ``can_set_mount_point`` to ``False`` will
+-remove the option to set the mount point from the UI.
++The Xen Hypervisor has the ability to set the mount point for volumes attached
++to instances (other Hypervisors currently do not). Setting
++``can_set_mount_point`` to ``True`` will add the option to set the mount point
++from the UI.
+ 
+ In the Havana release, there will be a feature for encrypted volumes
+ which will be controlled by the ``can_encrypt_volumes``. Setting it to ``True``
+--- horizon-2013.2.3/openstack_dashboard/dashboards/project/volumes/tests.py.orig	2014-06-20 04:56:16.805828523 -0700
++++ horizon-2013.2.3/openstack_dashboard/dashboards/project/volumes/tests.py	2014-06-20 06:32:26.947240676 -0700
[email protected]@ -555,6 +555,8 @@
+ 
+     @test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
+     def test_edit_attachments(self):
++        PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
++        settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = True
+         volume = self.volumes.first()
+         servers = [s for s in self.servers.list()
+                    if s.tenant_id == self.request.user.tenant_id]
[email protected]@ -573,11 +575,10 @@
+         self.assertEqual(res.status_code, 200)
+         self.assertTrue(isinstance(form.fields['device'].widget,
+                                    widgets.TextInput))
++        settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = PREV
+ 
+     @test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
+     def test_edit_attachments_cannot_set_mount_point(self):
+-        PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
+-        settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = False
+ 
+         volume = self.volumes.first()
+         servers = [s for s in self.servers.list()
[email protected]@ -593,7 +594,6 @@
+         form = res.context['form']
+         self.assertTrue(isinstance(form.fields['device'].widget,
+                                    widgets.HiddenInput))
+-        settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = PREV
+ 
+     @test.create_stubs({cinder: ('volume_get',),
+                         api.nova: ('server_get', 'server_list',),
+--- horizon-2013.2.3/openstack_dashboard/dashboards/project/volumes/forms.py.orig	2014-06-20 04:54:21.534957784 -0700
++++ horizon-2013.2.3/openstack_dashboard/dashboards/project/volumes/forms.py	2014-06-20 06:33:14.199293693 -0700
[email protected]@ -230,7 +230,7 @@
+                                       "OPENSTACK_HYPERVISOR_FEATURES",
+                                       {})
+         can_set_mount_point = hypervisor_features.get("can_set_mount_point",
+-                                                      True)
++                                                      False)
+         if not can_set_mount_point:
+             self.fields['device'].widget = forms.widgets.HiddenInput()
+             self.fields['device'].required = False
+--- horizon-2013.2.3/openstack_dashboard/local/local_settings.py.example.orig	2014-06-20 06:49:38.061148762 -0700
++++ horizon-2013.2.3/openstack_dashboard/local/local_settings.py.example	2014-06-20 03:03:06.949623969 -0700
[email protected]@ -149,8 +149,12 @@
+     'can_edit_role': True
+ }
+ 
++# The Xen Hypervisor has the ability to set the mount point for volumes
++# attached to instances (other Hypervisors currently do not). Setting
++# can_set_mount_point to True will add the option to set the mount point
++# from the UI.
+ OPENSTACK_HYPERVISOR_FEATURES = {
+-    'can_set_mount_point': True,
++    'can_set_mount_point': False,
+ }
+ 
+ # The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+--- horizon-2013.2.3/openstack_dashboard/test/settings.py.orig	2014-06-20 04:59:02.205734858 -0700
++++ horizon-2013.2.3/openstack_dashboard/test/settings.py	2014-06-20 06:28:57.101070812 -0700
[email protected]@ -107,7 +107,7 @@
+ }
+ 
+ OPENSTACK_HYPERVISOR_FEATURES = {
+-    'can_set_mount_point': True,
++    'can_set_mount_point': False,
+ }
+ 
+ OPENSTACK_IMAGE_BACKEND = {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/07-remove-console.patch	Wed Jul 02 06:53:43 2014 -0700
@@ -0,0 +1,15 @@
+In-house patch to remove the ">> open console" link from instance
+elements in the Network Topology screen.  This patch is
+Solaris-specific and not suitable for upstream contribution.
+
+--- horizon-2013.2.3/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/client_side/_balloon_container.html.orig    2014-06-20 08:04:57.394535111 -0600
++++ horizon-2013.2.3/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/client_side/_balloon_container.html     2014-06-20 08:05:47.621066190 -0600
[email protected]@ -16,7 +16,6 @@
+       <div class="cell link">
+         <a href="[[url]]">» view [[type]] details</a>
+         [[#console_id]]
+-        <a href="[[url]][[console]]" class="vnc_window">» open console</a>
+         [[/console_id]]
+       </div>
+       <div class="cell delete">
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/08-flavor-names.patch	Wed Jul 02 06:53:43 2014 -0700
@@ -0,0 +1,41 @@
+In-house patch to increase the maximum length of Flavor names, Flavor
+keys, and Flavor values.  This patch has not yet been submitted
+upstream.
+
+--- horizon-2013.2.3/openstack_dashboard/dashboards/admin/flavors/workflows.py.orig       2014-06-27 09:55:09.196764242 -0600
++++ horizon-2013.2.3/openstack_dashboard/dashboards/admin/flavors/workflows.py        2014-06-27 09:55:12.447224496 -0600
[email protected]@ -35,7 +35,7 @@
+                              "Leave this field blank or use 'auto' to set "
+                              "a random UUID4.")
+     name = forms.RegexField(label=_("Name"),
+-                            max_length=25,
++                            max_length=255,
+                             regex=r'^[\w\.\- ]+$',
+                             error_messages={'invalid': _('Name may only '
+                                 'contain letters, numbers, underscores, '
+
+--- horizon-2013.2.3/openstack_dashboard/dashboards/admin/flavors/extras/forms.py.orig   2014-06-27 09:57:05.195849313 -0600
++++ horizon-2013.2.3/openstack_dashboard/dashboards/admin/flavors/extras/forms.py    2014-06-27 09:57:19.351010746 -0600
[email protected]@ -28,8 +28,8 @@
+ 
+ 
+ class CreateExtraSpec(forms.SelfHandlingForm):
+-    key = forms.CharField(max_length="25", label=_("Key"))
+-    value = forms.CharField(max_length="25", label=_("Value"))
++    key = forms.CharField(max_length="255", label=_("Key"))
++    value = forms.CharField(max_length="255", label=_("Value"))
+     flavor_id = forms.CharField(widget=forms.widgets.HiddenInput)
+ 
+     def handle(self, request, data):
[email protected]@ -46,8 +46,8 @@
+ 
+ 
+ class EditExtraSpec(forms.SelfHandlingForm):
+-    key = forms.CharField(max_length="25", label=_("Key"))
+-    value = forms.CharField(max_length="25", label=_("Value"))
++    key = forms.CharField(max_length="255", label=_("Key"))
++    value = forms.CharField(max_length="255", label=_("Value"))
+     flavor_id = forms.CharField(widget=forms.widgets.HiddenInput)
+ 
+     def handle(self, request, data):
+
--- a/components/openstack/neutron/files/agent/evs_l3_agent.py	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/neutron/files/agent/evs_l3_agent.py	Wed Jul 02 06:53:43 2014 -0700
@@ -241,7 +241,7 @@
                 return
             vid = stdout.splitlines()[0].strip()
             if vid == "":
-                LOG.error(_("External Network does not has a VLAN ID "
+                LOG.error(_("External Network does not have a VLAN ID "
                             "associated with it, and it is required to "
                             "create external gateway port"))
                 return
@@ -306,7 +306,8 @@
         # now setup the IPF rule
         rules = ['block in quick on %s from %s to pool/%d' %
                  (internal_dlname, subnet_cidr, new_ippool_name)]
-        ri.ipfilters_manager.add_ipf_rules(rules)
+        ipversion = netaddr.IPNetwork(subnet_cidr).version
+        ri.ipfilters_manager.add_ipf_rules(rules, ipversion)
 
     def internal_network_removed(self, ri, port):
         internal_dlname = self.get_internal_device_name(port['id'])
@@ -318,7 +319,8 @@
         ippool_name = self._get_ippool_name(port['mac_address'])
         rules = ['block in quick on %s from %s to pool/%d' %
                  (internal_dlname, port['subnet']['cidr'], ippool_name)]
-        ri.ipfilters_manager.remove_ipf_rules(rules)
+        ipversion = netaddr.IPNetwork(port['subnet']['cidr']).version
+        ri.ipfilters_manager.remove_ipf_rules(rules, ipversion)
         # remove the ippool
         ri.ipfilters_manager.remove_ippool(ippool_name, None)
         for internal_port in ri.internal_ports:
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Jul 02 06:53:43 2014 -0700
@@ -262,9 +262,11 @@
 class IPfilterCommand(CommandBase):
     '''Wrapper around Solaris ipf(1m) command'''
 
-    def split_rules(self, rules):
+    def _split_rules(self, rules, version):
         # assumes that rules are inbound!
         cmd = ['/usr/sbin/ipfstat', '-i']
+        if version == 6:
+            cmd.insert(1, '-6')
         stdout = self.execute_with_pfexec(cmd)
         existing_rules = []
         non_existing_rules = []
@@ -276,20 +278,20 @@
 
         return existing_rules, non_existing_rules
 
-    def add_rules(self, rules, version='4'):
-        rules = self.split_rules(rules)[1]
+    def add_rules(self, rules, version=4):
+        rules = self._split_rules(rules, version)[1]
         process_input = '\n'.join(rules)
         cmd = ['/usr/sbin/ipf', '-f', '-']
-        if version == '6':
-            cmd.append('-6')
+        if version == 6:
+            cmd.insert(1, '-6')
         return self.execute_with_pfexec(cmd, process_input=process_input)
 
-    def remove_rules(self, rules, version='4'):
-        rules = self.split_rules(rules)[0]
+    def remove_rules(self, rules, version=4):
+        rules = self._split_rules(rules, version)[0]
         process_input = '\n'.join(rules)
         cmd = ['/usr/sbin/ipf', '-r', '-f', '-']
-        if version == '6':
-            cmd.append('-6')
+        if version == 6:
+            cmd.insert(1, '-6')
         return self.execute_with_pfexec(cmd, process_input=process_input)
 
 
--- a/components/openstack/neutron/files/evs/plugin.py	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/neutron/files/evs/plugin.py	Wed Jul 02 06:53:43 2014 -0700
@@ -432,10 +432,8 @@
         if proplist:
             propstr = ",".join(proplist)
 
-        # TODO(gmoodalb): extract the tenant id if an admin is creating for
-        # someone else
         evsname = subnet['subnet']['network_id']
-        tenantname = subnet['subnet']['tenant_id']
+        tenantname = self._get_tenant_id_for_create(context, subnet['subnet'])
         ipnet = self.evs_controller_addIPnet(tenantname, evsname, ipnetname,
                                              propstr)
         retval = self._convert_ipnet_to_subnet(ipnet)
@@ -626,7 +624,8 @@
         if not evsname:
             evsname = None
 
-        tenantname = network['network']['tenant_id']
+        tenantname = self._get_tenant_id_for_create(context,
+                                                    network['network'])
         proplist = []
         network_type = network['network'][providernet.NETWORK_TYPE]
         if attributes.is_attr_set(network_type):
@@ -817,10 +816,10 @@
             propstr = ",".join(proplist)
 
         evsname = port['port']['network_id']
-        tenantname = port['port']['tenant_id']
-        # TODO(gmoodalb): -- pull it from the network_id!!
+        tenantname = self._get_tenant_id_for_create(context, port['port'])
         if not tenantname:
-            tenantname = context.tenant_id
+            network = self.get_network(context, evsname)
+            tenantname = network['tenant_id']
         vport = self.evs_controller_addVPort(tenantname, evsname, vportname,
                                              propstr)
         retval = self._convert_vport_to_port(context, vport)
@@ -952,9 +951,10 @@
         a notification to L3 agent to release the port before we can
         delete the port"""
 
-        if port['device_owner'] not in [l3_constants.DEVICE_OWNER_ROUTER_INTF,
-                                        l3_constants.DEVICE_OWNER_ROUTER_GW,
-                                        l3_constants.DEVICE_OWNER_FLOATINGIP]:
+        device_owner = port['device_owner']
+        if device_owner not in [l3_constants.DEVICE_OWNER_ROUTER_INTF,
+                                l3_constants.DEVICE_OWNER_ROUTER_GW,
+                                l3_constants.DEVICE_OWNER_FLOATINGIP]:
             return
         router_id = port['device_id']
         port_update = {
@@ -964,10 +964,12 @@
             }
         }
         self.update_port(context, port['id'], port_update)
-        msg = l3_rpc_agent_api.L3AgentNotify.make_msg("routers_updated",
-                                                      routers=[router_id])
-        l3_rpc_agent_api.L3AgentNotify.call(context, msg,
-                                            topic=topics.L3_AGENT)
+        if device_owner in [l3_constants.DEVICE_OWNER_ROUTER_INTF,
+                            l3_constants.DEVICE_OWNER_ROUTER_GW]:
+            msg = l3_rpc_agent_api.L3AgentNotify.make_msg("routers_updated",
+                                                          routers=[router_id])
+            l3_rpc_agent_api.L3AgentNotify.call(context, msg,
+                                                topic=topics.L3_AGENT)
 
     @lockutils.synchronized('evs-plugin', 'neutron-')
     def evs_controller_removeVPort(self, tenantname, evsname, vportuuid):
--- a/components/openstack/neutron/files/neutron-l3-agent	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/neutron/files/neutron-l3-agent	Wed Jul 02 06:53:43 2014 -0700
@@ -61,6 +61,50 @@
     smf_include.smf_subprocess(cmd)
 
 
+def remove_ipfilter_rules(version):
+    # remove IP Filter rules added by neutron-l3-agent
+    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipfstat", "-io"]
+    if version == 6:
+        cmd.insert(2, "-6")
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    output, error = p.communicate()
+    if p.returncode != 0:
+        print "failed to retrieve IP Filter rules"
+        return smf_include.SMF_EXIT_ERR_FATAL
+
+    ipfilters = output.splitlines()
+    # L3 agent IP Filter rules are of the form
+    # block in quick on l3i64cbb496_a_0 from ... to pool/15417332
+    prog = re.compile('on l3i[0-9A-Fa-f\_]{10}_0')
+    ippool_names = []
+    for ipf in ipfilters:
+        if not prog.search(ipf):
+            continue
+        # capture the IP pool name
+        ippool_names.append(ipf.split('pool/')[1])
+
+        try:
+            # remove the IP Filter rule
+            p = Popen(["echo", ipf], stdout=PIPE)
+            cmd = ["/usr/bin/pfexec", "/usr/sbin/ipf", "-r", "-f", "-"]
+            if version == 6:
+                cmd.insert(2, "-6")
+            check_call(cmd, stdin=p.stdout)
+        except CalledProcessError as err:
+            print "failed to remove IP Filter rule %s: %s" % (ipf, err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    # remove IP Pools added by neutron-l3-agent
+    for ippool_name in ippool_names:
+        try:
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ippool", "-R",
+                        "-m", ippool_name, "-t", "tree"])
+        except CalledProcessError as err:
+            print "failed to remove IP Pool %s: %s" % (ippool_name, err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+    return smf_include.SMF_EXIT_OK
+
+
 def stop():
     try:
         # first kill the SMF contract
@@ -95,42 +139,15 @@
             print "failed to remove datalinks used by L3 agent: %s" % (err)
             return smf_include.SMF_EXIT_ERR_FATAL
 
-    # remove IP Filter rules added by neutron-l3-agent
-    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipfstat", "-io"]
-    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
-    output, error = p.communicate()
-    if p.returncode != 0:
-        print "failed to retrieve IP Filter rules"
-        return smf_include.SMF_EXIT_ERR_FATAL
+    # remove IPv4 Filter rules added by neutron-l3-agent
+    rv = remove_ipfilter_rules(4)
+    if rv != smf_include.SMF_EXIT_OK:
+        return rv
 
-    ipfilters = output.splitlines()
-    # L3 agent IP Filter rules are of the form
-    # block in quick on l3i64cbb496_a_0 from ... to pool/15417332
-    prog = re.compile('on l3i[0-9A-Fa-f\_]{10}_0')
-    ippool_names = []
-    for ipf in ipfilters:
-        if not prog.search(ipf):
-            continue
-        # capture the IP pool name
-        ippool_names.append(ipf.split('pool/')[1])
-
-        try:
-            # remove the IP Filter rule
-            p = Popen(["echo", ipf], stdout=PIPE)
-            check_call(["/usr/bin/pfexec", "/usr/sbin/ipf", "-r", "-f", "-"],
-                       stdin=p.stdout)
-        except CalledProcessError as err:
-            print "failed to remove IP Filter rule %s: %s" % (ipf, err)
-            return smf_include.SMF_EXIT_ERR_FATAL
-
-    # remove IP Pools added by neutron-l3-agent
-    for ippool_name in ippool_names:
-        try:
-            check_call(["/usr/bin/pfexec", "/usr/sbin/ippool", "-R",
-                        "-m", ippool_name, "-t", "tree"])
-        except CalledProcessError as err:
-            print "failed to remove IP Pool %s: %s" % (ippool_name, err)
-            return smf_include.SMF_EXIT_ERR_FATAL
+    # remove IPv6 Filter rules added by neutron-l3-agent
+    rv = remove_ipfilter_rules(6)
+    if rv != smf_include.SMF_EXIT_OK:
+        return rv
 
     # remove IP NAT rules added by neutron-l3-agent
     cmd = ["/usr/bin/pfexec", "/usr/sbin/ipnat", "-lR"]
--- a/components/openstack/nova/nova.p5m	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/nova/nova.p5m	Wed Jul 02 06:53:43 2014 -0700
@@ -847,6 +847,9 @@
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python-2/routes-26
 
+# force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python-2/sqlalchemy-26
+
 # force a dependency on stevedore; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python-2/stevedore-26
 
--- a/components/openstack/nova/resolve.deps	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/nova/resolve.deps	Wed Jul 02 06:53:43 2014 -0700
@@ -6,7 +6,6 @@
 library/python-2/webob-26
 library/python/eventlet-26
 library/python/oslo.config-26
-library/python/sqlalchemy-26
 library/python/sqlalchemy-migrate-26
 library/python/webob-26
 runtime/python-26
--- a/components/openstack/swift/files/swift-replicator-rsync	Thu Jun 26 11:50:03 2014 -0700
+++ b/components/openstack/swift/files/swift-replicator-rsync	Wed Jul 02 06:53:43 2014 -0700
@@ -54,7 +54,7 @@
             smf_include.SMF_EXIT_ERR_FATAL, "exec_fail",
             "rsync daemon failed to start (see service log)")
 
-    return int(smf_include.SMF_EXIT_OK)
+    return smf_include.SMF_EXIT_OK
 
 if __name__ == "__main__":
     os.putenv("LC_ALL", "C")