PSARC/2016/116 OpenStack Neutron support for Packet Filter
author	Girish Moodalbail <Girish.Moodalbail@oracle.COM>
Thu, 10 Mar 2016 14:12:18 -0800
changeset 5579 48110757c6c6
parent 5578 fd608d60ca64
child 5580 80858b37b98e
PSARC/2016/116 OpenStack Neutron support for Packet Filter
22300681 update neutron L3 agent to use PF instead of IPF
components/openstack/neutron/Makefile
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/solaris/ipfilters_manager.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/agent/solaris/packetfilter.py
components/openstack/neutron/files/neutron-l3-agent
components/openstack/neutron/files/neutron-l3-agent.xml
components/openstack/neutron/files/neutron.prof_attr
components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py
components/openstack/neutron/neutron.p5m
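
For orientation: all of the L3 agent's PF state is now keyed off per-datalink anchors nested under the _auto/neutron:l3:agent root anchor, with 'normal', 'pbr', and per-floating-IP sub-anchors beneath them. Below is a minimal sketch of how those anchor paths compose, mirroring _get_anchor_path() in the new packetfilter.py; the datalink names are the examples from its docstring, and none of this is part of the patched code itself.

    root = '_auto/neutron:l3:agent'

    def anchor_path(subanchors=None):
        # same composition rule as PacketFilter._get_anchor_path()
        return '%s/%s' % (root, '/'.join(subanchors)) if subanchors else root

    anchor_path(['l3i45b26bf5_2_0', 'normal'])   # per-port filter/rdr rules
    anchor_path(['l3i45b26bf5_2_0', 'pbr'])      # policy based routing rules
    anchor_path(['l3eb1b8d0e7_1_0'])             # external gateway datalink
    anchor_path()                                # the root anchor itself
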
--- a/components/openstack/neutron/Makefile	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/Makefile	Thu Mar 10 14:12:18 2016 -0800
@@ -106,8 +106,8 @@
 	 files/agent/solaris/__init__.py \
 	 files/agent/solaris/dhcp.py \
 	 files/agent/solaris/interface.py \
-	 files/agent/solaris/ipfilters_manager.py \
 	 files/agent/solaris/net_lib.py \
+	 files/agent/solaris/packetfilter.py \
 	 files/agent/solaris/ra.py \
 	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/solaris; \
     $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs; \
@@ -152,7 +152,7 @@
 REQUIRED_PACKAGES += library/python/simplejson-27
 REQUIRED_PACKAGES += library/python/six-27
 REQUIRED_PACKAGES += library/python/sqlalchemy-27
-REQUIRED_PACKAGES += network/ipfilter
+REQUIRED_PACKAGES += network/firewall
 REQUIRED_PACKAGES += service/network/dnsmasq
 REQUIRED_PACKAGES += service/network/evs
 REQUIRED_PACKAGES += system/core-os
--- a/components/openstack/neutron/files/agent/evs_l3_agent.py	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/agent/evs_l3_agent.py	Thu Mar 10 14:12:18 2016 -0800
@@ -31,8 +31,8 @@
 from neutron.agent.l3 import router_info as router
 from neutron.agent.linux import utils
 from neutron.agent.solaris import interface
-from neutron.agent.solaris import ipfilters_manager
 from neutron.agent.solaris import net_lib
+from neutron.agent.solaris import packetfilter
 from neutron.agent.solaris import ra
 from neutron.callbacks import events
 from neutron.callbacks import registry
@@ -59,7 +59,7 @@
                  use_ipv6=False):
         super(SolarisRouterInfo, self).__init__(router_id, router, agent_conf,
                                                 interface_driver, use_ipv6)
-        self.ipfilters_manager = ipfilters_manager.IPfiltersManager()
+        self.pf = packetfilter.PacketFilter("_auto/neutron:l3:agent")
         self.iptables_manager = None
         self.remove_route = False
 
@@ -96,7 +96,7 @@
         pass
 
     def _get_existing_devices(self):
-        return net_lib.Datalink.show_vnic()
+        return net_lib.Datalink.show_link()
 
     def internal_network_added(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
@@ -107,14 +107,9 @@
         self.driver.init_l3(internal_dlname, ip_cidrs)
 
         # Since we support shared router model, we need to block the new
-        # internal port from reaching other tenant's ports
-        block_pname = self._get_ippool_name(port['mac_address'])
-        self.ipfilters_manager.add_ippool(block_pname, None)
-        if self.agent_conf.allow_forwarding_between_networks:
-            # If allow_forwarding_between_networks is set, then we need to
-            # allow forwarding of packets between same tenant's ports.
-            allow_pname = self._get_ippool_name(port['mac_address'], '0')
-            self.ipfilters_manager.add_ippool(allow_pname, None)
+        # internal port from reaching other tenant's ports. However, if
+        # allow_forwarding_between_networks is set, then we need to
+        # allow forwarding of packets between same tenant's ports.
 
         # walk through the other internal ports and retrieve their
         # cidrs and at the same time add the new internal port's
@@ -123,103 +118,113 @@
         block_subnets = []
         allow_subnets = []
         for internal_port in self.internal_ports:
+            # skip the port being added
             if internal_port['mac_address'] == port['mac_address']:
                 continue
+            internal_port_dlname = \
+                self.get_internal_device_name(internal_port['id'])
             if (self.agent_conf.allow_forwarding_between_networks and
                     internal_port['tenant_id'] == port['tenant_id']):
                 allow_subnets.append(internal_port['subnets'][0]['cidr'])
                 # we need to add the port's subnet to this internal_port's
-                # allowed_subnet_pool
-                iport_allow_pname = \
-                    self._get_ippool_name(internal_port['mac_address'], '0')
-                self.ipfilters_manager.add_ippool(iport_allow_pname,
-                                                  [port_subnet])
+                # allowed_subnet_table
+                iport_allow_tblname = 'allow_' + internal_port_dlname
+                self.pf.add_table_entry(iport_allow_tblname, [port_subnet],
+                                        [internal_port_dlname, 'normal'])
             else:
                 block_subnets.append(internal_port['subnets'][0]['cidr'])
-                iport_block_pname = \
-                    self._get_ippool_name(internal_port['mac_address'])
-                self.ipfilters_manager.add_ippool(iport_block_pname,
-                                                  [port_subnet])
-        # update the new port's pool with other ports' subnet
-        self.ipfilters_manager.add_ippool(block_pname, block_subnets)
+                iport_block_tblname = 'block_' + internal_port_dlname
+                self.pf.add_table_entry(iport_block_tblname, [port_subnet],
+                                        [internal_port_dlname, 'normal'])
+
+        # update the new port's table with other ports' subnet
+        block_tblname = 'block_' + internal_dlname
+        self.pf.add_table_entry(block_tblname, block_subnets,
+                                [internal_dlname, 'normal'])
         if self.agent_conf.allow_forwarding_between_networks:
-            self.ipfilters_manager.add_ippool(allow_pname, allow_subnets)
+            allow_tblname = 'allow_' + internal_dlname
+            self.pf.add_table_entry(allow_tblname, allow_subnets,
+                                    [internal_dlname, 'normal'])
 
-        # now setup the IPF rules
-        rules = ['block in quick on %s from %s to pool/%d' %
-                 (internal_dlname, port_subnet, block_pname)]
+        # now setup the PF rules
+        label = 'block_%s' % internal_dlname
+        rules = ['block in quick from %s to <%s> label %s' %
+                 (port_subnet, block_tblname, label)]
         # pass in packets between networks that belong to same tenant
         if self.agent_conf.allow_forwarding_between_networks:
-            rules.append('pass in quick on %s from %s to pool/%d' %
-                         (internal_dlname, port_subnet, allow_pname))
+            label = 'allow_%s' % internal_dlname
+            rules.append('pass in quick from %s to <%s> label %s' %
+                         (port_subnet, allow_tblname, label))
+
+        # if metadata is enabled, then we need to redirect all the packets
+        # arriving at 169.254.169.254:80 to neutron-metadata-proxy server
+        # listening at self.agent_conf.metadata_port
+        ipversion = netaddr.IPNetwork(port_subnet).version
+        if self.agent_conf.enable_metadata_proxy and ipversion == 4:
+            fixed_ip_address = port['fixed_ips'][0]['ip_address']
+            label = 'metadata_%s' % fixed_ip_address
+            rules.append('pass in quick proto tcp to 169.254.169.254/32 '
+                         'port 80 rdr-to %s port %s label %s' %
+                         (fixed_ip_address, self.agent_conf.metadata_port,
+                          label))
+        # finally add all the rules in one shot
+        anchor_option = "on %s" % internal_dlname
+        self.pf.add_nested_anchor_rule(None, internal_dlname, anchor_option)
+        self.pf.add_rules(rules, [internal_dlname, 'normal'])
+
+        ex_gw_port = self.ex_gw_port
+        if not ex_gw_port:
+            return
+
+        ex_gw_ip = ex_gw_port['subnets'][0]['gateway_ip']
+        if not ex_gw_ip:
+            return
+
+        if netaddr.IPAddress(ex_gw_ip).version != 4 or ipversion != 4:
+            return
+
         # if the external gateway is already setup for the shared router,
         # then we need to add Policy Based Routing (PBR) for this internal
         # network
-        ex_gw_port = self.ex_gw_port
-        ex_gw_ip = (ex_gw_port['subnets'][0]['gateway_ip']
-                    if ex_gw_port else None)
-        if ex_gw_ip:
-            external_dlname = self.get_external_device_name(ex_gw_port['id'])
-            rules.append('pass in on %s to %s:%s from any to !%s' %
-                         (internal_dlname, external_dlname, ex_gw_ip,
-                          port_subnet))
+        external_dlname = self.get_external_device_name(ex_gw_port['id'])
+        label = 'pbr_%s' % port_subnet.replace('/', '_')
+        # don't forward broadcast packets out of the internal subnet
+        pbr_rules = ['pass in quick to 255.255.255.255 label %s_bcast' %
+                     label]
+        pbr_rules.append('pass in to !%s route-to {(%s %s)} label %s' %
+                         (port_subnet, external_dlname, ex_gw_ip, label))
 
-        ipversion = netaddr.IPNetwork(port_subnet).version
-        self.ipfilters_manager.add_ipf_rules(rules, ipversion)
-        if self.agent_conf.enable_metadata_proxy and ipversion == 4:
-            rdr_rule = ['rdr %s 169.254.169.254/32 port 80 -> %s port %d tcp' %
-                        (internal_dlname, port['fixed_ips'][0]['ip_address'],
-                         self.agent_conf.metadata_port)]
-            self.ipfilters_manager.add_nat_rules(rdr_rule)
+        self.pf.add_rules(pbr_rules, [internal_dlname, 'pbr'])
+        if self._snat_enabled:
+            ex_gw_ip_cidrs = \
+                common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
+            snat_rule = 'pass out from %s to any nat-to %s' % \
+                (ip_cidrs[0], ex_gw_ip_cidrs[0])
+            self.pf.add_rules([snat_rule],
+                              [external_dlname, '%s' % internal_dlname])
 
     def internal_network_removed(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
         port_subnet = port['subnets'][0]['cidr']
-        # remove all the IP filter rules that we added during
-        # internal network addition
-        block_pname = self._get_ippool_name(port['mac_address'])
-        rules = ['block in quick on %s from %s to pool/%d' %
-                 (internal_dlname, port_subnet, block_pname)]
-        if self.agent_conf.allow_forwarding_between_networks:
-            allow_pname = self._get_ippool_name(port['mac_address'], '0')
-            rules.append('pass in quick on %s from %s to pool/%d' %
-                         (internal_dlname, port_subnet, allow_pname))
-
-        # remove all the IP filter rules that we added during
-        # external network addition
-        ex_gw_port = self.ex_gw_port
-        ex_gw_ip = (ex_gw_port['subnets'][0]['gateway_ip']
-                    if ex_gw_port else None)
-        if ex_gw_ip:
-            external_dlname = self.get_external_device_name(ex_gw_port['id'])
-            rules.append('pass in on %s to %s:%s from any to !%s' %
-                         (internal_dlname, external_dlname, ex_gw_ip,
-                          port_subnet))
-        ipversion = netaddr.IPNetwork(port['subnets'][0]['cidr']).version
-        self.ipfilters_manager.remove_ipf_rules(rules, ipversion)
-
-        # remove the ippool
-        self.ipfilters_manager.remove_ippool(block_pname, None)
-        if self.agent_conf.allow_forwarding_between_networks:
-            self.ipfilters_manager.remove_ippool(allow_pname, None)
 
         for internal_port in self.internal_ports:
+            internal_port_dlname = \
+                self.get_internal_device_name(internal_port['id'])
             if (self.agent_conf.allow_forwarding_between_networks and
                     internal_port['tenant_id'] == port['tenant_id']):
-                iport_allow_pname = \
-                    self._get_ippool_name(internal_port['mac_address'], '0')
-                self.ipfilters_manager.remove_ippool(iport_allow_pname,
-                                                     [port_subnet])
+                iport_allow_tblname = 'allow_' + internal_port_dlname
+                self.pf.remove_table_entry(iport_allow_tblname, [port_subnet],
+                                           [internal_port_dlname, 'normal'])
             else:
-                iport_block_pname = \
-                    self._get_ippool_name(internal_port['mac_address'])
-                self.ipfilters_manager.remove_ippool(iport_block_pname,
-                                                     [port_subnet])
-        if self.agent_conf.enable_metadata_proxy and ipversion == 4:
-            rdr_rule = ['rdr %s 169.254.169.254/32 port 80 -> %s port %d tcp' %
-                        (internal_dlname, port['fixed_ips'][0]['ip_address'],
-                         self.agent_conf.metadata_port)]
-            self.ipfilters_manager.remove_nat_rules(rdr_rule)
+                iport_block_tblname = 'block_' + internal_port_dlname
+                self.pf.remove_table_entry(iport_block_tblname, [port_subnet],
+                                           [internal_port_dlname, 'normal'])
+
+        # remove the nested anchors rule from neutron:l3:agent
+        self.pf.remove_nested_anchor_rule(None, internal_dlname)
+
+        # remove the anchor and tables associated with this internal port
+        self.pf.remove_anchor_recursively([internal_dlname])
 
         if net_lib.Datalink.datalink_exists(internal_dlname):
             self.driver.fini_l3(internal_dlname)
@@ -278,15 +283,6 @@
             self.driver.fini_l3(stale_dev)
             self.driver.unplug(stale_dev)
 
-    def _get_ippool_name(self, mac_address, suffix=None):
-        # Generate a unique-name for ippool(1m) from that last 3
-        # bytes of mac-address. It is called pool name, but it is
-        # actually a 32 bit integer
-        name = mac_address.split(':')[3:]
-        if suffix:
-            name.append(suffix)
-        return int("".join(name), 16)
-
     def process_floating_ip_addresses(self, interface_name):
         """Configure IP addresses on router's external gateway interface.
 
@@ -306,10 +302,6 @@
         existing_cidrs = set(ipaddr_list)
         new_cidrs = set()
 
-        existing_nat_rules = [nat_rule for nat_rule in
-                              self.ipfilters_manager.ipv4['nat']]
-        new_nat_rules = []
-
         floating_ips = self.get_floating_ips()
         # Loop once to ensure that floating ips are configured.
         for fip in floating_ips:
@@ -317,42 +309,40 @@
             fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
             new_cidrs.add(fip_cidr)
             fixed_cidr = str(fip['fixed_ip_address']) + '/32'
-            nat_rule = 'bimap %s %s -> %s' % (interface_name, fixed_cidr,
-                                              fip_cidr)
+            label = 'fip_%s' % fip_cidr.replace('/', '_')
+            binat_rule = 'pass quick from %s to any binat-to %s label %s' % \
+                (fixed_cidr, fip_cidr, label)
 
             if fip_cidr not in existing_cidrs:
                 try:
                     ipintf.create_address(fip_cidr)
-                    self.ipfilters_manager.add_nat_rules([nat_rule])
+                    self.pf.add_rules([binat_rule], [interface_name,
+                                                     fip_cidr.split('/')[0]])
                 except Exception as err:
-                    # TODO(gmoodalb): If we fail in add_nat_rules(), then
-                    # we need to remove the fip_cidr address
-
                     # any exception occurred here should cause the floating IP
                     # to be set in error state
                     fip_statuses[fip['id']] = (
                         l3_constants.FLOATINGIP_STATUS_ERROR)
                     LOG.warn(_("Unable to configure IP address for "
                                "floating IP: %s: %s") % (fip['id'], err))
+                    # remove the fip_cidr address if it was added
+                    try:
+                        ipintf.delete_address(fip_cidr)
+                    except:
+                        pass
                     continue
             fip_statuses[fip['id']] = (
                 l3_constants.FLOATINGIP_STATUS_ACTIVE)
+
             LOG.debug("Floating ip %(id)s added, status %(status)s",
                       {'id': fip['id'],
                        'status': fip_statuses.get(fip['id'])})
 
-            new_nat_rules.append(nat_rule)
-
-        # remove all the old NAT rules
-        old_nat_rules = list(set(existing_nat_rules) - set(new_nat_rules))
-        # Filter out 'bimap' NAT rules as we don't want to remove NAT rules
-        # that were added for Metadata server
-        old_nat_rules = [rule for rule in old_nat_rules if "bimap" in rule]
-        self.ipfilters_manager.remove_nat_rules(old_nat_rules)
-
-        # Clean up addresses that no longer belong on the gateway interface.
+        # Clean up addresses that no longer belong on the gateway interface
+        # and remove the binat-to PF rule associated with them
         for ip_cidr in existing_cidrs - new_cidrs:
             if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
+                self.pf.remove_anchor([interface_name, ip_cidr.split('/')[0]])
                 ipintf.delete_address(ip_cidr)
         return fip_statuses
 
@@ -398,6 +388,10 @@
         ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
         self.driver.init_l3(external_dlname, ip_cidrs)
 
+        # add nested anchor rule first
+        anchor_option = "on %s" % external_dlname
+        self.pf.add_nested_anchor_rule(None, external_dlname, anchor_option)
+
         gw_ip = ex_gw_port['subnets'][0]['gateway_ip']
         if gw_ip:
             cmd = ['/usr/bin/pfexec', '/usr/sbin/route', 'add', 'default',
@@ -406,33 +400,37 @@
             if 'entry exists' not in stdout:
                 self.remove_route = True
 
-            # for each of the internal ports, add Policy Based
-            # Routing (PBR) rule
+            # for each of the internal ports, add Policy Based Routing (PBR)
+            # rule iff ex_gw_ip is IPv4 and the internal port is IPv4
+            if netaddr.IPAddress(gw_ip).version != 4:
+                return
             for port in self.internal_ports:
+                port_subnet = port['subnets'][0]['cidr']
+                if netaddr.IPNetwork(port_subnet).version != 4:
+                    continue
                 internal_dlname = self.get_internal_device_name(port['id'])
-                rules = ['pass in on %s to %s:%s from any to !%s' %
-                         (internal_dlname, external_dlname, gw_ip,
-                          port['subnets'][0]['cidr'])]
-                ipversion = \
-                    netaddr.IPNetwork(port['subnets'][0]['cidr']).version
-                self.ipfilters_manager.add_ipf_rules(rules, ipversion)
+                label = 'pbr_%s' % port_subnet.replace('/', '_')
+                pbr_rules = ['pass in quick to 255.255.255.255 '
+                             'label %s_bcast' % label]
+                pbr_rules.append('pass in to !%s route-to {(%s %s)} '
+                                 'label %s' % (port_subnet, external_dlname,
+                                               gw_ip, label))
+                self.pf.add_rules(pbr_rules, [internal_dlname, 'pbr'])
 
     def external_gateway_updated(self, ex_gw_port, external_dlname):
         # There is nothing to do on Solaris
         pass
 
     def external_gateway_removed(self, ex_gw_port, external_dlname):
+        # remove nested anchor rule first
+        self.pf.remove_nested_anchor_rule(None, external_dlname)
+
         gw_ip = ex_gw_port['subnets'][0]['gateway_ip']
         if gw_ip:
             # remove PBR rules
             for port in self.internal_ports:
                 internal_dlname = self.get_internal_device_name(port['id'])
-                rules = ['pass in on %s to %s:%s from any to !%s' %
-                         (internal_dlname, external_dlname, gw_ip,
-                          port['subnets'][0]['cidr'])]
-                ipversion = \
-                    netaddr.IPNetwork(port['subnets'][0]['cidr']).version
-                self.ipfilters_manager.remove_ipf_rules(rules, ipversion)
+                self.pf.remove_anchor([internal_dlname, 'pbr'])
 
             if self.remove_route:
                 cmd = ['/usr/bin/pfexec', '/usr/sbin/route', 'delete',
@@ -484,28 +482,27 @@
         self.perform_snat_action(self._handle_router_snat_rules,
                                  interface_name)
 
-    def external_gateway_snat_rules(self, ex_gw_ip, interface_name):
-        rules = []
-        ip_cidrs = []
+    def external_gateway_snat_rules(self, ex_gw_ip, external_dlname):
+        rules = {}
         for port in self.internal_ports:
             if netaddr.IPNetwork(port['subnets'][0]['cidr']).version == 4:
-                ip_cidrs.extend(common_utils.fixed_ip_cidrs(port['fixed_ips']))
+                ip_cidrs = common_utils.fixed_ip_cidrs(port['fixed_ips'])
+                label = 'snat_%s' % ip_cidrs[0].replace('/', '_')
+                rule = 'pass out from %s to any nat-to %s label %s' % \
+                    (ip_cidrs[0], ex_gw_ip, label)
+                rules[port['id']] = [rule]
 
-        for ip_cidr in ip_cidrs:
-            rules.append('map %s %s -> %s/32' %
-                         (interface_name, ip_cidr, ex_gw_ip))
         return rules
 
-    def _handle_router_snat_rules(self, ex_gw_port, interface_name, action):
+    def _handle_router_snat_rules(self, ex_gw_port, external_dlname, action):
         # Remove all the old SNAT rules
         # This is safe because if use_namespaces is set as False
         # then the agent can only configure one router, otherwise
         # each router's SNAT rules will be in their own namespace
 
-        # get only the SNAT rules
-        old_snat_rules = [rule for rule in self.ipfilters_manager.ipv4['nat']
-                          if rule.startswith('map')]
-        self.ipfilters_manager.remove_nat_rules(old_snat_rules)
+        for port in self.internal_ports:
+            internal_dlname = self.get_internal_device_name(port['id'])
+            self.pf.remove_anchor([external_dlname, internal_dlname])
 
         # And add them back if the action is add_rules
         if action == 'add_rules' and ex_gw_port:
@@ -514,9 +511,14 @@
                 ex_gw_ip = ip_addr['ip_address']
                 if netaddr.IPAddress(ex_gw_ip).version == 4:
                     rules = self.external_gateway_snat_rules(ex_gw_ip,
-                                                             interface_name)
-                    self.ipfilters_manager.add_nat_rules(rules)
-                    break
+                                                             external_dlname)
+                    if not rules:
+                        continue
+                    for port_id, rule in rules.iteritems():
+                        internal_dlname = \
+                            self.get_internal_device_name(port_id)
+                        self.pf.add_rules(rule, [external_dlname,
+                                                 internal_dlname])
 
     def process_external(self, agent):
         existing_floating_ips = self.floating_ips
--- a/components/openstack/neutron/files/agent/solaris/ipfilters_manager.py	Thu Mar 10 13:27:59 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Girish Moodalbail, Oracle, Inc.
-#
-
-"""Implements ipfilter and ipnat rules using Solaris utilities."""
-
-from neutron.agent.solaris import net_lib
-
-
-class IPfiltersManager(object):
-    """Wrapper for Solaris IPF commands -- ipf(1m), ipnat(1m),
-    and ippool(1m)."""
-
-    def __init__(self):
-        self.ipv4 = {'filter': [], 'nat': []}
-        self.ipv6 = {'filter': [], 'nat': []}
-
-    def add_ippool(self, number, ip_cidrs):
-        ippool = net_lib.IPpoolCommand(number)
-        if ip_cidrs:
-            ippool.add_pool_nodes(ip_cidrs)
-        else:
-            ippool.add_pool()
-
-    def remove_ippool(self, number, ip_cidrs):
-        ippool = net_lib.IPpoolCommand(number)
-        if ip_cidrs:
-            ippool.remove_pool_nodes(ip_cidrs)
-        else:
-            ippool.remove_pool()
-
-    def add_nat_rules(self, rules):
-        ipnat = net_lib.IPnatCommand()
-        ipnat.add_rules(rules)
-        # we successfully added the nat rules, update the local copy
-        for rule in rules:
-            self.ipv4['nat'].append(rule)
-
-    def remove_nat_rules(self, rules):
-        ipnat = net_lib.IPnatCommand()
-        ipnat.remove_rules(rules)
-        # we successfully removed the nat rules, update the local copy
-        for rule in rules:
-            self.ipv4['nat'].remove(rule)
-
-    def add_ipf_rules(self, rules, version=4):
-        ipf = net_lib.IPfilterCommand()
-        ipf.add_rules(rules, version)
-        version_rules = (self.ipv4['filter'] if version == 4 else
-                         self.ipv6['filter'])
-        for rule in rules:
-            version_rules.append(rule)
-
-    def remove_ipf_rules(self, rules, version=4):
-        ipf = net_lib.IPfilterCommand()
-        ipf.remove_rules(rules, version)
-        version_rules = (self.ipv4['filter'] if version == 4 else
-                         self.ipv6['filter'])
-        for rule in rules:
-            version_rules.remove(rule)
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Thu Mar 10 14:12:18 2016 -0800
@@ -1,6 +1,6 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -42,11 +42,12 @@
 
     @classmethod
     def ifname_exists(cls, ifname):
-
-        cmd = ['/usr/sbin/ipadm', 'show-if', '-po', 'ifname']
-        stdout = cls.execute(cmd)
-
-        return ifname in stdout
+        try:
+            cmd = ['/usr/sbin/ipadm', 'show-if', '-po', 'ifname', ifname]
+            cls.execute(cmd, log_fail_as_error=False)
+        except Exception:
+            return False
+        return True
 
     @classmethod
     def ipaddr_exists(cls, ifname, ipaddr):
@@ -167,11 +168,12 @@
 
     @classmethod
     def datalink_exists(cls, dlname):
-
-        cmd = ['/usr/sbin/dladm', 'show-link', '-po', 'link']
-        stdout = utils.execute(cmd)
-
-        return dlname in stdout
+        try:
+            cmd = ['/usr/sbin/dladm', 'show-link', '-po', 'link', dlname]
+            utils.execute(cmd, log_fail_as_error=False)
+        except Exception:
+            return False
+        return True
 
     def connect_vnic(self, evsvport, tenantname=None, temp=True):
         if self.datalink_exists(self._dlname):
@@ -216,128 +218,8 @@
         self.execute_with_pfexec(cmd)
 
     @classmethod
-    def show_vnic(cls):
-        cmd = ['/usr/sbin/dladm', 'show-vnic', '-po', 'link']
+    def show_link(cls):
+        cmd = ['/usr/sbin/dladm', 'show-link', '-po', 'link']
         stdout = utils.execute(cmd)
 
         return stdout.splitlines()
-
-
-class IPpoolCommand(CommandBase):
-    '''Wrapper around Solaris ippool(1m) command'''
-
-    def __init__(self, pool_name, role='ipf', pool_type='tree'):
-        self._pool_name = pool_name
-        self._role = role
-        self._pool_type = pool_type
-
-    def pool_exists(self):
-        cmd = ['/usr/sbin/ippool', '-l', '-m', self._pool_name,
-               '-t', self._pool_type]
-        stdout = self.execute_with_pfexec(cmd)
-        return str(self._pool_name) in stdout
-
-    def pool_split_nodes(self, ip_cidrs):
-        cmd = ['/usr/sbin/ippool', '-l', '-m', self._pool_name,
-               '-t', self._pool_type]
-        stdout = self.execute_with_pfexec(cmd)
-        existing_nodes = []
-        non_existing_nodes = []
-        for ip_cidr in ip_cidrs:
-            if ip_cidr in stdout:
-                existing_nodes.append(ip_cidr)
-            else:
-                non_existing_nodes.append(ip_cidr)
-        return existing_nodes, non_existing_nodes
-
-    def add_pool_nodes(self, ip_cidrs):
-        ip_cidrs = self.pool_split_nodes(ip_cidrs)[1]
-
-        for ip_cidr in ip_cidrs:
-            cmd = ['/usr/sbin/ippool', '-a', '-m', self._pool_name,
-                   '-i', ip_cidr]
-            self.execute_with_pfexec(cmd)
-
-    def remove_pool_nodes(self, ip_cidrs):
-        ip_cidrs = self.pool_split_nodes(ip_cidrs)[0]
-
-        for ip_cidr in ip_cidrs:
-            cmd = ['/usr/sbin/ippool', '-r', '-m', self._pool_name,
-                   '-i', ip_cidr]
-            self.execute_with_pfexec(cmd)
-
-    def add_pool(self):
-        if self.pool_exists():
-            return
-
-        cmd = ['/usr/sbin/ippool', '-A', '-m', self._pool_name,
-               '-o', self._role, '-t', self._pool_type]
-        self.execute_with_pfexec(cmd)
-
-    def remove_pool(self):
-        if not self.pool_exists():
-            return
-
-        # This command will fail if ippool is in use by ipf, so the
-        # caller has to ensure that it's not being used in an ipf rule
-        cmd = ['/usr/sbin/ippool', '-R', '-m', self._pool_name,
-               '-o', self._role, '-t', self._pool_type]
-        self.execute_with_pfexec(cmd)
-
-
-class IPfilterCommand(CommandBase):
-    '''Wrapper around Solaris ipf(1m) command'''
-
-    def _split_rules(self, rules, version):
-        # assumes that rules are inbound!
-        cmd = ['/usr/sbin/ipfstat', '-i']
-        if version == 6:
-            cmd.insert(1, '-6')
-        stdout = self.execute_with_pfexec(cmd)
-        existing_rules = []
-        non_existing_rules = []
-        for rule in rules:
-            if rule in stdout:
-                existing_rules.append(rule)
-            else:
-                non_existing_rules.append(rule)
-
-        return existing_rules, non_existing_rules
-
-    def add_rules(self, rules, version=4):
-        rules = self._split_rules(rules, version)[1]
-        if not rules:
-            return
-        process_input = '\n'.join(rules) + '\n'
-        cmd = ['/usr/sbin/ipf', '-f', '-']
-        if version == 6:
-            cmd.insert(1, '-6')
-        self.execute_with_pfexec(cmd, process_input=process_input)
-
-    def remove_rules(self, rules, version=4):
-        rules = self._split_rules(rules, version)[0]
-        if not rules:
-            return
-        process_input = '\n'.join(rules) + '\n'
-        cmd = ['/usr/sbin/ipf', '-r', '-f', '-']
-        if version == 6:
-            cmd.insert(1, '-6')
-        self.execute_with_pfexec(cmd, process_input=process_input)
-
-
-class IPnatCommand(CommandBase):
-    '''Wrapper around Solaris ipnat(1m) command'''
-
-    def add_rules(self, rules):
-        if not rules:
-            return
-        process_input = '\n'.join(rules) + '\n'
-        cmd = ['/usr/sbin/ipnat', '-f', '-']
-        self.execute_with_pfexec(cmd, process_input=process_input)
-
-    def remove_rules(self, rules):
-        if not rules:
-            return
-        process_input = '\n'.join(rules) + '\n'
-        cmd = ['/usr/sbin/ipnat', '-r', '-f', '-']
-        self.execute_with_pfexec(cmd, process_input=process_input)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/agent/solaris/packetfilter.py	Thu Mar 10 14:12:18 2016 -0800
@@ -0,0 +1,245 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from neutron.agent.linux import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class PacketFilter(object):
+    '''Wrapper around Solaris pfctl(1M) command'''
+
+    def __init__(self, anchor_name):
+        '''All the PF rules/anchors will be placed under anchor_name.
+
+        An anchor is a collection of rules, tables, and other anchors. They
+        can be nested, which allows PF rulesets to be chained together.
+
+        Anchor names are always specified as an absolute path starting from
+        the root or main ruleset. For example:
+
+            _auto/neutron:l3:agent/l3eb1b8d0e7_1_0
+            _auto/neutron:l3:agent/l3i45b26bf5_2_0/normal
+            _auto/neutron:l3:agent/l3i45b26bf5_2_0/pbr
+
+        If the root_anchor_path is set to _auto/neutron:l3:agent, then all
+        the methods in this class will operate under that root anchor.
+        '''
+        self.root_anchor_path = anchor_name
+
+    def _get_anchor_path(self, subanchors):
+        if subanchors:
+            return '%s/%s' % (self.root_anchor_path, '/'.join(subanchors))
+
+        return self.root_anchor_path
+
+    def add_nested_anchor_rule(self, parent_anchor, child_anchor,
+                               anchor_option=None):
+        """Adds an anchor rule that evaluates nested anchors.
+
+        Adds child_anchor rule with anchor_option (if any) under parent_anchor.
+        If parent_anchor is None, then the root_anchor_path will be used. An
+        anchor rule (anchor "blah/*") tells PF to evaluate all the anchors and
+        rules under 'blah'.
+
+        pfctl(1M) doesn't provide a way to update the ruleset under an
+        anchor, so we always need to read the existing rules and then add a
+        new rule or remove an existing rule.
+        """
+        anchor_path = self._get_anchor_path(parent_anchor)
+        existing_anchor_rules = self.list_anchor_rules(parent_anchor)
+        LOG.debug(_('Existing anchor rules %s under %s') %
+                  (existing_anchor_rules, anchor_path))
+        for rule in existing_anchor_rules:
+            if child_anchor in rule:
+                LOG.debug(_('Anchor rule %s already exists') % rule)
+                return
+        anchor_rule = 'anchor "%s/*"' % child_anchor
+        if anchor_option:
+            anchor_rule = anchor_rule + " " + anchor_option
+        existing_anchor_rules.append(anchor_rule)
+        process_input = '%s\n' % '\n'.join(existing_anchor_rules)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-f', '-']
+        LOG.debug(_('Running: %s') % ' '.join(cmd))
+        utils.execute(cmd, process_input=process_input)
+
+    def remove_nested_anchor_rule(self, parent_anchor, child_anchor):
+        """ Removes an anchor rule that evaluates nested anchors.
+
+        pfctl(1M) doesn't provide a way to update ruleset under an anchor, so
+        we need to always read the existing rules and add a new rule or
+        remove an exisitng rule.
+        """
+        anchor_path = self._get_anchor_path(parent_anchor)
+        existing_anchor_rules = self.list_anchor_rules(parent_anchor)
+        LOG.debug(_('Existing anchor rules %s under %s') %
+                  (existing_anchor_rules, anchor_path))
+        for rule in existing_anchor_rules:
+            if child_anchor in rule:
+                break
+        else:
+            LOG.debug(_('Anchor rule for %s does not exist') % child_anchor)
+            return
+        existing_anchor_rules.remove(rule)
+        process_input = '%s\n' % '\n'.join(existing_anchor_rules)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-f', '-']
+        LOG.debug(_('Running: %s') % ' '.join(cmd))
+        utils.execute(cmd, process_input=process_input)
+
+    def list_anchor_rules(self, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path, '-sr']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        stdout = utils.execute(cmd)
+        anchor_rules = []
+        for anchor_rule in stdout.strip().splitlines():
+            anchor_rules.append(anchor_rule.strip())
+        return anchor_rules
+
+    def list_anchors(self, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path, '-sA']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        stdout = utils.execute(cmd)
+        anchors = []
+        for anchor in stdout.strip().splitlines():
+            anchors.append(anchor.strip())
+        return anchors
+
+    def add_table(self, name, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-t', name, '-T', 'add']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        utils.execute(cmd)
+
+    def add_table_entry(self, name, cidrs, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-t', name, '-T', 'add']
+        cmd.extend(cidrs)
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        utils.execute(cmd)
+
+    def table_exists(self, name, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        try:
+            cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+                   '-t', name, '-T', 'show']
+            utils.execute(cmd)
+        except:
+            return False
+        return True
+
+    def remove_table(self, name, subanchors=None):
+        if not self.table_exists(name, subanchors):
+            LOG.debug(_('Table %s does not exist hence returning without '
+                      'deleting') % name)
+            return
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-t', name, '-T', 'delete']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        utils.execute(cmd)
+
+    def remove_table_entry(self, name, cidrs, subanchors=None):
+        if not self.table_exists(name, subanchors):
+            LOG.debug(_('Table %s does not exist hence returning without '
+                      'deleting') % name)
+            return
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-t', name, '-T', 'delete']
+        cmd.extend(cidrs)
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        utils.execute(cmd)
+
+    def add_rules(self, rules, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        process_input = '\n'.join(rules) + '\n'
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-f', '-']
+        LOG.debug(_('Running: %s with input %s') % (" ".join(cmd),
+                                                    process_input))
+        utils.execute(cmd, process_input=process_input)
+
+    def _get_rule_label(self, rule):
+        if 'label' not in rule:
+            return None
+        keywords = rule.split(' ')
+        for i, keyword in enumerate(keywords):
+            if keyword == 'label':
+                break
+        return keywords[i + 1]
+
+    def remove_anchor(self, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+
+        # retrieve all the labels for rules, we will delete the state
+        # after removing the rules
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path, '-sr']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        stdout = utils.execute(cmd)
+        labels = []
+        for rule in stdout.strip().splitlines():
+            label = self._get_rule_label(rule.strip())
+            if label:
+                labels.append(label)
+
+        # delete the rules and tables
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path,
+               '-F', 'all']
+        LOG.debug(_('Running: %s') % " ".join(cmd))
+        utils.execute(cmd)
+
+        # clear the state
+        for label in labels:
+            cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-k', 'label',
+                   '-k', label]
+            LOG.debug(_('Running: %s') % " ".join(cmd))
+            utils.execute(cmd)
+
+    def _get_relative_nested_anchors(self, anchorname):
+        # anchor names always come with an absolute path, so we need to
+        # remove the root anchor name to get the relative path
+        subanchors = anchorname.split('/')
+        for anchor in self.root_anchor_path.split('/'):
+            if anchor in subanchors:
+                subanchors.remove(anchor)
+        return subanchors
+
+    def remove_anchor_recursively(self, subanchors=None):
+        anchor_path = self._get_anchor_path(subanchors)
+        cmd = ['/usr/bin/pfexec', '/usr/sbin/pfctl', '-a', anchor_path, '-sA']
+        LOG.debug(_('Running: %s') % ' '.join(cmd))
+        stdout = utils.execute(cmd)
+        if not stdout.strip():
+            return
+
+        # we have nested anchors to remove so make recursive calls
+        for nested_anchor in stdout.strip().splitlines():
+            nested_anchor = nested_anchor.strip()
+            if not nested_anchor:
+                continue
+            anchor_list = self._get_relative_nested_anchors(nested_anchor)
+            self.remove_anchor_recursively(anchor_list)
+            self.remove_anchor(anchor_list)
+        anchor_list = self._get_relative_nested_anchors(anchor_path)
+        self.remove_anchor(anchor_list)
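
A minimal usage sketch of the wrapper above, under the same _auto/neutron:l3:agent root anchor the L3 agent uses; the datalink name and CIDRs below are placeholders, not values taken from this changeset:

    from neutron.agent.solaris import packetfilter

    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')

    # hook a per-port anchor into the root ruleset, evaluated on that datalink
    pf.add_nested_anchor_rule(None, 'l3i45b26bf5_2_0', 'on l3i45b26bf5_2_0')

    # populate a block table and add a rule referencing it under .../normal
    pf.add_table_entry('block_l3i45b26bf5_2_0', ['192.168.90.0/24'],
                       ['l3i45b26bf5_2_0', 'normal'])
    pf.add_rules(['block in quick from 192.168.80.0/24 to '
                  '<block_l3i45b26bf5_2_0> label block_l3i45b26bf5_2_0'],
                 ['l3i45b26bf5_2_0', 'normal'])

    # teardown: drop the anchor rule, then the anchor, its tables, and states
    pf.remove_nested_anchor_rule(None, 'l3i45b26bf5_2_0')
    pf.remove_anchor_recursively(['l3i45b26bf5_2_0'])
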
--- a/components/openstack/neutron/files/neutron-l3-agent	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/neutron-l3-agent	Thu Mar 10 14:12:18 2016 -0800
@@ -16,12 +16,14 @@
 
 import os
 import re
+from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 
 import netaddr
 import smf_include
 
-from subprocess import CalledProcessError, Popen, PIPE, check_call
+
+from neutron.agent.solaris import packetfilter
 from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
     get_vpn_interfaces
 from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
@@ -38,8 +40,8 @@
         return False
     if output.strip() == value:
         return True
-    cmd = ["/usr/sbin/ipadm", "set-prop", "-t", "-p", "hostmodel=%s" % value,
-           "ipv4"]
+    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipadm", "set-prop", "-t", "-p",
+           "hostmodel=%s" % value, "ipv4"]
     p = Popen(cmd, stdout=PIPE, stderr=PIPE)
     output, error = p.communicate()
     if p.returncode != 0:
@@ -80,8 +82,13 @@
               "enabled before enabling neutron-l3-agent"
         return smf_include.SMF_EXIT_ERR_CONFIG
 
-    cmd = "/usr/lib/neutron/neutron-l3-agent --config-file %s " \
-        "--config-file %s --config-file %s" % tuple(sys.argv[2:5])
+    # remove any stale PF rules under _auto/neutron:l3:agent anchor
+    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
+    pf.remove_anchor_recursively()
+
+    cmd = "/usr/bin/pfexec /usr/lib/neutron/neutron-l3-agent " \
+        "--config-file %s --config-file %s --config-file %s" % \
+        tuple(sys.argv[2:5])
 
     # The VPNaaS shutdown should unplumb all IP tunnels it created. But
     # be paranoid and check for lingering tunnels created by OpenStack
@@ -108,51 +115,6 @@
     return smf_include.smf_subprocess(cmd)
 
 
-def remove_ipfilter_rules(version):
-    # remove IP Filter rules added by neutron-l3-agent
-    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipfstat", "-io"]
-    if version == 6:
-        cmd.insert(2, "-6")
-    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
-    output, error = p.communicate()
-    if p.returncode != 0:
-        print "failed to retrieve IP Filter rules"
-        return smf_include.SMF_EXIT_ERR_FATAL
-
-    ipfilters = output.splitlines()
-    # L3 agent IP Filter rules are of the form
-    # block in quick on l3i64cbb496_a_0 from ... to pool/15417332
-    prog = re.compile('on l3i[0-9A-Fa-f\_]{10}_0')
-    ippool_names = []
-    for ipf in ipfilters:
-        if not prog.search(ipf):
-            continue
-        # capture the IP pool name
-        if 'pool/' in ipf:
-            ippool_names.append(ipf.split('pool/')[1])
-
-        try:
-            # remove the IP Filter rule
-            p = Popen(["echo", ipf], stdout=PIPE)
-            cmd = ["/usr/bin/pfexec", "/usr/sbin/ipf", "-r", "-f", "-"]
-            if version == 6:
-                cmd.insert(2, "-6")
-            check_call(cmd, stdin=p.stdout)
-        except CalledProcessError as err:
-            print "failed to remove IP Filter rule %s: %s" % (ipf, err)
-            return smf_include.SMF_EXIT_ERR_FATAL
-
-    # remove IP Pools added by neutron-l3-agent
-    for ippool_name in ippool_names:
-        try:
-            check_call(["/usr/bin/pfexec", "/usr/sbin/ippool", "-R",
-                        "-m", ippool_name, "-t", "tree"])
-        except CalledProcessError as err:
-            print "failed to remove IP Pool %s: %s" % (ippool_name, err)
-            return smf_include.SMF_EXIT_ERR_FATAL
-    return smf_include.SMF_EXIT_OK
-
-
 def stop():
     shutdown_vpn()
     try:
@@ -161,43 +123,10 @@
     except CalledProcessError as err:
         print "failed to kill the SMF contract: %s" % (err)
 
-    # We need to first remove the IP filter rules and then remove
-    # the IP interfaces on which the rules were applied.
-
-    # remove IPv4 Filter rules added by neutron-l3-agent
-    rv = remove_ipfilter_rules(4)
-    if rv != smf_include.SMF_EXIT_OK:
-        return rv
-
-    # remove IPv6 Filter rules added by neutron-l3-agent
-    rv = remove_ipfilter_rules(6)
-    if rv != smf_include.SMF_EXIT_OK:
-        return rv
-
-    # remove IP NAT rules added by neutron-l3-agent
-    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipnat", "-lR"]
-    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
-    output, error = p.communicate()
-    if p.returncode != 0:
-        print "failed to retrieve IP NAT rules"
-        return smf_include.SMF_EXIT_ERR_FATAL
-
-    ipnat_rules = output.splitlines()
-    # L3 agent IP NAT rules are of the form
-    # bimap l3e64ccc496_a_0 .... OR
-    # rdr l3iedf345cc96_a_0 ....
-    prog = re.compile('l3[ie][0-9A-Fa-f\_]{10}_0')
-    for ipnat_rule in ipnat_rules:
-        if not prog.search(ipnat_rule):
-            continue
-        # remove the IP NAT rule
-        try:
-            p = Popen(["echo", ipnat_rule], stdout=PIPE)
-            check_call(["/usr/bin/pfexec", "/usr/sbin/ipnat", "-r", "-f", "-"],
-                       stdin=p.stdout)
-        except CalledProcessError as err:
-            print "failed to remove IP NAT rule %s: %s" % (ipnat_rule, err)
-            return smf_include.SMF_EXIT_ERR_FATAL
+    # We need to first remove the PF rules added under _auto/neutron:l3:agent
+    # anchor and then remove the IP interfaces on which the rules were applied.
+    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
+    pf.remove_anchor_recursively()
 
     # remove VNICs associated with L3 agent
     cmd = ["/usr/sbin/ipadm", "show-if", "-p", "-o", "ifname"]
--- a/components/openstack/neutron/files/neutron-l3-agent.xml	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/neutron-l3-agent.xml	Thu Mar 10 14:12:18 2016 -0800
@@ -36,9 +36,9 @@
         value='svc:/application/openstack/neutron/neutron-upgrade' />
     </dependency>
 
-    <dependency name='ipfilter' grouping='require_all' restart_on='error'
+    <dependency name='firewall' grouping='require_all' restart_on='error'
       type='service'>
-      <service_fmri value='svc:/network/ipfilter:default' />
+      <service_fmri value='svc:/network/firewall:default' />
     </dependency>
 
     <dependency name='ntp' grouping='optional_all' restart_on='none'
@@ -61,15 +61,13 @@
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/neutron-l3-agent %m %{config/config_path} %{config/l3_config_path} %{config/vpn_config_path}">
       <method_context>
-        <!-- sys_ip_config is required to set 'hostmodel' ipadm property -->
-        <method_credential user='neutron' group='neutron' privileges='basic,sys_ip_config' />
+        <method_credential user='neutron' group='neutron' />
       </method_context>
     </exec_method>
     <exec_method timeout_seconds="600" type="method" name="stop"
       exec="/lib/svc/method/neutron-l3-agent %m %{restarter/contract}">
       <method_context>
-        <!-- sys_ip_config is required to set 'hostmodel' ipadm property -->
-        <method_credential user='neutron' group='neutron' privileges='basic,sys_ip_config' />
+        <method_credential user='neutron' group='neutron' />
       </method_context>
     </exec_method>
 
--- a/components/openstack/neutron/files/neutron.prof_attr	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/neutron.prof_attr	Thu Mar 10 14:12:18 2016 -0800
@@ -16,4 +16,5 @@
 auths=solaris.network.interface.config,\
 solaris.smf.manage.routing,\
 solaris.smf.value.routing;\
-profiles=Elastic Virtual Switch Administration,IP Filter Management,Network IPsec Management
+profiles=Elastic Virtual Switch Administration,Network Firewall Management,\
+Network IPsec Management
--- a/components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py	Thu Mar 10 14:12:18 2016 -0800
@@ -107,7 +107,7 @@
 from oslo import messaging
 from oslo_concurrency import lockutils, processutils
 from netaddr import IPNetwork
-from neutron.agent.solaris import interface, net_lib, ra, ipfilters_manager
+from neutron.agent.solaris import packetfilter
 from neutron.agent.linux import ip_lib, utils
 from neutron.common import rpc as n_rpc
 from neutron_vpnaas.db.vpn import vpn_db
@@ -181,8 +181,8 @@
         'packet_logging', default=False,
         help=_('IPsec policy failure logging')),
     cfg.StrOpt(
-         'logger_level', default='message+packet',
-         help=_('IPsec policy log level'))
+        'logger_level', default='message+packet',
+        help=_('IPsec policy log level'))
 ]
 cfg.CONF.register_opts(solaris_defaults, 'solaris')
 cfg.CONF.register_opts(solaris_opts, 'solaris')
@@ -241,29 +241,29 @@
     LOG.info(
         "Disabling IPsec policy service: \"svc:/%s\"" % ipsec_svc)
     instance = rad_connection.get_object(
-       smfb.Instance(),
-       rad.client.ADRGlobPattern({'service': ipsec_svc,
-                                 'instance': 'default'}))
+        smfb.Instance(),
+        rad.client.ADRGlobPattern({'service': ipsec_svc,
+                                   'instance': 'default'}))
     instance.disable(False)
 
     instance = rad_connection.get_object(
-       smfb.Instance(),
-       rad.client.ADRGlobPattern({'service': ipsec_svc,
-                                 'instance': 'logger'}))
+        smfb.Instance(),
+        rad.client.ADRGlobPattern({'service': ipsec_svc,
+                                   'instance': 'logger'}))
     instance.disable(False)
 
     LOG.info("Disabling IKE service: \"svc:/%s:default\"" % ike_svc)
     instance = rad_connection.get_object(
-       smfb.Instance(),
-       rad.client.ADRGlobPattern({'service': ike_svc,
-                                 'instance': 'default'}))
+        smfb.Instance(),
+        rad.client.ADRGlobPattern({'service': ike_svc,
+                                   'instance': 'default'}))
     instance.disable(False)
 
     LOG.info("Disabling IKE service: \"svc:/%s:ikev2\"" % ike_svc)
     instance = rad_connection.get_object(
-       smfb.Instance(),
-       rad.client.ADRGlobPattern({'service': ike_svc,
-                                 'instance': 'ikev2'}))
+        smfb.Instance(),
+        rad.client.ADRGlobPattern({'service': ike_svc,
+                                   'instance': 'ikev2'}))
     instance.disable(False)
 
     rad_connection.close()
@@ -355,7 +355,7 @@
        is the tunnel name. See comment in sync() for a description
        of tunnel names.
 
-       Also remove any IPF bypass rules. We don't need
+       Also remove any PF bypass rules. We don't need
        to delete the routes added by our evil twin add_tunnels()
        because these get whacked when the tunnel is removed.
        We are not really interested in any errors at this point, the
@@ -367,8 +367,6 @@
         LOG.info("VPNaaS has just started, no tunnels to whack.")
         return
 
-    ipfilters_manager = net_lib.IPfilterCommand()
-
     for idstr in existing_tunnels:
         cmd = ['/usr/bin/pfexec', 'ipadm', 'delete-ip', idstr]
         try:
@@ -381,20 +379,23 @@
         except processutils.ProcessExecutionError as stderr:
             LOG.debug("\"%s\"" % stderr)
 
-    cmd = ['/usr/sbin/ipfstat', '-i']
-    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
-    output, error = p.communicate()
-    if p.returncode != 0:
-        print "failed to retrieve IP interface names"
-    ifnames = output.splitlines()
-    LOG.debug("ifnames %s" % ifnames)
-    pass_in_rule = re.compile('pass in quick on')
-    for ifname in ifnames:
-        if not pass_in_rule.search(ifname):
+    # Remove all the VPN bypass rules that were added for the local subnet
+    # going out to the remote subnet. These rules are all captured inside an
+    # anchor whose name is of the form vpn_{peer_cidr}
+    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
+    anchors = pf.list_anchors()
+    LOG.debug("All anchors under _auto/neutron:l3:agent: %s" % anchors)
+    for anchor in anchors:
+        if 'l3i' not in anchor:
             continue
-        ipf_cmd = ['%s' % ifname]
-        LOG.info("Deleting IPF bypass: \"%s\"" % ipf_cmd)
-        ipfilters_manager.remove_rules(ipf_cmd, 4)
+        subanchors = anchor.split('/')[2:]
+        l3i_anchors = pf.list_anchors(subanchors)
+        LOG.debug("All anchors under %s: %s" % (anchor, l3i_anchors))
+        for l3i_anchor in l3i_anchors:
+            if 'vpn_' not in l3i_anchor:
+                continue
+            l3i_subanchors = l3i_anchor.split('/')[2:]
+            pf.remove_anchor(l3i_subanchors)
 
     existing_tunnels = []
 
@@ -659,11 +660,12 @@
             status_changed_vpn_services)
 
     def get_connection_status(self):
-	"""Update the status of the ipsec-site-connection
+        """Update the status of the ipsec-site-connection
            based on the output of ikeadm(1m). See get_status()
            for further comments. Any connections that ended up
            on the badboys list will be marked as state DOWN.
         """
+        LOG.debug("Getting Connection Status")
         global being_shutdown
         global restarting
 
@@ -782,7 +784,7 @@
         self.ike_version = ""
         self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC)
         self.context = context.get_admin_context_without_session()
-        self.ipfilters_manager = net_lib.IPfilterCommand()
+        self.pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
         LOG.info("Solaris IPsec/IKE Configuration manager loaded.")
 
     def ensure_configs(self):
@@ -878,8 +880,8 @@
                 stdout, stderr = processutils.execute(*cmd)
             except processutils.ProcessExecutionError as stderr:
                 if re.search('Interface already exists', str(stderr)):
-                    LOG.warn(
-                       "Tunnel interface: %s already exists." % tun_name)
+                    LOG.warn("Tunnel interface: '%s' already exists." %
+                             tun_name)
                 else:
                     LOG.warn("Error creating tunnel")
                     LOG.warn("\"%s\"" % stderr)
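A note on the error handling above: the driver binds the caught exception to
the name stderr, which reads like a file handle but is actually a
ProcessExecutionError. The same idempotent-create pattern with a clearer name,
as a sketch only (cmd and tun_name stand in for values built earlier in this
function; the oslo.concurrency processutils import is an assumption about this
module's imports):

    import re
    from oslo_concurrency import processutils  # assumed import

    def create_tunnel_interface(cmd, tun_name, log):
        try:
            processutils.execute(*cmd)
        except processutils.ProcessExecutionError as err:
            # An already-configured interface is benign, so repeated
            # syncs stay idempotent.
            if re.search('Interface already exists', str(err)):
                log.warn("Tunnel interface: '%s' already exists." % tun_name)
            else:
                log.warn("Error creating tunnel")
                log.warn("\"%s\"" % err)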
@@ -910,11 +912,16 @@
                     self.badboys.append(site)
                     continue
 
-            # Now for some Policy Based Routing (PBR) voodoo. When the EVS
-            # adds a virtual network, it adds a PBR rule that looks like this:
+            # Now for some Policy Based Routing (PBR) voodoo. When a Neutron
+            # subnet is added to a Neutron router, the L3 agent adds a PBR
+            # rule that looks like this:
             #
-            # pass in on l3ia18d6189_8_0 to l3e2d9b3c1c_8_0:10.132.148.1
-            #     from any to !192.168.80.0/24
+            #    anchor "l3ia18d6189_8_0/*" on l3ia18d6189_8_0 all {
+            #      anchor "pbr" all {
+            #        pass in inet from any to ! 192.168.80.0/24
+            #          route-to 10.132.148.1@l3e88c2027b_9_0
+            #      }
+            #    }
             #
             # What this does is to pass all packets leaving interface
             # "l3ia18d6189_8_0" directly to interface/address
@@ -928,30 +935,38 @@
             # To make this happen, we find the interface associated with our
             # network and add a bypass rule. The rule looks like this:
             #
-            # pass in quick on l3ia18d6189_8_0 from any to 192.168.100.0/24
+            #    pass in quick from any to 192.168.100.0/24
             #
             # There will be one of these pass rules for each remote network.
             # The "quick" keyword ensures it matches *BEFORE* the PBR rule.
 
-            cmd = ['/usr/sbin/ipfstat', '-i']
+            # Find the interface whose name starts with 'l3i' and whose IP
+            # address matches the inner source IP address of the IP tunnel
+            # added by VPNaaS, then add a bypass rule to the sub-anchor for
+            # that interface.
+            cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addrobj,addr']
             p = Popen(cmd, stdout=PIPE, stderr=PIPE)
             output, error = p.communicate()
-            if p.returncode != 0:
-                print "failed to retrieve IP interface names"
-            ifnames = output.splitlines()
-            far_subnet = re.compile(subnet['cidr'])
-            for ifname in ifnames:
-                if not ifname.startswith('pass in on'):
-                    continue
-                if far_subnet.search(ifname):
-                    rule_args = ifname.split(' ')
-                    ifa = rule_args[3]
-                    LOG.debug("Found interface for VPN subnet: \"%s\"" % ifa)
+            ifname = None
+            if p.returncode == 0:
+                for addrobj_addr in output.strip().splitlines():
+                    if ((i_local + '/') in addrobj_addr and
+                            addrobj_addr.startswith('l3i')):
+                        addrobj = addrobj_addr.split(':')[0]
+                        ifname = addrobj.split('/')[0]
+                        break
+            if not ifname:
+                LOG.warn("Failed to find IP interface corresponding to "
+                         "VPN subnet: %s. Skipping bypass rule for '%s'" %
+                         (subnet['cidr'], tun_name))
+                continue
 
-            ipf_cmd = ['pass in quick on %s from any to %s' % (ifa, peer_cidr)]
-            LOG.info("No PBR for: \"%s\"" % peer_cidr)
-            LOG.debug("Adding PBR bypass rule: \"%s\"" % ipf_cmd)
-            self.ipfilters_manager.add_rules(ipf_cmd, 4)
+            label = 'vpn_%s_%s' % (ifname, peer_cidr.replace('/', '_'))
+            bypass_rule = 'pass in quick from any to %s label %s' % \
+                (peer_cidr, label)
+            anchor_name = 'vpn_%s' % (peer_cidr.replace('/', '_'))
+            self.pf.add_rules([bypass_rule], [ifname, anchor_name])
+            LOG.debug("Added PBR bypass rule: '%s'" % bypass_rule)
 
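The interface lookup introduced above reduces to parsing the parsable output
of ipadm show-addr, where each line is addrobj:addr (for example
l3ia18d6189_8_0/v4:192.168.80.1/24). A minimal sketch of that lookup and of
the bypass-rule call, assuming i_local is the tunnel's inner source address
and pf/peer_cidr are the objects used in the patch:

    def find_l3i_ifname(show_addr_output, i_local):
        # 'show_addr_output' is the stdout of:
        #     /usr/sbin/ipadm show-addr -po addrobj,addr
        for addrobj_addr in show_addr_output.strip().splitlines():
            if ((i_local + '/') in addrobj_addr and
                    addrobj_addr.startswith('l3i')):
                addrobj = addrobj_addr.split(':')[0]   # e.g. 'l3i..._0/v4'
                return addrobj.split('/')[0]           # e.g. 'l3i..._0'
        return None

    # Usage, mirroring the driver code:
    #   ifname = find_l3i_ifname(output, i_local)
    #   if ifname:
    #       anchor_name = 'vpn_%s' % peer_cidr.replace('/', '_')
    #       label = 'vpn_%s_%s' % (ifname, peer_cidr.replace('/', '_'))
    #       rule = 'pass in quick from any to %s label %s' % (peer_cidr, label)
    #       pf.add_rules([rule], [ifname, anchor_name])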
     def get_status(self):
         """Check to see if IKE is configured and running.
@@ -1067,18 +1082,18 @@
         LOG.info(
             "Setting IPsec policy config file to: \"%s\"" % self.config_file)
         instance = rad_connection.get_object(
-           smfb.Instance(),
-           rad.client.ADRGlobPattern({'service': self.ipsec_svc,
-                                     'instance': 'default'}))
+            smfb.Instance(),
+            rad.client.ADRGlobPattern({'service': self.ipsec_svc,
+                                       'instance': 'default'}))
         instance.writeProperty('config/config_file', smfb.PropertyType.ASTRING,
                                [self.config_file])
         instance.refresh()
 
         LOG.info("Setting IKEv1 config file to: \"%s\"" % self.ike_config_file)
         instance = rad_connection.get_object(
-           smfb.Instance(),
-           rad.client.ADRGlobPattern({'service': self.ike_svc,
-                                     'instance': 'default'}))
+            smfb.Instance(),
+            rad.client.ADRGlobPattern({'service': self.ike_svc,
+                                       'instance': 'default'}))
         instance.writeProperty('config/config_file', smfb.PropertyType.ASTRING,
                                [self.ike_config_file])
 
@@ -1090,9 +1105,9 @@
         LOG.info(
             "Setting IKEv2 config file to: \"%s\"" % self.ikev2_config_file)
         instance = rad_connection.get_object(
-           smfb.Instance(),
-           rad.client.ADRGlobPattern({'service': self.ike_svc,
-                                     'instance': 'ikev2'}))
+            smfb.Instance(),
+            rad.client.ADRGlobPattern({'service': self.ike_svc,
+                                       'instance': 'ikev2'}))
         instance.writeProperty('config/config_file', smfb.PropertyType.ASTRING,
                                [self.ikev2_config_file])
 
@@ -1106,9 +1121,9 @@
             LOG.info(
                 "Setting IPsec policy logger to: \"%s\"" % self.logging_level)
             instance = rad_connection.get_object(
-               smfb.Instance(),
-               rad.client.ADRGlobPattern({'service': self.ipsec_svc,
-                                         'instance': 'logger'}))
+                smfb.Instance(),
+                rad.client.ADRGlobPattern({'service': self.ipsec_svc,
+                                           'instance': 'logger'}))
             instance.writeProperty('config/log_level',
                                    smfb.PropertyType.ASTRING,
                                    [self.logging_level])
@@ -1122,24 +1137,24 @@
         LOG.info("Enabling IPsec policy.")
         rad_connection = rad.connect.connect_unix()
         instance = rad_connection.get_object(
-           smfb.Instance(),
-           rad.client.ADRGlobPattern({'service': self.ipsec_svc,
-                                     'instance': 'default'}))
+            smfb.Instance(),
+            rad.client.ADRGlobPattern({'service': self.ipsec_svc,
+                                       'instance': 'default'}))
         instance.enable(True)
 
         LOG.info("Enabling IKE version \"%s\"" % self.ike_version)
         instance = rad_connection.get_object(
-           smfb.Instance(),
-           rad.client.ADRGlobPattern({'service': self.ike_svc,
-                                     'instance': self.ike_version}))
+            smfb.Instance(),
+            rad.client.ADRGlobPattern({'service': self.ike_svc,
+                                       'instance': self.ike_version}))
         instance.enable(True)
 
         if self.packet_logging:
             LOG.warn("Enabling IPsec packet logger.")
             instance = rad_connection.get_object(
-               smfb.Instance(),
-               rad.client.ADRGlobPattern({'service': self.ipsec_svc,
-                                         'instance': 'logger'}))
+                smfb.Instance(),
+                rad.client.ADRGlobPattern({'service': self.ipsec_svc,
+                                           'instance': 'logger'}))
             instance.enable(True)
 
         rad_connection.close()
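All of the RAD calls in this hunk follow one pattern: look up an smf(5)
instance by service and instance name, then operate on it. A hedged sketch of
that pattern, assuming smfb is this module's alias for the RAD smf_1 bindings
(rad.bindings.com.oracle.solaris.rad.smf_1) and reusing only calls that appear
above:

    import rad.client
    import rad.connect
    import rad.bindings.com.oracle.solaris.rad.smf_1 as smfb  # assumed alias

    def get_smf_instance(rad_connection, service, instance):
        # Look up an SMF instance object over RAD by service/instance name.
        return rad_connection.get_object(
            smfb.Instance(),
            rad.client.ADRGlobPattern({'service': service,
                                       'instance': instance}))

    # Usage mirroring the enable path above (the service names come from
    # self.ipsec_svc / self.ike_svc in the driver):
    #   rc = rad.connect.connect_unix()
    #   get_smf_instance(rc, ipsec_svc, 'default').enable(True)
    #   rc.close()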
@@ -1388,12 +1403,12 @@
 
             for new_ipsec_site_conn in process.connection_ids:
                 if ipsec_site_conn == new_ipsec_site_conn:
-                    LOG.debug(
-                        "Found entry for ID: \"%s\"" % new_ipsec_site_conn)
+                    LOG.debug("Found entry for ID: '%s'" %
+                              new_ipsec_site_conn)
                     found_previous_connection = True
             if not found_previous_connection:
-                LOG.debug(
-                   "Unable to find entry for ID: \"%s\"" % ipsec_site_conn)
+                LOG.debug("Unable to find entry for ID: '%s'" %
+                          ipsec_site_conn)
                 return True
             continue
 
--- a/components/openstack/neutron/neutron.p5m	Thu Mar 10 13:27:59 2016 -0800
+++ b/components/openstack/neutron/neutron.p5m	Thu Mar 10 14:12:18 2016 -0800
@@ -39,7 +39,7 @@
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
 set name=openstack.upgrade-id reboot-needed=true value=$(COMPONENT_BE_VERSION)
 set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/059 \
-    value=PSARC/2015/110 value=PSARC/2015/535
+    value=PSARC/2015/110 value=PSARC/2015/535 value=PSARC/2016/116
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 #
 dir  path=etc/neutron owner=neutron group=neutron mode=0700
@@ -242,8 +242,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/dhcp.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/interface.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/ipfilters_manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/net_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/packetfilter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/ra.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/polling.py
@@ -975,17 +975,8 @@
 # force a dependency on package delivering ipadm(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipadm
 
-# force a dependency on package delivering ipf(1M)
-depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipf
-
-# force a dependency on package delivering ipfstat(1M)
-depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipfstat
-
-# force a dependency on package delivering ipnat(1M)
-depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipnat
-
-# force a dependency on package delivering ippool(1M)
-depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ippool
+# force a dependency on package delivering pfctl(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/pfctl
 
 # force a dependency on cliff; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/cliff-$(PYV)