PSARC 2016/268 Neutron EVS Plugin EOF
author Laszlo Peter <laszlo.peter@oracle.com>
Wed, 07 Sep 2016 14:48:41 -0700
changeset 6848 8e252a37ed0d
parent 6847 57069587975f
child 6849 f9a2279efa0d
PSARC 2016/268 Neutron EVS Plugin EOF
24465835 Update Neutron for the Mitaka release
22271305 EOF monolithic neutron plugin for Openstack
18734794 port-create --fixed-ip accepts invalid argument, creates port with wrong IP
components/openstack/neutron/Makefile
components/openstack/neutron/files/agent/l3/solaris_agent.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/files/agent/solaris/interface.py
components/openstack/neutron/files/agent/solaris/namespace_manager.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/agent/solaris/pd.py
components/openstack/neutron/files/bgp_dragent.ini
components/openstack/neutron/files/dhcp_agent.ini
components/openstack/neutron/files/evs/migrate/__init__.py
components/openstack/neutron/files/evs/migrate/evs-neutron-migration.py
components/openstack/neutron/files/evs/migrate/havana_api.py
components/openstack/neutron/files/evs/migrate/migrate-evs-to-ovs
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/evs_plugin.ini
components/openstack/neutron/files/l3_agent.ini
components/openstack/neutron/files/metadata_agent.ini
components/openstack/neutron/files/metering_agent.ini
components/openstack/neutron/files/ml2_conf.ini
components/openstack/neutron/files/neutron-dhcp-agent
components/openstack/neutron/files/neutron-l3-agent
components/openstack/neutron/files/neutron-openvswitch-agent.xml
components/openstack/neutron/files/neutron-server
components/openstack/neutron/files/neutron-upgrade
components/openstack/neutron/files/neutron.conf
components/openstack/neutron/files/neutron_vpnaas.conf
components/openstack/neutron/files/ovs_neutron_plugin.ini
components/openstack/neutron/files/plugins/ml2/linuxbridge_agent.ini
components/openstack/neutron/files/plugins/ml2/macvtap_agent.ini
components/openstack/neutron/files/plugins/ml2/ml2_conf.ini
components/openstack/neutron/files/plugins/ml2/ml2_conf_sriov.ini
components/openstack/neutron/files/plugins/ml2/openvswitch_agent.ini
components/openstack/neutron/files/plugins/ml2/sriov_agent.ini
components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py
components/openstack/neutron/files/vpn_agent.ini
components/openstack/neutron/neutron.p5m
components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch
components/openstack/neutron/patches/02-l3-agent-add-solaris.patch
components/openstack/neutron/patches/03-metadata-driver-solaris.patch
components/openstack/neutron/patches/04-requirements.patch
components/openstack/neutron/patches/05-alembic-migrations.patch
components/openstack/neutron/patches/06-ml2-ovs-support.patch
components/openstack/neutron/patches/06-opts.patch
components/openstack/neutron/patches/07-ml2-ovs-support.patch
components/openstack/neutron/patches/07-ovs-agent-monitor-assertion-fix.patch
components/openstack/neutron/patches/08-ovs-binding-failed-fix.patch
components/openstack/neutron/patches/09-dhcp-agent-warning-fix.patch
components/openstack/neutron/patches/09-ml2-ovs-agent-misc.patch
components/openstack/neutron/patches/10-floatingip-remove-port-on-failed-create.patch
components/openstack/neutron/patches/10-interface-driver-entry-point.patch
components/openstack/neutron/patches/11-mysql_cluster_support.patch
components/openstack/neutron/patches/vpnaas-01-vpn_db.patch_1
components/openstack/neutron/patches/vpnaas-02-opts.patch_1
components/openstack/neutron/vpnaas_patches/01-vpn_db_add_solaris.patch
--- a/components/openstack/neutron/Makefile	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/Makefile	Wed Sep 07 14:48:41 2016 -0700
@@ -27,36 +27,36 @@
 include ../../../make-rules/shared-targets.mk
 
 COMPONENT_NAME=		neutron
-COMPONENT_CODENAME=	kilo
-COMPONENT_VERSION=	2015.1.2
-COMPONENT_BE_VERSION=	2015.1
+COMPONENT_CODENAME=	mitaka
+COMPONENT_VERSION=	8.1.2
+COMPONENT_BE_VERSION=	2016.1
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:3ff282d75e86ea25f64e97e24b2960e7ffaeef7cf4a69c16d20ffe18065d0ef0
-COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
+    sha256:a3fdeed1421e1586bbdabd046474f1060bff4751257eacd90489f9e1b6eeff9d
+COMPONENT_ARCHIVE_URL=	https://tarballs.openstack.org/$(COMPONENT_NAME)/$(COMPONENT_ARCHIVE)
 COMPONENT_SIG_URL=	$(COMPONENT_ARCHIVE_URL).asc
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/neutron
-IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
 IPS_PKG_NAME=		cloud/openstack/neutron
 
-TPNO=			25791
-TPNO_VPNAAS=		27275
-
-NUM_EXTRA_ARCHIVES=	1
+TPNO=			30361
+TPNO_VPNAAS=		30362
 
 COMPONENT_NAME_1=	neutron-vpnaas
-COMPONENT_SRC_1=	$(COMPONENT_NAME_1)-$(COMPONENT_VERSION)
+COMPONENT_VERSION_1=	8.1.2
+COMPONENT_SRC_1=	$(COMPONENT_NAME_1)-$(COMPONENT_VERSION_1)
 COMPONENT_ARCHIVE_1=	$(COMPONENT_SRC_1).tar.gz
 COMPONENT_ARCHIVE_HASH_1=	\
-    sha256:969d0d098db2d5df33d3008d3139821330bafcc7d7e684472db8b4c23b2126e6
-COMPONENT_ARCHIVE_URL_1=	http://launchpad.net/neutron/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE_1)
+    sha256:3852d8bf27c49c1beb0813a327d10e50b185f26e4479ad168498c4a2c6c97dd2
+COMPONENT_ARCHIVE_URL_1=https://tarballs.openstack.org/$(COMPONENT_NAME_1)/$(COMPONENT_ARCHIVE_1)
 COMPONENT_SIG_URL_1=	$(COMPONENT_ARCHIVE_URL_1).asc
 SOURCE_DIR_1=		$(COMPONENT_DIR)/$(COMPONENT_SRC_1)
+
 DEVICE_DRIVERS=		neutron_vpnaas/services/vpn/device_drivers
 DEVICE_TEMPLATE=	$(DEVICE_DRIVERS)/template/solaris
 
+PKG_PROTO_DIRS =	$(SOURCE_DIR_1)
 PKG_VARS +=		COMPONENT_BE_VERSION
 
 include $(WS_MAKE_RULES)/prep.mk
@@ -80,10 +80,25 @@
 PKG_MACROS +=		PYVER=$(PYTHON_VERSIONS)
 PKG_MACROS +=		PYV=$(shell echo $(PYTHON_VERSIONS) | tr -d .)
 
+install-vpnaas: $(SOURCE_DIR_1)/.installed
+
+$(SOURCE_DIR_1)/.installed:
+	(cd $(SOURCE_DIR_1); \
+	 $(ENV) \
+	     HOME=$(BUILD_DIR)/config-$* \
+	     PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
+	     $(COMPONENT_BUILD_ENV) \
+	     $(PYTHON.$(BITS)) ./setup.py build \
+		 --build-base $(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%); \
+	 $(ENV) \
+	     HOME=$(BUILD_DIR)/config-$* \
+	     PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
+	     $(COMPONENT_BUILD_ENV) \
+	     $(PYTHON.$(BITS)) ./setup.py install $(COMPONENT_INSTALL_ARGS))
+	$(TOUCH) $(SOURCE_DIR_1)/.installed
+
 # move all the proper files into place and construct .pyc files for them
 COMPONENT_POST_BUILD_ACTION += \
-    $(GPATCH) -d $(SOURCE_DIR_1) $(GPATCH_FLAGS) \
-	< vpnaas_patches/01-vpn_db_add_solaris.patch; \
     (cd $(SOURCE_DIR_1) ; \
 	$(ENV) PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
 	    HOME=$(BUILD_DIR)/config-$* $(COMPONENT_BUILD_ENV) \
@@ -102,10 +117,6 @@
 	 files/neutron-server.xml \
 	 files/neutron-upgrade.xml \
 	 $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
-    $(MKDIR) $(PROTO_DIR)/usr/lib/neutron; \
-    $(CP) files/evs/migrate/evs-neutron-migration.py \
-	 $(PROTO_DIR)/usr/lib/neutron/evs-neutron-migration; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent; \
     $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/l3; \
     $(CP) files/agent/l3/solaris_agent.py \
 	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/l3; \
@@ -114,27 +125,21 @@
 	 files/agent/solaris/__init__.py \
 	 files/agent/solaris/dhcp.py \
 	 files/agent/solaris/interface.py \
+	 files/agent/solaris/namespace_manager.py \
 	 files/agent/solaris/net_lib.py \
 	 files/agent/solaris/packetfilter.py \
+	 files/agent/solaris/pd.py \
 	 files/agent/solaris/ra.py \
 	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/solaris; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs; \
-    $(TOUCH) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/__init__.py; \
-    $(CP) files/evs/plugin.py $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/migrate; \
-    $(CP) \
-	 files/evs/migrate/__init__.py \
-	 files/evs/migrate/havana_api.py \
-	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/migrate; \
     $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_TEMPLATE); \
     $(CP) files/services/vpn/device_drivers/solaris_ipsec.py \
 	$(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_DRIVERS); \
     $(CP) \
+	files/services/vpn/device_drivers/template/solaris/ike.secret.template \
+	files/services/vpn/device_drivers/template/solaris/ike.template \
 	files/services/vpn/device_drivers/template/solaris/ikev2.secret.template \
-	files/services/vpn/device_drivers/template/solaris/ike.template \
 	files/services/vpn/device_drivers/template/solaris/ikev2.template \
 	files/services/vpn/device_drivers/template/solaris/ipsecinit.conf.template \
-	files/services/vpn/device_drivers/template/solaris/ike.secret.template \
 	$(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_TEMPLATE)
 
 COMPONENT_POST_INSTALL_ACTION += \
@@ -143,30 +148,24 @@
 # common targets
 build:		$(BUILD_NO_ARCH)
 
-install:	$(INSTALL_NO_ARCH)
+install:	$(INSTALL_NO_ARCH) install-vpnaas
 
 test:		$(NO_TESTS)
 
-system-test:    $(NO_TESTS)
-
+system-test:	$(NO_TESTS)
 
 REQUIRED_PACKAGES += cloud/openstack/openstack-common
-REQUIRED_PACKAGES += library/python/alembic-27
 REQUIRED_PACKAGES += library/python/eventlet-27
-REQUIRED_PACKAGES += library/python/iniparse-27
 REQUIRED_PACKAGES += library/python/netaddr-27
-REQUIRED_PACKAGES += library/python/netifaces-27
-REQUIRED_PACKAGES += library/python/neutronclient-27
 REQUIRED_PACKAGES += library/python/oslo.config-27
-REQUIRED_PACKAGES += library/python/oslo.db-27
+REQUIRED_PACKAGES += library/python/oslo.log-27
+REQUIRED_PACKAGES += library/python/oslo.messaging-27
+REQUIRED_PACKAGES += library/python/oslo.utils-27
 REQUIRED_PACKAGES += library/python/simplejson-27
 REQUIRED_PACKAGES += library/python/six-27
-REQUIRED_PACKAGES += library/python/sqlalchemy-27
 REQUIRED_PACKAGES += network/arping
 REQUIRED_PACKAGES += network/firewall
 REQUIRED_PACKAGES += service/network/dnsmasq
-REQUIRED_PACKAGES += service/network/evs
 REQUIRED_PACKAGES += service/network/openvswitch
 REQUIRED_PACKAGES += system/core-os
-REQUIRED_PACKAGES += system/management/rad/client/rad-python
 REQUIRED_PACKAGES += system/network
--- a/components/openstack/neutron/files/agent/l3/solaris_agent.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/l3/solaris_agent.py	Wed Sep 07 14:48:41 2016 -0700
@@ -18,15 +18,16 @@
 #
 
 """
-Based off generic l3_agent (neutron/agent/l3_agent) code
+Based off of generic l3_agent (neutron/agent/l3/agent.py) code
 """
 
 import errno
 import netaddr
 
-from oslo.config import cfg
+from oslo_config import cfg
 from oslo_log import log as logging
 
+from neutron._i18n import _, _LE, _LW
 from neutron.agent.common import ovs_lib
 from neutron.agent.l3 import agent as l3_agent
 from neutron.agent.l3 import router_info as router
@@ -40,6 +41,7 @@
 from neutron.callbacks import resources
 from neutron.common import constants as l3_constants
 from neutron.common import exceptions as n_exc
+from neutron.common import ipv6_utils
 from neutron.common import utils as common_utils
 
 from neutron_vpnaas.services.vpn import vpn_service
@@ -60,6 +62,8 @@
         self.pf = packetfilter.PacketFilter("_auto/neutron:l3:agent")
         self.iptables_manager = None
         self.remove_route = False
+        self.router_namespace = None
+        self.ns_name = None
         self.ipnet_gwportname = dict()
         self.tenant_subnets = dict()
         self.tenant_subnets['all_tenants'] = set()
@@ -93,14 +97,141 @@
         dname += '_0'
         return dname.replace('-', '_')
 
-    def routes_updated(self):
-        pass
+    def update_routing_table(self, operation, route):
+        if operation == 'replace':
+            operation = 'change'
+            cmd = ['/usr/sbin/route', 'get', route['destination']]
+            try:
+                utils.execute(cmd, log_fail_as_error=False)
+            except:
+                operation = 'add'
+            cmd = ['/usr/sbin/route', operation, route['destination'],
+                   route['nexthop']]
+            utils.execute(cmd)
+        else:
+            assert operation == 'delete'
+            cmd = ['/usr/sbin/route', 'delete', route['destination'],
+                   route['nexthop']]
+            utils.execute(cmd)
+
+    def _add_floating_ip_rules(self, interface_name, fip, fip_statuses):
+        fixed_ip = fip['fixed_ip_address']
+        fip_ip = fip['floating_ip_address']
+        for ipnet, gwportname in self.ipnet_gwportname.iteritems():
+            if fixed_ip in ipnet:
+                break
+        else:
+            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
+            LOG.warn(_("Unable to configure IP address for floating IP(%s)"
+                       " '%s' for '%s'") % (fip['id'], fip_ip, fixed_ip))
+            return False
+
+        label = 'fip_%s' % str(fip_ip)
+        fip_rules = ['pass out quick from %s to any nat-to %s static-port '
+                     'label %s_out reply-to %s@%s' % (fixed_ip, fip_ip, label,
+                                                      fixed_ip,  gwportname)]
+        fip_rules.append('pass in quick from any to %s rdr-to %s label %s_in '
+                         'route-to %s@%s' % (fip_ip, fixed_ip, label,
+                                             fixed_ip, gwportname))
+        self.pf.add_rules(fip_rules, [interface_name, fip_ip])
+        return True
+
+    def process_floating_ip_addresses(self, interface_name):
+        """Configure IP addresses on router's external gateway interface.
+
+        Ensures addresses for existing floating IPs and cleans up
+        those that should not longer be configured.
+        """
+
+        fip_statuses = {}
+        if interface_name is None:
+            LOG.debug('No Interface for floating IPs router: %s',
+                      self.router['id'])
+            return fip_statuses
+
+        ipintf = net_lib.IPInterface(interface_name)
+        ipaddr_list = ipintf.ipaddr_list()['static']
+
+        existing_cidrs = set(ipaddr_list)
+        new_cidrs = set()
 
-    def _get_existing_devices(self):
-        return net_lib.Datalink.show_link()
+        floating_ips = self.get_floating_ips()
+        # Loop once to ensure that floating ips are configured.
+        for fip in floating_ips:
+            fixed_ip = fip['fixed_ip_address']
+            fip_ip = fip['floating_ip_address']
+            fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
+            new_cidrs.add(fip_cidr)
+            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
+            if fip_cidr not in existing_cidrs:
+                try:
+                    ipintf.create_address(fip_cidr, ifcheck=False,
+                                          addrcheck=False)
+                    if not self._add_floating_ip_rules(interface_name, fip,
+                                                       fip_statuses):
+                        continue
+                    net_lib.send_ip_addr_adv_notif(interface_name,
+                                                   fip['floating_ip_address'],
+                                                   self.agent_conf)
+                except Exception as err:
+                    # any exception occurred here should cause the floating IP
+                    # to be set in error state
+                    fip_statuses[fip['id']] = (
+                        l3_constants.FLOATINGIP_STATUS_ERROR)
+                    LOG.warn(_("Unable to configure IP address for "
+                               "floating IP: %s: %s") % (fip['id'], err))
+                    # remove the fip_cidr address if it was added
+                    try:
+                        ipintf.delete_address(fip_cidr)
+                    except:
+                        pass
+                    continue
+            else:
+                existing_anchor_rules = self.pf.list_anchor_rules(
+                    [interface_name, fip_ip])
+                # check if existing fip has been reassigned
+                fip_reassigned = any([fixed_ip not in rule for rule in
+                                      existing_anchor_rules])
+                if fip_reassigned:
+                    LOG.debug("Floating ip '%s' reassigned to '%s'",
+                              fip_ip, fixed_ip)
+                    # flush rules associated with old fixed_ip and add
+                    # new rules for the new fixed_ip
+                    self.pf.remove_anchor([interface_name, fip_ip])
+                    if not self._add_floating_ip_rules(interface_name, fip,
+                                                       fip_statuses):
+                        continue
+                elif fip_statuses[fip['id']] == fip['status']:
+                    # mark the status as not changed. we can't remove it
+                    # because that's how the caller determines that it was
+                    # removed (TODO(gmoodalb): check this)
+                    fip_statuses[fip['id']] = router.FLOATINGIP_STATUS_NOCHANGE
+
+            LOG.debug("Floating ip %(id)s added, status %(status)s",
+                      {'id': fip['id'],
+                       'status': fip_statuses.get(fip['id'])})
+
+        # Clean up addresses that no longer belong on the gateway interface and
+        # remove the binat-to PF rule associated with them
+        for ip_cidr in existing_cidrs - new_cidrs:
+            if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
+                LOG.debug("Removing floating ip %s from interface %s",
+                          ip_cidr, ipintf)
+                self.pf.remove_anchor([interface_name, ip_cidr.split('/')[0]])
+                ipintf.delete_address(ip_cidr, addrcheck=False)
+        return fip_statuses
+
+    def delete(self, agent):
+        self.router['gw_port'] = None
+        self.router[l3_constants.INTERFACE_KEY] = []
+        self.router[l3_constants.FLOATINGIP_KEY] = []
+        self.process_delete(agent)
+        self.disable_radvd()
 
     def internal_network_added(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
+        LOG.debug("adding internal network: port(%s), interface(%s)",
+                  port['id'], internal_dlname)
         # driver just returns if datalink and IP interface already exists
         self.driver.plug(port['tenant_id'], port['network_id'], port['id'],
                          internal_dlname, port['mac_address'],
@@ -121,9 +252,8 @@
         # listening at self.agent_conf.metadata_port
         if self.agent_conf.enable_metadata_proxy and ipversion == 4:
             rules.append('pass in quick proto tcp to 169.254.169.254/32 '
-                         'port 80 rdr-to 127.0.0.1 port %s label metadata_%s '
-                         'reply-to %s' % (self.agent_conf.metadata_port,
-                          internal_dlname, internal_dlname))
+                         'port 80 rdr-to 127.0.0.1 port %s label metadata_%s'
+                         % (self.agent_conf.metadata_port, internal_dlname))
 
         # Since we support shared router model, we need to block the new
         # internal port from reaching other tenant's ports. However, if
@@ -173,6 +303,8 @@
 
     def internal_network_removed(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
+        LOG.debug("removing internal network: port(%s) interface(%s)",
+                  port['id'], internal_dlname)
         # remove the anchor and tables associated with this internal port
         self.pf.remove_anchor_recursively([internal_dlname])
         if self.ex_gw_port and self._snat_enabled:
@@ -181,9 +313,14 @@
             self.pf.remove_anchor_recursively([external_dlname,
                                                internal_dlname])
         if net_lib.Datalink.datalink_exists(internal_dlname):
-            self.driver.fini_l3(internal_dlname)
             self.driver.unplug(internal_dlname)
 
+    def _get_existing_devices(self):
+        return net_lib.Datalink.show_link()
+
+    def internal_network_updated(self, interface_name, ip_cidrs):
+        pass
+
     def _apply_common_rules(self, all_subnets, internal_ports):
         v4_subnets = [subnet for subnet in all_subnets
                       if netaddr.IPNetwork(subnet).version == 4]
@@ -289,7 +426,7 @@
                 self.pf.replace_table_entry(allow_tblname, list(allow_subnets),
                                             [internal_dlname, 'normal'])
 
-    def _process_internal_ports(self):
+    def _process_internal_ports(self, pd):
         existing_port_ids = set([p['id'] for p in self.internal_ports])
 
         internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
@@ -309,14 +446,26 @@
         enable_ra = False
         for p in new_ports:
             self.internal_network_added(p)
+            LOG.debug("appending port %s to internal_ports cache", p)
             self.internal_ports.append(p)
             enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+            for subnet in p['subnets']:
+                if ipv6_utils.is_ipv6_pd_enabled(subnet):
+                    interface_name = self.get_internal_device_name(p['id'])
+                    pd.enable_subnet(self.router_id, subnet['id'],
+                                     subnet['cidr'],
+                                     interface_name, p['mac_address'])
 
         for p in old_ports:
             self.internal_network_removed(p)
+            LOG.debug("removing port %s from internal_ports cache", p)
             self.internal_ports.remove(p)
             enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+            for subnet in p['subnets']:
+                if ipv6_utils.is_ipv6_pd_enabled(subnet):
+                    pd.disable_subnet(self.router_id, subnet['id'])
 
+#         updated_cidres = []
 #         if updated_ports:
 #             for index, p in enumerate(internal_ports):
 #                 if not updated_ports.get(p['id']):
@@ -324,10 +473,27 @@
 #                 self.internal_ports[index] = updated_ports[p['id']]
 #                 interface_name = self.get_internal_device_name(p['id'])
 #                 ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
+#                 LOG.debug("updating internal network for port %s", p)
+#                 updated_cidrs += ip_cidrs
+
 #                 self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
 #                         namespace=self.ns_name)
 #                 enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
 
+#         # Check if there is any pd prefix update
+#         for p in internal_ports:
+#             if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
+#                 for subnet in p.get('subnets', []):
+#                     if ipv6_utils.is_ipv6_pd_enabled(subnet):
+#                         old_prefix = pd.update_subnet(self.router_id,
+#                                                       subnet['id'],
+#                                                       subnet['cidr'])
+#                         if old_prefix:
+#                             self._internal_network_updated(p, subnet['id'],
+#                                                            subnet['cidr'],
+#                                                            old_prefix,
+#                                                            updated_cidrs)
+#                             enable_ra = True
         # Enable RA
         if enable_ra:
             self.radvd.enable(internal_ports)
@@ -342,110 +508,14 @@
         for stale_dev in stale_devs:
             LOG.debug(_('Deleting stale internal router device: %s'),
                       stale_dev)
-            self.driver.fini_l3(stale_dev)
+            pd.remove_stale_ri_ifname(self.router_id, stale_dev)
             self.driver.unplug(stale_dev)
 
-    def _add_floating_ip_rules(self, interface_name, fip, fip_statuses):
-        fixed_ip = fip['fixed_ip_address']
-        fip_ip = fip['floating_ip_address']
-        for ipnet, gwportname in self.ipnet_gwportname.iteritems():
-            if fixed_ip in ipnet:
-                break
-        else:
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
-            LOG.warn(_("Unable to configure IP address for floating IP(%s)"
-                       " '%s' for '%s'") % (fip['id'], fip_ip, fixed_ip))
-            return False
-
-        label = 'fip_%s' % str(fip_ip)
-        fip_rules = ['pass out quick from %s to any nat-to %s static-port '
-                     'label %s_out reply-to %s@%s' % (fixed_ip, fip_ip, label,
-                                                      fixed_ip,  gwportname)]
-        fip_rules.append('pass in quick from any to %s rdr-to %s label %s_in '
-                         'route-to %s@%s' % (fip_ip, fixed_ip, label,
-                                             fixed_ip, gwportname))
-        self.pf.add_rules(fip_rules, [interface_name, fip_ip])
-        return True
-
-    def process_floating_ip_addresses(self, interface_name):
-        """Configure IP addresses on router's external gateway interface.
-
-        Ensures addresses for existing floating IPs and cleans up
-        those that should not longer be configured.
-        """
-
-        fip_statuses = {}
-        if interface_name is None:
-            LOG.debug('No Interface for floating IPs router: %s',
-                      self.router['id'])
-            return fip_statuses
-
-        ipintf = net_lib.IPInterface(interface_name)
-        ipaddr_list = ipintf.ipaddr_list()['static']
-
-        existing_cidrs = set(ipaddr_list)
-        new_cidrs = set()
-
-        floating_ips = self.get_floating_ips()
-
-        # Loop once to ensure that floating ips are configured.
-        for fip in floating_ips:
-            fixed_ip = fip['fixed_ip_address']
-            fip_ip = fip['floating_ip_address']
-            fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
-            new_cidrs.add(fip_cidr)
-            if fip_cidr not in existing_cidrs:
-                try:
-                    ipintf.create_address(fip_cidr, ifcheck=False,
-                                          addrcheck=False)
-                    if not self._add_floating_ip_rules(interface_name, fip,
-                                                       fip_statuses):
-                        continue
-                    net_lib.send_ip_addr_adv_notif(interface_name,
-                                                   fip['floating_ip_address'],
-                                                   self.agent_conf)
-                except Exception as err:
-                    # any exception occurred here should cause the floating IP
-                    # to be set in error state
-                    fip_statuses[fip['id']] = (
-                        l3_constants.FLOATINGIP_STATUS_ERROR)
-                    LOG.warn(_("Unable to configure IP address for "
-                               "floating IP: %s: %s") % (fip['id'], err))
-                    # remove the fip_cidr address if it was added
-                    try:
-                        ipintf.delete_address(fip_cidr)
-                    except:
-                        pass
-                    continue
-            else:
-                existing_anchor_rules = self.pf.list_anchor_rules(
-                    [interface_name, fip_ip])
-                # check if existing fip has been reassigned
-                fip_reassigned = any([fixed_ip not in rule for rule in
-                                      existing_anchor_rules])
-                if fip_reassigned:
-                    LOG.debug("Floating ip '%s' reassigned to '%s'",
-                              fip_ip, fixed_ip)
-                    # flush rules associated with old fixed_ip and add
-                    # new rules for the new fixed_ip
-                    self.pf.remove_anchor([interface_name, fip_ip])
-                    if not self._add_floating_ip_rules(interface_name, fip,
-                                                       fip_statuses):
-                        continue
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
-            LOG.debug("Floating ip %(id)s added, status %(status)s",
-                      {'id': fip['id'], 'status': fip_statuses.get(fip['id'])})
-
-        # Clean up addresses that no longer belong on the gateway interface and
-        # remove the binat-to PF rule associated with them
-        for ip_cidr in existing_cidrs - new_cidrs:
-            if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
-                self.pf.remove_anchor([interface_name, ip_cidr.split('/')[0]])
-                ipintf.delete_address(ip_cidr, addrcheck=False)
-        return fip_statuses
-
     # TODO(gmoodalb): need to do more work on ipv6 gateway
     def external_gateway_added(self, ex_gw_port, external_dlname):
+        LOG.debug("External gateway added: port(%s), interface(%s)",
+                  ex_gw_port, external_dlname)
+        # TODO(gmoodalb): add MTU to plug()?
         self.driver.plug(ex_gw_port['tenant_id'], ex_gw_port['network_id'],
                          ex_gw_port['id'], external_dlname,
                          ex_gw_port['mac_address'],
@@ -493,6 +563,8 @@
         pass
 
     def external_gateway_removed(self, ex_gw_port, external_dlname):
+        LOG.debug("External gateway removed: port(%s), interface(%s)",
+                  ex_gw_port, external_dlname)
         # remove nested anchor rule first
         self.pf.remove_nested_anchor_rule(None, external_dlname)
 
@@ -509,11 +581,10 @@
                 utils.execute(cmd, check_exit_code=False)
 
         if net_lib.Datalink.datalink_exists(external_dlname):
-            self.driver.fini_l3(external_dlname)
             self.driver.unplug(external_dlname,
                                self.agent_conf.external_network_bridge)
 
-    def _process_external_gateway(self, ex_gw_port):
+    def _process_external_gateway(self, ex_gw_port, pd):
         # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
         ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
                          self.ex_gw_port and self.ex_gw_port['id'])
@@ -523,24 +594,16 @@
         if ex_gw_port_id:
             interface_name = self.get_external_device_name(ex_gw_port_id)
         if ex_gw_port:
-            def _gateway_ports_equal(port1, port2):
-                def _get_filtered_dict(d, ignore):
-                    return dict((k, v) for k, v in d.iteritems()
-                                if k not in ignore)
-
-                keys_to_ignore = set(['binding:host_id'])
-                port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
-                port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
-                return port1_filtered == port2_filtered
-
             if not self.ex_gw_port:
                 self.external_gateway_added(ex_gw_port, interface_name)
+                pd.add_gw_interface(self.router['id'], interface_name)
                 ex_gw_port_status = 'added'
-            elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
+            elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
                 self.external_gateway_updated(ex_gw_port, interface_name)
                 ex_gw_port_status = 'updated'
         elif not ex_gw_port and self.ex_gw_port:
             self.external_gateway_removed(self.ex_gw_port, interface_name)
+            pd.remove_gw_interface(self.router['id'])
             ex_gw_port_status = 'removed'
 
         # Remove any external stale router interfaces (i.e., l3e.. VNICs)
@@ -551,12 +614,12 @@
         for stale_dev in stale_devs:
             LOG.debug(_('Deleting stale external router device: %s'),
                       stale_dev)
-            self.driver.fini_l3(stale_dev)
             self.driver.unplug(stale_dev)
 
         # Process SNAT rules for external gateway
-        self.perform_snat_action(self._handle_router_snat_rules,
-                                 interface_name, ex_gw_port_status)
+        gw_port = self._router.get('gw_port')
+        self._handle_router_snat_rules(gw_port, interface_name,
+                                       ex_gw_port_status)
 
     def external_gateway_snat_rules(self, ex_gw_port_ip, external_dlname):
         rules = {}
@@ -573,7 +636,10 @@
         return rules
 
     def _handle_router_snat_rules(self, ex_gw_port, external_dlname,
-                                  ex_gw_port_status, action):
+                                  ex_gw_port_status):
+        # Todo(gmoodalb): need this when we support address_scope
+        # self.process_external_port_address_scope_routing(iptables_manager)
+
         # Remove all the old SNAT rules
         # This is safe because if use_namespaces is set as False
         # then the agent can only configure one router, otherwise
@@ -585,7 +651,7 @@
                     self.pf.remove_anchor(snat_anchor.split('/')[-2:])
 
         # And add them back if the action is add_rules
-        if action == 'add_rules' and ex_gw_port_status in ['added', 'updated']:
+        if ex_gw_port_status in ['added', 'updated']:
             # NAT rules are added only if ex_gw_port has an IPv4 address
             ex_gw_port_ip = ex_gw_port['fixed_ips'][0]['ip_address']
             if netaddr.IPAddress(ex_gw_port_ip).version != 4:
@@ -596,10 +662,10 @@
                 self.pf.add_rules(rules, [external_dlname, internal_dlname])
 
     def process_external(self, agent):
-        existing_floating_ips = self.floating_ips
+        fip_statuses = {}
         try:
             ex_gw_port = self.get_ex_gw_port()
-            self._process_external_gateway(ex_gw_port)
+            self._process_external_gateway(ex_gw_port, agent.pd)
             # TODO(Carl) Return after setting existing_floating_ips and
             # still call update_fip_statuses?
             if not ex_gw_port:
@@ -609,20 +675,22 @@
             # configure their addresses on the external gateway port
             interface_name = self.get_external_device_name(ex_gw_port['id'])
             fip_statuses = self.configure_fip_addresses(interface_name)
-        except (n_exc.FloatingIpSetupException,
-                n_exc.IpTablesApplyException) as e:
+        except n_exc.FloatingIpSetupException:
                 # All floating IPs must be put in error state
-                LOG.exception(e)
+                LOG.exception(_LE("Failed to process floating IPs."))
                 fip_statuses = self.put_fips_in_error_state()
+        finally:
+            self.update_fip_statuses(agent, fip_statuses)
 
-        agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
+    def process_external_port_address_scope_routing(self, iptables_manager):
+        pass
+
+    def process_address_scope(self):
+        pass
 
 
 class L3NATAgent(l3_agent.L3NATAgentWithStateReport):
     OPTS = [
-        cfg.StrOpt('external_network_datalink', default='net0',
-                   help=_("Name of the datalink that connects to "
-                          "an external network.")),
         cfg.BoolOpt('allow_forwarding_between_networks', default=False,
                     help=_("Allow forwarding of packets between tenant's "
                            "networks")),
@@ -636,6 +704,35 @@
         self.service = vpn_service.VPNService(self)
         self.device_drivers = self.service.load_device_drivers(host)
 
+    def _check_config_params(self):
+        """Check items in configuration files.
+
+        Check for required and invalid configuration items.
+        The actual values are not verified for correctness.
+        """
+        if not self.conf.interface_driver:
+            msg = _LE('An interface driver must be specified')
+            LOG.error(msg)
+            raise SystemExit(1)
+
+        if not self.conf.router_id:
+            msg = _LE('Router id (router_id) is required to be set.')
+            LOG.error(msg)
+            raise SystemExit(1)
+
+        if self.conf.ipv6_gateway:
+            # ipv6_gateway configured. Check for valid v6 link-local address.
+            try:
+                msg = _LE("%s used in config as ipv6_gateway is not a valid "
+                          "IPv6 link-local address."),
+                ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
+                if ip_addr.version != 6 or not ip_addr.is_link_local():
+                    LOG.error(msg, self.conf.ipv6_gateway)
+                    raise SystemExit(1)
+            except netaddr.AddrFormatError:
+                LOG.error(msg, self.conf.ipv6_gateway)
+                raise SystemExit(1)
+
     def _router_added(self, router_id, router):
         args = []
         kwargs = {
@@ -660,10 +757,9 @@
                       self.conf.external_network_bridge)
             return
 
-        # If namespaces are disabled, only process the router associated
+        # We don't support namespaces so only process the router associated
         # with the configured agent id.
-        if (not self.conf.use_namespaces and
-                router['id'] != self.conf.router_id):
+        if (router['id'] != self.conf.router_id):
             raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
 
         # Either ex_net_id or handle_internal_only_routers must be set
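
For orientation, the nat-to/rdr-to pair that _add_floating_ip_rules() loads into a per-(interface, floating IP) PF anchor renders roughly as below. This is a standalone sketch, not part of the patch; the fixed IP, floating IP, and gateway VNIC name are invented for illustration, while the format strings are the same ones used in the change above.

    # Illustrative only: render the PF rule strings built by
    # _add_floating_ip_rules() for hypothetical addresses.
    fixed_ip = '192.168.100.5'        # hypothetical tenant fixed IP
    fip_ip = '203.0.113.10'           # hypothetical floating IP
    gwportname = 'l3e12345678901_0'   # hypothetical external gateway VNIC

    label = 'fip_%s' % fip_ip
    fip_rules = [
        'pass out quick from %s to any nat-to %s static-port '
        'label %s_out reply-to %s@%s' % (fixed_ip, fip_ip, label,
                                         fixed_ip, gwportname),
        'pass in quick from any to %s rdr-to %s label %s_in '
        'route-to %s@%s' % (fip_ip, fixed_ip, label, fixed_ip, gwportname),
    ]
    for rule in fip_rules:
        print(rule)

The patch pushes these two strings through self.pf.add_rules(fip_rules, [interface_name, fip_ip]), so removing the anchor keyed by (interface_name, fip_ip) later tears down both rules for that floating IP at once.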
--- a/components/openstack/neutron/files/agent/solaris/dhcp.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/dhcp.py	Wed Sep 07 14:48:41 2016 -0700
@@ -19,18 +19,20 @@
 
 import abc
 import netaddr
+import os
 
-from oslo.config import cfg
+from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_utils import excutils
 
+from neutron._i18n import _, _LI, _LW, _LE
+from neutron.agent.linux import dhcp
 from neutron.agent.linux import utils
-from neutron.agent.linux import dhcp
 from neutron.agent.solaris import net_lib
 from neutron.common import constants
 from neutron.common import exceptions
 from neutron.common import ipv6_utils
 
-
 LOG = logging.getLogger(__name__)
 
 
@@ -43,11 +45,24 @@
                                       version, plugin)
         self.device_manager = DeviceManager(self.conf, plugin)
 
+    # overrides method in DhcpLocalProcess due to no namespace support
+    def _destroy_namespace_and_port(self):
+        try:
+            self.device_manager.destroy(self.network, self.interface_name)
+        except RuntimeError:
+            LOG.warning(_LW('Failed trying to delete interface: %s'),
+                        self.interface_name)
+
     def _build_cmdline_callback(self, pid_file):
+        # We ignore local resolv.conf if dns servers are specified
+        # or if local resolution is explicitly disabled.
+        _no_resolv = (
+            '--no-resolv' if self.conf.dnsmasq_dns_servers or
+            not self.conf.dnsmasq_local_resolv else '')
         cmd = [
             '/usr/lib/inet/dnsmasq',
             '--no-hosts',
-            '--no-resolv',
+            _no_resolv,
             '--strict-order',
             '--bind-interfaces',
             '--interface=%s' % self.interface_name,
@@ -56,7 +71,8 @@
             '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
             '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
             '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
-            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases')
+            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
+            '--dhcp-match=set:ipxe,175',
         ]
 
         possible_leases = 0
@@ -110,7 +126,7 @@
                 possible_leases += cidr.size
 
         if cfg.CONF.advertise_mtu:
-            mtu = self.network.mtu
+            mtu = getattr(self.network, 'mtu', 0)
             # Do not advertise unknown mtu
             if mtu > 0:
                 cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
@@ -132,12 +148,35 @@
         if self.conf.dhcp_broadcast_reply:
             cmd.append('--dhcp-broadcast')
 
+        if self.conf.dnsmasq_base_log_dir:
+            log_dir = os.path.join(
+                self.conf.dnsmasq_base_log_dir,
+                self.network.id)
+            try:
+                if not os.path.exists(log_dir):
+                    os.makedirs(log_dir)
+            except OSError:
+                LOG.error(_LE('Error while create dnsmasq log dir: %s'),
+                          log_dir)
+            else:
+                log_filename = os.path.join(log_dir, 'dhcp_dns_log')
+                cmd.append('--log-queries')
+                cmd.append('--log-dhcp')
+                cmd.append('--log-facility=%s' % log_filename)
+
         return cmd
 
-    def _release_lease(self, mac_address, ip):
+    def _release_lease(self, mac_address, ip, client_id):
         """Release a DHCP lease."""
+        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
+            # Note(SridharG) dhcp_release is only supported for IPv4
+            # addresses. For more details, please refer to man page.
+            return
+
         cmd = ['/usr/lib/inet/dhcp_release', self.interface_name,
                ip, mac_address]
+        if client_id:
+            cmd.append(client_id)
         utils.execute(cmd)
 
     def _make_subnet_interface_ip_map(self):
@@ -157,105 +196,95 @@
     def __init__(self, conf, plugin):
         super(DeviceManager, self).__init__(conf, plugin)
 
-    def setup_dhcp_port(self, network):
-        """Create/update DHCP port for the host if needed and return port."""
+    def _set_default_route(self, network, device_name):
+        """Sets the default gateway for this dhcp namespace.
+
+        This method is idempotent and will only adjust the route if adjusting
+        it would change it from what it already is.  This makes it safe to call
+        and avoids unnecessary perturbation of the system.
+        """
+        pass
 
-        device_id = self.get_device_id(network)
-        subnets = {}
-        dhcp_enabled_subnet_ids = []
-        for subnet in network.subnets:
-            if subnet.enable_dhcp:
-                dhcp_enabled_subnet_ids.append(subnet.id)
-                subnets[subnet.id] = subnet
+    def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
+        """Set up the existing DHCP port, if there is one."""
 
-        dhcp_port = None
+        # To avoid pylint thinking that port might be undefined after
+        # the following loop...
+        port = None
+
+        # Look for an existing DHCP port for this network.
         for port in network.ports:
             port_device_id = getattr(port, 'device_id', None)
             port_device_owner = getattr(port, 'device_owner', None)
-
-            # if the agent is started on a different node, then the
-            # device_ids will be different since they are based off
-            # hostname.
             if (port_device_id == device_id or
-                    (port_device_owner == constants.DEVICE_OWNER_DHCP and
-                     port_device_id.startswith('dhcp'))):
-                port_fixed_ips = []
-                for fixed_ip in port.fixed_ips:
-                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
-                                           'ip_address': fixed_ip.ip_address})
-                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
-                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
+                (port_device_owner == constants.DEVICE_OWNER_DHCP and
+                 port_device_id.startswith('dhcp'))):
+                # If using gateway IPs on this port, we can skip the
+                # following code, whose purpose is just to review and
+                # update the Neutron-allocated IP addresses for the
+                # port.
+                if self.driver.use_gateway_ips:
+                    return port
+                # Otherwise break out, as we now have the DHCP port
+                # whose subnets and addresses we need to review.
+                break
+        else:
+            return None
 
-                # If there are dhcp_enabled_subnet_ids here that means that
-                # we need to add those to the port and call update.
-                if dhcp_enabled_subnet_ids:
-                    port_fixed_ips.extend(
-                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-                    dhcp_port = self.plugin.update_dhcp_port(
-                        port.id, {'port': {'network_id': network.id,
-                                           'fixed_ips': port_fixed_ips}})
-                    if not dhcp_port:
-                        raise exceptions.Conflict()
-                else:
-                    dhcp_port = port
-                # break since we found port that matches device_id
-                break
+        # Compare what the subnets should be against what is already
+        # on the port.
+        dhcp_enabled_subnet_ids = set(dhcp_subnets)
+        port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
 
-        # check for a reserved DHCP port
-        if dhcp_port is None:
-            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                      ' does not yet exist. Checking for a reserved port.',
-                      {'device_id': device_id, 'network_id': network.id})
-            for port in network.ports:
-                port_device_id = getattr(port, 'device_id', None)
-                if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
-                    dhcp_port = self.plugin.update_dhcp_port(
-                        port.id, {'port': {'network_id': network.id,
-                                           'device_id': device_id}})
-                    if dhcp_port:
-                        break
+        # If those differ, we need to call update.
+        if dhcp_enabled_subnet_ids != port_subnet_ids:
+            # Collect the subnets and fixed IPs that the port already
+            # has, for subnets that are still in the DHCP-enabled set.
+            wanted_fixed_ips = []
+            for fixed_ip in port.fixed_ips:
+                if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
+                    wanted_fixed_ips.append(
+                        {'subnet_id': fixed_ip.subnet_id,
+                         'ip_address': fixed_ip.ip_address})
 
-        # DHCP port has not yet been created.
-        if dhcp_port is None:
-            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                      ' does not yet exist.', {'device_id': device_id,
-                                               'network_id': network.id})
-            port_dict = dict(
-                name='',
-                admin_state_up=True,
-                device_id=device_id,
-                network_id=network.id,
-                tenant_id=network.tenant_id,
-                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
+            # Add subnet IDs for new DHCP-enabled subnets.
+            wanted_fixed_ips.extend(
+                dict(subnet_id=s)
+                for s in dhcp_enabled_subnet_ids - port_subnet_ids)
 
-        if not dhcp_port:
-            raise exceptions.Conflict()
+            # Update the port to have the calculated subnets and fixed
+            # IPs.  The Neutron server will allocate a fresh IP for
+            # each subnet that doesn't already have one.
+            port = self.plugin.update_dhcp_port(
+                port.id,
+                {'port': {'network_id': network.id,
+                          'fixed_ips': wanted_fixed_ips}})
+            if not port:
+                raise exceptions.Conflict()
 
-        # Convert subnet_id to subnet dict
-        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
-                          ip_address=fixed_ip.ip_address,
-                          subnet=subnets[fixed_ip.subnet_id])
-                     for fixed_ip in dhcp_port.fixed_ips]
-
-        ips = [dhcp.DictModel(item) if isinstance(item, dict) else item
-               for item in fixed_ips]
-        dhcp_port.fixed_ips = ips
-
-        return dhcp_port
+        return port
 
     def setup(self, network):
         """Create and initialize a device for network's DHCP on this host."""
         port = self.setup_dhcp_port(network)
+        self._update_dhcp_port(network, port)
         interface_name = self.get_interface_name(network, port)
 
         if net_lib.Datalink.datalink_exists(interface_name):
             LOG.debug('Reusing existing device: %s.', interface_name)
         else:
-            self.driver.plug(network.tenant_id, network.id,
-                             port.id, interface_name, port.mac_address,
-                             network=network,
-                             vif_type=getattr(port, 'binding:vif_type', None))
+            try:
+                self.driver.plug(network.tenant_id, network.id,
+                                 port.id, interface_name, port.mac_address,
+                                 network=network, mtu=network.get('mtu'),
+                                 vif_type=getattr(port, 'binding:vif_type',
+                                                  None))
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_LE('Unable to plug DHCP port for '
+                                      'network %s. Releasing port.'),
+                                  network.id)
+                    self.plugin.release_dhcp_port(network.id, port.device_id)
         ip_cidrs = []
         addrconf = False
         for fixed_ip in port.fixed_ips:
@@ -267,14 +296,17 @@
             else:
                 addrconf = True
 
+        if self.driver.use_gateway_ips:
+            # For each DHCP-enabled subnet, add that subnet's gateway
+            # IP address to the Linux device for the DHCP port.
+            for subnet in network.subnets:
+                if not subnet.enable_dhcp:
+                    continue
+                gateway = subnet.gateway_ip
+                if gateway:
+                    net = netaddr.IPNetwork(subnet.cidr)
+                    ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
+
         self.driver.init_l3(interface_name, ip_cidrs, addrconf=addrconf)
 
         return interface_name
-
-    def destroy(self, network, device_name):
-        """Destroy the device used for the network's DHCP on this host."""
-
-        self.driver.fini_l3(device_name)
-        self.driver.unplug(device_name)
-        self.plugin.release_dhcp_port(network.id,
-                                      self.get_device_id(network))
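
The --no-resolv handling added to _build_cmdline_callback() above is easy to misread: the flag is emitted either when explicit DNS servers are configured or when local resolution is disabled. A minimal standalone sketch of that decision follows; conf here is a stand-in object, not the real oslo.config namespace, and only the dnsmasq_dns_servers and dnsmasq_local_resolv attribute names are taken from the patch.

    # Sketch of the --no-resolv decision; 'conf' stands in for the
    # DHCP agent configuration object.
    def no_resolv_flag(conf):
        # Ignore the local resolv.conf when explicit DNS servers are set
        # or when local resolution is explicitly disabled.
        if conf.dnsmasq_dns_servers or not conf.dnsmasq_local_resolv:
            return '--no-resolv'
        return ''

    class FakeConf(object):
        def __init__(self, dns_servers, local_resolv):
            self.dnsmasq_dns_servers = dns_servers
            self.dnsmasq_local_resolv = local_resolv

    print(no_resolv_flag(FakeConf(['8.8.8.8'], True)))   # '--no-resolv'
    print(no_resolv_flag(FakeConf([], True)))            # '' (keep resolv.conf)
    print(no_resolv_flag(FakeConf([], False)))           # '--no-resolv'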
--- a/components/openstack/neutron/files/agent/solaris/interface.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/interface.py	Wed Sep 07 14:48:41 2016 -0700
@@ -15,9 +15,6 @@
 # @author: Girish Moodalbail, Oracle, Inc.
 
 from openstack_common import get_ovsdb_info
-import rad.client as radcli
-import rad.connect as radcon
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
 
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -33,23 +30,32 @@
 LOG = logging.getLogger(__name__)
 
 OPTS = [
-    cfg.StrOpt('evs_controller', default='ssh://evsuser@localhost',
-               help=_("An URI that specifies an EVS controller"))
+    cfg.StrOpt('admin_user',
+               help=_("Admin username")),
+    cfg.StrOpt('admin_password',
+               help=_("Admin password"),
+               secret=True),
+    cfg.StrOpt('admin_tenant_name',
+               help=_("Admin tenant name")),
+    cfg.StrOpt('auth_url',
+               help=_("Authentication URL")),
+    cfg.StrOpt('auth_strategy', default='keystone',
+               help=_("The type of authentication to use")),
+    cfg.StrOpt('auth_region',
+               help=_("Authentication region")),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               help=_("Network service endpoint type to pull from "
+                      "the keystone catalog")),
 ]
 
 
-class EVSControllerError(exceptions.NeutronException):
-    message = _("EVS controller: %(errmsg)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
+class OVSInterfaceDriver(object):
+    """Driver used to manage Solaris OVS VNICs.
 
-
-class SolarisVNICDriver(object):
-    """Driver used to manage Solaris EVS VNICs.
-
-    This class provides methods to create/delete an EVS VNIC and
-    plumb/unplumb ab IP interface and addresses on the EVS VNIC.
+    This class provides methods to create/delete a Crossbow VNIC and
+    add it as a port of OVS bridge.
+    TODO(gmoodalb): More methods to implement here for MITAKA??
     """
 
     # TODO(gmoodalb): dnsmasq uses old style `ifreq', so 16 is the maximum
@@ -63,32 +69,23 @@
 
     def __init__(self, conf):
         self.conf = conf
-        try:
-            self.rad_uri = radcon.RadURI(conf.evs_controller)
-        except ValueError as err:
-            raise SystemExit(_("Specified evs_controller is invalid: %s"), err)
-
-        self._rad_connection = None
-        # set the controller property for this host
-        cmd = ['/usr/sbin/evsadm', 'show-prop', '-co', 'value', '-p',
-               'controller']
-        stdout = utils.execute(cmd)
-        if conf.evs_controller != stdout.strip():
-            cmd = ['/usr/sbin/evsadm', 'set-prop', '-p',
-                   'controller=%s' % (conf.evs_controller)]
-            utils.execute(cmd)
+        self._neutron_client = None
 
     @property
-    def rad_connection(self):
-        if (self._rad_connection is not None and
-                self._rad_connection._closed is None):
-            return self._rad_connection
-
-        LOG.debug(_("Connecting to EVS Controller at %s") %
-                  self.conf.evs_controller)
-
-        self._rad_connection = self.rad_uri.connect()
-        return self._rad_connection
+    def neutron_client(self):
+        if self._neutron_client:
+            return self._neutron_client
+        from neutronclient.v2_0 import client
+        self._neutron_client = client.Client(
+            username=self.conf.admin_user,
+            password=self.conf.admin_password,
+            tenant_name=self.conf.admin_tenant_name,
+            auth_url=self.conf.auth_url,
+            auth_strategy=self.conf.auth_strategy,
+            region_name=self.conf.auth_region,
+            endpoint_type=self.conf.endpoint_type
+        )
+        return self._neutron_client
 
     def fini_l3(self, device_name):
         ipif = net_lib.IPInterface(device_name)
@@ -113,120 +110,7 @@
 
     def plug(self, tenant_id, network_id, port_id, datalink_name, mac_address,
              network=None, bridge=None, namespace=None, prefix=None,
-             protection=False, vif_type=None):
-        """Plug in the interface."""
-
-        if net_lib.Datalink.datalink_exists(datalink_name):
-            LOG.info(_("Device %s already exists"), datalink_name)
-            return
-
-        if datalink_name.startswith('l3e'):
-            # verify external network parameter settings
-            dl = net_lib.Datalink(datalink_name)
-            # determine the network type of the external network
-            # TODO(gmoodalb): use EVS RAD APIs
-            evsname = network_id
-            cmd = ['/usr/sbin/evsadm', 'show-evs', '-co', 'l2type,vid',
-                   '-f', 'evs=%s' % evsname]
-            try:
-                stdout = utils.execute(cmd)
-            except Exception as err:
-                LOG.error(_("Failed to retrieve the network type for "
-                            "the external network, and it is required "
-                            "to create an external gateway port: %s") % err)
-                return
-            output = stdout.splitlines()[0].strip()
-            l2type, vid = output.split(':')
-            if l2type != 'flat' and l2type != 'vlan':
-                LOG.error(_("External network should be either Flat or "
-                            "VLAN based, and it is required to "
-                            "create an external gateway port"))
-                return
-            elif (l2type == 'vlan' and
-                  self.conf.get("external_network_datalink", None)):
-                LOG.warning(_("external_network_datalink is deprecated in "
-                              "Juno and will be removed in the next release "
-                              "of Solaris OpenStack. Please use the evsadm "
-                              "set-controlprop subcommand to setup the "
-                              "uplink-port for an external network"))
-                # proceed with the old-style of doing things
-                dl.create_vnic(self.conf.external_network_datalink,
-                               mac_address=mac_address, vid=vid)
-                return
-
-        try:
-            evsc = self.rad_connection.get_object(evsbind.EVSController())
-            vports_info = evsc.getVPortInfo("vport=%s" % (port_id))
-            if vports_info:
-                vport_info = vports_info[0]
-                # This is to handle HA when the 1st DHCP/L3 agent is down and
-                # the second DHCP/L3 agent tries to connect its VNIC to EVS, we
-                # will end up in "vport in use" error. So, we need to reset the
-                # vport before we connect the VNIC to EVS.
-                if vport_info.status == evsbind.VPortStatus.USED:
-                    LOG.debug(_("Retrieving EVS: %s"), vport_info.evsuuid)
-                    pat = radcli.ADRGlobPattern({'uuid': network_id,
-                                                 'tenant': tenant_id})
-                    evs_objs = self.rad_connection.list_objects(evsbind.EVS(),
-                                                                pat)
-                    if evs_objs:
-                        evs = self.rad_connection.get_object(evs_objs[0])
-                        evs.resetVPort(port_id, "force=yes")
-
-                if not protection:
-                    LOG.debug(_("Retrieving VPort: %s"), port_id)
-                    pat = radcli.ADRGlobPattern({'uuid': port_id,
-                                                 'tenant': tenant_id,
-                                                 'evsuuid': network_id})
-                    vport_objs = self.rad_connection.list_objects(
-                        evsbind.VPort(), pat)
-                    if vport_objs:
-                        vport = self.rad_connection.get_object(vport_objs[0])
-                        vport.setProperty("protection=none")
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-        dl = net_lib.Datalink(datalink_name)
-        evs_vport = "%s/%s" % (network_id, port_id)
-        dl.connect_vnic(evs_vport, tenant_id)
-
-    def unplug(self, device_name, namespace=None, prefix=None):
-        """Unplug the interface."""
-
-        dl = net_lib.Datalink(device_name)
-        dl.delete_vnic()
-
-
-class OVSInterfaceDriver(SolarisVNICDriver):
-    """Driver used to manage Solaris OVS VNICs.
-
-    This class provides methods to create/delete a Crossbow VNIC and
-    add it as a port of OVS bridge.
-    """
-
-    def __init__(self, conf):
-        self.conf = conf
-        self._neutron_client = None
-
-    @property
-    def neutron_client(self):
-        if self._neutron_client:
-            return self._neutron_client
-        from neutronclient.v2_0 import client
-        self._neutron_client = client.Client(
-            username=self.conf.admin_user,
-            password=self.conf.admin_password,
-            tenant_name=self.conf.admin_tenant_name,
-            auth_url=self.conf.auth_url,
-            auth_strategy=self.conf.auth_strategy,
-            region_name=self.conf.auth_region,
-            endpoint_type=self.conf.endpoint_type
-        )
-        return self._neutron_client
-
-    def plug(self, tenant_id, network_id, port_id, datalink_name, mac_address,
-             network=None, bridge=None, namespace=None, prefix=None,
-             protection=False, vif_type=None):
+             protection=False, mtu=None, vif_type=None):
         """Plug in the interface."""
 
         if net_lib.Datalink.datalink_exists(datalink_name):
@@ -312,6 +196,10 @@
     def unplug(self, datalink_name, bridge=None, namespace=None, prefix=None):
         """Unplug the interface."""
 
+        # remove any IP addresses on top of this datalink, otherwise we will
+        # get a 'device busy' error while deleting the datalink
+        self.fini_l3(datalink_name)
+
         dl = net_lib.Datalink(datalink_name)
         dl.delete_vnic()
 
@@ -329,3 +217,43 @@
         except RuntimeError as err:
             LOG.error(_("Failed unplugging interface '%s': %s") %
                       (datalink_name, err))
+
+    @property
+    def use_gateway_ips(self):
+        """Whether to use gateway IPs instead of unique IP allocations.
+
+        In each place where the DHCP agent runs, and for each subnet for
+        which DHCP is handing out IP addresses, the DHCP port needs -
+        at the Linux level - to have an IP address within that subnet.
+        Generally this needs to be a unique Neutron-allocated IP
+        address, because the subnet's underlying L2 domain is bridged
+        across multiple compute hosts and network nodes, and for HA
+        there may be multiple DHCP agents running on that same bridged
+        L2 domain.
+
+        However, if the DHCP ports - on multiple compute/network nodes
+        but for the same network - are _not_ bridged to each other,
+        they do not need each to have a unique IP address.  Instead
+        they can all share the same address from the relevant subnet.
+        This works, without creating any ambiguity, because those
+        ports are not all present on the same L2 domain, and because
+        no data within the network is ever sent to that address.
+        (DHCP requests are broadcast, and it is the network's job to
+        ensure that such a broadcast will reach at least one of the
+        available DHCP servers.  DHCP responses will be sent _from_
+        the DHCP port address.)
+
+        Specifically, for networking backends where it makes sense,
+        the DHCP agent allows all DHCP ports to use the subnet's
+        gateway IP address, and thereby to completely avoid any unique
+        IP address allocation.  This behaviour is selected by running
+        the DHCP agent with a configured interface driver whose
+        'use_gateway_ips' property is True.
+
+        When an operator deploys Neutron with an interface driver that
+        makes use_gateway_ips True, they should also ensure that a
+        gateway IP address is defined for each DHCP-enabled subnet,
+        and that the gateway IP address doesn't change during the
+        subnet's lifetime.
+        """
+        return False
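A minimal sketch of the contract described in the docstring above (hypothetical code, not part of this patch): a DHCP port setup path that honours an interface driver's use_gateway_ips property would reuse the subnet's gateway address instead of requesting a unique allocation.

    def choose_dhcp_port_ip(subnet, interface_driver, allocate_ip):
        # subnet: dict with 'id' and 'gateway_ip'; allocate_ip: callable that
        # asks Neutron for a unique fixed IP on that subnet (both assumed).
        if interface_driver.use_gateway_ips and subnet.get('gateway_ip'):
            # Non-bridged backends can safely share the gateway IP.
            return subnet['gateway_ip']
        # Default behaviour: a unique Neutron-allocated address.
        return allocate_ip(subnet['id'])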
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/agent/solaris/namespace_manager.py	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+class NamespaceManager(object):
+    ''' Re-implements neutron.agent.l3.namespace_manager'''
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, value, traceback):
+        if exc_type:
+            # An exception occurred in the caller's with statement
+            return False
+        return True
+
+    def keep_router(self, router_id):
+        pass
+
+    def keep_ext_net(self, ext_net_id):
+        pass
+
+    def get_prefix_and_id(self, ns_name):
+        return None
+
+    def is_managed(self, ns_name):
+        return False
+
+    def list_all(self):
+        return set()
+
+    def ensure_router_cleanup(self, router_id):
+        pass
+
+    def _cleanup(self, ns_prefix, ns_id):
+        pass
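For context, a hypothetical caller sketch (the real L3 agent code is not part of this patch): the stub keeps the context-manager protocol of the upstream namespace manager, so existing agent code can keep driving it while every call is a no-op on Solaris, which has no Linux network namespaces.

    nsm = NamespaceManager()
    with nsm as pm:
        pm.keep_router('some-router-id')   # bookkeeping call, no-op here
    # Nothing is ever tracked, so there is never anything to clean up.
    assert nsm.list_all() == set()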
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Sep 07 14:48:41 2016 -0700
@@ -56,8 +56,10 @@
         return True
 
     @classmethod
-    def ipaddr_exists(cls, ifname, ipaddr):
-        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addr', ifname]
+    def ipaddr_exists(cls, ipaddr, ifname=None):
+        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addr']
+        if ifname:
+            cmd.append(ifname)
         stdout = cls.execute(cmd)
 
         return ipaddr in stdout
@@ -86,7 +88,7 @@
             if temp:
                 cmd.append('-t')
             self.execute_with_pfexec(cmd)
-        elif addrcheck and self.ipaddr_exists(self._ifname, ipaddr):
+        elif addrcheck and self.ipaddr_exists(ipaddr, self._ifname):
             return
 
         # If an address is IPv6, then to create a static IPv6 address
@@ -99,8 +101,8 @@
             mac_addr = stdout.splitlines()[0].strip()
             ll_addr = netaddr.EUI(mac_addr).ipv6_link_local()
 
-            if addrcheck and not self.ipaddr_exists(self._ifname,
-                                                    str(ll_addr)):
+            if addrcheck and not self.ipaddr_exists(str(ll_addr),
+                                                    self._ifname):
                 # create a link-local address
                 cmd = ['/usr/sbin/ipadm', 'create-addr', '-T', 'static', '-a',
                        str(ll_addr), self._ifname]
@@ -135,7 +137,7 @@
         self.execute_with_pfexec(cmd)
 
     def delete_address(self, ipaddr, addrcheck=True):
-        if addrcheck and not self.ipaddr_exists(self._ifname, ipaddr):
+        if addrcheck and not self.ipaddr_exists(ipaddr, self._ifname):
             return
 
         cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addrobj,addr',
@@ -179,19 +181,6 @@
             return False
         return True
 
-    def connect_vnic(self, evsvport, tenantname=None, temp=True):
-        if self.datalink_exists(self._dlname):
-            return
-
-        cmd = ['/usr/sbin/dladm', 'create-vnic', '-c', evsvport, self._dlname]
-        if temp:
-            cmd.append('-t')
-        if tenantname:
-            cmd.append('-T')
-            cmd.append(tenantname)
-
-        self.execute_with_pfexec(cmd)
-
     def create_vnic(self, lower_link, mac_address=None, vid=None, temp=True):
         if self.datalink_exists(self._dlname):
             return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/agent/solaris/pd.py	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from neutron.common import utils
+
+OPTS = []
+
+
+class PrefixDelegation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
+        return
+
+    @utils.synchronized("l3-agent-pd")
+    def disable_subnet(self, router_id, subnet_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def update_subnet(self, router_id, subnet_id, prefix):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def add_gw_interface(self, router_id, gw_ifname):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def delete_router_pd(self, router):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def remove_gw_interface(self, router_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def sync_router(self, router_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def remove_stale_ri_ifname(self, router_id, stale_ifname):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def process_prefix_update(self):
+        pass
+
+    def after_start(self):
+        pass
+
+
+@utils.synchronized("l3-agent-pd")
+def remove_router(resource, event, l3_agent, **kwargs):
+    pass
+
+
+def get_router_entry(ns_name):
+    return {'gw_interface': None,
+            'ns_name': None,
+            'subnets': {}}
+
+
+@utils.synchronized("l3-agent-pd")
+def add_router(resource, event, l3_agent, **kwargs):
+    pass
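The @utils.synchronized("l3-agent-pd") decorators above come from neutron.common.utils and serialize callers on a named lock, so the stubs keep the locking contract the L3 agent expects even though their bodies do nothing. Conceptually the decorator behaves like this simplified stand-in (an illustration only; the real implementation rests on oslo locking utilities):

    import threading
    from functools import wraps

    _locks = {}

    def synchronized(name):
        # Simplified stand-in for neutron.common.utils.synchronized.
        lock = _locks.setdefault(name, threading.Lock())
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                with lock:
                    return func(*args, **kwargs)
            return wrapper
        return decorator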
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/bgp_dragent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,151 @@
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
+
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+
+[BGP]
+
+#
+# From neutron.bgp.agent
+#
+
+# BGP speaker driver class to be instantiated. (string value)
+#bgp_speaker_driver = <None>
+
+# 32-bit BGP identifier, typically an IPv4 address owned by the system running
+# the BGP DrAgent. (string value)
+#bgp_router_id = <None>
--- a/components/openstack/neutron/files/dhcp_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/dhcp_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,102 +1,213 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+ovs_integration_bridge = br_int0
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
+
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
+
+# The driver used to manage the virtual interface. (string value)
+interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
+
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From neutron.dhcp.agent
+#
 
 # The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-# resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Interface driver for Solaris Open vSwitch
-# interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br_int0
+# transient notification or RPC errors. The interval is number of seconds
+# between attempts. (integer value)
+#resync_interval = 5
 
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Interface driver for Solaris Elastic Virtual Switch (EVS)
-interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
-
-# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
+# The driver used to manage the DHCP server. (string value)
 dhcp_driver = neutron.agent.solaris.dhcp.Dnsmasq
 
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces). This option is deprecated and
-# will be removed in a future release, at which point the old behavior of
-# use_namespaces = True will be enforced.
-use_namespaces = False
-
 # The DHCP server can assist with providing metadata support on isolated
 # networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
+# specific host routes to the DHCP request. The metadata service will only be
+# activated when the subnet does not contain any router port. The guest
 # instance must be configured to request host routes via DHCP (Option 121).
-# enable_isolated_metadata = False
+# This option doesn't have any effect when force_metadata is set to True.
+# (boolean value)
+#enable_isolated_metadata = false
 
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# request. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-# enable_metadata_network = False
+# In some cases the Neutron router is not present to provide the metadata IP
+# but the DHCP server can be used to provide this info. Setting this value will
+# force the DHCP server to append specific host routes to the DHCP request. If
+# this option is set, then the metadata service will be activated for all the
+# networks. (boolean value)
+#force_metadata = false
+
+# Allows for serving metadata requests coming from a dedicated metadata access
+# network whose CIDR is 169.254.169.254/16 (or larger prefix), and is connected
+# to a Neutron router from which the VMs send metadata:1 request. In this case
+# DHCP Option 121 will not be injected in VMs, as they will be able to reach
+# 169.254.169.254 through a router. This option requires
+# enable_isolated_metadata = True. (boolean value)
+#enable_metadata_network = false
 
 # Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
+# pool size configured on server. (integer value)
+#num_sync_threads = 4
+
+# Location to store DHCP server config files. (string value)
+#dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames. This option is deprecated. It has
+# been moved to neutron.conf as dns_domain. It will be removed in a future
+# release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file. (string value)
+#dnsmasq_config_file =
 
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
+# Comma-separated list of the DNS servers which will be used as forwarders.
+# (list value)
+# Deprecated group/name - [DEFAULT]/dnsmasq_dns_server
+#dnsmasq_dns_servers = <None>
+
+# Base log dir for dnsmasq logging. The log contains DHCP and DNS log
+# information and is useful for debugging issues with either DHCP or DNS. If
+# this section is null, disable dnsmasq log. (string value)
+#dnsmasq_base_log_dir = <None>
 
-# Domain to use for building the hostnames
-# dhcp_domain = openstacklocal
+# Enables the dnsmasq service to provide name resolution for instances via DNS
+# resolvers on the host running the DHCP agent. Effectively removes the '--no-
+# resolv' option from the dnsmasq process arguments. Adding custom DNS
+# resolvers to the 'dnsmasq_dns_servers' option disables this feature. (boolean
+# value)
+#dnsmasq_local_resolv = false
+
+# Limit number of leases to prevent a denial-of-service. (integer value)
+#dnsmasq_lease_max = 16777216
 
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
+# Use broadcast in DHCP replies. (boolean value)
+#dhcp_broadcast_reply = false
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
 
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
 
-# Limit number of leases to prevent a denial-of-service.
-# dnsmasq_lease_max = 16777216
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
 
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# Use broadcast in DHCP replies
-# dhcp_broadcast_reply = False
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
 
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
 
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
 
-# An URI that specifies an EVS controller. It is of the form
-# ssh://user@hostname, where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://evsuser@localhost.
-# evs_controller = ssh://evsuser@localhost
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- a/components/openstack/neutron/files/evs/migrate/__init__.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
--- a/components/openstack/neutron/files/evs/migrate/evs-neutron-migration.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,432 +0,0 @@
-#!/usr/bin/python2.7
-#
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# This script migrates the network, subnet and port information from EVS DB to
-# neutron-server DB. It also re-creates routers and floatingips tables with
-# Neutron's l3 schema. This script needs to be run for the proper upgrade of
-# Neutron from Havana to Juno release.
-#
-
-import ConfigParser
-import time
-
-from oslo.config import cfg
-from oslo.db import exception as excp
-from oslo.db import options as db_options
-import rad.bindings.com.oracle.solaris.rad.evscntl as evsc
-import rad.connect as radcon
-import sqlalchemy as sa
-from sqlalchemy import MetaData, sql
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.schema import DropConstraint
-
-from neutron import context as ctx
-from neutron.db import common_db_mixin, model_base
-from neutron.plugins.evs.migrate import havana_api
-
-
-def create_db_network(nw, engine, ext_ro):
-    ''' Method for creating networks table in the neutron-server DB
-        Input params:
-        @nw - Dictionary with values from EVS DB
-        @engine - SQL engine
-        @ext_ro - External router
-    '''
-    # Importing locally because these modules end up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    from neutron.db import external_net_db as ext_net
-    model_base.BASEV2.metadata.bind = engine
-    for _none in range(60):
-        try:
-            model_base.BASEV2.metadata.create_all(engine)
-            break
-        except sa.exc.OperationalError as err:
-            # mysql is not ready. sleep for 2 more seconds
-            time.sleep(2)
-    else:
-        print "Unable to connect to MySQL:  %s" % err
-        print ("Please verify MySQL is properly configured and online "
-               "before using svcadm(1M) to clear this service.")
-        raise RuntimeError
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    dup = False
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_network(inst, ctxt, nw)
-        print "\nnetwork=%s added" % nw['network']['name']
-        if ext_ro:
-            ext_nw = ext_net.ExternalNetwork(network_id=nw['network']['id'])
-            session = sessionmaker()
-            session.configure(bind=engine)
-            s = session()
-            s.add(ext_nw)
-            s.commit()
-    except excp.DBDuplicateEntry:
-        print "\nnetwork '%s' already exists" % nw['network']['name']
-        dup = True
-    return dup
-
-
-def create_db_subnet(sub):
-    ''' Method for creating subnets table in the neutron-server DB
-        Input params:
-        @sub - Dictionary with values from EVS DB
-    '''
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_subnet(inst, ctxt, sub)
-        print "\nsubnet=%s added" % sub['subnet']['id']
-    except excp.DBDuplicateEntry:
-        print "\nsubnet '%s' already exists" % sub['subnet']['id']
-
-
-def create_db_port(port):
-    ''' Method for creating ports table in the neutron-server DB
-        Input params:
-        @port - Dictionary with values from EVS DB
-    '''
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_port(inst, ctxt, port)
-        print "\nport=%s added" % port['port']['id']
-    except excp.DBDuplicateEntry:
-        print "\nport '%s' already exists" % port['port']['id']
-
-
-def main():
-    print "Start Migration."
-
-    # Connect to EVS controller
-    config = ConfigParser.RawConfigParser()
-    config.readfp(open("/etc/neutron/plugins/evs/evs_plugin.ini"))
-    if config.has_option("EVS", 'evs_controller'):
-        config_suh = config.get("EVS", 'evs_controller')
-    else:
-        config_suh = 'ssh://evsuser@localhost'
-    suh = config_suh.split('://')
-    if len(suh) != 2 or suh[0] != 'ssh' or not suh[1].strip():
-        raise SystemExit(_("Specified evs_controller is invalid"))
-    uh = suh[1].split('@')
-    if len(uh) != 2 or not uh[0].strip() or not uh[1].strip():
-        raise SystemExit(_("'user' and 'hostname' need to be specified "
-                           "for evs_controller"))
-    try:
-        rc = radcon.connect_ssh(uh[1], user=uh[0])
-    except:
-        raise SystemExit(_("Cannot connect to EVS Controller"))
-    try:
-        evs_contr = rc.get_object(evsc.EVSController())
-    except:
-        raise SystemExit(_("Could not retrieve EVS info from EVS Controller"))
-
-    config.readfp(open("/etc/neutron/neutron.conf"))
-    if config.has_option("database", 'connection'):
-        SQL_CONNECTION = config.get("database", 'connection')
-    else:
-        SQL_CONNECTION = 'sqlite:////var/lib/neutron/neutron.sqlite'
-
-    conf = cfg.CONF
-    db_options.set_defaults(cfg.CONF,
-                            connection=SQL_CONNECTION,
-                            sqlite_db='', max_pool_size=10,
-                            max_overflow=20, pool_timeout=10)
-
-    neutron_engine = sa.create_engine(SQL_CONNECTION)
-    router_port_ids = {}
-
-    evsinfo = evs_contr.getEVSInfo()
-    for e in evsinfo:
-        ext_ro = False
-        for p in e.props:
-            if p.name == 'OpenStack:router:external' and p.value == 'True':
-                ext_ro = True
-        # Populate networks table
-        n = {
-            'tenant_id': e.tenantname,
-            'id': e.uuid,
-            'name': e.name,
-            'status': 'ACTIVE',
-            'admin_state_up': True,
-            'shared': False
-            }
-        nw = {'network': n}
-        dup = create_db_network(nw, neutron_engine, ext_ro)
-        if dup:
-            continue  # No need to iterate over subnets and ports
-
-        # Populate subnets table
-        if not e.ipnets:
-            continue
-        for i in e.ipnets:
-            cidr = None
-            gateway_ip = None
-            enable_dhcp = None
-            dns = []
-            host = []
-            start = []
-            for p in i.props:
-                if p.name == 'subnet':
-                    cidr = p.value
-                elif p.name == 'defrouter':
-                    gateway_ip = p.value
-                elif p.name == 'OpenStack:enable_dhcp':
-                    enable_dhcp = p.value == 'True'
-                elif p.name == 'OpenStack:dns_nameservers':
-                    dns = p.value.split(',')
-                elif p.name == 'OpenStack:host_routes':
-                    hh = p.value.split(',')
-                    for h in range(0, len(hh), 2):
-                        d = {hh[h]: hh[h+1]}
-                        host.append(d)
-                elif p.name == 'pool':
-                    ss = p.value.split(',')
-                    for s in ss:
-                        if '-' in s:
-                            d = {'start': s.split('-')[0],
-                                 'end': s.split('-')[1]}
-                            start.append(d)
-                        else:
-                            d = {'start': s, 'end': s}
-                            start.append(d)
-            ip_version = 4 if i.ipvers == evsc.IPVersion.IPV4 else 6
-
-            if i.name.startswith(i.uuid[:8]):
-                # Skip autogenerated names
-                name = None
-            else:
-                name = i.name
-            s = {
-                'tenant_id': i.tenantname,
-                'id': i.uuid,
-                'name': name,
-                'network_id': e.uuid,
-                'ip_version': ip_version,
-                'cidr': cidr,
-                'gateway_ip': gateway_ip,
-                'enable_dhcp': enable_dhcp,
-                'shared': False,
-                'allocation_pools': start,
-                'dns_nameservers': dns,
-                'host_routes': host
-                }
-
-            sub = {'subnet': s}
-            create_db_subnet(sub)
-
-        # Populate ports table
-        if not e.vports:
-            continue
-        for j in e.vports:
-            device_owner = ''
-            device_id = ''
-            mac_address = None
-            ipaddr = None
-            for v in j.props:
-                if v.name == 'OpenStack:device_owner':
-                    device_owner = v.value
-                    if v.value in ('network:router_interface',
-                                   'network:router_gateway'):
-                        router_port_ids[j.uuid] = v.value
-                elif v.name == 'OpenStack:device_id':
-                    device_id = v.value
-                elif v.name == 'macaddr':
-                    mac_address = v.value
-                elif v.name == 'ipaddr':
-                    ipaddr = v.value.split('/')[0]
-            if j.name.startswith(j.uuid[:8]):
-                # Skip autogenerated names
-                name = None
-            else:
-                name = j.name
-
-            p = {
-                'tenant_id': j.tenantname,
-                'id': j.uuid,
-                'name': name,
-                'network_id': e.uuid,
-                'mac_address': mac_address,
-                'admin_state_up': True,
-                'status': 'ACTIVE',
-                'device_id': device_id,
-                'device_owner': device_owner,
-                'fixed_ips': [{'subnet_id': e.ipnets[0].uuid,
-                               'ip_address': ipaddr}]
-                }
-            port = {'port': p}
-            create_db_port(port)
-
-    # Change the schema of the floatingips and routers tables by doing
-    # the following:
-    #     Fetch the floatingip, router entry using EVS API,
-    #     Temporarily store the information,
-    #     Delete floatingip, router entry,
-    #     Remove floatingip, router as a constraint from existing tables,
-    #     Drop the routers, floatingips table,
-    #     Add router, floatingip entry using Neutron API
-
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import l3_db
-    havana_api.configure_db()
-    session = havana_api.get_session()
-
-    # Fetch the floatingip entry using EVS API
-    query = session.query(havana_api.FloatingIP)
-    floatingips = query.all()
-    fl = []
-    if floatingips:
-        for f in floatingips:
-            fi = {
-                'id': f['id'],
-                'floating_ip_address': f['floating_ip_address'],
-                'floating_network_id': f['floating_network_id'],
-                'floating_port_id': f['floating_port_id'],
-                'fixed_port_id': f['fixed_port_id'],
-                'fixed_ip_address': f['fixed_ip_address'],
-                'tenant_id': f['tenant_id'],
-                'router_id': f['router_id'],
-                }
-            fl.append(fi)
-
-        # Delete floatingip entry
-        ctxt = ctx.get_admin_context()
-        ctxt = havana_api.get_evs_context(ctxt)
-        with ctxt.session.begin(subtransactions=True):
-            cm_db_inst = common_db_mixin.CommonDbMixin()
-            query = common_db_mixin.CommonDbMixin._model_query(cm_db_inst,
-                                                               ctxt,
-                                                               havana_api.
-                                                               FloatingIP)
-            for fip in query:
-                ctxt.session.delete(fip)
-
-    # Fetch the router entry using EVS API
-    query = session.query(havana_api.Router)
-    routers = []
-    try:
-        routers = query.all()
-    except sa.exc.OperationalError:
-        pass
-    if routers:
-        for r in routers:
-            router_id = r['id']
-            rt = {
-                'tenant_id': r['tenant_id'],
-                'id': r['id'],
-                'name': r['name'],
-                'admin_state_up': r['admin_state_up'],
-                'gw_port_id': r['gw_port_id'],
-                'status': 'ACTIVE'
-                }
-
-        # Delete router entry
-        ctxt = ctx.get_admin_context()
-        ctxt = havana_api.get_evs_context(ctxt)
-        with ctxt.session.begin(subtransactions=True):
-            cm_db_inst = common_db_mixin.CommonDbMixin()
-            query = common_db_mixin.CommonDbMixin._model_query(cm_db_inst,
-                                                               ctxt,
-                                                               havana_api.
-                                                               Router)
-            router = query.filter(havana_api.Router.id == router_id).one()
-            ctxt.session.delete(router)
-
-    engine = sa.create_engine(SQL_CONNECTION)
-    meta = MetaData()
-    conn = engine.connect()
-    trans = conn.begin()
-    meta.reflect(engine)
-
-    # Remove router as a constraint from existing tables,
-    # Drop the routers table to remove old schema
-    for t in meta.tables.values():
-        for fk in t.foreign_keys:
-            if fk.column.table.name == "routers":
-                if fk.constraint.name:
-                    engine.execute(DropConstraint(fk.constraint))
-    for t in meta.tables.values():
-        if t.name == "routers":
-            t.drop(bind=conn)
-
-    # Remove floatingip as a constraint from existing tables,
-    # Drop the floatingip table to remove old schema
-    for t in meta.tables.values():
-        for fk in t.foreign_keys:
-            if fk.column.table.name == "floatingips":
-                if fk.constraint.name:
-                    engine.execute(DropConstraint(fk.constraint))
-    for t in meta.tables.values():
-        if t.name == "floatingips":
-            t.drop(bind=conn)
-    conn.close()
-
-    # Add the routers and floatingips using the schema in l3_db.py
-
-    setattr(l3_db.Router, 'enable_snat', sa.Column(sa.Boolean,
-            default=True, server_default=sql.true(), nullable=False))
-    neutron_engine = sa.create_engine(SQL_CONNECTION)
-    model_base.BASEV2.metadata.bind = neutron_engine
-    model_base.BASEV2.metadata.create_all(neutron_engine)
-    if routers:
-        ctxt = ctx.get_admin_context()
-        with ctxt.session.begin(subtransactions=True):
-            router_db = l3_db.Router(id=router_id,
-                                     tenant_id=r['tenant_id'],
-                                     name=rt['name'],
-                                     admin_state_up=rt['admin_state_up'],
-                                     gw_port_id=rt['gw_port_id'],
-                                     status="ACTIVE")
-            ctxt.session.add(router_db)
-            print "\nrouter=%s updated" % rt['name']
-        with ctxt.session.begin(subtransactions=True):
-            for i, j in router_port_ids.iteritems():
-                router_port = l3_db.RouterPort(
-                    port_id=i,
-                    router_id=router_id,
-                    port_type=j)
-                ctxt.session.add(router_port)
-
-    if floatingips:
-        ctxt = ctx.get_admin_context()
-        with ctxt.session.begin(subtransactions=True):
-            for i in fl:
-                fl_db = l3_db.FloatingIP(
-                    id=i['id'],
-                    floating_ip_address=i['floating_ip_address'],
-                    floating_network_id=i['floating_network_id'],
-                    floating_port_id=i['floating_port_id'],
-                    fixed_port_id=i['fixed_port_id'],
-                    fixed_ip_address=i['fixed_ip_address'],
-                    router_id=i['router_id'],
-                    tenant_id=i['tenant_id'])
-                ctxt.session.add(fl_db)
-                print "\nfloatingip=%s updated" % i['floating_ip_address']
-
-    print "\nEnd Migration."
-
-
-if __name__ == '__main__':
-    main()
--- a/components/openstack/neutron/files/evs/migrate/havana_api.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-#
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import ConfigParser
-
-from sqlalchemy.ext import declarative
-from sqlalchemy import orm
-import sqlalchemy as sa
-
-from neutron.context import ContextBase
-from neutron.db import model_base
-from neutron.openstack.common import uuidutils
-
-EVS_DB_BASE = declarative.declarative_base(cls=model_base.NeutronBaseV2)
-EVS_DB_ENGINE = None
-EVS_DB_MAKER = None
-
-
-class EVSContext(ContextBase):
-    @property
-    def session(self):
-        return self._session
-
-    @session.setter
-    def session(self, session):
-        self._session = session
-
-
-def configure_db():
-    global EVS_DB_ENGINE
-    if not EVS_DB_ENGINE:
-        config = ConfigParser.RawConfigParser()
-        config.readfp(open("/etc/neutron/neutron.conf"))
-        if config.has_option("database", 'connection'):
-            sql_connection = config.get("database", 'connection')
-        else:
-            sql_connection = 'sqlite:////var/lib/neutron/neutron.sqlite'
-        EVS_DB_ENGINE = sa.create_engine(sql_connection, echo=False)
-        EVS_DB_BASE.metadata.create_all(EVS_DB_ENGINE)
-
-
-def get_session(autocommit=True, expire_on_commit=False):
-    global EVS_DB_ENGINE, EVS_DB_MAKER
-    assert EVS_DB_ENGINE
-    if not EVS_DB_MAKER:
-        EVS_DB_MAKER = orm.sessionmaker(bind=EVS_DB_ENGINE,
-                                        autocommit=autocommit,
-                                        expire_on_commit=expire_on_commit)
-    return EVS_DB_MAKER()
-
-
-def get_evs_context(context):
-    """Overrides the Neutron DB session with EVS DB session"""
-
-    evs_context = EVSContext.from_dict(context.to_dict())
-    evs_context.session = get_session()
-
-    return evs_context
-
-
-class Router(EVS_DB_BASE):
-    """Represents a v2 neutron router."""
-
-    id = sa.Column(sa.String(36), primary_key=True,
-                   default=uuidutils.generate_uuid)
-    name = sa.Column(sa.String(255))
-    status = sa.Column(sa.String(16))
-    admin_state_up = sa.Column(sa.Boolean)
-    tenant_id = sa.Column(sa.String(255))
-    gw_port_id = sa.Column(sa.String(36))
-    gw_port_network_id = sa.Column(sa.String(36))
-
-
-class FloatingIP(EVS_DB_BASE):
-    """Represents a floating IP address.
-
-    This IP address may or may not be allocated to a tenant, and may or
-    may not be associated with an internal port/ip address/router.
-    """
-
-    id = sa.Column(sa.String(36), primary_key=True,
-                   default=uuidutils.generate_uuid)
-    floating_ip_address = sa.Column(sa.String(64), nullable=False)
-    floating_network_id = sa.Column(sa.String(36), nullable=False)
-    floating_port_id = sa.Column(sa.String(36), nullable=False)
-    fixed_port_id = sa.Column(sa.String(36))
-    fixed_ip_address = sa.Column(sa.String(64))
-    router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
-    tenant_id = sa.Column(sa.String(255))
--- a/components/openstack/neutron/files/evs/migrate/migrate-evs-to-ovs	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1740 +0,0 @@
-#!/usr/bin/python2.7
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# There are four aspects to migrate an OpenStack cloud running Neutron EVS
-# to Neutron ML2 + OVS and they are enumerated below. This script needs to
-# be run on each of the nodes that is either -- compute, controller, or
-# network -- and the script infers the role of the node based on the SMF
-# services running and does one or more of the operations enumerated below.
-#
-# 1. Populate Neutron ML2 tables
-# ------------------------------
-# Neutron ML2 plugin uses a different set of tables to manage various
-# network types and mechanism drivers underneath it. The names of these
-# tables start with ml2_* and the content of these tables will need to be
-# inferred from other Neutron tables and from EVS controller
-#
-# 2. Update existing configuration files
-# --------------------------------------
-# Following files need to be updated for various Neutron services.
-#  - /etc/neutron/neutron.conf
-#   - change core_plugin option to neutron.plugins.ml2.plugin.Ml2Plugin
-#
-#  - /etc/neutron/dhcp_agent.ini
-#   - change interface_driver option to \
-#    neutron.agent.solaris.interface.SolarisOVSInterfaceDriver
-#   - set ovs_integration_bridge to br_int0
-#
-#  - /etc/neutron/l3_agent.ini
-#   - change interface_driver option to \
-#       neutron.agent.solaris.interface.SolarisOVSInterfaceDriver
-#   - set ovs_integration_bridge to br_int0
-#   - set external_network_bridge to br_ex0
-#   - add service tenant's neutron user credentials to communicate with
-#       neutron-server
-#
-# Following files need to be updated on every node where nova-compute runs.
-#  - /etc/nova/nova.conf
-#    The only change to this file is to add an ovs_bridge
-#    option set to 'br_int0' (default OVS bridge to which various VNICs
-#    (Neutron ports) are added)
-#
-# 3. Create new configuration files
-# ---------------------------------
-# Following new file needs to be created on the node running neutron-server.
-#  - /etc/neutron/plugins/ml2/ml2_conf.ini
-#
-# Following new file needs to be created on every node running either
-# nova-compute, neutron-dhcp-agent, or neutron-l3-agent.
-#  - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
-#
-# The majority of the contents of the file is inferred from EVS controller
-#
-# 4. Migrate all the VMs from EVS to OVS
-# --------------------------------------
-# Each VM spawned in a Neutron EVS cloud has one global (tenant) property and,
-# per anet, two anet (evs and vport) properties that are EVS specific; these
-# need to be cleared. Before clearing them, we first fetch the relevant
-# information (MAC address, lower-link, and such) from the EVS controller for
-# a given anet -- uniquely identified by <tenant, evs, vport> -- and
-# explicitly set the corresponding anet properties. This step is repeated for
-# the other EVS-based anets, if any, in the VM (a sketch follows this comment
-# block).
-#
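-# For illustration only, after the migration the DHCP and L3 agent
-# configuration files end up with entries along these lines (the interface
-# driver value mirrors the OVS_INTFC_DRIVER constant defined below):
-#
-#   [DEFAULT]
-#   interface_driver = <OVS_INTFC_DRIVER>
-#   ovs_integration_bridge = br_int0
-#   external_network_bridge = br_ex0    (l3_agent.ini only)
-#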
-
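-# Illustrative sketch only (nothing below is executed here): aspect 4 above
-# amounts to the following use of the ZoneConfig/ZoneUtil helpers defined
-# later in this file; the zone name, MAC address, and lower-link are
-# hypothetical values.
-#
-#   zone = ZoneUtil().get_zone_by_name('instance-00000001')
-#   with ZoneConfig(zone) as zc:
-#       for anet_rsc in zc.get_resources('anet'):
-#           zc.clear_resource_props(anet_rsc, ['evs', 'vport'])
-#           zc.set_resource_prop('anet', 'mac-address', '2:8:20:ab:cd:ef',
-#                                [zonemgr.Property('linkname', 'net0')])
-#           zc.set_resource_prop('anet', 'lower-link', 'l3stub0',
-#                                [zonemgr.Property('linkname', 'net0')])
-#       zc.clear_resource_props('global', ['tenant'])
-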
-import argparse
-from collections import OrderedDict
-from datetime import datetime
-import iniparse
-import netaddr as na
-import netifaces as ni
-import os
-import pwd
-import re
-from shutil import copy2, move
-import signal
-import socket
-import sqlalchemy as sa
-from subprocess import check_output, check_call, CalledProcessError, PIPE
-import sys
-import uuid
-
-# gettext alias for the _() translation wrappers used throughout this script
-from gettext import gettext as _
-
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evscntl
-import rad.bindings.com.oracle.solaris.rad.zonemgr_1 as zonemgr
-import rad.client as radcli
-import rad.connect as radcon
-
-from oslo_db.sqlalchemy import session
-from neutronclient.v2_0 import client as neutron_client
-from neutron.extensions import portbindings
-from neutron.openstack.common import uuidutils
-
-# SMF services
-SVC_NOVA_COMPUTE = 'nova-compute:default'
-SVC_NEUTRON_SERVER = 'neutron-server:default'
-SVC_DHCP_AGENT = 'neutron-dhcp-agent:default'
-SVC_L3_AGENT = 'neutron-l3-agent:default'
-SVC_METADATA_AGENT = 'neutron-metadata-agent:default'
-SVC_OVS_AGENT = 'neutron-openvswitch-agent:default'
-SVC_VSWITCH_SERVER = 'vswitch-server:default'
-SVC_OVSDB_SERVER = 'ovsdb-server:default'
-SVC_NEUTRON_UPGRADE = 'neutron-upgrade:default'
-
-
-ALL_SVCS = [SVC_NEUTRON_SERVER, SVC_DHCP_AGENT, SVC_L3_AGENT, SVC_NOVA_COMPUTE]
-curnode_svcs = []
-
-# conf files
-NEUTRON_CONF = '/etc/neutron/neutron.conf'
-ML2_INI = '/etc/neutron/plugins/ml2/ml2_conf.ini'
-OVS_INI = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
-EVS_INI = '/etc/neutron/plugins/evs/evs_plugin.ini'
-DHCP_INI = '/etc/neutron/dhcp_agent.ini'
-L3_INI = '/etc/neutron/l3_agent.ini'
-METADATA_INI = '/etc/neutron/metadata_agent.ini'
-NOVA_CONF = '/etc/nova/nova.conf'
-
-# constants
-ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-OVS_INTFC_DRIVER = 'neutron.agent.solaris.interface.OVSInterfaceDriver'
-OVS_INT_BRIDGE = 'br_int0'
-OVS_EXT_BRIDGE = 'br_ex0'
-VXLAN_UPLINK_PORT = 'ovs.vxlan1'
-FLAT_PHYS_NET = 'flatnet'
-EXT_VLAN_PHYS_NET = 'extnet'
-RABBITMQ_DEFAULT_USERID = 'guest'
-RABBITMQ_DEFAULT_PASSWORD = 'guest'
-L2_TYPE_VLAN = 'vlan'
-L2_TYPE_VXLAN = 'vxlan'
-L2_TYPE_FLAT = 'flat'
-UID_NEUTRON = 84
-UID_NOVA = 85
-
-# file ownership
-file_owner = {
-    NEUTRON_CONF: UID_NEUTRON,
-    ML2_INI: UID_NEUTRON,
-    OVS_INI: UID_NEUTRON,
-    EVS_INI: UID_NEUTRON,
-    DHCP_INI: UID_NEUTRON,
-    L3_INI: UID_NEUTRON,
-    METADATA_INI: UID_NEUTRON,
-    NOVA_CONF: UID_NOVA
-}
-
-# LOGGING LEVELS
-LOG_DEBUG = 'DEBUG:'
-LOG_INFO = 'INFO:'
-LOG_WARN = 'WARN:'
-LOG_ERROR = 'ERROR:'
-
-HOSTNAME = socket.gethostname().split('.')[0]
-
-evsutil = None
-l2type = None
-external_network_datalink = None
-external_network_name = None
-external_network_vid = None
-bridge_mappings = {}
-neutron_conn = {}
-
-
-def log_msg(level, msg, oneliner=True):
-    if oneliner:
-        msg = msg.replace('\n', ' ')
-        msg = re.sub(r'\s\s+', ' ', msg)
-    print level, msg
-
-
-class ZoneConfig(object):
-    """ZoneConfig - context manager for access zone configurations.
-    Automatically opens the configuration for a zone and commits any changes
-    before exiting
-    """
-    def __init__(self, zone):
-        """zone is a zonemgr object representing either a kernel zone or
-        non-global zone.
-        """
-        self.zone = zone
-        self.editing = False
-
-    def __enter__(self):
-        """enables the editing of the zone."""
-        try:
-            self.zone.editConfig()
-            self.editing = True
-            return self
-        except:
-            raise
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        """looks for any kind of exception before exiting.  If one is found,
-        cancel any configuration changes and reraise the exception.  If not,
-        commit the new configuration.
-        """
-        if exc_type is not None and self.editing:
-            # We received some kind of exception.  Cancel the config and raise.
-            self.zone.cancelConfig()
-            raise
-        else:
-            # commit the config
-            try:
-                self.zone.commitConfig()
-            except:
-                raise
-
-    def get_resources(self, resource_type):
-        """Get list of resources of specified type
-        """
-        try:
-            return self.zone.getResources(zonemgr.Resource(resource_type))
-        except:
-            raise
-
-    def set_resource_prop(self, resource, prop, value, rsc_filter=None):
-        """sets a property for an existing resource.
-        """
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            self.zone.setResourceProperties(resource,
-                                            [zonemgr.Property(prop, value)])
-        except:
-            raise
-
-    def clear_resource_props(self, resource, props, rsc_filter=None):
-        """Clear property values of a given resource
-        """
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            self.zone.clearResourceProperties(resource, props)
-        except:
-            raise
-
-    def lookup_resource_property(self, resource, prop, rsc_filter=None):
-        """Lookup specified property from specified Solaris Zone resource."""
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            val = self.zone.getResourceProperties(resource, [prop])
-        except radcli.ObjectError:
-            return None
-        except Exception:
-            raise
-        return val[0].value if val else None
-
-
-class ZoneUtil(object):
-    """Zone utility functions like getting list of zones, zone names etc.
-    """
-    def __init__(self):
-        self.rc = radcon.connect_unix()
-
-    def get_zone_by_name(self, name):
-        """Return a Solaris Zones object via RAD by name."""
-        try:
-            zone = self.rc.get_object(
-                zonemgr.Zone(), radcli.ADRGlobPattern({'name': name}))
-        except radcli.NotFoundError:
-            return None
-        except Exception:
-            raise
-        return zone
-
-    def _get_zone_objects(self):
-        """Return a list of all Solaris Zones objects via RAD."""
-        return self.rc.list_objects(zonemgr.Zone())
-
-    def get_zone_names(self):
-        """Return the names of all the instances known to the virtualization
-        layer, as a list.
-        """
-        instances_list = []
-        for zone in self._get_zone_objects():
-            instances_list.append(self.rc.get_object(zone).name)
-        return instances_list
-
-
-class EVSUtil():
-    """Use to access EVS info.
-    """
-    def __init__(self):
-        ctl_locn = self._get_evs_controller()
-        try:
-            self.rad_uri = radcon.RadURI(ctl_locn)
-        except ValueError as err:
-            raise SystemExit(_("Specified evs_controller is invalid: %s"), err)
-        try:
-            self._rc = self.rad_uri.connect()
-        except:
-            raise SystemExit(_("Cannot connect to EVS Controller"))
-        try:
-            self._evs_contr = self._rc.get_object(evscntl.EVSController())
-        except:
-            raise SystemExit(_("Failed to get EVS Controller"))
-        self.l2type = self._evs_contr.getProperty('l2-type')[0].current_value
-        self._evsinfo = None
-        self._vportinfo = None
-        self._l2rangeinfo = None
-        self._evs_cache = {}
-        # _global_vlanrange_to_nw_uplink does not contain host specific entries
-        # and is of the form:
-        # {comma separated vlanrange strings: (physical n/w name, uplink port)}
-        self._global_vlanrange_to_nw_uplink = {}
-        # _local_vlanrange_to_uplink contains only this host specific entries
-        # and is of the form:
-        # {comma separated vlanrange strings: uplink port}
-        self._local_vlanrange_to_uplink = {}
-        # global uplink port for flatnet
-        self._global_flat_nw_uplink = None
-        # local uplink port for flatnet
-        self._local_flat_nw_uplink = None
-
-    def _get_evs_controller(self):
-        if (set(curnode_svcs) &
-                set([SVC_NOVA_COMPUTE, SVC_DHCP_AGENT, SVC_L3_AGENT])):
-            try:
-                evsc = check_output(['/usr/sbin/evsadm', 'show-prop', '-co',
-                                     'value', '-p', 'controller']).strip()
-            except:
-                raise SystemExit(_("Could not determine EVS Controller "
-                                   "RAD URI"))
-            return evsc.strip()
-
-        assert SVC_NEUTRON_SERVER in curnode_svcs
-        # get evs_controller from EVS_INI
-        config = iniparse.ConfigParser()
-        config.readfp(open(EVS_INI))
-        try:
-            evsc = config.get("EVS", "evs_controller")
-        except:
-            return 'ssh://evsuser@localhost'
-        return evsc.strip()
-
-    @property
-    def evsinfo(self):
-        if not self._evsinfo:
-            self._evsinfo = self._evs_contr.getEVSInfo()
-        return self._evsinfo
-
-    @property
-    def vportinfo(self):
-        if not self._vportinfo:
-            self._vportinfo = self._evs_contr.getVPortInfo()
-        return self._vportinfo
-
-    @property
-    def l2rangeinfo(self):
-        if not self._l2rangeinfo:
-            self._l2rangeinfo = self._evs_contr.getL2TypeIdRange()
-        return self._l2rangeinfo
-
-    @property
-    def global_flat_nw_uplink(self):
-        if not self._global_flat_nw_uplink:
-            self.get_global_vlanrange_nw_uplink_map()
-        return self._global_flat_nw_uplink
-
-    @property
-    def local_flat_nw_uplink(self):
-        if not self._local_flat_nw_uplink:
-            self.get_local_vlanrange_uplink_map()
-        return self._local_flat_nw_uplink
-
-    def _get_vport(self, tenant_name, evs_uuid, vport_uuid):
-        pat = radcli.ADRGlobPattern({'tenant': tenant_name,
-                                     'evsuuid': evs_uuid,
-                                     'uuid': vport_uuid})
-        adrnames = self._rc.list_objects(evscntl.VPort(), pat)
-        if not adrnames:
-            return None
-        return self._rc.get_object(adrnames[0])
-
-    def get_macaddr(self, tenant_name, evs_uuid, vport_uuid):
-        vport = self._get_vport(tenant_name, evs_uuid, vport_uuid)
-        return vport.getProperty('macaddr')[0].current_value
-
-    def _get_evs(self, tenant_name, evs_uuid):
-        if evs_uuid in self._evs_cache:
-            return self._evs_cache[evs_uuid]
-        pat = radcli.ADRGlobPattern({'tenant': tenant_name,
-                                     'uuid': evs_uuid})
-        adrnames = self._rc.list_objects(evscntl.EVS(), pat)
-        if not adrnames:
-            return None
-        evs = self._rc.get_object(adrnames[0])
-        self._evs_cache[evs_uuid] = evs
-        return evs
-
-    def _vid_in_vidrange(self, vid, vidrange):
-        # vidrange is of the form 1-5,10-20,30-35
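-        # e.g. vid=12 with vidrange='1-5,10-20,30-35' returns True, while
-        # vid=25 with the same vidrange returns False.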
-        vlan_ranges = vidrange.split(',')
-        for vlan_range_str in vlan_ranges:
-            vlan_range = vlan_range_str.split("-")
-            vlan_start = int(vlan_range[0])
-            if len(vlan_range) == 2:
-                vlan_end = int(vlan_range[1]) + 1
-            else:
-                vlan_end = vlan_start + 1
-            if vid in xrange(vlan_start, vlan_end):
-                return True
-        return False
-
-    def get_global_vlanrange_nw_uplink_map(self):
-        if self._global_vlanrange_to_nw_uplink:
-            return self._global_vlanrange_to_nw_uplink
-        i = 1
-        extnet_found = False
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host or l2ri.name != 'uplink-port':
-                continue
-            uplink_port = l2ri.value
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vlan-range':
-                    if range_prop.name == 'flat-range':
-                        self._global_flat_nw_uplink = uplink_port
-                    continue
-                vlanrange = range_prop.value
-                phys_nw = ''
-                if external_network_vid and not extnet_found:
-                    extnet_found = self._vid_in_vidrange(external_network_vid,
-                                                         vlanrange)
-                    if extnet_found:
-                        phys_nw = EXT_VLAN_PHYS_NET
-                if not phys_nw:
-                    phys_nw = 'physnet' + str(i)
-                    i += 1
-                self._global_vlanrange_to_nw_uplink[vlanrange] = (phys_nw,
-                                                                  uplink_port)
-        return self._global_vlanrange_to_nw_uplink
-
-    def get_local_vlanrange_uplink_map(self):
-        if self._local_vlanrange_to_uplink:
-            return self._local_vlanrange_to_uplink
-        for l2ri in self.l2rangeinfo:
-            if not l2ri.host:
-                continue
-            l2ri_hostname = l2ri.host.split('.')[0]
-            if l2ri_hostname != HOSTNAME or l2ri.name != 'uplink-port':
-                continue
-            uplink_port = l2ri.value
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vlan-range':
-                    if range_prop.name == 'flat-range':
-                        self._local_flat_nw_uplink = uplink_port
-                    continue
-                vlanrange = range_prop.value
-                self._local_vlanrange_to_uplink[vlanrange] = uplink_port
-        return self._local_vlanrange_to_uplink
-
-    def _get_vlanrange_dict_val(self, vlanrangedict, vlanid):
-        """Each key in vlanrangedict is of the form
-        'vid_start_1-vid_end_1,vid_start_2-vid_end_2'..
-        This method parses the keys and finds the one which contains the
-        required vlanid and returns its corresponding dictionary value.
-        """
-        for vlan_ranges_str, value in vlanrangedict.iteritems():
-            if self._vid_in_vidrange(vlanid, vlan_ranges_str):
-                return value
-
-    def get_uplink_port(self, tenant_name, evs_uuid):
-        """ For VXLAN the uplink port is always ovs.vxlan1.
-        For flat, we can return local or global uplink port after executing
-        get_local_vlanrange_uplink_map() or get_global_vlanrange_uplink_map().
-        For vlan, to find we first find the vlan-id associated
-        with this evs. Then check which l2range object contains this vlan-id
-        for this host and get the corresponding uplink-port.
-        """
-        if l2type == L2_TYPE_VXLAN:
-            return VXLAN_UPLINK_PORT
-        elif l2type == L2_TYPE_FLAT:
-            if self.local_flat_nw_uplink:
-                return self.local_flat_nw_uplink
-            return self.global_flat_nw_uplink
-        assert l2type == L2_TYPE_VLAN
-        evs = self._get_evs(tenant_name, evs_uuid)
-        vlanid = int(evs.getProperty('vlanid')[0].current_value)
-        val = self._get_vlanrange_dict_val(
-            self.get_local_vlanrange_uplink_map(), vlanid)
-        if val:
-            return val
-        val = self._get_vlanrange_dict_val(
-            self.get_global_vlanrange_nw_uplink_map(), vlanid)[1]
-        return val
-
-    def get_vni_range_list(self):
-        vni_ranges_list = []
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host:
-                continue
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vxlan-range':
-                    continue
-                vni_ranges_list += range_prop.value.split(',')
-        return vni_ranges_list
-
-    def get_vxlan_addrs_and_uplinks(self):
-        local_vxlan_addr, local_uplink_port = '', ''
-        global_vxlan_addr, global_uplink_port = '', ''
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host:
-                if l2ri.host.split('.')[0] != HOSTNAME:
-                    # Don't care about other hosts' configurations
-                    continue
-                if l2ri.name == 'vxlan-addr':
-                    local_vxlan_addr = l2ri.value
-                    # if we found -h vxlan-addr, we don't need the other values
-                    break
-                elif l2ri.name == 'uplink-port':
-                    for range_prop in l2ri.range:
-                        if range_prop.name == 'vxlan-range':
-                            local_uplink_port = l2ri.value
-                            break
-            else:
-                if l2ri.name == 'vxlan-addr' and l2ri.value != '0.0.0.0':
-                    global_vxlan_addr = l2ri.value
-                else:
-                    for range_prop in l2ri.range:
-                        if range_prop.name == 'vxlan-range':
-                            global_uplink_port = l2ri.value
-                            break
-            if local_vxlan_addr and local_uplink_port and global_vxlan_addr \
-                    and global_uplink_port:
-                break
-        return (local_vxlan_addr, local_uplink_port, global_vxlan_addr,
-                global_uplink_port)
-
-
-def get_db_connection():
-    config = iniparse.ConfigParser()
-    config.readfp(open(NEUTRON_CONF))
-    if config.has_option('database', 'connection'):
-        connection = config.get('database', 'connection')
-    else:
-        raise SystemExit(_("Connection url for target DB not found."))
-    return connection
-
-
-class DBEVSToMl2(object):
-    def __init__(self):
-        self._table_names = ['ml2_network_segments', 'ml2_vxlan_allocations',
-                             'ml2_vlan_allocations', 'ml2_port_binding_levels',
-                             'ml2_port_bindings', 'router_extra_attributes']
-        self._vif_type = portbindings.VIF_TYPE_OVS
-        self._driver_type = 'openvswitch'
-        # _vlan_xrange_to_nw is a list of tuples to hold the mapping from
-        # vlan-id to physical_network. The tuple format is
-        # (xrange(vid_range_start, vid_range_end), physical_network).
-        self._vlan_xrange_to_nw = []
-
-    def __call__(self):
-        connection = get_db_connection()
-        engine = session.create_engine(connection)
-        metadata = sa.MetaData()
-        self._check_db_schema_version(engine, metadata)
-        # Autoload the ports table to ensure that foreign keys to it and
-        # the network table can be created for the new tables.
-        sa.Table('ports', metadata, autoload=True, autoload_with=engine)
-        metadata.create_all(engine)
-        self._clear_tables(engine, metadata)
-        self._get_vlanrange_mapping()
-        self._migrate_network_segments(engine, metadata)
-        self._migrate_vlan_allocations(engine)
-        self._migrate_vxlan_allocations(engine)
-        self._migrate_port_bindings(engine, metadata)
-        self._add_router_extra_attributes(engine, metadata)
-
-    def _check_db_schema_version(self, engine, metadata):
-        """Check that current version of the db schema is supported."""
-        supported_schema_version = 'kilo'
-        version_table = sa.Table(
-            'alembic_version', metadata, autoload=True, autoload_with=engine)
-        versions = [v[0] for v in engine.execute(version_table.select())]
-        if not versions:
-            raise ValueError(_("Missing version in alembic_versions table"))
-        elif len(versions) > 1:
-            raise ValueError(_("Multiple versions in alembic_versions table:"
-                               " %s") % versions)
-        current_version = versions[0]
-        if current_version != supported_schema_version:
-            raise SystemError(_("Unsupported database schema %(current)s. "
-                                "Please migrate your database to one of "
-                                " following versions: %(supported)s")
-                              % {'current': current_version,
-                                 'supported': supported_schema_version}
-                              )
-
-    def _clear_tables(self, engine, metadata):
-        for tbl_name in self._table_names:
-            sa.Table(tbl_name, metadata, autoload=True, autoload_with=engine)
-            tbl = metadata.tables[tbl_name]
-            engine.execute(tbl.delete())
-
-    def _get_vlanrange_mapping(self):
-        vlanrange_to_nw_uplink = evsutil.get_global_vlanrange_nw_uplink_map()
-        # mapping from vlan-id to physical_network
-        for vlan_ranges_str, (nw, _) in vlanrange_to_nw_uplink.iteritems():
-            vlan_ranges = vlan_ranges_str.split(',')
-            for vlan_range_str in vlan_ranges:
-                vlan_range = vlan_range_str.split("-")
-                vlan_start = int(vlan_range[0])
-                if len(vlan_range) == 2:
-                    vlan_end = int(vlan_range[1]) + 1
-                else:
-                    vlan_end = vlan_start + 1
-                self._vlan_xrange_to_nw.append((xrange(vlan_start, vlan_end),
-                                                nw))
-
-    def _get_phys_net(self, l2type, vid):
-        if l2type == L2_TYPE_VLAN:
-            for vid_range, phys in self._vlan_xrange_to_nw:
-                if vid in vid_range:
-                    return phys
-        elif l2type == L2_TYPE_FLAT:
-            return FLAT_PHYS_NET
-        return None
-
-    def _add_router_extra_attributes(self, engine, metadata):
-        routers = engine.execute("SELECT id FROM routers")
-        routers = list(routers)
-        records = []
-        for router in routers:
-            router_ext_attr = {}
-            router_ext_attr['router_id'] = router[0]
-            router_ext_attr['distributed'] = 0
-            router_ext_attr['service_router'] = 0
-            router_ext_attr['ha'] = 0
-            router_ext_attr['ha_vr_id'] = 0
-            records.append(router_ext_attr)
-
-        if records:
-            sa.Table('router_extra_attributes', metadata, autoload=True,
-                     autoload_with=engine)
-            router_ea = metadata.tables['router_extra_attributes']
-            engine.execute(router_ea.insert(), records)
-
-    def _migrate_network_segments(self, engine, metadata):
-        records = []
-        for evsinfo in evsutil.evsinfo:
-            segment = dict(id=uuidutils.generate_uuid())
-            segment['network_id'] = evsinfo.uuid
-            segment['segmentation_id'] = None
-            for prop in evsinfo.props:
-                if prop.name == 'l2-type':
-                    segment['network_type'] = prop.value
-                elif prop.name == 'vlanid' or prop.name == 'vni':
-                    segment['segmentation_id'] = int(prop.value)
-            phys_net = self._get_phys_net(segment['network_type'],
-                                          segment['segmentation_id'])
-            segment['physical_network'] = phys_net
-            records.append(segment)
-        if records:
-            sa.Table('ml2_network_segments', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_network_segments = metadata.tables['ml2_network_segments']
-            engine.execute(ml2_network_segments.insert(), records)
-
-    def _migrate_vxlan_allocations(self, engine):
-        vnis = []
-        for evsinfo in evsutil.evsinfo:
-            pdict = dict((prop.name, prop.value) for prop in evsinfo.props)
-            if L2_TYPE_VXLAN not in pdict.values():
-                continue
-            vnis.append(int(pdict['vni']))
-        records = [dict(vxlan_vni=vni, allocated=True) for vni in vnis]
-        if records:
-            metadata = sa.MetaData()
-            sa.Table('ml2_vxlan_allocations', metadata, autoload=True,
-                     autoload_with=engine)
-            vxlan_allocations = metadata.tables['ml2_vxlan_allocations']
-            engine.execute(vxlan_allocations.insert(), records)
-
-    def _migrate_vlan_allocations(self, engine):
-        vid_allocated_map = OrderedDict()
-        # initially set 'allocated' to False for all vids
-        for vid_range, _ in self._vlan_xrange_to_nw:
-            for vid in vid_range:
-                vid_allocated_map[vid] = False
-        for evsinfo in evsutil.evsinfo:
-            pdict = dict((prop.name, prop.value) for prop in evsinfo.props)
-            if L2_TYPE_VLAN not in pdict.values():
-                continue
-            vid = int(pdict['vlanid'])
-            vid_allocated_map[vid] = True
-        records = [
-            dict(physical_network=self._get_phys_net(L2_TYPE_VLAN, vid),
-                 vlan_id=vid, allocated=alloc)
-            for vid, alloc in vid_allocated_map.iteritems()
-        ]
-        if records:
-            metadata = sa.MetaData()
-            sa.Table('ml2_vlan_allocations', metadata, autoload=True,
-                     autoload_with=engine)
-            vlan_allocations = metadata.tables['ml2_vlan_allocations']
-            engine.execute(vlan_allocations.insert(), records)
-
-    def _get_port_segment_map(self, engine):
-        port_segments = engine.execute("""
-            SELECT ports_network.port_id, ml2_network_segments.id AS segment_id
-              FROM ml2_network_segments, (
-                SELECT ports.id AS port_id, ports.network_id
-                  FROM ports
-              ) AS ports_network
-              WHERE ml2_network_segments.network_id = ports_network.network_id
-        """)
-        return dict(x for x in port_segments)
-
-    def _migrate_port_bindings(self, engine, metadata):
-        ml2_bindings = []
-        ml2_binding_levels = []
-        port_segment_map = self._get_port_segment_map(engine)
-        metadata = sa.MetaData()
-        for vportinfo in evsutil.vportinfo:
-            binding = {}
-            binding['port_id'] = vportinfo.uuid
-            binding['host'] = vportinfo.hostname
-            if vportinfo.hostname:
-                binding['vif_type'] = self._vif_type
-                binding['vif_details'] = '{"port_filter": false, ' \
-                    '"ovs_hybrid_plug": false}'
-                ml2_bindings.append(binding)
-                binding_level = {}
-                binding_level['port_id'] = vportinfo.uuid
-                binding_level['host'] = vportinfo.hostname
-                binding_level['level'] = 0
-                binding_level['driver'] = self._driver_type
-                segment_id = port_segment_map.get(binding_level['port_id'])
-                if segment_id:
-                    binding_level['segment_id'] = segment_id
-                ml2_binding_levels.append(binding_level)
-            else:
-                binding['vif_type'] = 'unbound'
-                binding['vif_details'] = ''
-                ml2_bindings.append(binding)
-        if ml2_bindings:
-            sa.Table('ml2_port_bindings', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_port_bindings = metadata.tables['ml2_port_bindings']
-            engine.execute(ml2_port_bindings.insert(), ml2_bindings)
-        if ml2_binding_levels:
-            sa.Table('ml2_port_binding_levels', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_port_binding_lvls = metadata.tables['ml2_port_binding_levels']
-            engine.execute(ml2_port_binding_lvls.insert(), ml2_binding_levels)
-
-
-class NovaVmEVSToOVS(object):
-    def _zc_get_evs_vport_vals(self, zc, anet_rsc):
-        """Get mac-address and lower-link for this anet from evs.
-        """
-        mac_addr, uplink_port = None, None
-        tenant_name = zc.lookup_resource_property('global', 'tenant')
-        evs_uuid = zc.lookup_resource_property(anet_rsc, 'evs')
-        vport_uuid = zc.lookup_resource_property(anet_rsc, 'vport')
-        if not evs_uuid or not vport_uuid:
-            return mac_addr, uplink_port
-        mac_addr = evsutil.get_macaddr(tenant_name, evs_uuid, vport_uuid)
-        uplink_port = evsutil.get_uplink_port(tenant_name, evs_uuid)
-        return mac_addr, uplink_port
-
-    def migrate(self, zone):
-        """Update zonecfg by deleting evs-specific and adding ovs-specific conf
-        """
-        installed_port_uuids = []
-        with ZoneConfig(zone) as zc:
-            brand = zc.lookup_resource_property('global', 'brand')
-            anet_update_failed = False
-            for anet_rsc in zc.get_resources('anet'):
-                mac_addr, lower_link = self._zc_get_evs_vport_vals(zc,
-                                                                   anet_rsc)
-                if not mac_addr or not lower_link:
-                    anet_update_failed = True
-                    msg = "Failed to get ovs info for zone"
-                    log_msg(LOG_ERROR, msg)
-                    continue
-                if zone.state == 'installed':
-                    vport_uuid = zc.lookup_resource_property(anet_rsc, 'vport')
-                    if vport_uuid:
-                        installed_port_uuids.append(vport_uuid)
-                fname = 'id' if brand == 'solaris-kz' else 'linkname'
-                fvalue = zc.lookup_resource_property(anet_rsc, fname)
-                zc.clear_resource_props(anet_rsc, ['evs', 'vport'])
-                rsc_filter = [zonemgr.Property(fname, fvalue)]
-                zc.set_resource_prop('anet', 'mac-address', mac_addr,
-                                     rsc_filter)
-                zc.set_resource_prop('anet', 'lower-link', lower_link,
-                                     rsc_filter)
-
-            if not anet_update_failed:
-                zc.clear_resource_props('global', ['tenant'])
-        return installed_port_uuids
-
-
-class ConfigEVSToOVS():
-    def __init__(self):
-        # These are the configuration changes that are fixed, i.e., don't
-        # require extra computation. The data structure format is:
-        # _fixed = {config_file: [(section, param_name, param_value),]}
-        self._fixed = {
-            NEUTRON_CONF: [('DEFAULT', 'core_plugin', ML2_PLUGIN)],
-            ML2_INI: [('ml2_type_flat', 'flat_networks', 'flatnet')],
-            DHCP_INI: [('DEFAULT', 'interface_driver', OVS_INTFC_DRIVER),
-                       ('DEFAULT', 'ovs_integration_bridge', OVS_INT_BRIDGE)],
-            L3_INI: [('DEFAULT', 'interface_driver', OVS_INTFC_DRIVER),
-                     ('DEFAULT', 'ovs_integration_bridge', OVS_INT_BRIDGE),
-                     ('DEFAULT', 'external_network_bridge', OVS_EXT_BRIDGE)],
-            NOVA_CONF: [('neutron', 'ovs_bridge', OVS_INT_BRIDGE)]
-        }
-        # Config changes that are fixed depending on the l2-type
-        if l2type == L2_TYPE_VXLAN:
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'vxlan')]
-            self._fixed[OVS_INI] = [('ovs', 'enable_tunneling', 'True'),
-                                    ('agent', 'tunnel_types', 'vxlan')]
-        elif l2type == L2_TYPE_VLAN:
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'vlan')]
-        else:
-            assert l2type == L2_TYPE_FLAT
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'flat')]
-        self._vxlan_local_ip = None
-
-    def _read_config(self, conf_file):
-        config = iniparse.ConfigParser()
-        config.readfp(open(conf_file))
-        return config
-
-    def _write_config(self, conf_file, config):
-        with open(conf_file, 'wb+') as fp:
-            config.write(fp)
-
-    def _do_fixed(self, conf_file, config):
-        orig_conf_file = conf_file.replace('.migr', '')
-        if orig_conf_file not in self._fixed:
-            return
-        for sec, key, val in self._fixed[orig_conf_file]:
-            config.set(sec, key, val)
-
-    def _do_ml2_vlan_range(self, config):
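-        # Builds ML2's network_vlan_ranges string from the EVS vlan-range map,
-        # e.g. {'300-400': ('physnet1', ...), '240': ('extnet', ...)} becomes
-        # "physnet1:300:400,extnet:240:240" (values illustrative).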
-        vlanrange_to_nw_uplink = evsutil.get_global_vlanrange_nw_uplink_map()
-        nw_vlan_str_list = []
-        for vlan_ranges_str, (nw, _) in vlanrange_to_nw_uplink.iteritems():
-            vlan_ranges = vlan_ranges_str.split(',')
-            for vlan_range_str in vlan_ranges:
-                vlan_range = vlan_range_str.split("-")
-                vlan_start = vlan_end = vlan_range[0]
-                if len(vlan_range) == 2:
-                    vlan_end = vlan_range[1]
-                nw_vlan_str = nw + ":" + vlan_start + ":" + vlan_end
-                nw_vlan_str_list.append(nw_vlan_str)
-        nw_vlan_strs = ",".join(nw_vlan_str_list)
-        config.set('ml2_type_vlan', 'network_vlan_ranges', nw_vlan_strs)
-
-    def _do_ml2_vni_range(self, config):
-        vni_ranges_list = evsutil.get_vni_range_list()
-        vni_ranges_list = [vr.replace('-', ':') for vr in vni_ranges_list]
-        vni_ranges = ",".join(vni_ranges_list)
-        config.set('ml2_type_vxlan', 'vni_ranges', vni_ranges)
-
-    def _get_rabbit_host(self, conf_file):
-        config = self._read_config(conf_file)
-        host = 'localhost'
-        if config.has_option('DEFAULT', 'rabbit_host'):
-            host = config.get('DEFAULT', 'rabbit_host')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_host'):
-            host = config.get('oslo_messaging_rabbit', 'rabbit_host')
-
-        port = '5672'
-        if config.has_option('DEFAULT', 'rabbit_port'):
-            port = config.get('DEFAULT', 'rabbit_port')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_port'):
-            port = config.get('oslo_messaging_rabbit', 'rabbit_port')
-
-        hosts = ':'.join([host, port])
-        if config.has_option('DEFAULT', 'rabbit_hosts'):
-            hosts = config.get('DEFAULT', 'rabbit_hosts')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_hosts'):
-            hosts = config.get('oslo_messaging_rabbit', 'rabbit_hosts')
-
-        userid = RABBITMQ_DEFAULT_USERID
-        if config.has_option('DEFAULT', 'rabbit_userid'):
-            userid = config.get('DEFAULT', 'rabbit_userid')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_userid'):
-            userid = config.get('oslo_messaging_rabbit', 'rabbit_userid')
-
-        passwd = RABBITMQ_DEFAULT_PASSWORD
-        if config.has_option('DEFAULT', 'rabbit_password'):
-            passwd = config.get('DEFAULT', 'rabbit_password')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_password'):
-            passwd = config.get('oslo_messaging_rabbit', 'rabbit_password')
-        passwd += '\n'
-
-        return (host, hosts, userid, passwd)
-
-    def _do_rabbit_host(self, config):
-        if SVC_NOVA_COMPUTE in curnode_svcs:
-            (host, hosts, userid, passwd) = self._get_rabbit_host(NOVA_CONF)
-        elif set([SVC_DHCP_AGENT, SVC_L3_AGENT]) & set(curnode_svcs):
-            (host, hosts, userid, passwd) = self._get_rabbit_host(NEUTRON_CONF)
-        else:
-            return
-        if not config.has_section('oslo_messaging_rabbit'):
-            config.add_section('oslo_messaging_rabbit')
-        config.set('oslo_messaging_rabbit', 'rabbit_host', host)
-        config.set('oslo_messaging_rabbit', 'rabbit_hosts', hosts)
-        config.set('oslo_messaging_rabbit', 'rabbit_userid', userid)
-        config.set('oslo_messaging_rabbit', 'rabbit_password', passwd)
-
-    def _get_local_ip(self, if_str='', subnet_str=''):
-        if not if_str and not subnet_str:
-            return None
-        for iface in ni.interfaces():
-            if if_str:
-                if iface != if_str:
-                    continue
-                # Only IPv4 addresses, not considering IPv6 since OVS
-                # doesn't support IPv6 VXLANs
-                for addrinfo in ni.ifaddresses(iface)[ni.AF_INET]:
-                    addr = addrinfo['addr']
-                    if subnet_str:
-                        if na.IPAddress(addr) in na.IPNetwork(subnet_str):
-                            return addr
-                    else:
-                        if addr != '127.0.0.1':
-                            return addr
-                break
-            else:
-                for addrinfo in ni.ifaddresses(iface)[ni.AF_INET]:
-                    addr = addrinfo['addr']
-                    if na.IPAddress(addr) in na.IPNetwork(subnet_str):
-                        return addr
-        return None
-
-    def _get_vxlan_local_ip(self):
-        """Returns the local_ip for vxlan_endpoint. It is found as follows:
-        1. If host specific vxlan-addr is present, use it.
-        2. If local uplink-port and global vxlan-addr(subnet) is present, use
-        the first IP address on that uplink-port which is in the subnet.
-        3. If local uplink-port, use the first IP on the uplink-port.
-        4. If global uplink-port and global vxlan-addr(subnet), use first
-        IP address on that uplink-port which is in the subnet.
-        5. If global vxlan-addr is configured only, use the first IP address
-        on any interface that is in the subnet of global vxlan-addr.
-        """
-        if self._vxlan_local_ip:
-            return self._vxlan_local_ip
-        (laddr, lup, gaddr, gup) = evsutil.get_vxlan_addrs_and_uplinks()
-        if laddr:
-            self._vxlan_local_ip = laddr
-        elif lup:
-            self._vxlan_local_ip = self._get_local_ip(lup, gaddr)
-        else:
-            self._vxlan_local_ip = self._get_local_ip(gup, gaddr)
-        return self._vxlan_local_ip
-
-    def _do_neutron_credentials(self, config, input_file, section):
-        neutron_cfg = self._read_config(input_file)
-        tenant = None
-        if neutron_cfg.has_option(section, 'admin_tenant_name'):
-            tenant = neutron_cfg.get(section, 'admin_tenant_name')
-            config.set('DEFAULT', 'admin_tenant_name', tenant)
-        user = None
-        if neutron_cfg.has_option(section, 'admin_user'):
-            user = neutron_cfg.get(section, 'admin_user')
-            config.set('DEFAULT', 'admin_user', user)
-        passwd = None
-        if neutron_cfg.has_option(section, 'admin_password'):
-            passwd = neutron_cfg.get(section, 'admin_password')
-            config.set('DEFAULT', 'admin_password', passwd)
-        auth_uri_option = ('auth_uri' if input_file == NEUTRON_CONF else
-                           'auth_url')
-        if neutron_cfg.has_option(section, auth_uri_option):
-            auth_url = neutron_cfg.get(section, auth_uri_option)
-            config.set('DEFAULT', 'auth_url', auth_url)
-        if neutron_cfg.has_option(section, 'auth_region'):
-            auth_region = neutron_cfg.get(section, 'auth_region')
-            config.set('DEFAULT', 'auth_region', auth_region)
-
-        if any(val and '%SERVICE_' in val for val in (tenant, user, passwd)):
-            msg = "Neutron credentials are incomplete in %s" % L3_INI
-            log_msg(LOG_WARN, msg)
-
-    def _backup_file(self, orig_file):
-        today = datetime.now().strftime("%Y%m%d%H%M%S")
-        new_file = orig_file + '.' + today
-        try:
-            self._copy_file(orig_file, new_file)
-            msg = "Backed up current %s in %s" % (orig_file, new_file)
-            log_msg(LOG_DEBUG, msg)
-        except (IOError, OSError):
-            msg = "Unable to create a backup of %s" % orig_file
-            log_msg(LOG_WARN, msg)
-
-    def _copy_file(self, orig_file, new_file):
-        copy2(orig_file, new_file)
-        uid = file_owner[orig_file]
-        os.chown(new_file, uid, uid)
-
-    def update_neutron_conf(self):
-        self._backup_file(NEUTRON_CONF)
-        msg = "Updating %s" % NEUTRON_CONF
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(NEUTRON_CONF, NEUTRON_CONF + '.migr')
-        conf_file = NEUTRON_CONF + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        service_plugins = 'router'
-        if config.has_option('DEFAULT', 'service_plugins'):
-            service_plugins = config.get('DEFAULT', 'service_plugins')
-            if service_plugins:
-                service_plugins = 'router,' + service_plugins
-            else:
-                service_plugins = 'router'
-        config.set('DEFAULT', 'service_plugins', service_plugins)
-        self._write_config(conf_file, config)
-        move(conf_file, NEUTRON_CONF)
-
-    def update_ml2_conf_ini(self):
-        """
-        Reference target configuration state:
-        [ml2]
-        type_drivers = flat,vlan,vxlan
-        tenant_network_types = vlan
-        mechanism_drivers = openvswitch
-        [ml2_type_flat]
-        flat_networks = external
-        [ml2_type_vlan]
-        network_vlan_ranges = physnet1:300:400,extnet:240:240
-        [ml2_type_gre]
-        [ml2_type_vxlan]
-        [securitygroup]
-        enable_security_group = False
-        enable_ipset = False
-        """
-        self._backup_file(ML2_INI)
-        msg = "Updating %s" % ML2_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(ML2_INI, ML2_INI + '.migr')
-        conf_file = ML2_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        if l2type == L2_TYPE_VXLAN:
-            self._do_ml2_vni_range(config)
-        elif l2type == L2_TYPE_VLAN:
-            self._do_ml2_vlan_range(config)
-        self._write_config(conf_file, config)
-        move(conf_file, ML2_INI)
-
-    def update_ovs_neutron_plugin_ini(self, bmap_str):
-        """
-        Reference target configuration state:
-        [ovs]
-        integration_bridge = br_int0
-        bridge_mappings = physnet1:l3stub0 (for VLAN)
-        local_ip = A.B.C.D (for VXLAN)
-        enable_tunneling = True (for VXLAN)
-        [agent]
-        root_helper =
-        tunnel_types = vxlan (for VXLAN)
-        [securitygroup]
-        enable_security_group = False
-        """
-        self._backup_file(OVS_INI)
-        msg = "Updating %s" % OVS_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(OVS_INI, OVS_INI + '.migr')
-        conf_file = OVS_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        if l2type == L2_TYPE_VXLAN:
-            local_ip = self._get_vxlan_local_ip()
-            if local_ip:
-                config.set('ovs', 'local_ip', local_ip)
-            else:
-                msg = """Could not determine IP address for VXLAN endpoint.
-                Manually set the local_ip option in ovs_neutron_plugin.ini"""
-                log_msg(LOG_WARN, msg)
-        if bmap_str:
-            config.set('ovs', 'bridge_mappings', bmap_str)
-        self._do_rabbit_host(config)
-        self._write_config(conf_file, config)
-        move(conf_file, OVS_INI)
-
-    def update_dhcp_agent_ini(self):
-        self._backup_file(DHCP_INI)
-        msg = "Updating %s" % DHCP_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(DHCP_INI, DHCP_INI + '.migr')
-        conf_file = DHCP_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        self._write_config(conf_file, config)
-        move(conf_file, DHCP_INI)
-
-    def update_l3_agent_ini(self):
-        self._backup_file(L3_INI)
-        msg = "Updating %s" % L3_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(L3_INI, L3_INI + '.migr')
-        conf_file = L3_INI + '.migr'
-        config = self._read_config(conf_file)
-        if l2type == L2_TYPE_VLAN:
-            global external_network_datalink
-            if config.has_option('DEFAULT', 'external_network_datalink'):
-                external_network_datalink = \
-                    config.get('DEFAULT', 'external_network_datalink')
-                if not external_network_datalink:
-                    external_network_datalink = None
-            else:
-                external_network_datalink = 'net0'
-        self._do_fixed(conf_file, config)
-        if is_svc_online(SVC_METADATA_AGENT):
-            self._do_neutron_credentials(config, METADATA_INI, "DEFAULT")
-        else:
-            self._do_neutron_credentials(config, NEUTRON_CONF,
-                                         "keystone_authtoken")
-        self._write_config(conf_file, config)
-        move(conf_file, L3_INI)
-
-    def update_nova_conf(self):
-        self._backup_file(NOVA_CONF)
-        msg = "Updating %s" % NOVA_CONF
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(NOVA_CONF, NOVA_CONF + '.migr')
-        conf_file = NOVA_CONF + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        self._write_config(conf_file, config)
-        move(conf_file, NOVA_CONF)
-
-    def update_Open_vSwitch_other_config(self, bmap_str):
-        bm_str = "other_config:bridge_mappings=" + bmap_str
-        try:
-            check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', 'set',
-                        'Open_vSwitch', '.', bm_str])
-            msg = """Successfully set other_config column in Open_vSwitch table
-            with value %s.""" % bm_str
-            log_msg(LOG_DEBUG, msg)
-        except:
-            msg = """Failed to set other_config column in Open_vSwitch table
-            with value %s.""" % bm_str
-            log_msg(LOG_WARN, msg)
-
-
-def enable_svc(svcname, exit_on_fail=False):
-    msg = "Enabling service: %s" % svcname
-    log_msg(LOG_INFO, msg)
-    cmd = ['/usr/bin/pfexec', '/usr/sbin/svcadm', 'enable', '-s']
-    cmd.append(svcname)
-    try:
-        check_call(cmd, stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = """Failed to enable %s: %s.
-        Please verify "and manually enable the service""" % (svcname, err)
-        log_msg(LOG_ERROR, msg)
-        if exit_on_fail:
-            msg = "Exiting..."
-            log_msg(LOG_INFO, msg)
-            sys.exit(1)
-
-
-def disable_svc(svcname):
-    msg = "Disabling service: %s" % svcname
-    log_msg(LOG_INFO, msg)
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/svcadm', 'disable', '-s',
-                    svcname], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = "Failed to disable %s: %s." % (svcname, err)
-        log_msg(LOG_ERROR, msg)
-
-
-def nova_evs_to_ovs(migr_conf_obj):
-    # step-1: disable nova-compute
-    disable_svc(SVC_NOVA_COMPUTE)
-
-    # step-2: update zones' config
-    migr_vm = NovaVmEVSToOVS()
-    determine_neutron_conn_params()
-    zoneutil = ZoneUtil()
-    for name in zoneutil.get_zone_names():
-        zone = zoneutil.get_zone_by_name(name)
-        if not zone:
-            msg = "skipping EVS-OVS migration of VM %s; not found" % name
-            log_msg(LOG_DEBUG, msg)
-            continue
-        if zone.state == 'incomplete':
-            msg = """skipping EVS-OVS migration of VM %s; It is in 'incomplete'
-            state""" % name
-            log_msg(LOG_DEBUG, msg)
-            continue
-        with ZoneConfig(zone) as zc:
-            tenant_name = zc.lookup_resource_property('global', 'tenant')
-            if not tenant_name:
-                msg = """skipping EVS-OVS migration of non-openstack
-                managed VM %s""" % name
-                log_msg(LOG_DEBUG, msg)
-                continue
-            try:
-                uuid.UUID(tenant_name)
-            except:
-                msg = """skipping EVS-OVS migration of non-openstack
-                managed VM %s""" % name
-                log_msg(LOG_DEBUG, msg)
-                continue
-        msg = "Performing EVS-OVS migration of VM: %s" % name
-        log_msg(LOG_INFO, msg)
-
-        # step 2.1: migrate zone config
-        installed_port_uuids = migr_vm.migrate(zone)
-        # step 2.2: shutdown
-        if zone.state == 'running':
-            try:
-                msg = "Shutting down VM: %s, after modifying zone's config" % \
-                    name
-                log_msg(LOG_DEBUG, msg)
-                zone.shutdown()
-            except Exception as ex:
-                msg = """ Failed to shutdown instance %s. The zone's config
-                has been modified to OVS. Manually start the VM""" % name
-                log_msg(LOG_WARN, msg)
-        if installed_port_uuids:
-            nc = neutron_client.Client(
-                username=neutron_conn['username'],
-                password=neutron_conn['password'],
-                tenant_name=neutron_conn['tenant'],
-                auth_url=neutron_conn['auth_url'])
-            for vport_uuid in installed_port_uuids:
-                port_req_body = {'port': {'binding:host_id': HOSTNAME}}
-                nc.update_port(vport_uuid, port_req_body)
-
-    # step-3: change nova.conf
-    migr_conf_obj.update_nova_conf()
-
-    # we will enable the service later
-
-
-def dhcp_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-dhcp-agent
-    disable_svc(SVC_DHCP_AGENT)
-
-    # step-2: change dhcp_agent.ini
-    migr_conf_obj.update_dhcp_agent_ini()
-
-    # we will enable the service later
-
-
-def add_ovs_bridge(bridge_name):
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', '--',
-                    '--may-exist', 'add-br', bridge_name], stdout=PIPE,
-                   stderr=PIPE)
-        msg = "Created %s ovs bridge" % bridge_name
-        log_msg(LOG_DEBUG, msg)
-        if bridge_name == OVS_EXT_BRIDGE:
-            check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl',
-                        'br-set-external-id', OVS_EXT_BRIDGE, 'bridge-id',
-                        OVS_EXT_BRIDGE])
-    except CalledProcessError as err:
-        msg = "Failed to create %s ovs bridge: %s" % (bridge_name, err)
-        log_msg(LOG_ERROR, msg)
-
-
-def l3_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-l3-agent
-    disable_svc(SVC_L3_AGENT)
-
-    # step-2: change l3_agent.ini and ovs_neutron_plugin.ini
-    migr_conf_obj.update_l3_agent_ini()
-
-    # step-3: create external network bridge
-    add_ovs_bridge(OVS_EXT_BRIDGE)
-
-    # we will enable the service later
-
-
-def neutron_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-server
-    disable_svc(SVC_NEUTRON_SERVER)
-
-    # step-2: migrate DB to ml2
-    migr_ml2 = DBEVSToMl2()
-    migr_ml2()
-
-    # step-3: change ml2_conf.ini and neutron.conf
-    migr_conf_obj.update_ml2_conf_ini()
-    migr_conf_obj.update_neutron_conf()
-
-    # step-4: enable neutron-server
-    enable_svc(SVC_NEUTRON_SERVER)
-
-
-def is_svc_online(svc, exit_on_maintenance=False):
-    try:
-        state = check_output(['/usr/bin/svcs', '-H', '-o', 'state', svc],
-                             stderr=PIPE)
-    except:
-        return False
-    if exit_on_maintenance and state.strip() == 'maintenance':
-        msg = """Unable to perform EVS to OVS migration as %s is in maintenance
-            state. Please fix the errors and clear the svc before running
-            migration""" % svc
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-    return state.strip() == 'online'
-
-
-def create_backup_be():
-    msg = "Creating backup BE"
-    log_msg(LOG_INFO, msg)
-    boot_envs = check_output(['/usr/sbin/beadm', 'list', '-H'],
-                             stderr=PIPE)
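-    # beadm list -H prints ';'-separated fields; the third field carries the
-    # active flags, so an 'N' there marks the currently active BE.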
-    for be in boot_envs.splitlines():
-        be_fields = be.split(';')
-        if 'N' in be_fields[2]:
-            curr_be = be_fields[0]
-            backup_be = curr_be + '-backup-ovs-upgrade'
-            break
-    msg = "Active BE is: %s" % curr_be
-    log_msg(LOG_DEBUG, msg)
-    try:
-        check_call(['/usr/sbin/beadm', 'create', backup_be], stdout=PIPE,
-                   stderr=PIPE)
-        msg = "Created backup BE: " + backup_be
-        log_msg(LOG_DEBUG, msg)
-    except:
-        msg = "Backup BE already exists: " + backup_be
-        log_msg(LOG_DEBUG, msg)
-
-
-def get_node_svcs():
-    global curnode_svcs
-    for svc in ALL_SVCS:
-        if is_svc_online(svc):
-            curnode_svcs.append(svc)
-
-
-def get_default_gateways():
-    def_gws = set()
-    routes = check_output(['/usr/bin/pfexec', '/usr/bin/netstat',
-                           '-arn']).splitlines()
-    for route in routes:
-        route = route.strip()
-        elems = route.split()
-        if elems and elems[0] == 'default':
-            def_gws.add(elems[1])
-    return def_gws
-
-
-def add_uplink_to_br(uplink, bridge):
-    def add_ips_and_gws_to_port(port):
-        if ips:
-            try:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'show-if',
-                            port], stdout=PIPE, stderr=PIPE)
-            except CalledProcessError:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'create-ip',
-                            port], stdout=PIPE)
-        aconf_configured = False
-        for ip in ips:
-            msg = "Adding IP %s to %s" % (ip, port)
-            log_msg(LOG_DEBUG, msg)
-            addrtype_addr = ip.split(':')
-            addrtype, addr = addrtype_addr[0], addrtype_addr[1]
-            if addrtype == 'static':
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                            'create-addr', '-T',  addrtype, '-a', addr, port],
-                           stdout=PIPE)
-            elif addrtype == 'addrconf':
-                if not aconf_configured:
-                    check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                                'create-addr', '-T', addrtype, port],
-                               stdout=PIPE)
-                    aconf_configured = True
-            else:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                            'create-addr', '-T', addrtype, port], stdout=PIPE)
-        new_gateways = get_default_gateways()
-        removed_gateways = old_gateways - new_gateways
-        for gw in removed_gateways:
-            # simple check for IPv6 address
-            if ':' in gw:
-                continue
-            msg = "Adding default gateway %s" % gw
-            log_msg(LOG_DEBUG, msg)
-            check_call(['/usr/bin/pfexec', '/usr/sbin/route', 'add', 'default',
-                        gw], stdout=PIPE)
-
-    msg = "Migrating %s link to OVS bridge: %s" % (uplink, bridge)
-    log_msg(LOG_DEBUG, msg)
-    # Store IP and gateway info
-    ips = []
-    old_gateways = get_default_gateways()
-    try:
-        ips = check_output(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'show-addr',
-                            '-po', 'type,addr',
-                            uplink], stderr=PIPE).splitlines()
-        check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'delete-ip',
-                    uplink], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        pass
-
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/dladm', 'set-linkprop', '-p',
-                    'openvswitch=on', uplink], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = """Failed to set openvswitch property=on for %s - link is busy.
-        Follow the below steps to migrate link to OVS bridge manually.
-        1. Remove any flows, IP etc. so that link is unused.
-        2. dladm set-linkprop -p openvswitch=on %s
-        3. ovs-vsctl -- --may-exist add-port %s %s
-        4. Replumb IPs, if existed before on %s, on %s.""" % \
-            (uplink, uplink, bridge, uplink, uplink, bridge)
-        log_msg(LOG_ERROR, msg, oneliner=False)
-        return
-
-    # add uplink to bridge
-    check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', '--', '--may-exist',
-                'add-port', bridge, uplink])
-    try:
-        add_ips_and_gws_to_port(bridge)
-    except CalledProcessError as err:
-        msg = """Failed to configure the IPs(%s) on %s VNIC. Manually
-        configure the IPs and set default gateway""" % (ips, bridge)
-        log_msg(LOG_ERROR, msg)
-
-
-def get_uplink_ports_for_int_bridge():
-    int_uplinks = set(bridge_mappings.values())
-    int_uplinks.discard(external_network_datalink)
-    return int_uplinks
-
-
-def get_uplink_port_for_ext_bridge():
-    if l2type == L2_TYPE_VLAN and external_network_datalink is not None:
-        return external_network_datalink
-    return bridge_mappings.get(external_network_name)
-
-
-def determine_neutron_conn_params():
-    global neutron_conn
-    if neutron_conn:
-        return
-    config = iniparse.ConfigParser()
-    if SVC_NOVA_COMPUTE in curnode_svcs:
-        config.readfp(open(NOVA_CONF))
-        neutron_conn['username'] = config.get('neutron', 'admin_username')
-        neutron_conn['password'] = config.get('neutron', 'admin_password')
-        neutron_conn['tenant'] = config.get('neutron', 'admin_tenant_name')
-        neutron_conn['auth_url'] = \
-            config.get('keystone_authtoken', 'auth_uri')
-    else:
-        config.readfp(open(NEUTRON_CONF))
-        neutron_conn['username'] = \
-            config.get('keystone_authtoken', 'admin_user')
-        neutron_conn['password'] = \
-            config.get('keystone_authtoken', 'admin_password')
-        neutron_conn['tenant'] = \
-            config.get('keystone_authtoken', 'admin_tenant_name')
-        neutron_conn['auth_url'] = \
-            config.get('keystone_authtoken', 'auth_uri')
-
-
-def determine_external_network_name():
-    global external_network_name, external_network_vid
-    determine_neutron_conn_params()
-    nc = neutron_client.Client(username=neutron_conn['username'],
-                               password=neutron_conn['password'],
-                               tenant_name=neutron_conn['tenant'],
-                               auth_url=neutron_conn['auth_url'])
-    search_opts = {'router:external': True}
-    try:
-        external_network = nc.list_networks(**search_opts)['networks']
-    except:
-        msg = """Could not get external network information from
-        neutron-server. Make sure it is online."""
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-
-    if not external_network:
-        return
-    external_network = external_network[0]
-    nw_type = external_network['provider:network_type']
-    if nw_type == L2_TYPE_FLAT:
-        external_network_name = FLAT_PHYS_NET
-    else:
-        assert nw_type == L2_TYPE_VLAN
-        external_network_name = EXT_VLAN_PHYS_NET
-        external_network_vid = external_network['provider:segmentation_id']
-    msg = "External Network name is " + external_network_name
-    log_msg(LOG_DEBUG, msg)
-
-
-def determine_bridge_mappings():
-    global bridge_mappings, external_network_datalink
-    global_nw_uplink_map = evsutil.get_global_vlanrange_nw_uplink_map()
-    local_uplink_map = evsutil.get_local_vlanrange_uplink_map()
-    # Any local uplink ports should have the same vlan-range boundaries
-    # as the global ones. This is expected in an openstack deployment but
-    # is not enforced by evs itself. So we raise a warning if we encounter
-    # a local uplink-port for a vlan-range whose boundaries are different
-    # from any that are defined globally.
-    errs = set(local_uplink_map.keys()) - set(global_nw_uplink_map.keys())
-    if errs:
-        errs = ','.join(errs)
-        msg = """Found the following incorrect vlan_ranges that were not
-        added to bridge_mappings in ovs_neutron_plugin.ini. Please update
-        manually if necessary - %s""" % errs
-        log_msg(LOG_WARN, msg)
-    for vlanranges_str, (nw, uplink) in global_nw_uplink_map.iteritems():
-        uplink = local_uplink_map.get(vlanranges_str, uplink)
-        bridge_mappings[nw] = uplink
-    if evsutil.local_flat_nw_uplink:
-        bridge_mappings[FLAT_PHYS_NET] = evsutil.local_flat_nw_uplink
-    elif evsutil.global_flat_nw_uplink:
-        bridge_mappings[FLAT_PHYS_NET] = evsutil.global_flat_nw_uplink
-
-    external_network_datalink = bridge_mappings.get(external_network_name)
-    if external_network_datalink:
-        msg = "External Network datalink is " + external_network_datalink
-        log_msg(LOG_DEBUG, msg)
-    if bridge_mappings.values().count(external_network_datalink) > 1:
-        msg = """The external network datalink '%s' cannot be the uplink-port
-        of any physical network other than external network. Please satisfy
-        this condition before running migration.""" % external_network_datalink
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-
-    # Depending on l2type and whether l3-agent is running on this node,
-    # bridge_mappings should have the following:
-    # 1. l3-agent not in node and l2type = vxlan => no bridge mappings. This is
-    # already handled since determine_bridge_mappings() won't be called for
-    # this condition.
-    # 2. l3-agent not in node and l2type = vlan/flat => bridge mappings should
-    # not have mapping for external network.
-    # 3. l3-agent in node and l2type = vxlan => bridge mappings should have
-    # only the mapping for external network.
-    # 4. l3-agent in node and l2type = vlan/flat => bridge mappings should have
-    # all the original mappings.
-    if SVC_L3_AGENT not in curnode_svcs:
-        bridge_mappings.pop(external_network_name, None)
-    elif l2type == L2_TYPE_VXLAN:
-        bridge_mappings.clear()
-        if external_network_datalink:
-            bridge_mappings[external_network_name] = \
-                external_network_datalink
-
-
-def finish():
-    msg = "Migration Successful"
-    log_msg(LOG_INFO, msg)
-    check_call(['/usr/bin/pfexec', '/usr/sbin/svccfg', '-s',
-                SVC_NEUTRON_UPGRADE, 'setprop', 'config/evs2ovs', '=',
-                'astring:', 'done'], stdout=PIPE, stderr=PIPE)
-    check_call(['/usr/bin/pfexec', '/usr/sbin/svccfg', '-s',
-                SVC_NEUTRON_UPGRADE, 'refresh'], stdout=PIPE, stderr=PIPE)
-    msg = "Exiting..."
-    log_msg(LOG_INFO, msg)
-    sys.exit()
-
-
-def main():
-    # help text
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter, description='''
-    Migration script to migrate OpenStack Cloud based on EVS to an
-    OpenStack cloud based on OVS.
-
-    There are four steps to migration:
-        -- Populate Neutron ML2 tables
-        -- Replace EVS information in existing configuration files with OVS
-           (neutron.conf, dhcp_agent.ini, l3_agent.ini, and nova.conf)
-        -- Add OVS information to new configuration files
-           (ml2_conf.ini and ovs_neutron_agent.ini)
-        -- Clear EVS information in Zones and populate the anets for OVS
-
-    The nodes must be migrated in the following order:
-        -- controller node running neutron-server
-        -- all of the nodes running neutron-dhcp-agent or neutron-l3-agent
-        -- all of the compute nodes
-
-    It is advisable to run migration with nohup if using ssh over a link that
-    is also used by OpenStack.
-    ''')
-    parser.parse_args()
-
-    signal.signal(signal.SIGHUP, signal.SIG_IGN)
-    try:
-        out = check_output(['/usr/bin/pfexec', '/usr/bin/svcprop', '-p',
-                            'config/evs2ovs', SVC_NEUTRON_UPGRADE],
-                           stderr=PIPE)
-        if out.strip() == 'done':
-            msg = "Migration has already run on this node."
-            log_msg(LOG_INFO, msg)
-            return
-    except:
-        pass
-
-    # get the current node services
-    get_node_svcs()
-    if not curnode_svcs:
-        msg = "Nothing to migrate on this node. Quitting."
-        log_msg(LOG_INFO, msg)
-        return
-
-    msg = """The script has determined that the following services - %s - are
-    online and the system will be migrated based on these services.""" % \
-        ', '.join(curnode_svcs)
-    log_msg(LOG_INFO, msg)
-
-    # Create backup BE
-    create_backup_be()
-
-    # Even if nova-compute is the only svc on this node, make sure neutron
-    # is also installed.
-    if not set(curnode_svcs) - set([SVC_NOVA_COMPUTE]):
-        try:
-            check_call(['pkg', 'info', 'neutron'], stdout=PIPE, stderr=PIPE)
-        except:
-            msg = "cloud/openstack/neutron pkg not found."
-            log_msg(LOG_ERROR, msg)
-            msg = """cloud/openstack/neutron pkg needs to be installed on this
-            node before migration."""
-            log_msg(LOG_INFO, msg)
-            return
-
-    # If nova-compute is running on this node, we can execute everything as
-    # root. Else, this is a network node and we can execute everything as
-    # neutron user.
-    if SVC_NOVA_COMPUTE not in curnode_svcs:
-        msg = "Changing user to neutron"
-        log_msg(LOG_DEBUG, msg)
-        os.setgid(UID_NEUTRON)
-        os.setuid(UID_NEUTRON)
-
-    global evsutil
-    evsutil = EVSUtil()
-    global l2type
-    l2type = evsutil.l2type
-    msg = "l2type = %s" % l2type
-    log_msg(LOG_DEBUG, msg)
-    migr_conf_obj = ConfigEVSToOVS()
-
-    # step-0: Determine bridge_mappings and ensure external network datalink
-    # is not serving as uplink port for other physical networks. This is only
-    # required if l2-type is VLAN or FLAT or if neutron-l3-agent is running on
-    # this node.
-    if l2type != L2_TYPE_VXLAN or SVC_L3_AGENT in curnode_svcs:
-        determine_external_network_name()
-        determine_bridge_mappings()
-
-    # step-1: Populate ML2 tables and update Neutron and ML2 config files.
-    if SVC_NEUTRON_SERVER in curnode_svcs:
-        msg = "Current migration based on svc: %s" % SVC_NEUTRON_SERVER
-        log_msg(LOG_INFO, msg)
-        neutron_evs_to_ovs(migr_conf_obj)
-        # We have already enabled neutron-server. There is nothing else to do
-        # wrt the service.
-        curnode_svcs.remove(SVC_NEUTRON_SERVER)
-
-    # We don't need to do anything else if neutron-server is the only service
-    # we are migrating on this node.
-    if not curnode_svcs:
-        finish()
-
-    # step-2: add ovs integration bridge and update conf for
-    # neutron-openvswitch-agent.
-    if not is_svc_online(SVC_OVSDB_SERVER, exit_on_maintenance=True):
-        enable_svc(SVC_OVSDB_SERVER, exit_on_fail=True)
-    if not is_svc_online(SVC_VSWITCH_SERVER, exit_on_maintenance=True):
-        enable_svc(SVC_VSWITCH_SERVER, exit_on_fail=True)
-    add_ovs_bridge(OVS_INT_BRIDGE)
-    bmap_str = ''
-    if bridge_mappings:
-        for nw, uplink in bridge_mappings.iteritems():
-            bmap_str += nw + ':' + uplink + ','
-        bmap_str = bmap_str.strip(',')
-    if bmap_str:
-        msg = "bridge_mappings = " + bmap_str
-        log_msg(LOG_DEBUG, msg)
-        migr_conf_obj.update_Open_vSwitch_other_config(bmap_str)
-    migr_conf_obj.update_ovs_neutron_plugin_ini(bmap_str)
-    # we will enable the OVS agent later
-
-    # step-3: migrate the other services.
-    svc_func_map = {
-        SVC_DHCP_AGENT: dhcp_evs_to_ovs,
-        SVC_L3_AGENT: l3_evs_to_ovs,
-        SVC_NOVA_COMPUTE: nova_evs_to_ovs
-    }
-
-    for svc in curnode_svcs:
-        msg = "Current migration based on svc: %s" % svc
-        log_msg(LOG_INFO, msg)
-        svc_func_map[svc](migr_conf_obj)
-
-    # At this point we have disabled all the services that we are interested
-    # in. Now we need to add the right uplink-port to the OVS bridges.
-    if l2type == L2_TYPE_VXLAN:
-        # check if there are any left over evs-vxlan datalinks
-        output = check_output(['/usr/sbin/dladm', 'show-vxlan', '-po', 'link'],
-                              stderr=PIPE)
-        if len(output.strip().splitlines()) != 0:
-            msg = """There are other VXLAN datalinks present and as a result
-            OVS agent will go into maintenance. Please remove these datalinks
-            and clear the OVS agent service."""
-            log_msg(LOG_WARN, msg)
-    else:
-        assert l2type == L2_TYPE_VLAN or l2type == L2_TYPE_FLAT
-        int_uplinks = get_uplink_ports_for_int_bridge()
-        # add the uplink-ports to integration bridge
-        for uplink in int_uplinks:
-            add_uplink_to_br(uplink, OVS_INT_BRIDGE)
-
-    # enable all services
-    enable_svc(SVC_OVS_AGENT)
-    for svc in curnode_svcs:
-        if svc == SVC_L3_AGENT:
-            # add the port to br_ex0
-            ext_uplink = get_uplink_port_for_ext_bridge()
-            if ext_uplink:
-                add_uplink_to_br(ext_uplink, OVS_EXT_BRIDGE)
-        enable_svc(svc)
-
-    finish()
-
-
-if __name__ == "__main__":
-    main()
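
[Editorial note, not part of the changeset] The migrate-evs-to-ovs helper removed above moves IP configuration from a physical uplink onto the new OVS bridge by recording the existing addresses with ipadm show-addr, deleting the IP interface from the uplink, and re-creating the addresses on the bridge. A minimal stand-alone sketch of that pattern, assuming Solaris ipadm and pfexec are available and that the illustrative names 'uplink' and 'bridge' refer to existing datalinks:

    from subprocess import CalledProcessError, PIPE, check_call, check_output

    def migrate_addrs(uplink, bridge):
        """Re-create the uplink's IP addresses on the OVS bridge datalink."""
        try:
            # parseable output lines look like 'static:192.168.1.10/24'
            ips = check_output(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'show-addr',
                                '-po', 'type,addr', uplink]).splitlines()
            check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'delete-ip', uplink])
        except CalledProcessError:
            ips = []    # the uplink carried no IP configuration
        if not ips:
            return
        check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'create-ip', bridge],
                   stdout=PIPE)
        for entry in ips:
            addrtype, addr = entry.split(':', 1)
            if addrtype == 'static':
                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'create-addr',
                            '-T', addrtype, '-a', addr, bridge], stdout=PIPE)
            else:
                # addrconf/dhcp addresses are re-created without an explicit address
                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'create-addr',
                            '-T', addrtype, bridge], stdout=PIPE)
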
--- a/components/openstack/neutron/files/evs/plugin.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,707 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Girish Moodalbail, Oracle, Inc.
-
-import rad.client as radcli
-import rad.connect as radcon
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
-
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.rpc.handlers import metadata_rpc
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import api as db
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_attrs_db
-from neutron.db import l3_gwmode_db
-from neutron.db import models_v2
-from neutron.db import portbindings_db
-from neutron.db import quota_db
-from neutron.db import securitygroups_db
-from neutron.extensions import external_net
-from neutron.extensions import providernet
-from neutron.plugins.common import constants as svc_constants
-from neutron.plugins.ml2 import models
-
-LOG = logging.getLogger(__name__)
-# Only import the vpn server code if it exists.
-try:
-    sp = cfg.CONF.service_plugins
-    vpns = 'vpnaas'
-    if vpns in sp:
-        try:
-            from neutron_vpnaas.db.vpn import vpn_db
-            LOG.debug("Loading VPNaaS service driver.")
-        except ImportError:
-            pass
-    else:
-        LOG.debug("vpnaas service_plugin not configured")
-except:
-    pass
-
-evs_controller_opts = [
-    cfg.StrOpt('evs_controller', default='ssh://evsuser@localhost',
-               help=_("An URI that specifies an EVS controller"))
-]
-
-cfg.CONF.register_opts(evs_controller_opts, "EVS")
-
-
-class EVSControllerError(exceptions.NeutronException):
-    message = _("EVS controller: %(errmsg)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
-
-
-class EVSOpNotSupported(exceptions.NeutronException):
-    message = _("Operation not supported by EVS plugin: %(opname)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSOpNotSupported, self).__init__(opname=evs_errmsg)
-
-
-class EVSNotFound(exceptions.NeutronException):
-    message = _("Network %(net_id)s could not be found in EVS")
-
-
-class EVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
-                         agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                         external_net_db.External_net_db_mixin,
-                         l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                         l3_gwmode_db.L3_NAT_db_mixin):
-    """Implements v2 Neutron Plug-in API specification.
-
-    All the neutron API calls to create/delete/retrieve Network/Subnet/Port
-    are forwarded to EVS controller through Solaris RAD. The RAD connection
-    to EVS Controller is over SSH. In order that this plugin can communicate
-    with EVS Controller non-interactively and securely, one should setup SSH
-    authentication with pre-shared keys between the host running neutron-server
-    and the host running EVS controller.
-
-    The following table maps OpenStack Neutron resources and attributes to
-    Solaris Elastic Virtual Switch resources and attributes
-
-    |---------------------+------------------+------------------------------|
-    | OpenStack Neutron   | Solaris EVS      | Comments                     |
-    |---------------------+------------------+------------------------------|
-    | Network             | EVS              | Represents an isolated L2    |
-    | -- name             | -- name          | segment; implemented either  |
-    | -- id               | -- uuid          | through VLANs or VXLANs      |
-    | -- tenant_id        | -- tenant        |                              |
-    | -- shared           | Always False     |                              |
-    | -- admin_state_up   | Always True      |                              |
-    | -- status           | Always ACTIVE    |                              |
-    | -- provider:        |                  |                              |
-    |    network_type     |  -- l2-type      | (either VLAN or VXLAN)       |
-    | -- provider:        |                  |                              |
-    |    segmentation_id  |  -- vlanid/vni   |                              |
-    |                     |                  |                              |
-    |                     |                  |                              |
-    | Subnet              | IPnet            | An IP network represents     |
-    | -- name             | -- name          | a block of either IPv4       |
-    | -- id               | -- uuid          | or IPv6 addresses (subnet)   |
-    | -- network_id       | -- evs           | along with a default router  |
-    | -- tenant_id        | -- tenant        | for the block                |
-    | -- cidr             | -- subnet        |                              |
-    | -- gateway_ip       | -- defrouter     |                              |
-    | -- allocation_pools | -- start/stop    |                              |
-    | -- dns_nameservers  | -- OpenStack:\   |                              |
-    |                     | dns_nameservers  |                              |
-    | -- host_routes      | -- OpenStack:\   |                              |
-    |                     | host_routes      |                              |
-    | -- enable_dhcp      | -- OpenStack:\   |                              |
-    |                     | enable_dhcp      |                              |
-    | -- shared           | Always False     |                              |
-    |                     |                  |                              |
-    | Port                | VPort            | A VPort represents the point |
-    | -- name             | -- name          | of attachment between the    |
-    | -- id               | -- uuid          | VNIC and an EVS. It          |
-    | -- network_id       | -- evs           | encapsulates various network |
-    | -- tenant_id        | -- tenant        | configuration parameters (   |
-    | -- status           | -- status        | MAC addresses, IP addresses, |
-    | -- mac_address      | -- macaddr       | and SLAs)                    |
-    | -- fixed_ips        | -- ipaddr        |                              |
-    | -- device_id        | -- OpenStack:\   |                              |
-    |                     |    device_id     |                              |
-    | -- device_owner     | -- OpenStack:\   |                              |
-    |                     |    device_owner  |                              |
-    | -- security_groups  | -- Not Supported |                              |
-    | -- admin_state_up   | Always UP        |                              |
-    |---------------------+------------------+------------------------------|
-    """
-
-    _supported_extension_aliases = ["provider", "external-net", "router",
-                                    "ext-gw-mode", "quotas", "agent",
-                                    "l3_agent_scheduler",
-                                    "dhcp_agent_scheduler"]
-
-    def __init__(self):
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
-        self.router_scheduler = importutils.import_object(
-            cfg.CONF.router_scheduler_driver
-        )
-        self._setup_rpc()
-        self._rad_connection = None
-
-    @property
-    def rad_connection(self):
-        # Since there is no connect_uri() yet, we need to do
-        # parsing of ssh://user@hostname ourselves
-        suh = cfg.CONF.EVS.evs_controller.split('://')
-        if len(suh) != 2 or suh[0] != 'ssh' or not suh[1].strip():
-            raise SystemExit(_("Specified evs_controller is invalid"))
-        uh = suh[1].split('@')
-        if len(uh) != 2 or not uh[0].strip() or not uh[1].strip():
-            raise SystemExit(_("'user' and 'hostname' need to be specified "
-                               "for evs_controller"))
-
-        if (self._rad_connection is not None and
-                self._rad_connection._closed is None):
-            return self._rad_connection
-
-        LOG.debug(_("Connecting to EVS Controller at %s as %s") %
-                  (uh[1], uh[0]))
-        self._rad_connection = radcon.connect_ssh(uh[1], user=uh[0])
-        return self._rad_connection
-
-    def _setup_rpc(self):
-        # RPC support
-        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
-                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
-        self.conn = n_rpc.create_connection(new=True)
-        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
-                          l3_rpc.L3RpcCallback(),
-                          agents_db.AgentExtRpcCallback(),
-                          metadata_rpc.MetadataRpcCallback()]
-        for svc_topic in self.service_topics.values():
-            self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
-        # Consume from all consumers in a thread
-        self.conn.consume_in_threads()
-        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
-
-        # needed by AgentSchedulerDbMixin()
-        self.agent_notifiers[constants.AGENT_TYPE_DHCP] = \
-            self.dhcp_agent_notifier
-        self.agent_notifiers[constants.AGENT_TYPE_L3] = \
-            self.l3_agent_notifier
-
-    @property
-    def supported_extension_aliases(self):
-        return self._supported_extension_aliases
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_addIPnet(self, tenantname, evsname, ipnetname,
-                                 propstr):
-        LOG.debug(_("Adding IPnet: %s with properties: %s for tenant: %s "
-                    "and for evs: %s") %
-                  (ipnetname, propstr, tenantname, evsname))
-
-        pat = radcli.ADRGlobPattern({'name': evsname,
-                                     'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            ipnet = evs.addIPnet(propstr, ipnetname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return ipnet
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_updateIPnet(self, ipnetuuid, propstr):
-        LOG.debug(_("Updating IPnet with id: %s with property string: %s") %
-                  (ipnetuuid, propstr))
-        pat = radcli.ADRGlobPattern({'uuid': ipnetuuid})
-        try:
-            ipnetlist = self.rad_connection.list_objects(evsbind.IPnet(), pat)
-            if not ipnetlist:
-                return
-            assert len(ipnetlist) == 1
-            ipnet = self.rad_connection.get_object(ipnetlist[0])
-            ipnet.setProperty(propstr)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def _subnet_pool_to_evs_pool(self, subnet):
-        poolstr = ""
-        # obtain the optional allocation pool
-        pools = subnet.get('allocation_pools')
-        if not pools or pools is attributes.ATTR_NOT_SPECIFIED:
-            return poolstr
-
-        for pool in pools:
-            if poolstr:
-                poolstr += ","
-            # if start and end address is same, EVS expects the address
-            # to be provided as-is instead of x.x.x.x-x.x.x.x
-            if pool['start'] == pool['end']:
-                poolstr += pool['start']
-            else:
-                poolstr += "%s-%s" % (pool['start'], pool['end'])
-        return poolstr
-
-    def create_subnet(self, context, subnet):
-        """Creates a subnet(IPnet) for a given network(EVS).
-
-         An IP network represents a block of either IPv4 or IPv6 addresses
-         (i.e., subnet) along with a default router for the block. Only one
-         IPnet can be associated with an EVS. All the zones/VNICs that
-         connect to the EVS, through a VPort, will get an IP address from the
-         IPnet associated with the EVS.
-        """
-
-        if (subnet['subnet']['host_routes'] is not
-                attributes.ATTR_NOT_SPECIFIED):
-            raise EVSOpNotSupported(_("setting --host-route for a subnet "
-                                      "not supported"))
-
-        poolstr = self._subnet_pool_to_evs_pool(subnet['subnet'])
-
-        with context.session.begin(subtransactions=True):
-            # create the subnet in the DB
-            db_subnet = super(EVSNeutronPluginV2, self).create_subnet(context,
-                                                                      subnet)
-            ipnetname = db_subnet['name']
-            if not ipnetname:
-                ipnetname = None
-            evsname = db_subnet['network_id']
-            tenantname = db_subnet['tenant_id']
-            proplist = ['subnet=%s' % db_subnet['cidr']]
-            defrouter = db_subnet['gateway_ip']
-            if not defrouter:
-                defrouter = '0.0.0.0' if db_subnet['ip_version'] == 4 else '::'
-            proplist.append('defrouter=%s' % defrouter)
-            proplist.append('uuid=%s' % db_subnet['id'])
-            if poolstr:
-                proplist.append('pool=%s' % (poolstr))
-            self._evs_controller_addIPnet(tenantname, evsname, ipnetname,
-                                          ",".join(proplist))
-
-        return db_subnet
-
-    def update_subnet(self, context, id, subnet):
-        LOG.debug(_("Updating Subnet: %s with %s") % (id, subnet))
-        if (set(subnet['subnet'].keys()) - set(('enable_dhcp',
-                                                'allocation_pools',
-                                                'dns_nameservers',
-                                                'ipv6_address_mode',
-                                                'ipv6_ra_mode'))):
-                raise EVSOpNotSupported(_("only following subnet attributes "
-                                          "enable-dhcp, allocation-pool, "
-                                          "dns-nameserver, ipv6-address-mode, "
-                                          "and ipv6-ra-mode can be updated"))
-
-        poolstr = self._subnet_pool_to_evs_pool(subnet['subnet'])
-
-        with context.session.begin(subtransactions=True):
-            # update subnet in DB
-            retval = super(EVSNeutronPluginV2, self).\
-                update_subnet(context, id, subnet)
-            # update EVS IPnet with allocation pool info
-            if poolstr:
-                self._evs_controller_updateIPnet(id, "pool=%s" % poolstr)
-
-        return retval
-
-    def get_subnet(self, context, id, fields=None):
-        LOG.debug(_("Getting subnet: %s"), id)
-        subnet = super(EVSNeutronPluginV2, self).get_subnet(context, id, None)
-        return self._fields(subnet, fields)
-
-    def get_subnets(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None, page_reverse=False):
-        subnets = super(EVSNeutronPluginV2, self).\
-            get_subnets(context, filters, None, sorts, limit, marker,
-                        page_reverse)
-        return [self._fields(subnet, fields) for subnet in subnets]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_removeIPnet(self, tenantname, evsname, ipnetuuid,
-                                    auto_created_ports):
-        LOG.debug(_("Removing IPnet with id: %s for tenant: %s for evs: %s") %
-                  (ipnetuuid, tenantname, evsname))
-        pat = radcli.ADRGlobPattern({'name': evsname, 'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            if auto_created_ports:
-                LOG.debug(_("Need to remove following ports %s before "
-                            "removing the IPnet") % (auto_created_ports))
-                for port in auto_created_ports:
-                    try:
-                        evs.removeVPort(port['id'], "force=yes")
-                    except radcli.ObjectError as oe:
-                        # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
-                        if oe.get_payload().err == 43:
-                            LOG.debug(_("VPort %s could not be found") %
-                                      (port['id']))
-            evs.removeIPnet(ipnetuuid)
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '42' corresponds to EVS' EVS_ENOENT_IPNET error code
-            if oe.get_payload() is None or oe.get_payload().err == 42:
-                # EVS doesn't have that IPnet, return success to delete
-                # the IPnet from Neutron DB.
-                LOG.debug(_("IPnet could not be found in EVS."))
-                return
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_subnet(self, context, id):
-        subnet = self.get_subnet(context, id)
-        if not subnet:
-            return
-
-        with context.session.begin(subtransactions=True):
-            # get a list of ports automatically created by Neutron
-            auto_created_ports = context.session.query(models_v2.Port).\
-                filter(models_v2.Port.device_owner.
-                       in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)).all()
-            # delete subnet in DB
-            super(EVSNeutronPluginV2, self).delete_subnet(context, id)
-            self._evs_controller_removeIPnet(subnet['tenant_id'],
-                                             subnet['network_id'], id,
-                                             auto_created_ports)
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_createEVS(self, tenantname, evsname, propstr):
-        LOG.debug(_("Adding EVS: %s with properties: %s for tenant: %s") %
-                  (evsname, propstr, tenantname))
-        try:
-            evs = self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                createEVS(propstr, tenantname, evsname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return evs
-
-    def _extend_network_dict(self, network, evs):
-        for prop in evs.props:
-            if prop.name == 'l2-type':
-                network[providernet.NETWORK_TYPE] = prop.value
-            elif prop.name == 'vlanid' or prop.name == 'vni':
-                network[providernet.SEGMENTATION_ID] = int(prop.value)
-
-    def create_network(self, context, network):
-        """Creates a network(EVS) for a given tenant.
-
-        An Elastic Virtual Switch (EVS) is a virtual switch that spans
-        one or more servers (physical machines). It represents an isolated L2
-        segment, and the isolation is implemented either through VLANs or
-        VXLANs. An EVS provides network connectivity between the Virtual
-        Machines connected to it. There are two main resources associated with
-        an EVS: IPnet and VPort.
-        """
-
-        if network['network']['admin_state_up'] is False:
-            raise EVSOpNotSupported(_("setting admin_state_up=False for a "
-                                      "network not supported"))
-
-        if network['network']['shared'] is True:
-            raise EVSOpNotSupported(_("setting shared=True for a "
-                                      "network not supported"))
-
-        evsname = network['network']['name']
-        if not evsname:
-            evsname = None
-
-        tenantname = self._get_tenant_id_for_create(context,
-                                                    network['network'])
-        proplist = []
-        network_type = network['network'][providernet.NETWORK_TYPE]
-        if attributes.is_attr_set(network_type):
-            proplist.append('l2-type=%s' % network_type)
-
-        segment_id = network['network'][providernet.SEGMENTATION_ID]
-        if attributes.is_attr_set(segment_id):
-            if (not attributes.is_attr_set(network_type) or
-                    len(network_type) == 0):
-                raise EVSControllerError(_("provider:network_type must be "
-                                           "specified when provider:"
-                                           "segmentation_id is provided"))
-
-            if network_type == 'vxlan':
-                proplist.append('vni=%d' % segment_id)
-            elif network_type == 'vlan':
-                proplist.append('vlanid=%d' % segment_id)
-            else:
-                raise EVSControllerError(_("specified "
-                                           "provider:network_type '%s' not "
-                                           "supported") % network_type)
-
-        propstr = None
-        if proplist:
-            propstr = ",".join(proplist)
-
-        with context.session.begin(subtransactions=True):
-            # create the network in DB
-            net = super(EVSNeutronPluginV2, self).create_network(context,
-                                                                 network)
-            self._process_l3_create(context, net, network['network'])
-            # if --router:external is not set, the above function does
-            # not update net with router:external set to False
-            if net.get(external_net.EXTERNAL) is None:
-                net[external_net.EXTERNAL] = False
-
-            # create EVS on the EVS controller
-            if propstr:
-                propstr += ",uuid=%s" % net['id']
-            else:
-                propstr = "uuid=%s" % net['id']
-            evs = self._evs_controller_createEVS(tenantname, evsname, propstr)
-
-            # add provider information into net
-            self._extend_network_dict(net, evs)
-
-        return net
-
-    def update_network(self, context, id, network):
-        raise EVSOpNotSupported(_("net-update"))
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_getEVS(self, evsuuid):
-        LOG.debug(_("Getting EVS: %s"), evsuuid)
-        try:
-            evslist = self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                getEVSInfo('evs=%s' % evsuuid)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        if not evslist:
-            LOG.error(_("EVS framework does not have Neutron network "
-                        "'%s' defined"), evsuuid)
-            return None
-        return evslist[0]
-
-    def get_network(self, context, id, fields=None):
-        with context.session.begin(subtransactions=True):
-            net = super(EVSNeutronPluginV2, self).get_network(context,
-                                                              id, None)
-            # call EVS controller to get provider network information
-            evs = self._evs_controller_getEVS(net['id'])
-            if evs:
-                self._extend_network_dict(net, evs)
-        return self._fields(net, fields)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None, page_reverse=False):
-
-        with context.session.begin(subtransactions=True):
-            nets = super(EVSNeutronPluginV2, self).\
-                get_networks(context, filters, None, sorts, limit, marker,
-                             page_reverse)
-            for net in nets:
-                evs = self._evs_controller_getEVS(net['id'])
-                if evs:
-                    self._extend_network_dict(net, evs)
-        return [self._fields(net, fields) for net in nets]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_deleteEVS(self, tenantname, evsuuid):
-        LOG.debug(_("Removing EVS with id: %s for tenant: %s") %
-                  (evsuuid, tenantname))
-        try:
-            self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                deleteEVS(evsuuid, tenantname)
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '41' corresponds to EVS' EVS_ENOENT_EVS error code
-            if oe.get_payload() is None or oe.get_payload().err == 41:
-                # EVS doesn't have that EVS, return success to delete
-                # the EVS from Neutron DB.
-                LOG.debug(_("EVS could not be found in EVS backend."))
-                return
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_network(self, context, id):
-        with context.session.begin(subtransactions=True):
-            network = self._get_network(context, id)
-
-            qry_network_ports = context.session.query(models_v2.Port).\
-                filter_by(network_id=id).filter(models_v2.Port.device_owner.
-                                                in_(db_base_plugin_v2.
-                                                    AUTO_DELETE_PORT_OWNERS))
-
-            auto_created_ports = qry_network_ports.all()
-            qry_network_ports.delete(synchronize_session=False)
-
-            port_in_use = context.session.query(models_v2.Port).filter_by(
-                network_id=id).first()
-
-            if port_in_use:
-                raise exceptions.NetworkInUse(net_id=id)
-
-            # clean up subnets
-            subnets = self._get_subnets_by_network(context, id)
-            for subnet in subnets:
-                super(EVSNeutronPluginV2, self).delete_subnet(context,
-                                                              subnet['id'])
-                self._evs_controller_removeIPnet(subnet['tenant_id'],
-                                                 subnet['network_id'],
-                                                 subnet['id'],
-                                                 auto_created_ports)
-
-            context.session.delete(network)
-            self._evs_controller_deleteEVS(network['tenant_id'], id)
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_addVPort(self, tenantname, evsname, vportname,
-                                 propstr):
-        LOG.debug(_("Adding VPort: %s with properties: %s for tenant: %s "
-                    "and for evs: %s") %
-                  (vportname, propstr, tenantname, evsname))
-
-        try:
-            pat = radcli.ADRGlobPattern({'name': evsname,
-                                         'tenant': tenantname})
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            vport = evs.addVPort(propstr, vportname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return vport
-
-    @oslo_db_api.wrap_db_retry(max_retries=db.MAX_RETRIES,
-                               retry_on_request=True,
-                               retry_on_deadlock=True)
-    def create_port(self, context, port):
-        """Creates a port(VPort) for a given network(EVS).
-
-         A VPort represents the point of attachment between the VNIC and an
-         EVS. It encapsulates various network configuration parameters such as
-             -- SLAs (maxbw, cos, and priority)
-             -- IP address and
-             -- MAC address, et al
-         This configuration is inherited by the VNIC when it connects to the
-         VPort.
-        """
-        if port['port']['admin_state_up'] is False:
-            raise EVSOpNotSupported(_("setting admin_state_up=False for a "
-                                      "port not supported"))
-
-        with context.session.begin(subtransactions=True):
-            # for external gateway ports and floating ips, tenant_id
-            # is not set, but EVS does not like it.
-            tenant_id = self._get_tenant_id_for_create(context, port['port'])
-            if not tenant_id:
-                network = self._get_network(context,
-                                            port['port']['network_id'])
-                port['port']['tenant_id'] = network['tenant_id']
-            # create the port in the DB
-            db_port = super(EVSNeutronPluginV2, self).create_port(context,
-                                                                  port)
-            # Neutron allows creating a port on a network that doesn't
-            # yet have a subnet associated with it; however, EVS doesn't
-            # support this.
-            if not db_port['fixed_ips']:
-                raise EVSOpNotSupported(_("creating a port on a network that "
-                                          "does not yet have subnet "
-                                          "associated with it is not "
-                                          "supported"))
-            tenantname = db_port['tenant_id']
-            vportname = db_port['name']
-            if not vportname:
-                vportname = None
-            evs_id = db_port['network_id']
-            proplist = ['macaddr=%s' % db_port['mac_address']]
-            proplist.append('ipaddr=%s' %
-                            db_port['fixed_ips'][0].get('ip_address'))
-            proplist.append('uuid=%s' % db_port['id'])
-
-            self._evs_controller_addVPort(tenantname, evs_id, vportname,
-                                          ",".join(proplist))
-        return db_port
-
-    def update_port(self, context, id, port):
-        # EVS does not allow updating certain attributes, so check for it
-        state = port['port'].get('admin_state_up')
-        if state is False:
-            raise EVSOpNotSupported(_("updating port's admin_state_up to "
-                                      "False is not supported"))
-
-        # Get the original port and fail if any attempt is being made
-        # to change fixed_ips of the port since EVS doesn't support it
-        original_port = super(EVSNeutronPluginV2, self).get_port(context, id)
-        original_ips = original_port['fixed_ips']
-        update_ips = port['port'].get('fixed_ips')
-        if (update_ips and
-            (len(update_ips) != 1 or
-             update_ips[0]['subnet_id'] != original_ips[0]['subnet_id'] or
-             update_ips[0]['ip_address'] != original_ips[0]['ip_address'])):
-            raise EVSOpNotSupported(_("updating port's fixed_ips "
-                                      "is not supported"))
-        LOG.debug(_("Updating port %s with %s") % (id, port))
-        db_port = super(EVSNeutronPluginV2, self).update_port(context,
-                                                              id, port)
-        return db_port
-
-    def get_port(self, context, id, fields=None):
-        LOG.debug(_("Getting port: %s"), id)
-        port = super(EVSNeutronPluginV2, self).get_port(context, id, None)
-        return self._fields(port, fields)
-
-    def get_ports(self, context, filters=None, fields=None,
-                  sorts=None, limit=None, marker=None, page_reverse=False):
-        ports = super(EVSNeutronPluginV2, self).\
-            get_ports(context, filters, None, sorts, limit, marker,
-                      page_reverse)
-        return [self._fields(port, fields) for port in ports]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_removeVPort(self, tenantname, evsname, vportuuid):
-        LOG.debug(_("Removing VPort with id: %s for tenant: %s for evs: %s") %
-                  (vportuuid, tenantname, evsname))
-        pat = radcli.ADRGlobPattern({'name': evsname,
-                                     'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            evs.removeVPort(vportuuid, "force=yes")
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
-            if oe.get_payload() is None or oe.get_payload().err == 43:
-                # EVS doesn't have that VPort, return success to delete
-                # the VPort from Neutron DB.
-                LOG.debug(_("VPort could not be found in EVS."))
-            else:
-                raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_port(self, context, id, l3_port_check=True):
-        if l3_port_check:
-            self.prevent_l3_port_deletion(context, id)
-        self.disassociate_floatingips(context, id)
-        port = self.get_port(context, id)
-        if not port:
-            return
-        with context.session.begin(subtransactions=True):
-            super(EVSNeutronPluginV2, self).delete_port(context, id)
-            self._evs_controller_removeVPort(port['tenant_id'],
-                                             port['network_id'],
-                                             port['id'])
--- a/components/openstack/neutron/files/evs_plugin.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-[EVS]
-# An URI that specifies an EVS controller. It is of the form
-# ssh://user@hostname, where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://evsuser@localhost.
-# evs_controller = ssh://evsuser@localhost
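
[Editorial note, not part of the changeset] The evs_controller URI documented above is validated by the rad_connection property of the plugin removed earlier in this changeset. A minimal stand-alone sketch of that ssh://user@hostname parsing (the function name is illustrative):

    def parse_evs_controller(uri):
        """Return (user, hostname) from an 'ssh://user@hostname' URI."""
        scheme_rest = uri.split('://')
        if len(scheme_rest) != 2 or scheme_rest[0] != 'ssh' or not scheme_rest[1].strip():
            raise ValueError("Specified evs_controller is invalid")
        user_host = scheme_rest[1].split('@')
        if len(user_host) != 2 or not user_host[0].strip() or not user_host[1].strip():
            raise ValueError("'user' and 'hostname' need to be specified "
                             "for evs_controller")
        return user_host[0], user_host[1]

    # parse_evs_controller('ssh://evsuser@localhost') == ('evsuser', 'localhost')
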
--- a/components/openstack/neutron/files/l3_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/l3_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,164 +1,285 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
+
+#
+# From neutron.base.agent
+#
 
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
+# Name of Open vSwitch bridge to use (string value)
+ovs_integration_bridge = br_int0
 
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Interface driver for Solaris Open vSwitch
-# interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
 
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br_int0
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
 
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Interface driver for Solaris Elastic Virtual Switch (EVS)
-interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
+# The driver used to manage the virtual interface. (string value)
+interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
 
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces). This option is deprecated and
-# will be removed in a future release, at which point the old behavior of
-# use_namespaces = True will be enforced.
-use_namespaces = False
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From neutron.l3.agent
+#
 
-# If use_namespaces is set as False then the agent can only configure one
-# router.
-# This is done by setting the specific router_id.
-# router_id =
+# The working mode for the agent. Allowed modes are: 'legacy' - this preserves
+# the existing behavior where the L3 agent is deployed on a centralized
+# networking node to provide L3 services like DNAT, and SNAT. Use this mode if
+# you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality and
+# must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this
+# enables centralized SNAT support in conjunction with DVR.  This mode must be
+# used for an L3 agent running on a centralized node (or in single-host
+# deployments, e.g. devstack) (string value)
+# Allowed values: dvr, dvr_snat, legacy
+#agent_mode = legacy
 
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow L3 agent support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
+# TCP Port used by Neutron metadata namespace proxy. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_port = 9697
 
-# With IPv6, the network used for the external gateway does not need
-# to have an associated subnet, since the automatically assigned
-# link-local address (LLA) can be used. However, an IPv6 gateway address
-# is needed for use as the next-hop for the default route. If no IPv6
-# gateway address is configured here, (and only then) the neutron router
-# will be configured to get its default route from router advertisements (RAs)
-# from the upstream router; in which case the upstream router must also be
-# configured to send these RAs.
-# The ipv6_gateway, when configured, should be the LLA of the interface
-# on the upstream router. If a next-hop using a global unique address (GUA)
-# is desired, it needs to be done via a subnet allocated to the network
-# and not through this parameter.
-# ipv6_gateway =
+# Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the
+# feature is disabled (integer value)
+#send_arp_for_ha = 3
+
+# If non-empty, the l3 agent can only configure a router that has the matching
+# router ID. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#router_id =
 
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured.  This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-# handle_internal_only_routers = True
+# Indicates that this L3 agent should also handle routers that do not have an
+# external network gateway configured. This option should be True only for a
+# single agent in a Neutron deployment, and may be False for all agents if all
+# routers must have an external network gateway. (boolean value)
+#handle_internal_only_routers = true
 
-# Name of bridge used for external network traffic. This should be set to
-# empty value for the linux bridge. when this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge =
+# When external_network_bridge is set, each L3 agent can be associated with no
+# more than one external network. This value should be set to the UUID of that
+# external network. To allow L3 agent support multiple external networks, both
+# the external_network_bridge and gateway_external_network_id must be left
+# empty. (string value)
+#gateway_external_network_id =
 
-# TCP Port used by Neutron metadata server
-# metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-# send_arp_for_ha = 3
+# With IPv6, the network used for the external gateway does not need to have an
+# associated subnet, since the automatically assigned link-local address (LLA)
+# can be used. However, an IPv6 gateway address is needed for use as the next-
+# hop for the default route. If no IPv6 gateway address is configured here,
+# (and only then) the neutron router will be configured to get its default
+# route from router advertisements (RAs) from the upstream router; in which
+# case the upstream router must also be configured to send these RAs. The
+# ipv6_gateway, when configured, should be the LLA of the interface on the
+# upstream router. If a next-hop using a global unique address (GUA) is
+# desired, it needs to be done via a subnet allocated to the network and not
+# through this parameter.  (string value)
+#ipv6_gateway =
 
-# seconds between re-sync routers' data if needed
-# periodic_interval = 40
-
-# seconds to start to sync routers' data after
-# starting agent
-# periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
+# Driver used for ipv6 prefix delegation. This needs to be an entry point
+# defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for
+# entry points included with the neutron source. (string value)
+#prefix_delegation_driver = dibbler
 
-# Iptables mangle mark used to mark metadata valid requests
-# metadata_access_mark = 0x1
+# Allow running metadata proxy. (boolean value)
+#enable_metadata_proxy = true
 
-# Iptables mangle mark used to mark ingress from external network
-# external_ingress_mark = 0x2
+# Iptables mangle mark used to mark metadata valid requests. This mark will be
+# masked with 0xffff so that only the lower 16 bits will be used. (string
+# value)
+#metadata_access_mark = 0x1
 
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
+# Iptables mangle mark used to mark ingress from external network. This mark
+# will be masked with 0xffff so that only the lower 16 bits will be used.
+# (string value)
+#external_ingress_mark = 0x2
 
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux Bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network. (string value)
+external_network_bridge = br_ex0
+
+# Seconds between running periodic tasks (integer value)
+#periodic_interval = 40
 
-# The working mode for the agent. Allowed values are:
-# - legacy: this preserves the existing behavior where the L3 agent is
-#   deployed on a centralized networking node to provide L3 services
-#   like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
-# - dvr: this mode enables DVR functionality, and must be used for an L3
-#   agent that runs on a compute host.
-# - dvr_snat: this enables centralized SNAT support in conjunction with
-#   DVR. This mode must be used for an L3 agent running on a centralized
-#   node (or in single-host deployments, e.g. devstack).
-# agent_mode = legacy
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+
+# Number of RPC worker processes for service (integer value)
+#rpc_workers = 1
 
-# Location to store keepalived and all HA configurations
-# ha_confs_path = $state_path/ha_confs
+# Number of RPC worker processes dedicated to state reports queue (integer
+# value)
+#rpc_state_report_workers = 1
 
-# VRRP authentication type AH/PASS
-# ha_vrrp_auth_type = PASS
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+# Location to store keepalived/conntrackd config files (string value)
+#ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type (string value)
+# Allowed values: AH, PASS
+#ha_vrrp_auth_type = PASS
 
-# VRRP authentication password
-# ha_vrrp_auth_password =
+# VRRP authentication password (string value)
+#ha_vrrp_auth_password = <None>
 
-# The advertisement interval in seconds
-# ha_vrrp_advert_int = 2
+# The advertisement interval in seconds (integer value)
+#ha_vrrp_advert_int = 2
 
-# Name of the datalink that connects to an external network. By default it's
-# set to net0.
-# external_network_datalink = net0
+# Service to handle DHCPv6 Prefix delegation. (string value)
+#pd_dhcp_driver = dibbler
 
-# Allow forwarding of packets between tenant's networks
-# allow_forwarding_between_networks = False
+# Location to store IPv6 RA config files (string value)
+#ra_confs = $state_path/ra
+
+# MinRtrAdvInterval setting for radvd.conf (integer value)
+#min_rtr_adv_interval = 30
 
-# An URI that specifies an EVS controller. It is of the form
-# ssh://user@hostname, where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://evsuser@localhost.
-# evs_controller = ssh://evsuser@localhost
+# MaxRtrAdvInterval setting for radvd.conf (integer value)
+#max_rtr_adv_interval = 100
 
-# Admin username
+# Allow forwarding of packets between tenant's networks (boolean value)
+#allow_forwarding_between_networks = false
+
+# Admin username (string value)
 admin_user = %SERVICE_USER%
 
-# Admin password
+# Admin password (string value)
 admin_password = %SERVICE_PASSWORD%
 
-# Admin tenant name
-admin_tenant_name = %SERVICE_PASSWORD%
+# Admin tenant name (string value)
+admin_tenant_name = %SERVICE_TENANT_NAME%
 
-# Authentication URL
+# Authentication URL (string value)
 auth_url = http://localhost:5000/v2.0
 
-# The type of authentication to use
-# auth_strategy = keystone
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+
+# Authentication region (string value)
+#auth_region = <None>
+
+# Network service endpoint type to pull from the keystone catalog (string
+# value)
+#endpoint_type = publicURL
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
 
-# Authentication region
-# auth_region = <None>
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
 
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = publicURL
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- a/components/openstack/neutron/files/metadata_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/metadata_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,68 +1,178 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = True
+
+#
+# From neutron.metadata.agent
+#
 
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://localhost:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
 
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
 
-# IP address used by Nova metadata server
-# nova_metadata_ip = 127.0.0.1
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
 
-# TCP Port used by Nova metadata server
-# nova_metadata_port = 8775
-
-# Which protocol to use for requests to Nova metadata server, http or https
-# nova_metadata_protocol = http
+# Certificate Authority public key (CA cert) file for ssl (string value)
+#auth_ca_cert = <None>
 
-# Whether insecure SSL connection should be accepted for Nova metadata server
-# requests
-# nova_metadata_insecure = False
+# IP address used by Nova metadata server. (string value)
+#nova_metadata_ip = 127.0.0.1
 
-# Client certificate for nova api, needed when nova api requires client
-# certificates
-# nova_client_cert =
-
-# Private key for nova client certificate
-# nova_client_priv_key =
+# TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
 
 # When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing.  You may select any string for a secret,
+# shared secret to prevent spoofing. You may select any string for a secret,
 # but it must match here and in the configuration used by the Nova Metadata
 # Server. NOTE: Nova uses the same config key, but in [neutron] section.
-# metadata_proxy_shared_secret =
+# (string value)
+#metadata_proxy_shared_secret =
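+#
+# For example (illustrative value only), the same string must appear in this
+# file and in the [neutron] section of nova.conf:
+#   metadata_proxy_shared_secret = s3cr3t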
+
+# Protocol to access nova metadata, http or https (string value)
+# Allowed values: http, https
+#nova_metadata_protocol = http
 
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
+# Allow to perform insecure SSL (https) requests to nova metadata (boolean
+# value)
+#nova_metadata_insecure = false
+
+# Client certificate for nova metadata api server. (string value)
+#nova_client_cert =
 
-# Metadata Proxy UNIX domain socket mode, 3 values allowed:
-# 'deduce': deduce mode from metadata_proxy_user/group values,
-# 'user': set metadata proxy socket mode to 0o644, to use when
-# metadata_proxy_user is agent effective user or root,
-# 'group': set metadata proxy socket mode to 0o664, to use when
-# metadata_proxy_group is agent effective group,
-# 'all': set metadata proxy socket mode to 0o666, to use otherwise.
-# metadata_proxy_socket_mode = deduce
+# Private key of client certificate. (string value)
+#nova_client_priv_key =
 
-# Number of separate worker processes for metadata server. Defaults to
-# half the number of CPU cores
+# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce
+# mode from metadata_proxy_user/group values, 'user': set metadata proxy socket
+# mode to 0o644, to use when metadata_proxy_user is agent effective user or
+# root, 'group': set metadata proxy socket mode to 0o664, to use when
+# metadata_proxy_group is agent effective group or root, 'all': set metadata
+# proxy socket mode to 0o666, to use otherwise. (string value)
+# Allowed values: deduce, user, group, all
+#metadata_proxy_socket_mode = deduce
+
+# Number of separate worker processes for metadata server (defaults to half of
+# the number of CPUs) (integer value)
 metadata_workers = 1
 
 # Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 4096
+# (integer value)
+#metadata_backlog = 4096
+
+# URL to connect to the cache back end. (string value)
+#cache_url = memory://
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# URL to connect to the cache backend.
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url = memory://?default_ttl=5
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.metadata.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/metering_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,112 @@
+[DEFAULT]
+
+#
+# From neutron.metering.agent
+#
+
+# Metering driver (string value)
+#driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
+
+# Interval between two metering measures (integer value)
+#measure_interval = 30
+
+# Interval between two metering reports (integer value)
+#report_interval = 300
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
--- a/components/openstack/neutron/files/ml2_conf.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = flat,vlan,vxlan
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# In the case of Solaris, 'local' can be achieved by using 'flat' network
-# type and Solaris Etherstubs, so 'local' network type as such is not
-# supported.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = vlan
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = openvswitch
-
-# (ListOpt) Ordered list of extension driver entrypoints
-# to be loaded from the neutron.ml2.extension_drivers namespace.
-# extension_drivers =
-# Example: extension_drivers = anewextensiondriver
-
-# =========== items for MTU selection and advertisement =============
-# (IntOpt) Path MTU.  The maximum permissible size of an unfragmented
-# packet travelling from and to addresses where encapsulated Neutron
-# traffic is sent.  Drivers calculate maximum viable MTU for
-# validating tenant requests based on this value (typically,
-# path_mtu - max encap header size).  If <=0, the path MTU is
-# indeterminate and no calculation takes place.
-# path_mtu = 0
-
-# (IntOpt) Segment MTU.  The maximum permissible size of an
-# unfragmented packet travelling a L2 network segment.  If <=0,
-# the segment MTU is indeterminate and no calculation takes place.
-# segment_mtu = 0
-
-# (ListOpt) Physical network MTUs.  List of mappings of physical
-# network to MTU value.  The format of the mapping is
-# <physnet>:<mtu val>.  This mapping allows specifying a
-# physical network MTU value that differs from the default
-# segment_mtu value.
-# physical_network_mtus =
-# Example: physical_network_mtus = physnet1:1550, physnet2:1500
-# ======== end of items for MTU selection and advertisement =========
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-# flat_networks =
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-# tunnel_id_ranges =
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-# vni_ranges =
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-# vxlan_group =
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-enable_security_group = False
-
-# Use ipset to speed-up the iptables security groups. Enabling ipset support
-# requires that ipset is installed on L2 agent node.
-enable_ipset = False
--- a/components/openstack/neutron/files/neutron-dhcp-agent	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-dhcp-agent	Wed Sep 07 14:48:41 2016 -0700
@@ -17,13 +17,12 @@
 import ConfigParser
 import os
 import re
+from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 
-from openstack_common import is_ml2_plugin, kill_contract
+from openstack_common import kill_contract
 import smf_include
 
-from subprocess import CalledProcessError, Popen, PIPE, check_call
-
 
 def set_hostmodel(value):
     cmd = ["/usr/sbin/ipadm", "show-prop", "-p", "hostmodel",
@@ -58,9 +57,7 @@
     # 'dh', end with '_0', and in between they are hexadecimal digits.
     prog = re.compile('dh[0-9A-Fa-f\_]{11}_0')
     ret_code = smf_include.SMF_EXIT_OK
-    ovs_bridge = None
-    if is_ml2_plugin():
-        ovs_bridge = get_ovs_bridge()
+    ovs_bridge = get_ovs_bridge()
     for dlname in dlnames:
         if prog.search(dlname) is None:
             continue
@@ -110,7 +107,7 @@
 
 def get_ovs_bridge():
     parser = ConfigParser.ConfigParser()
-    parser.read("/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini")
+    parser.read("/etc/neutron/plugins/ml2/openvswitch_agent.ini")
     try:
         ovs_bridge = parser.get("ovs", "integration_bridge")
     except ConfigParser.NoOptionError:
--- a/components/openstack/neutron/files/neutron-l3-agent	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-l3-agent	Wed Sep 07 14:48:41 2016 -0700
@@ -20,15 +20,12 @@
 from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 
-import netaddr
-from openstack_common import is_ml2_plugin, kill_contract
+from openstack_common import kill_contract
 import smf_include
 
 from neutron.agent.solaris import packetfilter
 from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
-    get_vpn_interfaces
-from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
-    shutdown_vpn
+    get_vpn_interfaces, shutdown_vpn
 
 
 def set_hostmodel(value):
@@ -65,7 +62,6 @@
     # hexadecimal digits.
     prog = re.compile('l3[ie][0-9A-Fa-f\_]{10}_0')
     retcode = smf_include.SMF_EXIT_OK
-    is_ml2 = is_ml2_plugin()
     for dlname in dlnames:
         if prog.search(dlname) is None:
             continue
@@ -82,11 +78,10 @@
             check_call(["/usr/bin/pfexec", "/usr/sbin/dladm", "delete-vnic",
                         dlname])
             # remove the OVS Port
-            if is_ml2:
-                ovs_bridge = get_ovs_bridge(dlname)
-                if ovs_bridge:
-                    check_call(["/usr/bin/pfexec", "/usr/sbin/ovs-vsctl", "--",
-                                "--if-exists", "del-port", ovs_bridge, dlname])
+            ovs_bridge = get_ovs_bridge(dlname)
+            if ovs_bridge:
+                check_call(["/usr/bin/pfexec", "/usr/sbin/ovs-vsctl", "--",
+                            "--if-exists", "del-port", ovs_bridge, dlname])
         except CalledProcessError as err:
             print "failed to remove datalink '%s' used by L3 agent: %s" % \
                 (dlname, err)
@@ -136,15 +131,9 @@
               "enabled before enabling neutron-l3-agent"
         return smf_include.SMF_EXIT_ERR_CONFIG
 
-    # remove any stale PF rules under _auto/neutron:l3:agent anchor
-    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
-    pf.remove_anchor_recursively()
-
     cmd = "/usr/bin/pfexec /usr/lib/neutron/neutron-l3-agent " \
-        "--config-file %s --config-file %s --config-file %s" % \
-        tuple(sys.argv[2:5])
-    if is_ml2_plugin():
-        cmd += " --config-file %s" % sys.argv[5]
+        "--config-file %s --config-file %s --config-file %s " \
+        "--config-file %s" % tuple(sys.argv[2:6])
 
     # The VPNaaS shutdown should unplumb all IP tunnels it created. But
     # be paranoid and check for lingering tunnels created by OpenStack
@@ -173,7 +162,7 @@
 def get_ovs_bridge(dlname):
     # retrieve the right OVS bridge based on the interface name
     if dlname.startswith('l3i'):
-        config_file = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
+        config_file = '/etc/neutron/plugins/ml2/openvswitch_agent.ini'
         section = "ovs"
         option = "integration_bridge"
     else:
--- a/components/openstack/neutron/files/neutron-openvswitch-agent.xml	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-openvswitch-agent.xml	Wed Sep 07 14:48:41 2016 -0700
@@ -76,7 +76,7 @@
         <propval name='config_path' type='astring'
           value='/etc/neutron/neutron.conf'/>
         <propval name='ovs_config_path' type='astring'
-          value='/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'/>
+          value='/etc/neutron/plugins/ml2/openvswitch_agent.ini'/>
       </property_group>
     </instance>
 
--- a/components/openstack/neutron/files/neutron-server	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-server	Wed Sep 07 14:48:41 2016 -0700
@@ -15,19 +15,16 @@
 #    under the License.
 
 import os
+from subprocess import CalledProcessError, check_call
 import sys
 
-from openstack_common import is_ml2_plugin
 import smf_include
-from subprocess import CalledProcessError, check_call
 
 
 def start():
     cfg_files = sys.argv[2:3]
-    if is_ml2_plugin():
-        cfg_files.append("/etc/neutron/plugins/ml2/ml2_conf.ini")
-    else:
-        cfg_files.append("/etc/neutron/plugins/evs/evs_plugin.ini")
+    # It is the ML2 plugin for now, until we introduce another plugin
+    cfg_files.append("/etc/neutron/plugins/ml2/ml2_conf.ini")
 
     # verify paths are valid
     for f in cfg_files:
--- a/components/openstack/neutron/files/neutron-upgrade	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-upgrade	Wed Sep 07 14:48:41 2016 -0700
@@ -20,23 +20,26 @@
 import sys
 import traceback
 
-import iniparse
 import smf_include
-import sqlalchemy
 
 from openstack_common import alter_mysql_tables, create_backups, modify_conf
 
 
 NEUTRON_CONF_MAPPINGS = {
-    # Deprecated group/name
-    ('DEFAULT', 'nova_api_insecure'): ('nova', 'insecure'),
-    ('DEFAULT', 'nova_ca_certificates_file'): ('nova', 'cafile'),
+    # Deprecated group/name for Liberty
+    ('DEFAULT', 'use_syslog'): (None, None),
+    ('DEFAULT', 'log_format'): (None, None),
+    ('DEFAULT', 'rpc_thread_pool_size'):
+        ('DEFAULT', 'executor_thread_pool_size'),
+    ('ml2_sriov', 'agent_required'): (None, None),
+    # Deprecated group/name for Mitaka
+    ('ml2', 'segment_mtu'): ('DEFAULT', 'global_physnet_mtu'),
     ('DEFAULT', 'nova_region_name'): ('nova', 'region_name'),
-    ('DEFAULT', 'max_request_body_size'):
-        ('oslo_middleware', 'max_request_body_size'),
-    ('DEFAULT', 'use-syslog'): (None, None),
-    ('DEFAULT', 'log-format'): (None, None),
-    ('DEFAULT', 'log_format'): (None, None),
+    ('DEFAULT', 'nova_admin_username'): ('nova', 'username'),
+    ('DEFAULT', 'nova_admin_tenant_id'): ('nova', 'tenant_id'),
+    ('DEFAULT', 'nova_admin_tenant_name'): ('nova', 'tenant_name'),
+    ('DEFAULT', 'nova_admin_password'): ('nova', 'password'),
+    ('DEFAULT', 'nova_admin_auth_url'): ('nova', 'auth_url'),
 }
 
 NEUTRON_CONF_EXCEPTIONS = [
@@ -59,7 +62,7 @@
     ('DEFAULT', 'ovs_integration_bridge'),
     ('DEFAULT', 'interface_driver'),
     ('DEFAULT', 'external_network_bridge'),
-    ('DEFAULT', 'evs_controller'),    
+    ('DEFAULT', 'evs_controller'),
 ]
 
 DHCP_AGENT_EXCEPTIONS = [
@@ -79,6 +82,21 @@
     ('DEFAULT', 'metadata_workers'),
 ]
 
+OPENVSWITCH_AGENT_EXCEPTIONS = [
+    ('ovs', 'integration_bridge'),
+    ('ovs', 'tunnel_bridge'),
+    ('securitygroup', 'enable_security_group'),
+    ('securitygroup', 'enable_ipset'),
+]
+
+ML2_CONF_EXCEPTION = [
+    ('ml2', 'type_drivers'),
+    ('ml2', 'tenant_network_types'),
+    ('ml2', 'mechanism_drivers'),
+    ('securitygroup', 'enable_security_group'),
+    ('securitygroup', 'enable_ipset'),
+]
+
 
 def start():
     # pull out the current version of config/upgrade-id
@@ -98,8 +116,10 @@
         # No need to upgrade
         sys.exit(smf_include.SMF_EXIT_OK)
 
+    # TODO: Kilo EVS check. If upgrade is from Kilo running EVS,
+    # fail the upgrade.
+
     # look for any .new files
-    db_connection = None
     if glob.glob('/etc/neutron/*.new'):
         # the versions are different, so perform an upgrade
         # modify the configuration files
@@ -116,19 +136,20 @@
         modify_conf('/etc/neutron/metadata_agent.ini', mapping=None,
                     exception_list=METADATA_AGENT_EXCEPTIONS)
 
-    config = iniparse.RawConfigParser()
-    config.read('/etc/neutron/neutron.conf')
-    if config.has_section('database'):
-        db_connection = config.get('database', 'connection')
-        engine = sqlalchemy.create_engine(db_connection)
-        if engine.url.username != '%SERVICE_USER%':
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'stamp', 'havana'])
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'upgrade', 'juno'])
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'upgrade', 'kilo'])
+    # look for any .new files for ml2 plugin
+    if glob.glob('/etc/neutron/plugins/ml2/*.new'):
+        # modify the configuration files
+
+        # backup all the old configuration files
+        create_backups('/etc/neutron/plugins/ml2')
 
+        modify_conf('/etc/neutron/plugins/ml2/openvswitch_agent.ini',
+                    mapping=None,
+                    exception_list=OPENVSWITCH_AGENT_EXCEPTIONS)
+
+        modify_conf('/etc/neutron/plugins/ml2/ml2_conf.ini',
+                    mapping=None,
+                    exception_list=ML2_CONF_EXCEPTION)
 
     # update the current version
     check_call(['/usr/sbin/svccfg', '-s', os.environ['SMF_FMRI'], 'setprop',
--- a/components/openstack/neutron/files/neutron.conf	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron.conf	Wed Sep 07 14:48:41 2016 -0700
@@ -1,826 +1,1075 @@
 [DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-# verbose = False
+
+#
+# From neutron
+#
 
-# =========Start Global Config Option for Distributed L3 Router===============
-# Setting the "router_distributed" flag to "True" will default to the creation
-# of distributed tenant routers. The admin can override this flag by specifying
-# the type of the router on the create request (admin-only attribute). Default
-# value is "False" to support legacy mode (centralized) routers.
-#
-# router_distributed = False
-#
-# ===========End Global Config Option for Distributed L3 Router===============
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+
+# The host IP to bind to (string value)
+#bind_host = 0.0.0.0
 
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-# debug = False
-
-# Where to store Neutron state files.  This directory must be writable by the
-# user executing the agent.
-# state_path = /var/lib/neutron
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#bind_port = 9696
 
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+#api_extensions_path =
 
-# use_syslog                           -> syslog
-# log_file and log_dir                 -> log_dir/log_file
-# (not log_file) and log_dir           -> log_dir/{binary_name}.log
-# use_stderr                           -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors                       -> notification system
+# The type of authentication to use (string value)
+#auth_strategy = keystone
 
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
+# The core plugin Neutron will use (string value)
+#
+# The ML2 plugin provides support for heterogeneous networking technologies in
+# the cloud.
+core_plugin = ml2
 
-# publish_errors = False
-
-# Address to bind the API server to
-# bind_host = 0.0.0.0
-
-# Port the bind the API server to
-# bind_port = 9696
+# The service plugins Neutron will use (list value)
+#
+# This option must be set when the core_plugin is set to 'ml2' and the
+# supported values are 'router' and 'vpnaas'.
+service_plugins = router
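+#
+# For example, to also enable VPNaaS (the only other supported value noted
+# above):
+# service_plugins = router,vpnaas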
 
-# Path to the extensions.  Note that this can be a colon-separated list of
-# paths.  For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
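+# For example, to fix the first four octets and randomize only the last two,
+# a value such as fa:16:3e:4f:00:00 could be used.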
+
+# How many times Neutron will retry MAC generation (integer value)
+#mac_generation_retries = 16
 
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# The ML2 plugin provides support for heterogenous networking technologies
-# in the cloud.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-# core_plugin = ml2
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# Allow the usage of the pagination (boolean value)
+#allow_pagination = false
+
+# Allow the usage of the sorting (boolean value)
+#allow_sorting = false
+
+# The maximum number of items returned in a single response; a value of
+# 'infinite' or a negative integer means no limit (string value)
+#pagination_max_limit = -1
 
-# The EVSNeutronPluginV2 Neutron plugin connects to the Solaris Elastic
-# Virtual Switch framework to provide virtual networking between Solaris
-# Zones.
-core_plugin = neutron.plugins.evs.plugin.EVSNeutronPluginV2
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
 
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# This option must be set when the core_plugin is set to ML2 and the
-# supported values are router and vpnaas.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-# service_plugins = router
+# Maximum number of fixed ips per port. This option is deprecated and will be
+# removed in the N release. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#max_fixed_ips_per_port = 5
 
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# (StrOpt) Hostname to be used by the neutron server, agents and services
-# running on this machine. All the agents and services running on this machine
-# must use the same host value.
-# The default value is hostname of the machine.
-#
-# host =
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
+# Default IPv4 subnet pool to be used for automatic subnet CIDR allocation.
+# Specifies by UUID the pool to be used in case where creation of a subnet is
+# being called without a subnet pool ID. If not set then no pool will be used
+# unless passed explicitly to the subnet create. If no pool is used, then a
+# CIDR must be passed to create a subnet and that subnet will not be allocated
+# from any pool; it will be considered part of the tenant's private address
+# space. This option is deprecated for removal in the N release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#default_ipv4_subnet_pool = <None>
 
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
+# Default IPv6 subnet pool to be used for automatic subnet CIDR allocation.
+# Specifies by UUID the pool to be used in case where creation of a subnet is
+# being called without a subnet pool ID. See the description for
+# default_ipv4_subnet_pool for more information. This option is deprecated for
+# removal in the N release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#default_ipv6_subnet_pool = <None>
 
-# DVR Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used.  The others will be randomly
-# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
-# avoid mixing them up with MAC's allocated for tenant ports.
-# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
-# The default is 3 octet
-# dvr_base_mac = fa:16:3f:00:00:00
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
 
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times. (integer value)
+# Deprecated group/name - [DEFAULT]/dhcp_lease_time
+#dhcp_lease_duration = 86400
 
-# DHCP Lease duration (in seconds).  Use -1 to
-# tell dnsmasq to use infinite lease times.
-# dhcp_lease_duration = 86400
+# Domain to use for building the hostnames (string value)
+#dns_domain = openstacklocal
 
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
+# Driver for external DNS integration. (string value)
+#external_dns_driver = <None>
 
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
+# Allow sending resource operation notification to DHCP agent (boolean value)
+#dhcp_agent_notification = true
+
+# Allow overlapping IP support in Neutron. Attention: the following parameter
+# MUST be set to False if Neutron is being used in conjunction with Nova
+# security groups. (boolean value)
+#allow_overlapping_ips = false
+
+# Hostname to be used by the Neutron server, agents and services running on
+# this machine. All the agents and services running on this machine must use
+# the same host value. (string value)
+#host = example.domain
+
 # Ensure that configured gateway is on subnet. For IPv6, validate only if
 # gateway is not a link local address. Deprecated, to be removed during the
-# K release, at which point the check will be mandatory.
-# force_gateway_on_subnet = True
+# Newton release, at which point the gateway will not be forced on to subnet.
+# (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#force_gateway_on_subnet = true
 
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
+# Send notification to nova when port status changes (boolean value)
+#notify_nova_on_port_status_changes = true
 
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
+# Send notification to nova when port data (fixed_ips/floatingip) changes so
+# nova can update its cache. (boolean value)
+#notify_nova_on_port_data_changes = true
 
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
+# Number of seconds between sending events to nova if there are any events to
+# send. (integer value)
+#send_events_interval = 2
 
-# Maximum number of routes per router
-# max_routes = 30
+# If True, advertise network MTU values if core plugin calculates them. MTU is
+# advertised to running instances via DHCP and RA MTU options. (boolean value)
+#advertise_mtu = true
 
-# Default Subnet Pool to be used for IPv4 subnet-allocation.
-# Specifies by UUID the pool to be used in case of subnet-create being called
-# without a subnet-pool ID.  The default of None means that no pool will be
-# used unless passed explicitly to subnet create.  If no pool is used, then a
-# CIDR must be passed to create a subnet and that subnet will not be allocated
-# from any pool; it will be considered part of the tenant's private address
-# space.
-# default_ipv4_subnet_pool =
+# Neutron IPAM (IP address management) driver to use. If ipam_driver is not set
+# (default behavior), no IPAM driver is used. In order to use the reference
+# implementation of Neutron IPAM driver, use 'internal'. (string value)
+#ipam_driver = <None>
 
-# Default Subnet Pool to be used for IPv6 subnet-allocation.
-# Specifies by UUID the pool to be used in case of subnet-create being
-# called without a subnet-pool ID.  Set to "prefix_delegation"
-# to enable IPv6 Prefix Delegation in a PD-capable environment.
-# See the description for default_ipv4_subnet_pool for more information.
-# default_ipv6_subnet_pool =
+# If True, then allow plugins that support it to create VLAN transparent
+# networks. (boolean value)
+#vlan_transparent = false
 
-# =========== items for MTU selection and advertisement =============
-# Advertise MTU.  If True, effort is made to advertise MTU
-# settings to VMs via network methods (ie. DHCP and RA MTU options)
-# when the network's preferred MTU is known.
-# advertise_mtu = False
-# ======== end of items for MTU selection and advertisement =========
+# This will choose the web framework in which to run the Neutron API server.
+# 'pecan' is a new experimental rewrite of the API server. (string value)
+# Allowed values: legacy, pecan
+#web_framework = legacy
 
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# ===========  end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+# MTU of the underlying physical network. Neutron uses this value to calculate
+# MTU for all virtual network components. For flat and VLAN networks, neutron
+# uses this value without modification. For overlay networks such as VXLAN,
+# neutron automatically subtracts the overlay protocol overhead from this
+# value. Defaults to 1500, the standard value for Ethernet. Also consider
+# setting the path_mtu ml2 configuration value to the global_physnet_mtu value
+# when using the ml2 plug-in. Otherwise the global_physnet_mtu value might get
+# overridden by a smaller path_mtu value and hence have no effect on
+# overlay/tunnel networks but only flat and VLAN networks. (integer value)
+# Deprecated group/name - [ml2]/segment_mtu
+#global_physnet_mtu = 1500
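+#
+# Worked example (assuming the standard 50-byte VXLAN-over-IPv4 encapsulation
+# overhead): with global_physnet_mtu = 1500, instances on VXLAN networks are
+# advertised an MTU of 1500 - 50 = 1450, while flat and VLAN networks keep the
+# full 1500.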
 
-# (StrOpt) Representing the resource type whose load is being reported by
-# the agent.
-# This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
-# the server will extract particular load sent as part of its agent configuration object
-# from the agent report state, which is the number of resources being consumed, at
-# every report_interval.
-# dhcp_load_type can be used in combination with network_scheduler_driver =
-# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
-# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
-# be configured to represent the choice for the resource being balanced.
-# Example: dhcp_load_type = networks
-# Values:
-#   networks - number of networks hosted on the agent
-#   subnets -  number of subnets associated with the networks hosted on the agent
-#   ports   -  number of ports associated with the networks hosted on the agent
-# dhcp_load_type = networks
+# Number of backlog requests to configure the socket with (integer value)
+#backlog = 4096
+
+# Number of seconds to keep retrying to listen (integer value)
+#retry_until_window = 30
+
+# Enable SSL on the API server (boolean value)
+#use_ssl = false
+
+# Seconds between running periodic tasks (integer value)
+#periodic_interval = 40
 
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+api_workers = 1
 
-# Allow automatic rescheduling of routers from dead L3 agents with
-# admin_state_up set to True to alive agents.
-# allow_automatic_l3agent_failover = False
+# Number of RPC worker processes for service (integer value)
+#rpc_workers = 1
 
-# Allow automatic removal of networks from dead DHCP agents with
-# admin_state_up set to True.
-# Networks could then be rescheduled if network_auto_schedule is True
-# allow_automatic_dhcp_failover = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
+# Number of RPC worker processes dedicated to state reports queue (integer
+# value)
+#rpc_state_report_workers = 1
 
-# Enable services on agents with admin_state_up False.
-# If this option is False, when admin_state_up of an agent is turned to
-# False, services on it will be disabled. If this option is True, services
-# on agents with admin_state_up False keep available and manual scheduling
-# to such agents is available. Agents with admin_state_up False are not
-# selected for automatic scheduling regardless of this option.
-# enable_services_on_agents_with_admin_state_down = False
-
-# ===========  end of items for agent scheduler extension =====
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
 
-# =========== items for l3 extension ==============
-# Enable high availability for virtual routers.
-# l3_ha = False
 #
-# Maximum number of l3 agents which a HA router will be scheduled on. If it
-# is set to 0 the router will be scheduled on every agent.
-# max_l3_agents_per_router = 3
-#
-# Minimum number of l3 agents which a HA router will be scheduled on. The
-# default value is 2.
-# min_l3_agents_per_router = 2
-#
-# CIDR of the administrative network if HA mode is enabled
-# l3_ha_net_cidr = 169.254.192.0/18
+# From neutron.agent
 #
-# The network type to use when creating the HA network for an HA router.
-# By default or if empty, the first 'tenant_network_types'
-# is used. This is helpful when the VRRP traffic should use a specific
-# network which not the default one.
-# ha_network_type =
-# Example: ha_network_type = flat
-#
-# The physical network name with which the HA network can be created.
-# ha_network_physical_name =
-# Example: ha_network_physical_name = physnet1
-# =========== end of items for l3 extension =======
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
 
-# =========== items for metadata proxy configuration ==============
-# User (uid or name) running metadata proxy after its initialization
-# (if empty: agent effective user)
-# metadata_proxy_user =
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
 
-# Group (gid or name) running metadata proxy after its initialization
-# (if empty: agent effective group)
-# metadata_proxy_group =
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
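+# Illustrative example (the account name is a placeholder, not a default):
+# Example: metadata_proxy_user = neutron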
 
-# Enable/Disable log watch by metadata proxy, it should be disabled when
+# Enable/Disable log watch by metadata proxy. It should be disabled when
 # metadata_proxy_user/group is not allowed to read/write its log file and
-# 'copytruncate' logrotate option must be used if logrotate is enabled on
+# copytruncate logrotate option must be used if logrotate is enabled on
 # metadata proxy log files. Option default value is deduced from
 # metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
-# effective user id/name.
-# metadata_proxy_watch_log =
+# effective user id/name. (boolean value)
+#metadata_proxy_watch_log = <None>
 
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-# =========== end of items for metadata proxy configuration ==============
+#
+# From neutron.db
+#
+
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good. (integer value)
+#agent_down_time = 75
 
-# ========== items for VLAN trunking networks ==========
-# Setting this flag to True will allow plugins that support it to
-# create VLAN transparent networks. This flag has no effect for
-# plugins that do not support VLAN transparent networks.
-# vlan_transparent = False
-# ========== end of items for VLAN trunking networks ==========
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn.  The default, 0, runs the
-# worker thread in the current process.  Greater than 0 launches that number of
-# child processes as workers.  The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn.  The default, 0, runs the
-# worker thread in the current process.  Greater than 0 launches that number of
-# child processes as RPC workers.  The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
+# The resource type whose load is being reported by the agent. This can be
+# "networks", "subnets" or "ports". When specified (default is networks), the
+# server will extract the particular load sent as part of its agent
+# configuration object from the agent report state, which is the number of
+# resources being consumed, at every report_interval. dhcp_load_type can be
+# used in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. When the
+# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured
+# to represent the choice for the resource being balanced. Example:
+# dhcp_load_type=networks (string value)
+# Allowed values: networks, subnets, ports
+#dhcp_load_type = networks
 
-# Timeout for client connections socket operations. If an
-# incoming connection is idle for this number of seconds it
-# will be closed. A value of '0' means wait forever. (integer
-# value)
-# client_socket_timeout = 900
+# Agents start with admin_state_up=False when enable_new_agents=False. In that
+# case, the user's resources will not be scheduled automatically to the agent
+# until the admin changes admin_state_up to True. (boolean value)
+#enable_new_agents = true
 
-# wsgi keepalive option. Determines if connections are allowed to be held open
-# by clients after a request is fulfilled. A value of False will ensure that
-# the socket connection will be explicitly closed once a response has been
-# sent to the client.
-# wsgi_keep_alive = True
+# Maximum number of routes per router (integer value)
+#max_routes = 30
+
+# Define the default value of enable_snat if not provided in
+# external_gateway_info. (boolean value)
+#enable_snat_by_default = true
 
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
+# Driver to use for scheduling network to DHCP agent (string value)
+#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
 
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
+# Allow auto scheduling networks to DHCP agent. (boolean value)
+#network_auto_schedule = true
 
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
+# Automatically remove networks from offline DHCP agents. (boolean value)
+#allow_automatic_dhcp_failover = true
 
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
+# Number of DHCP agents scheduled to host a tenant network. If this number is
+# greater than 1, the scheduler automatically assigns multiple DHCP agents for
+# a given tenant network, providing high availability for DHCP service.
+# (integer value)
+#dhcp_agents_per_network = 1
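+# Illustrative example (not a default): schedule two DHCP agents per network
+# for redundancy, assuming at least two hosts run the DHCP agent:
+# Example: dhcp_agents_per_network = 2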
 
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
+# Enable services on an agent with admin_state_up False. If this option is
+# False, when admin_state_up of an agent is turned False, services on it will
+# be disabled. Agents with admin_state_up False are not selected for automatic
+# scheduling regardless of this option. But manual scheduling to such agents is
+# available if this option is True. (boolean value)
+#enable_services_on_agents_with_admin_state_down = false
 
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
+# The base mac address used for unique DVR instances by Neutron. The first 3
+# octets will remain unchanged. If the 4th octet is not 00, it will also be
+# used. The others will be randomly generated. The 'dvr_base_mac' *must* be
+# different from 'base_mac' to avoid mixing them up with MACs allocated for
+# tenant ports. A 4-octet example would be dvr_base_mac = fa:16:3f:4f:00:00.
+# The default is 3 octets. (string value)
+#dvr_base_mac = fa:16:3f:00:00:00
 
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-# notify_nova_on_port_data_changes = True
+# System-wide flag to determine the type of router that tenants can create.
+# Only admin can override. (boolean value)
+#router_distributed = false
 
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
+# Driver to use for scheduling router to a default L3 agent (string value)
+#router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
 
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
+# Allow auto scheduling of routers to L3 agent. (boolean value)
+#router_auto_schedule = true
 
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
+# Automatically reschedule routers from offline L3 agents to online L3 agents.
+# (boolean value)
+#allow_automatic_l3agent_failover = false
 
-# The name of the admin nova tenant. If the uuid of the admin nova tenant
-# is set, this is optional.  Useful for cases where the uuid of the admin
-# nova tenant is not available when configuration is being done.
-# nova_admin_tenant_name =
+# Enable HA mode for virtual routers. (boolean value)
+#l3_ha = false
 
-# Password for connection to nova in admin context.
-# nova_admin_password =
+# Maximum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+#max_l3_agents_per_router = 3
 
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url = http://localhost:5000/v2.0
+# Minimum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+#min_l3_agents_per_router = 2
 
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
+# Subnet used for the l3 HA admin network. (string value)
+#l3_ha_net_cidr = 169.254.192.0/18
 
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
+# The network type to use when creating the HA network for an HA router. By
+# default or if empty, the first 'tenant_network_types' is used. This is
+# helpful when the VRRP traffic should use a specific network which is not the
+# default one. (string value)
+#l3_ha_network_type =
 
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
+# The physical network name with which the HA network can be created. (string
+# value)
+#l3_ha_network_physical_name =
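+# Illustrative HA example (values are placeholders, carried over from the
+# examples in the previous format of this file): enable HA routers and run
+# the VRRP traffic over a flat network on a specific physical network:
+# Example: l3_ha = true
+# Example: l3_ha_network_type = flat
+# Example: l3_ha_network_physical_name = physnet1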
 
 #
-# Options defined in oslo.messaging
+# From neutron.extensions
+#
+
+# Maximum number of allowed address pairs (integer value)
+#max_allowed_address_pair = 10
+
+#
+# From neutron.qos
+#
+
+# Drivers list to use to send the update notification (list value)
+#notification_drivers = message_queue
+
+#
+# From oslo.log
 #
 
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues=false
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the log file
+# is moved or removed, this handler will open a new log file with the specified
+# path instantaneously. It makes sense only if the log_file option is specified
+# and the Linux platform is used. This option is ignored if log_config_append
+# is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# Auto-delete queues in amqp. (boolean value)
-# amqp_auto_delete=false
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
 
 # Size of RPC connection pool. (integer value)
-# rpc_conn_pool_size=30
-
-# Qpid broker hostname. (string value)
-# qpid_hostname=localhost
-
-# Qpid broker port. (integer value)
-# qpid_port=5672
-
-# Qpid HA cluster host:port pairs. (list value)
-# qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-# qpid_username=
-
-# Password for Qpid connection. (string value)
-# qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth.
-# (string value)
-# qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats. (integer
-# value)
-# qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-# qpid_protocol=tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-# qpid_tcp_nodelay=true
-
-# The qpid topology version to use.  Version 1 is what was
-# originally used by impl_qpid.  Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-# qpid_topology_version=1
-
-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
-# distributions. (string value)
-# kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled). (string value)
-# kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled). (string value)
-# kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL
-# enabled). (string value)
-# kombu_ssl_ca_certs=
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
 
-# How long to wait before reconnecting in response to an AMQP
-# consumer cancel notification. (floating point value)
-# kombu_reconnect_delay=1.0
-
-# The RabbitMQ broker address where a single node is used.
-# (string value)
-# rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used.
-# (integer value)
-# rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs. (list value)
-# rabbit_hosts=$rabbit_host:$rabbit_port
-
-# Connect over SSL for RabbitMQ. (boolean value)
-# rabbit_use_ssl=false
-
-# The RabbitMQ userid. (string value)
-# rabbit_userid=guest
-
-# The RabbitMQ password. (string value)
-# rabbit_password=guest
-
-# the RabbitMQ login method (string value)
-# rabbit_login_method=AMQPLAIN
-
-# The RabbitMQ virtual host. (string value)
-# rabbit_virtual_host=/
-
-# How frequently to retry connecting with RabbitMQ. (integer
-# value)
-# rabbit_retry_interval=1
-
-# How long to backoff for between retries when connecting to
-# RabbitMQ. (integer value)
-# rabbit_retry_backoff=2
-
-# Maximum number of RabbitMQ connection retries. Default is 0
-# (infinite retry count). (integer value)
-# rabbit_max_retries=0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
-# this option, you must wipe the RabbitMQ database. (boolean
-# value)
-# rabbit_ha_queues=false
-
-# If passed, use a fake RabbitMQ provider. (boolean value)
-# fake_rabbit=false
-
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet
-# interface, or IP. The "host" option should point or resolve
-# to this address. (string value)
-# rpc_zmq_bind_address=*
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
 
 # MatchMaker driver. (string value)
-# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
 
-# ZeroMQ receiver listening port. (integer value)
-# rpc_zmq_port=9501
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
 
 # Number of ZeroMQ contexts, defaults to 1. (integer value)
-# rpc_zmq_contexts=1
+#rpc_zmq_contexts = 1
 
-# Maximum number of ingress messages to locally buffer per
-# topic. Default is unlimited. (integer value)
-# rpc_zmq_topic_backlog=
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
 
 # Directory for holding IPC sockets. (string value)
-# rpc_zmq_ipc_dir=/var/run/openstack
+#rpc_zmq_ipc_dir = /var/run/openstack
 
-# Name of this node. Must be a valid hostname, FQDN, or IP
-# address. Must match "host" option, if running Nova. (string
-# value)
-# rpc_zmq_host=oslo
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
 
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-# rpc_cast_timeout=30
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
 
-# Heartbeat frequency. (integer value)
-# matchmaker_heartbeat_freq=300
+# The default number of seconds that poll should wait. Poll raises a timeout
+# exception when the timeout expires. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
 
-# Heartbeat time-to-live. (integer value)
-# matchmaker_heartbeat_ttl=600
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
 
-# Size of RPC greenthread pool. (integer value)
-# rpc_thread_pool_size=64
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
 
-# Driver or drivers to handle sending notifications. (multi
-# valued)
-# notification_driver=
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
 
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-# notification_topics=notifications
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
 
 # Seconds to wait for a response from a call. (integer value)
-# rpc_response_timeout=60
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
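+# Illustrative example (credentials and host are placeholders):
+# Example: transport_url = rabbit://guest:guest@localhost:5672/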
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = neutron
+
+#
+# From oslo.service.wsgi
+#
+
+# File name for the paste.deploy config for api service (string value)
+#api_paste_config = api-paste.ini
+
+# A python format string that is used as the template to generate log lines.
+# The following values can be formatted into it: client_ip, date_time,
+# request_line, status_code, body_length, wall_seconds. (string value)
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s  len: %(body_length)s time: %(wall_seconds).7f
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not
+# supported on OS X. (integer value)
+#tcp_keepidle = 600
+
+# Size of the pool of greenthreads used by wsgi (integer value)
+#wsgi_default_pool_size = 100
+
+# Maximum line size of message headers to be accepted. max_header_line may need
+# to be increased when using large tokens (typically those generated when
+# keystone is configured to use PKI tokens with big service catalogs). (integer
+# value)
+#max_header_line = 16384
+
+# If False, closes the client socket connection explicitly. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for client connections' socket operations. If an incoming connection
+# is idle for this number of seconds it will be closed. A value of '0' means
+# wait forever. (integer value)
+#client_socket_timeout = 900
+
+
+[agent]
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+root_helper =
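+# Illustrative example, using the rootwrap filter facility described above:
+# Example: root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf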
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+# Root helper daemon application to use when possible. (string value)
+#root_helper_daemon = <None>
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Action to be executed when a child process dies (string value)
+# Allowed values: respawn, exit
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
 
-# A URL representing the messaging driver to use and its full
-# configuration. If not set, we fall back to the rpc_backend
-# option and driver specific configuration. (string value)
-# transport_url=
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# request's "origin" header. (list value)
+#allowed_origin = <None>
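+# Illustrative example (the domain is a placeholder):
+# Example: allowed_origin = https://dashboard.example.com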
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# request's "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[database]
+
+#
+# From neutron.db
+#
+
+# Database engine for which script will be generated when using offline
+# migration. (string value)
+#engine =
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection = mysql://%SERVICE_USER%:%SERVICE_PASSWORD%@localhost/neutron
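+# Illustrative example with an explicit database host (host name, user and
+# password are placeholders):
+# Example: connection = mysql://neutron:secret@db.example.com/neutron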
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
 
-# The messaging driver to use, defaults to rabbit. Other
-# drivers include qpid and zmq. (string value)
-# rpc_backend=rabbit
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+auth_uri = http://127.0.0.1:5000/v2.0/
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times to retry reconnecting when communicating with the Identity
+# API server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
 
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the
-# transport_url option. (string value)
-# control_exchange=openstack
+# Directory used to cache files related to PKI tokens. (string value)
+signing_dir = $state_path/keystone-signing
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
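+# Illustrative example (addresses are placeholders):
+# Example: memcached_servers = 192.0.2.10:11211,192.0.2.11:11211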
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms = md5
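+# Illustrative migration example (assumes moving from md5 to sha256):
+# Example: hash_algorithms = sha256,md5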
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown value)
+#auth_section = <None>
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+identity_uri = http://127.0.0.1:35357/
+
+# Service username. (string value)
+admin_user = %SERVICE_USER%
+
+# Service user password. (string value)
+admin_password = %SERVICE_PASSWORD%
+
+# Service tenant name. (string value)
+admin_tenant_name = %SERVICE_TENANT_NAME%
 
 
 [matchmaker_redis]
 
 #
-# Options defined in oslo.messaging
+# From oslo.messaging
 #
 
 # Host to locate redis. (string value)
-# host=127.0.0.1
+#host = 127.0.0.1
 
-# Use this port to connect to redis host. (integer value)
-# port=6379
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
 
 # Password for Redis server (optional). (string value)
-# password=
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
 
 
-[matchmaker_ring]
+[nova]
 
 #
-# Options defined in oslo.messaging
+# From neutron
+#
+
+# Name of nova region to use. Useful if keystone manages more than one region.
+# (string value)
+#region_name = <None>
+
+# Type of the nova endpoint to use.  This endpoint will be looked up in the
+# keystone catalog and should be one of public, internal or admin. (string
+# value)
+# Allowed values: public, admin, internal
+#endpoint_type = public
+
+#
+# From nova.auth
 #
 
-# Matchmaker ring file (JSON). (string value)
-# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-# ringfile=/etc/oslo/matchmaker_ring.json
-
-[quotas]
-# Default driver to use for quota checks
-# quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-# quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-# default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-# quota_network = 10
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-# quota_subnet = 10
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-# quota_port = 50
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-# quota_security_group = 10
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-# quota_security_group_rule = 100
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitor = -1
-
-# Number of loadbalancers allowed per tenant. A negative value means unlimited.
-# quota_loadbalancer = 10
-
-# Number of listeners allowed per tenant. A negative value means unlimited.
-# quota_listener = -1
-
-# Number of v2 health monitors allowed per tenant. A negative value means
-# unlimited. These health monitors exist under the lbaas v2 API
-# quota_healthmonitor = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-# Number of firewalls allowed per tenant. A negative value means unlimited.
-# quota_firewall = 1
-
-# Number of firewall policies allowed per tenant. A negative value means
-# unlimited.
-# quota_firewall_policy = 1
-
-# Number of firewall rules allowed per tenant. A negative value means
-# unlimited.
-# quota_firewall_rule = 100
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper =
-
-# Set to true to add comments to generated iptables rules that describe
-# each rule's purpose. (System must support the iptables comments module.)
-# comment_iptables_rules = True
-
-# Root helper daemon application to use when possible.
-# root_helper_daemon =
+# Authentication URL (unknown value)
+auth_url = http://127.0.0.1:5000/v2.0/
 
-# Use the root helper when listing the namespaces on a system. This may not
-# be required depending on the security configuration. If the root helper is
-# not required, set this to False for a performance improvement.
-# use_helper_for_ns_read = True
-
-# The interval to check external processes for failure in seconds (0=disabled)
-# check_child_processes_interval = 60
-
-# Action to take when an external process spawned by an agent dies
-# Values:
-#   respawn - Respawns the external process
-#   exit - Exits the agent
-# check_child_processes_action = respawn
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# ===========  end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://127.0.0.1:5000/v2.0/
-identity_uri = http://127.0.0.1:35357/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:[email protected]:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-connection = mysql://%SERVICE_USER%:%SERVICE_PASSWORD%@localhost/neutron
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# This configures the MySQL storage engine. This allows for OpenStack to
-# support different storage engines such as InnoDB, NDB, etc. By Default,
-# this value will be set to InnoDB. For MySQL Cluster, set to NDBCLUSTER.
-# Example: mysql_storage_engine=(string value)
-mysql_storage_engine = InnoDB
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[nova]
-# Name of the plugin to load
-# auth_plugin =
-
-# Config Section from which to load plugin specific options
-# auth_section =
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+auth_type = v2password
 
 # PEM encoded Certificate Authority to use when verifying HTTPs connections.
-# cafile =
+# (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used for both
+# the user and project domain in v3 and ignored in v2 authentication. (unknown
+# value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2 authentication.
+# (unknown value)
+#default_domain_name = <None>
 
-# PEM encoded client certificate cert file
-# certfile =
+# Domain ID to scope to (unknown value)
+#domain_id = <None>
+
+# Domain name to scope to (unknown value)
+#domain_name = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
 
-# Verify HTTPS connections.
-# insecure = False
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# User's password (unknown value)
+password = %SERVICE_PASSWORD%
+
+# Domain ID containing project (unknown value)
+#project_domain_id = <None>
 
-# PEM encoded client certificate key file
-# keyfile =
+# Domain name containing project (unknown value)
+#project_domain_name = <None>
+
+# Project ID to scope to (unknown value)
+# Deprecated group/name - [DEFAULT]/tenant-id
+#project_id = <None>
+
+# Project name to scope to (unknown value)
+# Deprecated group/name - [DEFAULT]/tenant-name
+#project_name = <None>
+
+# Tenant ID (unknown value)
+#tenant_id = <None>
+
+# Tenant Name (unknown value)
+tenant_name = %SERVICE_TENANT_NAME%
 
-# Name of nova region to use. Useful if keystone manages more than one region.
-# region_name =
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Trust ID (unknown value)
+#trust_id = <None>
+
+# User's domain id (unknown value)
+#user_domain_id = <None>
 
-# Timeout value for http requests
-# timeout =
+# User's domain name (unknown value)
+#user_domain_name = <None>
+
+# User id (unknown value)
+#user_id = <None>
+
+# Username (unknown value)
+# Deprecated group/name - [DEFAULT]/user-name
+username = %SERVICE_USER%
+
 
 [oslo_concurrency]
 
-# Directory to use for lock files. For security, the specified directory should
-# only be writable by the user running the processes that need locking.
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified directory
+# should only be writable by the user running the processes that need locking.
 # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
-# a lock path must be set.
+# a lock path must be set. (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
 lock_path = $state_path/lock
 
-# Enables or disables inter-process locks.
-# disable_process_locking = False
-
-[oslo_policy]
-
-# The JSON file that defines policies.
-# policy_file = policy.json
-
-# Default rule. Enforced when a requested rule is not found.
-# policy_default_rule = default
-
-# Directories where policy configuration files are stored.
-# They can be relative to any directory in the search path defined by the
-# config_dir option, or absolute paths. The file defined by policy_file
-# must exist for these directories to be searched. Missing or empty
-# directories are ignored.
-# policy_dirs = policy.d
 
 [oslo_messaging_amqp]
 
@@ -828,115 +1077,91 @@
 # From oslo.messaging
 #
 
-# Address prefix used when sending to a specific server (string value)
+# address prefix used when sending to a specific server (string value)
 # Deprecated group/name - [amqp1]/server_request_prefix
-# server_request_prefix = exclusive
+#server_request_prefix = exclusive
 
-# Address prefix used when broadcasting to all servers (string value)
+# address prefix used when broadcasting to all servers (string value)
 # Deprecated group/name - [amqp1]/broadcast_prefix
-# broadcast_prefix = broadcast
+#broadcast_prefix = broadcast
 
-# Address prefix when sending to any server in group (string value)
+# address prefix when sending to any server in group (string value)
 # Deprecated group/name - [amqp1]/group_request_prefix
-# group_request_prefix = unicast
+#group_request_prefix = unicast
 
 # Name for the AMQP container (string value)
 # Deprecated group/name - [amqp1]/container_name
-# container_name =
+#container_name = <None>
 
 # Timeout for inactive connections (in seconds) (integer value)
 # Deprecated group/name - [amqp1]/idle_timeout
-# idle_timeout = 0
+#idle_timeout = 0
 
 # Debug: dump AMQP frames to stdout (boolean value)
 # Deprecated group/name - [amqp1]/trace
-# trace = false
+#trace = false
 
-# CA certificate PEM file for verifing server certificate (string value)
+# CA certificate PEM file to verify server certificate (string value)
 # Deprecated group/name - [amqp1]/ssl_ca_file
-# ssl_ca_file =
+#ssl_ca_file =
 
 # Identifying certificate PEM file to present to clients (string value)
 # Deprecated group/name - [amqp1]/ssl_cert_file
-# ssl_cert_file =
+#ssl_cert_file =
 
 # Private key PEM file used to sign cert_file certificate (string value)
 # Deprecated group/name - [amqp1]/ssl_key_file
-# ssl_key_file =
+#ssl_key_file =
 
 # Password for decrypting ssl_key_file (if encrypted) (string value)
 # Deprecated group/name - [amqp1]/ssl_key_password
-# ssl_key_password =
+#ssl_key_password = <None>
 
 # Accept clients using either SSL or plain TCP (boolean value)
 # Deprecated group/name - [amqp1]/allow_insecure_clients
-# allow_insecure_clients = false
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
 
 
-[oslo_messaging_qpid]
+[oslo_messaging_notifications]
 
 #
 # From oslo.messaging
 #
 
-# Use durable queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues = false
-
-# Auto-delete queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/amqp_auto_delete
-# amqp_auto_delete = false
-
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-# rpc_conn_pool_size = 30
-
-# Qpid broker hostname. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_hostname
-# qpid_hostname = localhost
-
-# Qpid broker port. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_port
-# qpid_port = 5672
-
-# Qpid HA cluster host:port pairs. (list value)
-# Deprecated group/name - [DEFAULT]/qpid_hosts
-# qpid_hosts = $qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_username
-# qpid_username =
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
 
-# Password for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_password
-# qpid_password =
-
-# Space separated list of SASL mechanisms to use for auth. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
-# qpid_sasl_mechanisms =
-
-# Seconds between connection keepalive heartbeats. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_heartbeat
-# qpid_heartbeat = 60
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
 
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_protocol
-# qpid_protocol = tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
-# qpid_tcp_nodelay = true
-
-# The number of prefetched messages held by receiver. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
-# qpid_receiver_capacity = 1
-
-# The qpid topology version to use.  Version 1 is what was originally used by
-# impl_qpid.  Version 2 includes some backwards-incompatible changes that allow
-# broker federation to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_topology_version
-# qpid_topology_version = 1
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
 
 
 [oslo_messaging_rabbit]
@@ -946,90 +1171,337 @@
 #
 
 # Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
 # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues = false
+#amqp_durable_queues = false
 
 # Auto-delete queues in AMQP. (boolean value)
 # Deprecated group/name - [DEFAULT]/amqp_auto_delete
-# amqp_auto_delete = false
-
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-# rpc_conn_pool_size = 30
+#amqp_auto_delete = false
 
 # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
 # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
 # distributions. (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_version
-# kombu_ssl_version =
+#kombu_ssl_version =
 
 # SSL key file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
-# kombu_ssl_keyfile =
+#kombu_ssl_keyfile =
 
 # SSL cert file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
-# kombu_ssl_certfile =
+#kombu_ssl_certfile =
 
 # SSL certification authority file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
-# kombu_ssl_ca_certs =
+#kombu_ssl_ca_certs =
 
 # How long to wait before reconnecting in response to an AMQP consumer cancel
 # notification. (floating point value)
 # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
-# kombu_reconnect_delay = 1.0
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression will
+# not be used. This option may not be available in future versions. (string
+# value)
+
+# How long to wait for a missing client before abandoning sending it its
+# replies. This value should not be longer than rpc_response_timeout. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
 
 # The RabbitMQ broker address where a single node is used. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_host
-# rabbit_host = localhost
+#rabbit_host = localhost
 
-# The RabbitMQ broker port where a single node is used. (integer value)
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
 # Deprecated group/name - [DEFAULT]/rabbit_port
-# rabbit_port = 5672
+#rabbit_port = 5672
 
 # RabbitMQ HA cluster host:port pairs. (list value)
 # Deprecated group/name - [DEFAULT]/rabbit_hosts
-# rabbit_hosts = $rabbit_host:$rabbit_port
+#rabbit_hosts = $rabbit_host:$rabbit_port
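+# Illustrative example only; the host names below are hypothetical:
+# Example: rabbit_hosts = ctl1.example.com:5672,ctl2.example.com:5672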
 
 # Connect over SSL for RabbitMQ. (boolean value)
 # Deprecated group/name - [DEFAULT]/rabbit_use_ssl
-# rabbit_use_ssl = false
+#rabbit_use_ssl = false
 
 # The RabbitMQ userid. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_userid
-# rabbit_userid = guest
+#rabbit_userid = guest
 
 # The RabbitMQ password. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_password
-# rabbit_password = guest
+#rabbit_password = guest
 
 # The RabbitMQ login method. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_login_method
-# rabbit_login_method = AMQPLAIN
+#rabbit_login_method = AMQPLAIN
 
 # The RabbitMQ virtual host. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_virtual_host
-# rabbit_virtual_host = /
+#rabbit_virtual_host = /
 
 # How frequently to retry connecting with RabbitMQ. (integer value)
-# rabbit_retry_interval = 1
+#rabbit_retry_interval = 1
 
 # How long to backoff for between retries when connecting to RabbitMQ. (integer
 # value)
 # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
-# rabbit_retry_backoff = 2
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
 
 # Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
 # count). (integer value)
 # Deprecated group/name - [DEFAULT]/rabbit_max_retries
-# rabbit_max_retries = 0
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except  those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
 
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
-# must wipe the RabbitMQ database. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
-# rabbit_ha_queues = false
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
 
 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
 # Deprecated group/name - [DEFAULT]/fake_rabbit
-# fake_rabbit = false
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set the delay for reconnection to a host which has a connection error
+# (floating point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If the actual number of retry attempts is
+# not 0, the rpc request could be processed more than once. (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Resource name(s) that are supported in quota features. This option is now
+# deprecated for removal. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#quota_items = network,subnet,port
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 50
+
+# Default driver to use for quota checks (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+
+# Keep track in the database of current resource quota usage. Plugins which
+# do not leverage the neutron database should set this flag to False (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
+
+
+[ssl]
+
+#
+# From oslo.service.sslutils
+#
+
+# CA certificate file to use to verify connecting clients. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_ca_file
+#ca_file = <None>
+
+# Certificate file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_cert_file
+#cert_file = <None>
+
+# Private key file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_key_file
+#key_file = <None>
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+#version = <None>
+
+# Sets the list of available ciphers. Value should be a string in the OpenSSL
+# cipher list format. (string value)
+#ciphers = <None>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/neutron_vpnaas.conf	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,12 @@
+[DEFAULT]
+
+
+[service_providers]
+
+#
+# From neutron.vpnaas
+#
+
+# Defines providers for advanced services using the format:
+# <service_type>:<name>:<driver>[:default] (multi valued)
+service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
--- a/components/openstack/neutron/files/ovs_neutron_plugin.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,159 +0,0 @@
-[ovs]
-# Do not change this parameter unless you have a good reason to.
-# This is the name of the OVS integration bridge. There is one per hypervisor.
-# The integration bridge acts as a virtual "patch bay". All VM VIFs are
-# attached to this bridge and then "patched" according to their network
-# connectivity.
-#
-integration_bridge = br_int0
-
-# Only used for the agent if tunnel_id_ranges is not empty for
-# the server.  In most cases, the default value should be fine.
-#
-# In the case of Solaris, the integration bridge and tunnel bridge must
-# be the same.
-#
-tunnel_bridge = br_int0
-
-# Peer patch port in integration bridge for tunnel bridge
-# int_peer_patch_port = patch-tun
-
-# Peer patch port in tunnel bridge for integration bridge
-# tun_peer_patch_port = patch-int
-
-# Uncomment this line for the agent if tunnel_id_ranges is not
-# empty for the server. Set local-ip to be the local IP address of
-# this hypervisor.
-#
-# local_ip =
-
-# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
-# mapping physical network names to the agent's node-specific OVS
-# bridge names to be used for flat and VLAN networks. The length of
-# bridge names should be no more than 11. Each bridge must
-# exist, and should have a physical network interface configured as a
-# port. All physical networks configured on the server should have
-# mappings to appropriate bridges on each agent.
-#
-# bridge_mappings =
-# Example: bridge_mappings = physnet1:br-eth1
-
-# (BoolOpt) Use veths instead of patch ports to interconnect the integration
-# bridge to physical networks. Support kernel without ovs patch port support
-# so long as it is set to True.
-# use_veth_interconnection = False
-
-# (StrOpt) Which OVSDB backend to use, defaults to 'vsctl'
-# vsctl - The backend based on executing ovs-vsctl
-# native - The backend based on using native OVSDB
-# ovsdb_interface = vsctl
-
-# (StrOpt) The connection string for the native OVSDB backend
-# To enable ovsdb-server to listen on port 6640:
-#   ovs-vsctl set-manager ptcp:6640:127.0.0.1
-# ovsdb_connection = tcp:127.0.0.1:6640
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# Minimize polling by monitoring ovsdb for interface changes
-# minimize_polling = True
-
-# When minimize_polling = True, the number of seconds to wait before
-# respawning the ovsdb monitor after losing communication with it
-# ovsdb_monitor_respawn_interval = 30
-
-# (ListOpt) The types of tenant network tunnels supported by the agent.
-# Setting this will enable tunneling support in the agent. This can be set to
-# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
-# disable tunneling support in the agent.
-# You can specify as many values here as your compute hosts supports.
-#
-# tunnel_types =
-# Example: tunnel_types = gre
-# Example: tunnel_types = vxlan
-# Example: tunnel_types = vxlan, gre
-
-# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
-# default, this will make use of the Open vSwitch default value of '4789' if
-# not specified.
-#
-# vxlan_udp_port =
-# Example: vxlan_udp_port = 8472
-
-# (IntOpt) This is the MTU size of veth interfaces.
-# Do not change unless you have a good reason to.
-# The default MTU size of veth interfaces is 1500.
-# This option has no effect if use_veth_interconnection is False
-# veth_mtu =
-# Example: veth_mtu = 1504
-
-# (BoolOpt) Flag to enable l2-population extension. This option should only be
-# used in conjunction with ml2 plugin and l2population mechanism driver. It'll
-# enable plugin to populate remote ports macs and IPs (using fdb_add/remove
-# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to
-# optimize tunnel management.
-#
-# l2_population = False
-
-# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
-# population ML2 MechanismDriver.
-#
-# arp_responder = False
-
-# Enable suppression of ARP responses that don't match an IP address that
-# belongs to the port from which they originate.
-# Note: This prevents the VMs attached to this agent from spoofing,
-# it doesn't protect them from other devices which have the capability to spoof
-# (e.g. bare metal or VMs attached to agents without this flag set to True).
-# Requires a version of OVS that can match ARP headers.
-#
-# prevent_arp_spoofing = False
-
-# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet
-# carrying GRE/VXLAN tunnel. The default value is True.
-#
-# dont_fragment = True
-
-# (BoolOpt) Set to True on L2 agents to enable support
-# for distributed virtual routing.
-#
-# enable_distributed_routing = False
-
-# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
-# SIGTERM. If value is set to 0, rpc timeout won't be changed"
-#
-# quitting_rpc_timeout = 10
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function.
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-enable_security_group = False
-
-#-----------------------------------------------------------------------------
-# Sample Configurations.
-#-----------------------------------------------------------------------------
-#
-# 1. With VLANs on eth1.
-# [ovs]
-# integration_bridge = br-int
-# bridge_mappings = default:br-eth1
-#
-# 2. With GRE tunneling.
-# [ovs]
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-#
-# 3. With VXLAN tunneling.
-# [ovs]
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-# [agent]
-# tunnel_types = vxlan
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/linuxbridge_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,197 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# Enable suppression of ARP responses that don't match an IP address that
+# belongs to the port from which they originate. Note: This prevents the VMs
+# attached to this agent from spoofing, it doesn't protect them from other
+# devices which have the capability to spoof (e.g. bare metal or VMs attached
+# to agents without this flag set to True). Spoofing rules will not be added to
+# any ports that have port security disabled. For LinuxBridge, this requires
+# ebtables. For OVS, it requires a version that supports matching ARP headers.
+# This option will be removed in Newton so the only way to disable protection
+# will be via the port security extension. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#prevent_arp_spoofing = true
+
+
+[linux_bridge]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Comma-separated list of <physical_network>:<physical_interface> tuples
+# mapping physical network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical networks
+# listed in network_vlan_ranges on the server should have mappings to
+# appropriate interfaces on each agent. (list value)
+#physical_interface_mappings =
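+# Illustrative example only; the network and interface names are hypothetical:
+# Example: physical_interface_mappings = physnet1:eth1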
+
+# List of <physical_network>:<physical_bridge> (list value)
+#bridge_mappings =
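+# Illustrative example only; the network and bridge names are hypothetical:
+# Example: bridge_mappings = physnet2:br-ex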
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+[vxlan]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin
+# using linuxbridge mechanism driver (boolean value)
+#enable_vxlan = true
+
+# TTL for vxlan interface protocol packets. (integer value)
+#ttl = <None>
+
+# TOS for vxlan interface protocol packets. (integer value)
+#tos = <None>
+
+# Multicast group(s) for vxlan interface. A range of group addresses may be
+# specified by using CIDR notation. Specifying a range allows different VNIs to
+# use different group addresses, reducing or eliminating spurious broadcast
+# traffic to the tunnel endpoints. To reserve a unique group for each possible
+# (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on
+# all the agents. (string value)
+#vxlan_group = 224.0.0.1
+
+# Local IP address of the VXLAN endpoints. (IP address value)
+#local_ip = <None>
+
+# Extension to use alongside ml2 plugin's l2population mechanism driver. It
+# enables the plugin to populate VXLAN forwarding table. (boolean value)
+#l2_population = false
+
+# Enable local ARP responder which provides local responses instead of
+# performing ARP broadcast into the overlay. Enabling local ARP responder is
+# not fully compatible with the allowed-address-pairs extension. (boolean value)
+#arp_responder = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/macvtap_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,157 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# Enable suppression of ARP responses that don't match an IP address that
+# belongs to the port from which they originate. Note: This prevents the VMs
+# attached to this agent from spoofing, it doesn't protect them from other
+# devices which have the capability to spoof (e.g. bare metal or VMs attached
+# to agents without this flag set to True). Spoofing rules will not be added to
+# any ports that have port security disabled. For LinuxBridge, this requires
+# ebtables. For OVS, it requires a version that supports matching ARP headers.
+# This option will be removed in Newton so the only way to disable protection
+# will be via the port security extension. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#prevent_arp_spoofing = true
+
+
+[macvtap]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# Comma-separated list of <physical_network>:<physical_interface> tuples
+# mapping physical network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical networks
+# listed in network_vlan_ranges on the server should have mappings to
+# appropriate interfaces on each agent. (list value)
+#physical_interface_mappings =
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/ml2_conf.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,240 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[ml2]
+
+#
+# From neutron.ml2
+#
+
+# List of network type driver entrypoints to be loaded from the
+# neutron.ml2.type_drivers namespace. (list value)
+type_drivers = flat,vlan,vxlan
+
+# Ordered list of network_types to allocate as tenant networks. The default
+# value 'local' is useful for single-box testing but provides no connectivity
+# between hosts. (list value)
+#
+# In the case of Solaris, 'local' can be achieved by specifying a 'flat'
+# network type and Solaris Etherstubs, so the 'local' network type as
+# such is not supported.
+tenant_network_types = vlan
+
+# An ordered list of networking mechanism driver entrypoints to be loaded from
+# the neutron.ml2.mechanism_drivers namespace. (list value)
+mechanism_drivers = openvswitch
+
+# An ordered list of extension driver entrypoints to be loaded from the
+# neutron.ml2.extension_drivers namespace. For example: extension_drivers =
+# port_security,qos (list value)
+#extension_drivers =
+
+# Maximum size of an IP packet (MTU) that can traverse the underlying physical
+# network infrastructure without fragmentation when using an overlay/tunnel
+# protocol. Either set this to the same value as the global_physnet_mtu value
+# or use it to explicitly specify a physical network MTU value that differs
+# from the default global_physnet_mtu value. (integer value)
+#
+# In Solaris, set this to 4 less than the desired value. The VXLAN
+# encapsulation overhead on Solaris is 54 bytes, which includes 4 additional
+# bytes for VLAN, while VXLAN_ENCAP_OVERHEAD is constant at 50 bytes, so
+# path_mtu must be set 4 bytes lower than the desired value.
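+# For example, a desired MTU of 1500 yields path_mtu = 1500 - 4 = 1496.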
+path_mtu = 1496
+
+# A list of mappings of physical networks to MTU values. The format of the
+# mapping is <physnet>:<mtu val>. This mapping allows specifying a physical
+# network MTU value that differs from the default global_physnet_mtu value.
+# (list value)
+#physical_network_mtus =
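+# Illustrative example only; the physical network name is hypothetical:
+# Example: physical_network_mtus = physnet1:1496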
+
+# Default network type for external networks when no provider attributes are
+# specified. By default it is None, which means that if provider attributes are
+# not specified while creating external networks then they will have the same
+# type as tenant networks. Allowed values for external_network_type config
+# option depend on the network type values configured in type_drivers config
+# option. (string value)
+#external_network_type = <None>
+
+
+[ml2_type_flat]
+
+#
+# From neutron.ml2
+#
+
+# List of physical_network names with which flat networks can be created. Use
+# default '*' to allow flat networks with arbitrary physical_network names. Use
+# an empty list to disable flat networks. (list value)
+#flat_networks = *
+
+
+[ml2_type_geneve]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# Geneve VNI IDs that are available for tenant network allocation (list value)
+#vni_ranges =
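+# Illustrative example only; the VNI range is arbitrary:
+# Example: vni_ranges = 1:1000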
+
+# Geneve encapsulation header size is dynamic, this value is used to calculate
+# the maximum MTU for the driver. This is the sum of the sizes of the outer ETH
+# + IP + UDP + GENEVE header sizes. The default size for this field is 50,
+# which is the size of the Geneve header without any additional option headers.
+# (integer value)
+#max_header_size = 50
+
+
+[ml2_type_gre]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE
+# tunnel IDs that are available for tenant network allocation (list value)
+#tunnel_id_ranges =
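+# Illustrative example only; the tunnel ID range is arbitrary:
+# Example: tunnel_id_ranges = 1:1000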
+
+
+[ml2_type_vlan]
+
+#
+# From neutron.ml2
+#
+
+# List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network>
+# specifying physical_network names usable for VLAN provider and tenant
+# networks, as well as ranges of VLAN tags on each available for allocation to
+# tenant networks. (list value)
+#network_vlan_ranges =
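+# Illustrative example only; the physical network name and VLAN range are
+# hypothetical:
+# Example: network_vlan_ranges = physnet1:1000:2999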
+
+
+[ml2_type_vxlan]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# VXLAN VNI IDs that are available for tenant network allocation (list value)
+#vni_ranges =
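+# Illustrative example only; the VNI range is arbitrary:
+# Example: vni_ranges = 65537:69999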
+
+# Multicast group for VXLAN. When configured, will enable sending all broadcast
+# traffic to this multicast group. When left unconfigured, will disable
+# multicast VXLAN mode. (string value)
+#vxlan_group = <None>
+
+
+[securitygroup]
+
+#
+# From neutron.ml2
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+enable_security_group = false
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+enable_ipset = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/ml2_conf_sriov.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,108 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[ml2_sriov]
+
+#
+# From neutron.ml2.sriov
+#
+
+# Comma-separated list of supported PCI vendor devices, as defined by
+# vendor_id:product_id according to the PCI ID Repository. Default enables
+# support for Intel and Mellanox SR-IOV capable NICs. (list value)
+#supported_pci_vendor_devs = 15b3:1004,8086:10ca
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/openvswitch_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,275 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Minimize polling by monitoring ovsdb for interface changes. (boolean value)
+#minimize_polling = true
+
+# The number of seconds to wait before respawning the ovsdb monitor after
+# losing communication with it. (integer value)
+#ovsdb_monitor_respawn_interval = 30
+
+# Network types supported by the agent (gre and/or vxlan). (list value)
+#tunnel_types =
+
+# The UDP port to use for VXLAN tunnels. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vxlan_udp_port = 4789
+
+# MTU size of veth interfaces (integer value)
+#veth_mtu = 9000
+
+# Use ML2 l2population mechanism driver to learn remote MAC and IPs and improve
+# tunnel scalability. (boolean value)
+#l2_population = false
+
+# Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2
+# l2population driver. Allows the switch (when supporting an overlay) to
+# respond to an ARP request locally without performing a costly ARP broadcast
+# into the overlay. (boolean value)
+#arp_responder = false
+
+# Enable suppression of ARP responses that don't match an IP address that
+# belongs to the port from which they originate. Note: This prevents the VMs
+# attached to this agent from spoofing, it doesn't protect them from other
+# devices which have the capability to spoof (e.g. bare metal or VMs attached
+# to agents without this flag set to True). Spoofing rules will not be added to
+# any ports that have port security disabled. For LinuxBridge, this requires
+# ebtables. For OVS, it requires a version that supports matching ARP headers.
+# This option will be removed in Newton so the only way to disable protection
+# will be via the port security extension. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#prevent_arp_spoofing = true
+
+# Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#dont_fragment = true
+
+# Make the l2 agent run in DVR mode. (boolean value)
+#enable_distributed_routing = false
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# Reset flow table on start. Setting this to True will cause brief traffic
+# interruption. (boolean value)
+#drop_flows_on_start = false
+
+# Set or un-set the tunnel header checksum  on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#tunnel_csum = false
+
+# Selects the Agent Type reported (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#agent_type = Open vSwitch agent
+
+
+[ovs]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Integration bridge to use. Do not change this parameter unless you have a
+# good reason to. This is the name of the OVS integration bridge. There is one
+# per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM
+# VIFs are attached to this bridge and then 'patched' according to their
+# network connectivity. (string value)
+integration_bridge = br_int0
+
+# Tunnel bridge to use. (string value)
+#
+# In the case of Solaris, the integration bridge and tunnel bridge must
+# be the same.
+tunnel_bridge = br_int0
+
+# Peer patch port in integration bridge for tunnel bridge. (string value)
+#int_peer_patch_port = patch-tun
+
+# Peer patch port in tunnel bridge for integration bridge. (string value)
+#tun_peer_patch_port = patch-int
+
+# Local IP address of tunnel endpoint. (IP address value)
+#local_ip = <None>
+
+# Comma-separated list of <physical_network>:<bridge> tuples mapping physical
+# network names to the agent's node-specific Open vSwitch bridge names to be
+# used for flat and VLAN networks. The length of bridge names should be no more
+# than 11. Each bridge must exist, and should have a physical network interface
+# configured as a port. All physical networks configured on the server should
+# have mappings to appropriate bridges on each agent. Note: If you remove a
+# bridge from this mapping, make sure to disconnect it from the integration
+# bridge as it won't be managed by the agent anymore. Deprecated for ofagent.
+# (list value)
+#bridge_mappings =
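+# Illustrative example only; the network and bridge names are hypothetical
+# (note the bridge name length limit described above):
+# Example: bridge_mappings = physnet1:br-eth1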
+
+# Use veths instead of patch ports to interconnect the integration bridge to
+# physical networks. Support kernel without Open vSwitch patch port support so
+# long as it is set to True. (boolean value)
+#use_veth_interconnection = false
+
+# OpenFlow interface to use. (string value)
+# Allowed values: ovs-ofctl, native
+#of_interface = ovs-ofctl
+
+# OVS datapath to use. 'system' is the default value and corresponds to the
+# kernel datapath. To enable the userspace datapath set this value to 'netdev'.
+# (string value)
+# Allowed values: system, netdev
+#datapath_type = system
+
+# OVS vhost-user socket directory. (string value)
+#vhostuser_socket_dir = /var/run/openvswitch
+
+# Address to listen on for OpenFlow connections. Used only for 'native' driver.
+# (IP address value)
+#of_listen_address = 127.0.0.1
+
+# Port to listen on for OpenFlow connections. Used only for 'native' driver.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#of_listen_port = 6633
+
+# Timeout in seconds to wait for the local switch connecting to the controller.
+# Used only for 'native' driver. (integer value)
+#of_connect_timeout = 30
+
+# Timeout in seconds to wait for a single OpenFlow request. Used only for
+# 'native' driver. (integer value)
+#of_request_timeout = 10
+
+# The interface for interacting with the OVSDB (string value)
+# Allowed values: vsctl, native
+#ovsdb_interface = vsctl
+
+# The connection string for the native OVSDB backend. Requires the native
+# ovsdb_interface to be enabled. (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+enable_security_group = false
+
+# Use ipset to speed up the iptables-based security groups. Enabling ipset
+# support requires that ipset is installed on the L2 agent node. (boolean value)
+enable_ipset = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/sriov_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,128 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses a logging handler designed to watch the file system. When the log file
+# is moved or removed, this handler will open a new log file at the specified
+# path instantaneously. This only makes sense if the log_file option is set and
+# the platform is Linux. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.sriov.agent
+#
+
+# Extensions list to use (list value)
+#extensions =
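+# Illustrative example only (assumes the QoS agent extension is deployed):
+# extensions = qos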
+
+
+[ml2_sriov]
+
+#
+# From neutron.ml2.sriov.agent
+#
+
+# Comma-separated list of <physical_network>:<network_device> tuples mapping
+# physical network names to the agent's node-specific SR-IOV physical function
+# interfaces to be used for VLAN networks. All
+# physical networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent. (list value)
+#physical_device_mappings =
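+# Illustrative example only; the physical network and device names below are
+# hypothetical:
+# physical_device_mappings = physnet1:net0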
+
+# Comma-separated list of <network_device>:<vfs_to_exclude> tuples, mapping
+# network_device to the agent's node-specific list of virtual functions that
+# should not be used for virtual networking. vfs_to_exclude is a semicolon-
+# separated list of virtual functions to exclude from network_device. The
+# network_device in the mapping should appear in the physical_device_mappings
+# list. (list value)
+#exclude_devices =
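+# Illustrative example only; the device name and VF PCI addresses below are
+# hypothetical:
+# exclude_devices = net0:0000:07:00.2;0000:07:00.3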
--- a/components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py	Wed Sep 07 14:48:41 2016 -0700
@@ -1,4 +1,3 @@
-#
 # Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,6 +11,7 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+
 #
 # Based on neutron/services/vpn/device_drivers/ipsec.py written in 2013
 # by Nachi Ueno, NTT I3, Inc.
@@ -43,7 +43,7 @@
 # /etc/neutron/l3_agent.ini
 #
 # The [default] section of this file contains a router_id which
-# is required for the EVS and VPNaaS. Currently only a single
+# is required for Solaris L3 agent and VPNaaS. Currently only a single
 # router_id is supported.
 # e.g.
 #
@@ -80,44 +80,46 @@
 # Adding "Debug = True" to the [default] section will cause LOG.debug()
 # messages to be logged in addition to LOG.info() and LOG.warn().
 #
+
 import abc
 import copy
 import errno
-import iniparse
-import jinja2
 import logging
-import netaddr
 import os
 import re
 import shutil
-import six
 import socket
 import struct
+from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 import threading
 import time
 import traceback
 import unicodedata
+
+import iniparse
+import jinja2
+import netaddr
+from netaddr import IPNetwork
+from oslo_concurrency import lockutils, processutils
+from oslo_config import cfg
 import oslo_messaging
+from oslo_service import loopingcall
+import six
+
 import rad.bindings.com.oracle.solaris.rad.smf_1 as smfb
 import rad.client
 import rad.connect
 
-from oslo.config import cfg
-from oslo import messaging
-from oslo_concurrency import lockutils, processutils
-from netaddr import IPNetwork
+from neutron.agent.linux import ip_lib, utils
 from neutron.agent.solaris import packetfilter
-from neutron.agent.linux import ip_lib, utils
 from neutron.common import rpc as n_rpc
-from neutron_vpnaas.db.vpn import vpn_db
 from neutron import context
-from neutron.openstack.common import loopingcall
 from neutron.plugins.common import constants
 from neutron.plugins.common import utils as plugin_utils
+from neutron_vpnaas.db.vpn import vpn_db
 from neutron_vpnaas.services.vpn.common import topics
 from neutron_vpnaas.services.vpn import device_drivers
-from subprocess import CalledProcessError, Popen, PIPE, check_call
 
 LOG = logging.getLogger(__name__)
 TEMPLATE_PATH = os.path.dirname(__file__)
@@ -1212,7 +1214,7 @@
     """
     RPC_API_VERSION = '1.0'
 
-    target = messaging.Target(version=RPC_API_VERSION)
+    target = oslo_messaging.Target(version=RPC_API_VERSION)
 
     def __init__(self, vpn_service, host):
         self.conf = vpn_service.conf
@@ -1499,7 +1501,7 @@
 
            The code below loops through the list of routers configured
            and enables the VPNs on them. Currently Solaris only supports
-           a single router in any one EVS. It will be a short list ...
+           a single router in Neutron. It will be a short list ...
 
            We no longer have access to the previous configuration. The new
            configuration may well be different, so we have to delete
--- a/components/openstack/neutron/files/vpn_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/vpn_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,56 +1,87 @@
 [DEFAULT]
-# VPN-Agent configuration file
-# Note vpn-agent inherits l3-agent, so you can use configs on l3-agent also
 
-[vpnagent]
-# vpn device drivers which vpn agent will use
-# If we want to use multiple drivers,  we need to define this option multiple
-# times.
-# NOTE: StrongSwan and openSwan cannot be installed at the same time. Thus, both
-#       cannot be enabled for use. In the future when flavors/STF support is
-#       available, this will still constrain the flavors which can be used
-#       together.
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.vyatta_ipsec.VyattaIPSecDriver
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.fedora_strongswan_ipsec.FedoraStrongSwanDriver
-# vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.libreswan_ipsec.LibreSwanDriver
-# vpn_device_driver=another_driver
-vpn_device_driver=neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec.SolarisIPsecDriver
 
 [ipsec]
-# Status check interval
-# ipsec_status_check_interval=60
+
+#
+# From neutron.vpnaas.agent
+#
+
+# Location to store ipsec server config files (string value)
+#config_base_dir = $state_path/ipsec
+
+# Interval for checking ipsec status (integer value)
+#ipsec_status_check_interval = 60
+
+# Enable detailed logging for the ipsec pluto process. If the flag is set to
+# True, detailed logs will be written to config_base_dir/<pid>/log. Note: This
+# setting applies to OpenSwan and LibreSwan only. StrongSwan logs to syslog.
+# (boolean value)
+#enable_detailed_logging = false
+
+
+[pluto]
+
+#
+# From neutron.vpnaas.agent
+#
+
+# Initial interval in seconds for checking if the pluto daemon is shut down
+# (integer value)
+# Deprecated group/name - [libreswan]/shutdown_check_timeout
+#shutdown_check_timeout = 1
+
+# The maximum number of retries for checking for pluto daemon shutdown (integer
+# value)
+# Deprecated group/name - [libreswan]/shutdown_check_retries
+#shutdown_check_retries = 5
+
+# A factor to increase the retry interval for each retry (floating point value)
+# Deprecated group/name - [libreswan]/shutdown_check_back_off
+#shutdown_check_back_off = 1.5
+
+
+[solaris]
+
+#
+# From neutron.vpnaas.agent
+#
+
+# Interval for checking ipsec status (integer value)
+#ipsec_status_check_interval = 60
+
+# IPsec policy failure logging (boolean value)
+#packet_logging = false
+
+# IPsec policy log level (string value)
+#logger_level = message+packet
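+# Illustrative example only: to log packets discarded by IPsec policy,
+# uncomment and set
+# packet_logging = true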
+
 
 [strongswan]
-# For fedora use:
-# default_config_area=/usr/share/strongswan/templates/config/strongswan.d
-# Default is for ubuntu use, /etc/strongswan.d
-# default_config_area=/etc/strongswan.d
+
+#
+# From neutron.vpnaas.agent
+#
 
-[libreswan]
-# Initial interval in seconds for checking if pluto daemon is shutdown
-# shutdown_check_timeout=1
-#
-# The maximum number of retries for checking for pluto daemon shutdown
-# shutdown_check_retries=5
-#
-# A factor to increase the retry interval for each retry
-# shutdown_check_back_off=1.5
+# Template file for ipsec configuration. (string value)
+#ipsec_config_template = /usr/lib/python2.7/vendor-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.conf.template
+
+# Template file for strongswan configuration. (string value)
+#strongswan_config_template = /usr/lib/python2.7/vendor-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/strongswan.conf.template
+
+# Template file for ipsec secret configuration. (string value)
+#ipsec_secret_template = /usr/lib/python2.7/vendor-packages/neutron_vpnaas/services/vpn/device_drivers/template/strongswan/ipsec.secret.template
 
-[solaris]
-# The Solaris driver only reads values from this section. Setting a value
-# here will override the defaults which are set in the driver files as
-# solaris_opts. Setting values here is optional.
+# The area where default StrongSwan configuration files are located. (string
+# value)
+#default_config_area = /etc/strongswan.d
+
+
+[vpnagent]
+
 #
-# Setting ipsec_status_check_interval will change the interval that the
-# driver runs the status update code. The value is in seconds.
-# A lower value will result in more frequent updates to neutron.
+# From neutron.vpnaas.agent
 #
-# ipsec_status_check_interval = 10
-#
-# This will enable the packet logging service which logs discarded packets.
-#
-# packet_logging = True
-# logger_level = "message+packet"
+
+# The vpn device drivers Neutron will use (multi valued)
+vpn_device_driver = neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec.SolarisIPsecDriver
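+#
+# This option is multi valued; to load additional drivers, repeat the line once
+# per driver (the value below is an illustrative placeholder only):
+# vpn_device_driver = another_driver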
--- a/components/openstack/neutron/neutron.p5m	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/neutron.p5m	Wed Sep 07 14:48:41 2016 -0700
@@ -28,107 +28,58 @@
 set name=pkg.summary value="OpenStack Neutron (Networking Service)"
 set name=pkg.description \
     value="Neutron provides an API to dynamically request and configure virtual networks. These networks connect 'interfaces' from other OpenStack services (e.g., VNICs from Nova VMs). The Neutron API supports extensions to provide advanced network capabilities (e.g., QoS, ACLs, network monitoring, etc)."
-set name=pkg.human-version value="Kilo $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Mitaka $(COMPONENT_VERSION)"
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
     value=org.opensolaris.category.2008:System/Virtualization \
     value="org.opensolaris.category.2008:Web Services/Application and Web Servers"
-set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
+set name=info.source-url \
+    value="$(COMPONENT_ARCHIVE_URL) $(COMPONENT_ARCHIVE_URL_1)"
 set name=info.upstream value="OpenStack <[email protected]>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
 set name=openstack.upgrade-id reboot-needed=true value=$(COMPONENT_BE_VERSION)
 set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/059 \
     value=PSARC/2015/110 value=PSARC/2015/535 value=PSARC/2016/116 \
-    value=PSARC/2016/251
+    value=PSARC/2016/251 value=PSARC/2016/268 value=PSARC/2016/455
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 set name=variant.debug.container value=false value=true
 #
 dir  path=etc/neutron owner=neutron group=neutron mode=0700
 file etc/api-paste.ini path=etc/neutron/api-paste.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
+file files/bgp_dragent.ini path=etc/neutron/bgp_dragent.ini owner=neutron \
+    group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/dhcp_agent.ini path=etc/neutron/dhcp_agent.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/l3_agent.ini path=etc/neutron/l3_agent.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/metadata_agent.ini path=etc/neutron/metadata_agent.ini \
     owner=neutron group=neutron mode=0644 overlay=allow preserve=renamenew
-file etc/metering_agent.ini path=etc/neutron/metering_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/neutron.conf path=etc/neutron/neutron.conf owner=neutron \
     group=neutron mode=0644 \
     original_name=cloud/openstack/neutron:etc/neutron/quantum.conf \
     overlay=allow preserve=renamenew
-file path=etc/neutron/neutron_vpnaas.conf owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/bigswitch/restproxy.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/bigswitch/ssl/ca_certs/README
-file path=etc/neutron/plugins/bigswitch/ssl/host_certs/README
-file path=etc/neutron/plugins/brocade/brocade.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/brocade/brocade_mlx.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/brocade/vyatta/vrouter.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/cisco/cisco_cfg_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/cisco/cisco_plugins.ini owner=neutron \
+file files/neutron_vpnaas.conf path=etc/neutron/neutron_vpnaas.conf \
+    owner=neutron group=neutron mode=0644 overlay=allow preserve=renamenew
+file files/plugins/ml2/linuxbridge_agent.ini \
+    path=etc/neutron/plugins/ml2/linuxbridge_agent.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/cisco/cisco_router_plugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/cisco/cisco_vpn_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/embrane/heleos_conf.ini owner=neutron \
+file files/plugins/ml2/macvtap_agent.ini \
+    path=etc/neutron/plugins/ml2/macvtap_agent.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
-file files/evs_plugin.ini path=etc/neutron/plugins/evs/evs_plugin.ini \
-    owner=neutron group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/metaplugin/metaplugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/midonet/midonet.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file files/ml2_conf.ini path=etc/neutron/plugins/ml2/ml2_conf.ini \
+file files/plugins/ml2/ml2_conf.ini path=etc/neutron/plugins/ml2/ml2_conf.ini \
     owner=neutron group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_arista.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_brocade.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_brocade_fi_ni.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_cisco.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_mlnx.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_ncs.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_ofa.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ml2/ml2_conf_sriov.ini owner=neutron \
+file files/plugins/ml2/ml2_conf_sriov.ini \
+    path=etc/neutron/plugins/ml2/ml2_conf_sriov.ini owner=neutron \
     group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/mlnx/mlnx_conf.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/nec/nec.ini owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/nuage/nuage_plugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/oneconvergence/nvsdplugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/opencontrail/contrailplugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file files/ovs_neutron_plugin.ini \
-    path=etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/ovsvapp/ovsvapp_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/plumgrid/plumgrid.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=renamenew
-file path=etc/neutron/plugins/vmware/nsx.ini owner=neutron group=neutron \
+file files/plugins/ml2/openvswitch_agent.ini \
+    path=etc/neutron/plugins/ml2/openvswitch_agent.ini owner=neutron \
+    group=neutron mode=0644 \
+    original_name=cloud/openstack/neutron:etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini \
+    overlay=allow preserve=renamenew
+file files/plugins/ml2/sriov_agent.ini \
+    path=etc/neutron/plugins/ml2/sriov_agent.ini owner=neutron group=neutron \
     mode=0644 overlay=allow preserve=renamenew
 file etc/policy.json path=etc/neutron/policy.json owner=neutron group=neutron \
     mode=0644 overlay=allow preserve=renamenew
@@ -156,10 +107,7 @@
     path=lib/svc/method/neutron-openvswitch-agent
 file files/neutron-server path=lib/svc/method/neutron-server
 file files/neutron-upgrade path=lib/svc/method/neutron-upgrade
-file files/evs/migrate/migrate-evs-to-ovs path=usr/bin/migrate-evs-to-ovs \
-    mode=0555
 file path=usr/bin/neutron-db-manage
-file path=usr/lib/neutron/evs-neutron-migration mode=0555
 file usr/bin/neutron-dhcp-agent path=usr/lib/neutron/neutron-dhcp-agent \
     mode=0555
 file usr/bin/neutron-l3-agent path=usr/lib/neutron/neutron-l3-agent mode=0555
@@ -181,10 +129,12 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/_i18n.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/base_polling.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/ip_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/ovs_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/polling.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/utils.py
@@ -193,16 +143,25 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/dhcp/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/dhcp_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/firewall.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2population_rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2/agent_extension.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2/extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2/extensions/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2/extensions/qos.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_edge_ha_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_edge_router.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_fip_ns.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_local_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_router_base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/dvr_snat_ns.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/fip_rule_priority_allocator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/ha.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/ha_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/item_allocator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/keepalived_state_change.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/legacy_router.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3/link_local_allocator.py
@@ -217,8 +176,10 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/bridge_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/daemon.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/dhcp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/dibbler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/external_process.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/interface.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ip_conntrack.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ip_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ip_link_support.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ip_monitor.py
@@ -227,10 +188,16 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/iptables_firewall.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/iptables_manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/keepalived.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ovs_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/openvswitch_firewall/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/openvswitch_firewall/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/openvswitch_firewall/firewall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/openvswitch_firewall/rules.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ovsdb_monitor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/pd.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/pd_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/polling.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ra.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/tc_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/metadata/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/metadata/agent.py
@@ -245,16 +212,20 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovsdb/native/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovsdb/native/commands.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovsdb/native/connection.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovsdb/native/helpers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovsdb/native/idlutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/securitygroups_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/dhcp.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/interface.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/namespace_manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/net_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/packetfilter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/pd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/ra.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/ip_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/polling.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/windows/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/__init__.py
@@ -262,14 +233,27 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/extensions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/bgp_dr_rpc_agent_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/consumer/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/consumer/registry.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/events.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/producer/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/producer/registry.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/resource_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/resources.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/callbacks/version_manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/bgp_speaker_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/dhcp_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/dvr_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/l3_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/metadata_rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/resources_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/handlers/securitygroups_rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/attributes.py
@@ -290,54 +274,72 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/bgp_dragent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/dhcp.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/l3.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/metadata.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/agents/metadata_proxy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/mlnx_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/nec_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/ovsvapp_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/server/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/services/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/services/metering_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/eventlet/usage_audit.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/ipset_cleanup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/keepalived_state_change.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/linuxbridge_cleanup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/netns_cleanup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/ovs_cleanup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/pd_notify.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/sanity/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/sanity/checks.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/sanity_check.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/usage_audit.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/_deprecate.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/eventlet_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/exceptions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/ipv6_utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/log.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/repos.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/test_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/topics.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/core_extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/core_extensions/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/core_extensions/qos.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/address_scope_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/agents_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/agentschedulers_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/allowedaddresspairs_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/availability_zone/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/availability_zone/network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/availability_zone/router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/bgp_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/bgp_dragentscheduler_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/common_db_mixin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/db_base_plugin_common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/db_base_plugin_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/dns_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/dvr_mac_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/external_net_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/extradhcpopt_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/extraroute_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/flavors_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/ipam_backend_mixin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/ipam_non_pluggable_backend.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/ipam_pluggable_backend.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_agentschedulers_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_attrs_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_dvr_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_dvr_ha_scheduler_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_dvrscheduler_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_gwmode_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_hamode_db.py
@@ -353,130 +355,102 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/brocade_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/cisco_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/core_init_ops.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/dvr_init_opts.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/env.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/external.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/firewall_init_ops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/heal_script.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/l3_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/lb_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/metering_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/ml2_init_ops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/mlnx_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/nec_init_ops.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/nsxv_initial_opts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/nuage_init_opts.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/other_plugins_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/ovs_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/portsec_init_ops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/ryu_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/script.py.mako
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/secgroup_init_ops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/034883111f_remove_subnetpool_allow_overlap.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/10cd28e692e9_nuage_extraroute.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/117643811bca_nec_delete_ofc_mapping.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1680e1f0c4dc_remove_cisco_nexus_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/16a27a58e093_ext_l3_ha_mode.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/19180cf98af6_nsx_gw_devices.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1955efc66455_weight_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1b2580001654_nsx_sec_group_mappin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1b837a7125a9_cisco_apic_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1d6ee1ae5da5_db_healing.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1e5dd1d09b22_set_not_null_fields_lb_stats.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1f71e54a85e7_ml2_net_seg_model.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/20b99fd19d4f_cisco_ucs_manager_mechanism_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/20c469a5f920_add_index_for_port.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/236b90af57ab_ml2_refactor_for_dynamic_segments.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/268fb5e99aa2_subnetpool_allocation.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/26b54cf9024d_add_index_on_allocated.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/27cc183af192_ml2_vnic_type.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/28a09af858a8_subnetpool_quotas.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/28c0ffb8ebbd_remove_mlnx_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2db5203cb7a9_nuage_floatingip.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/31d7f831a591_add_constraint_for_routerid.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/327ee5fde2c7_set_innodb_engine.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/32f3915891fd_cisco_apic_driver_update.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/33c3db036fe4_set_length_of_description_field_metering.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/341ee8a4ccb5_sync_with_cisco_repo.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/35a0f3365720_add_port_security_in_ml2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/37f322991f59_nuage_removing_mapping_tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/38495dc99731_ml2_tunnel_endpoints_table.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3927f7f7c456_l3_extension_distributed_mode.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3b85b693a95f_remove_service_tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3c346828361e_metering_label_shared.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3d2585038b95_vmware_nsx.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3d3cb89d84ee_nsx_switch_mappings.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/408cfbf6923c_remove_ryu_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4119216b7365_add_tenant_id_idx.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/41662e32bce2_l3_dvr_snat_mapping.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/43763a9618fd_add_mtu_attributes_to_network.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/44621190bc02_add_uniqueconstraint_ipavailability_ranges.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/492a106273f8_brocade_ml2_mech_dri.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4ca36cfc898c_nsx_router_mappings.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4dbe243cd84d_nsxv.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4eca4a84f08a_remove_ml2_cisco_cred_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/50e86cb2637a_nsx_mappings.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/51c54792158e_subnetpools.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/538732fa21e1_nec_rename_quantum_id_to_neutron_id.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/544673ac99ab_add_router_port_table.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5446f2a45467_set_server_default.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/54f7549a0e5f_set_not_null_peer_address.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5589aa32bf80_l3_dvr_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/57086602ca0a_scrap_nsx_adv_svcs_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/57dd745253a6_nuage_kilo_migrate.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/589f9237ca0e_cisco_n1kv_ml2_driver_tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/58fe87a01143_cisco_csr_routing.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5ac1c354a051_n1kv_segment_alloc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/6be312499f9_set_not_null_vlan_id_cisco.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/81c553f3776c_bsn_consistencyhashes.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/86d6d9776e2b_cisco_apic_driver_update_l3.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/884573acbf1c_unify_nsx_router_extra_attributes.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/HEAD
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/aae5706a396_nuage_provider_networks.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/abc88c33f74f_lb_stats_needs_bigint.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/b65aa907aec_set_length_of_protocol_field.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/bebba223288_add_vlan_transparent_property_to_network.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/d06e871c0d5_set_admin_state_up_not_null_ml2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/e766b19a3bb_nuage_initial.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/havana_initial.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/icehouse_release.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/juno_release.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/kilo_release.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/kilo_initial.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/11926bcfe72d_add_geneve_ml2_type_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/2e5352a0ad4d_add_missing_foreign_keys.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/4af11ca47297_drop_cisco_monolithic_tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/1b4c6e320f79_address_scope_support_in_subnetpool.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/26c371498592_subnetpool_hash.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/48153cb5f051_qos_db_changes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/liberty/expand/9859ac9c136_quota_reservations.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/1b294093239c_remove_embrane_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/2b4c2465d44b_dvr_sheduling_refactoring.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/4ffceebfcdc_standard_desc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/5ffceebfada_rbac_network_external.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/8a6d8bdae39_migrate_neutron_resources_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/c6c112992c9_rbac_qos_policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/contract/e3278ee65050_drop_nec_plugin_tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/0e66c5227a8a_add_desc_to_standard_attr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/13cfb89f881a_add_is_default_to_subnetpool.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15be73214821_add_bgp_model_data.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/19f26505c74f_auto_allocated_topology.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/1df244e556f5_add_unique_ha_router_agent_port_bindings.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/2f9e956e7532_tag_support.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/31ed664953e6_add_resource_versions_row_to_agent_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/3894bccad37f_add_timestamp_to_base_resources.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/59cb5b6cf4d_availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/659bf3d90664_add_attributes_to_support_external_dns_integration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/b4caf27aae4_add_bgp_dragent_model_data.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/vmware_init_ops.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/vpn_init_ops.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/autogen.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/cli.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/migrate_to_ml2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/connection.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/models/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/models/frozen.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/models/head.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/model_base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/models_v2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/netmtu_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/network_ip_availability_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portbindings_base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portbindings_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portsecurity_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portsecurity_db_common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/qos/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/qos/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/qos/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/rbac_db_mixin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/rbac_db_models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/securitygroups_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/securitygroups_rpc_base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/servicetype_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/sqlalchemytypes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/sqlalchemyutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/standardattrdescription_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/tag_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/vlantransparent_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/README
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/__init__.py
@@ -484,14 +458,21 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/debug_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/shell.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/address_scope.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/allowedaddresspairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/auto_allocated_topology.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/bgp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/bgp_dragentscheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/default_subnetpools.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/dhcpagentscheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/dns.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/dvr.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/external_net.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/extra_dhcp_opt.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/extraroute.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/flavor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/flavors.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3_ext_gw_mode.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3_ext_ha_mode.py
@@ -499,172 +480,86 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/metering.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/multiprovidernet.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/netmtu.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/network_availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/network_ip_availability.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/portbindings.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/portsecurity.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/providernet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/qos.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/quotasv2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/rbac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/router_availability_zone.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/routerservicetype.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/securitygroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/servicetype.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/standardattrdescription.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/subnetallocation.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/tag.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/timestamp_core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/vlantransparent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/hacking/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/hacking/checks.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/hooks.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/i18n.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/drivers/neutrondb_ipam/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/drivers/neutrondb_ipam/db_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/drivers/neutrondb_ipam/db_models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/drivers/neutrondb_ipam/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/requests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/subnet_alloc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/ipam/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/neutron_plugin_base_v2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/notifiers/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/notifiers/batch_notifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/notifiers/nova.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/common_types.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/qos/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/qos/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/qos/rule.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/qos/rule_type.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/objects/rbac_db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/_i18n.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/cache/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/cache/_backends/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/cache/_backends/memory.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/cache/backends.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/cache/cache.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/eventlet_backdoor.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/fileutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/loopingcall.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/middleware/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/middleware/catch_errors.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/middleware/request_id.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/periodic_task.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/systemd.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/threadgroup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/uuidutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/versionutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/opts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/app.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/extensions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/quota.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/resource.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/root.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/controllers/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/body_validation.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/ownership_validation.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/policy_enforcement.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/quota_enforcement.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/hooks/translation.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/pecan_wsgi/startup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/agent/restproxy_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/db/consistency_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/l3_router_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/bigswitch/routerrule_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/NeutronPlugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/README.md
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/db/models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/nos/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/nos/fake_nosdriver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/nos/nctemplates.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/nos/nosdriver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/brocade/vlanbm.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/cisco_constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/cisco_credentials_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/cisco_exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/cisco_faults.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/l3/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/l3/l3_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/n1kv_db_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/n1kv_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/network_db_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/db/network_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/_credential_view.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/_qos_view.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/credential.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/n1kv.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/network_profile.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/policy_profile.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/extensions/qos.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/l2device_plugin_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/models/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/models/virt_phy_sw_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/n1kv/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/n1kv/n1kv_client.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/network_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/service_plugins/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/service_plugins/cisco_router_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/cisco/service_plugins/requirements.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/agent/dispatcher.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/agent/operations/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/agent/operations/router_operations.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/base_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/contexts.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/operation.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/common/utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/fake/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/fake/fake_l2_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/fake/fakeplugin_support.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/ml2/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/ml2/ml2_support.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/support_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/l2base/support_exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/plugins/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/plugins/embrane_fake_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/embrane/plugins/embrane_ml2_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/migrate/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/migrate/havana_api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/hyperv/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/hyperv/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/hyperv/agent/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/hyperv/agent/l2_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/hyperv/agent/security_groups_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/agent/sdnve_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/common/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/sdnve_api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/sdnve_api_fake.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ibm/sdnve_neutron_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/agent/arp_protect.py
-file \
-    path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py \
-    pkg.depend.bypass-generate=.*/oslo_log.* \
-    pkg.depend.bypass-generate=.*/oslo_messaging.*
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/linuxbridge/db/l2network_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/meta_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/meta_neutron_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/proxy_neutron_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/midonet/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/midonet/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/midonet/requirements.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/README
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/common/__init__.py
@@ -674,211 +569,158 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/driver_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/driver_context.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/mechanism_arista.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/arista/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/README.md
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/db/models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/fi_ni/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/fi_ni/mechanism_brocade_fi_ni.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/brocade/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/apic/mechanism_apic.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/n1kv.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/mech_cisco_n1kv.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/n1kv_ext_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/n1kv/n1kv_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/ncs/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/ncs/driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus/mech_cisco_nexus.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus/nexus_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus/type_nexus_vxlan.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/ucsm/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/ucsm/mech_cisco_ucsm.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/ucsm/ucsm_model.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/freescale/README.fslsdn
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/freescale/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/freescale/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/agent/_common_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/agent/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/helpers.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/hyperv/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/hyperv/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/hyperv/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ibm/mechanism_sdnve.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ibm/requirements.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/README
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/db.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/rpc_manager/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py
+file \
+    path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py \
+    pkg.depend.bypass-generate=.*/oslo_service.*
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/agent/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/agent/macvtap_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/mech_driver/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_bigswitch/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_bigswitch/driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_bigswitch/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_linuxbridge.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_nuage/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_nuage/driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_openvswitch.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/agent/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mlnx/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ofagent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ofagent/driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ofagent/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/opendaylight/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/opendaylight/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/opendaylight/driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/opendaylight/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ovsvapp/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ovsvapp/mech_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/ovsvapp/requirements.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/main.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_tun.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/main.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_ryuapp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_phys.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_tun.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/build-rpm.sh
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec
+file \
+    path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap \
+    pkg.depend.bypass-generate=.*/XenAPIPlugin.*
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/mech_driver/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_flat.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_geneve.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_gre.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_local.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_tunnel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_vlan.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_vxlan.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/extensions/dns_integration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/extensions/port_security.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/extensions/qos.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/managers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/rpc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/db/models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/extensions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/extensions/packetfilter.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/extensions/router_provider.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/nec_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nec/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nuage/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nuage/nuage_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/nuage/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/exception.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/nvsd_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/nvsdlib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/lib/plugin_helper.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/oneconvergence/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/opencontrail/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/opencontrail/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/opencontrail/common/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/opencontrail/contrail_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py
-file \
-    path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py \
-    pkg.depend.bypass-generate=.*/oslo_log.* \
-    pkg.depend.bypass-generate=.*/oslo_messaging.*
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/xenapi/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/xenapi/contrib/build-rpm.sh
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/xenapi/contrib/rpmbuild/SPECS/openstack-quantum-xen-plugins.spec
-file \
-    path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/netwrap \
-    pkg.depend.bypass-generate=.*/XenAPIPlugin.*
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/ovs_models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/plumgrid/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/plumgrid/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/plumgrid/plumgrid_plugin/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/plumgrid/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/common/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/eswitch_manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/pci_lib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/sriovnicagent/sriov_nic_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/README
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/common/nsxv_constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/dbexts/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/dbexts/nsx_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/dbexts/nsxv_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/dbexts/vcns_models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/advancedserviceproviders.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/lsn.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/maclearning.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/networkgw.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/nvp_qos.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/qos.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/routertype.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/extensions/vnicindex.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/vmware/requirements.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/quota.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/quota/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/quota/resource.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/quota/resource_registry.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/base_resource_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/base_scheduler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/dhcp_agent_scheduler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/l3_agent_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/__init__.py \
-    pkg.depend.bypass-generate=.*/oslo_log.*
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/rpc_eventlet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/wsgi_eventlet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/wsgi_pecan.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/auto_allocate/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/auto_allocate/db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/auto_allocate/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/auto_allocate/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/auto_allocate/plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/agent/bgp_dragent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/agent/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/agent/entry.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/bgp_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/common/opts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/ryu/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/ryu/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/driver/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/scheduler/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/bgp/scheduler/bgp_dragent_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/externaldns/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/externaldns/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/externaldns/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/externaldns/drivers/designate/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/externaldns/drivers/designate/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/firewall_agent_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/l3reference/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/flavors/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/flavors/flavors_plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/README
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/l3_router_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/mlx/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/mlx/l3_router_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/requirements.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/vyatta/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/brocade/vyatta/vrouter_neutron_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/l3_apic.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/l3_arista.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/l3_router_plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/l3_sdnve.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/agents/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/agents/metering_agent.py
@@ -889,35 +731,74 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/noop/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/noop/noop_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/metering_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/network_ip_availability/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/network_ip_availability/plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/provider_configuration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/notification_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/notification_drivers/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/notification_drivers/message_queue.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/notification_drivers/qos_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/qos_consts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/qos/qos_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/rbac/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/service_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/tag/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/tag/tag_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/timestamp/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/timestamp/timestamp_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/timestamp/timestamp_plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/worker.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron/wsgi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/pbr.json
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/_i18n.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/cmd/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/cmd/eventlet/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/cmd/eventlet/agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/cmd/eventlet/vyatta_agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic.ini
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/env.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/script.py.mako
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/3ea02b2a773e_add_index_tenant_id.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/HEAD
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/CONTRACT_HEAD
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/EXPAND_HEAD
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/kilo_release.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/2c82e782d734_drop_tenant_id_in_cisco_csr_identifier_.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/333dfd6afaa2_populate_vpn_service_table_fields.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/contract/56893333aa52_fix_identifier_map_fk.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/24f28869838b_add_fields_to_vpn_service_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/liberty/expand/30018084ed99_initial.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/contract/2cb4ee992b41_multiple_local_subnets.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/28ee739a7e4b_multiple_local_subnets.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/mitaka/expand/41b509d10b5e_vpnaas_endpoint_groups.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/migration/alembic_migrations/versions/start_neutron_vpnaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/models/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/models/head.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/vpn/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/vpn/vpn_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/vpn/vpn_models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/db/vpn/vpn_validator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/extensions/vpn_endpoint_groups.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/extensions/vpnaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/opts.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/agent.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/common/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/common/netns_wrapper.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/common/topics.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/device_drivers/__init__.py
@@ -946,6 +827,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/service_drivers/cisco_ipsec.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/service_drivers/cisco_validator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/service_drivers/ipsec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/service_drivers/ipsec_validator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/service_drivers/vyatta_ipsec.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/vpn_service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutron_vpnaas/services/vpn/vyatta_agent.py
@@ -969,8 +851,9 @@
     com.oracle.info.tpno=$(TPNO_VPNAAS) \
     com.oracle.info.version=$(COMPONENT_VERSION)
 
-# To upgrade to Kilo version, Juno version of the package must be on the system
-depend type=origin fmri=cloud/openstack/[email protected] root-image=true
+# To upgrade to the Mitaka version, the Kilo version of the package must
+# be on the system
+depend type=origin fmri=cloud/openstack/[email protected] root-image=true
 
 # force a dependency on package delivering dnsmasq(8)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/lib/inet/dnsmasq
@@ -982,9 +865,6 @@
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/dladm \
     variant.debug.container=false
 
-# force a dependency on package delivering evsadm(8)
-depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/evsadm
-
 # force a dependency on package delivering ipadm(8)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/ipadm \
     variant.debug.container=false
@@ -995,25 +875,48 @@
 # force a dependency on package delivering pfctl(8)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/pfctl
 
+# force a dependency on alembic; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/alembic-$(PYV)
+
 # force a dependency on cliff; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/cliff-$(PYV)
 
-# force a dependency on greenlet; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/greenlet-$(PYV)
+# force a dependency on debtcollector; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/debtcollector-$(PYV)
+
+# force a dependency on designateclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/designateclient-$(PYV)
 
 # force a dependency on httplib2; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/httplib2-$(PYV)
 
+# force a dependency on iniparse; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/iniparse-$(PYV)
+
 # force a dependency on jinja2; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/jinja2-$(PYV)
 
+# force a dependency on keystoneauth1; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/keystoneauth1-$(PYV)
+
 # force a dependency on keystoneclient; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/keystoneclient-$(PYV)
 
-# force a dependency on keystonemiddleware; used via a paste.deploy filter
+# force a dependency on keystonemiddleware; pkgdepend work is needed to flush
+# this out.
 depend type=require fmri=library/python/keystonemiddleware-$(PYV)
 
+# force a dependency on neutron-lib; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/neutron-lib-$(PYV)
+
+# force a dependency on neutronclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/neutronclient-$(PYV)
+
 # force a dependency on novaclient; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/novaclient-$(PYV)
 
@@ -1025,34 +928,41 @@
 # out.
 depend type=require fmri=library/python/oslo.context-$(PYV)
 
+# force a dependency on oslo.db; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.db-$(PYV)
+
 # force a dependency on oslo.i18n; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/oslo.i18n-$(PYV)
 
-# force a dependency on oslo.log; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/oslo.log-$(PYV)
-
-# force a dependency on oslo.messaging; pkgdepend work is needed to flush this
-# out.
-depend type=require fmri=library/python/oslo.messaging-$(PYV)
-
 # force a dependency on oslo.middleware; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/oslo.middleware-$(PYV)
 
+# force a dependency on oslo.policy; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.policy-$(PYV)
+
+# force a dependency on oslo.reports; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.reports-$(PYV)
+
 # force a dependency on oslo.serialization; pkgdepend work is needed to flush
 # this out.
 depend type=require fmri=library/python/oslo.serialization-$(PYV)
 
-# force a dependency on oslo.utils; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/oslo.utils-$(PYV)
+# force a dependency on oslo.service; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.service-$(PYV)
 
-# force a dependency on paste.deploy; pkgdepend work is needed to flush this
-# out.
-depend type=require fmri=library/python/paste.deploy-$(PYV)
+# force a dependency on oslo.versionedobjects; pkgdepend work is needed to flush
+# this out.
+depend type=require fmri=library/python/oslo.versionedobjects-$(PYV)
 
 # force a dependency on pbr; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/pbr-$(PYV)
 
+# force a dependency on pecan; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pecan-$(PYV)
+
 # force a dependency on requests; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/requests-$(PYV)
 
@@ -1065,8 +975,14 @@
 # force a dependency on setuptools; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/setuptools-$(PYV)
 
+# force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/sqlalchemy-$(PYV)
+
 # force a dependency on stevedore; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/stevedore-$(PYV)
 
 # force a dependency on webob; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/webob-$(PYV)
+
+# force a dependency on rad-python; pkgdepend work is needed to flush this out.
+depend type=require fmri=system/management/rad/client/rad-python
--- a/components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -1,107 +1,115 @@
 Changes to the Neutron DHCP agent to port it to Solaris. These changes
 will eventually be proposed upstream.
 
---- neutron-2015.1.2/neutron/agent/linux/external_process.py.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/neutron/agent/linux/external_process.py	2016-01-28 23:07:42.221029379 -0800
-@@ -15,6 +15,7 @@
- import abc
- import collections
- import os.path
-+import platform
- import six
- 
- import eventlet
-@@ -86,9 +87,17 @@ class ProcessManager(MonitoredProcess):
-                 cmd_callback = self.default_cmd_callback
-             cmd = cmd_callback(self.get_pid_file_name())
- 
--            ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
--            ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
--                                     run_as_root=self.run_as_root)
-+            if self.namespace:
-+                ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
-+                ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
-+                                         run_as_root=self.run_as_root)
-+            else:
-+                env_params = []
-+                if self.cmd_addl_env:
-+                    env_params = (['/usr/bin/env'] +
-+                                  ['%s=%s' % pair for pair in
-+                                   self.cmd_addl_env.items()])
-+                utils.execute(env_params + list(cmd))
-         elif reload_cfg:
-             self.reload_cfg()
- 
-@@ -131,6 +140,14 @@ class ProcessManager(MonitoredProcess):
-         if pid is None:
-             return False
- 
-+        if platform.system() == "SunOS":
-+            cmd = ['/usr/bin/pargs', '-l', pid]
-+            try:
-+                exec_out = utils.execute(cmd)
-+            except RuntimeError:
-+                return False
-+            return self.uuid in exec_out
-+
-         cmdline = '/proc/%s/cmdline' % pid
-         try:
-             with open(cmdline, "r") as f:
---- neutron-2015.1.2/neutron/api/rpc/handlers/dhcp_rpc.py.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/neutron/api/rpc/handlers/dhcp_rpc.py	2016-01-28 23:07:42.219930998 -0800
-@@ -188,11 +188,13 @@ class DhcpRpcCallback(object):
-                 for fixed_ip in port['fixed_ips']:
-                     if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
-                         dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
--                port['fixed_ips'].extend(
--                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
--
--                retval = plugin.update_port(context, port['id'],
--                                            dict(port=port))
-+                if dhcp_enabled_subnet_ids:
-+                    port['fixed_ips'].extend(
-+                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-+                    retval = plugin.update_port(context, port['id'],
-+                                                dict(port=port))
-+                else:
-+                    retval = port
- 
-         except n_exc.NotFound as e:
-             LOG.warning(e)
-*** neutron-2015.1.2/neutron/agent/linux/utils.py	2015-10-13 10:35:16.000000000 -0700
---- new/neutron/agent/linux/utils.py	2016-05-14 07:44:40.976050014 -0700
+*** neutron-8.0.0/neutron/agent/linux/external_process.py	2016-04-07 00:44:25.000000000 -0700
+--- new/neutron/agent/linux/external_process.py	2016-05-24 10:42:23.157427654 -0700
+***************
+*** 15,20 ****
+--- 15,21 ----
+  import abc
+  import collections
+  import os.path
++ import platform
+  
+  import eventlet
+  from oslo_concurrency import lockutils
 ***************
-*** 18,23 ****
---- 18,24 ----
+*** 87,95 ****
+                  cmd_callback = self.default_cmd_callback
+              cmd = cmd_callback(self.get_pid_file_name())
+  
+!             ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
+!             ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
+!                                      run_as_root=self.run_as_root)
+          elif reload_cfg:
+              self.reload_cfg()
+  
+--- 88,104 ----
+                  cmd_callback = self.default_cmd_callback
+              cmd = cmd_callback(self.get_pid_file_name())
+  
+!             if self.namespace:
+!                 ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
+!                 ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env,
+!                                          run_as_root=self.run_as_root)
+!             else:
+!                 env_params = []
+!                 if self.cmd_addl_env:
+!                     env_params = (['/usr/bin/env'] +
+!                                   ['%s=%s' % pair for pair in
+!                                    self.cmd_addl_env.items()])
+!                 utils.execute(env_params + list(cmd))
+          elif reload_cfg:
+              self.reload_cfg()
+  
+***************
+*** 100,106 ****
+          pid = self.pid
+  
+          if self.active:
+!             if get_stop_command:
+                  cmd = get_stop_command(self.get_pid_file_name())
+                  ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
+                  ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
+--- 109,115 ----
+          pid = self.pid
+  
+          if self.active:
+!             if get_stop_command and self.namespace:
+                  cmd = get_stop_command(self.get_pid_file_name())
+                  ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
+                  ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env)
+***************
+*** 137,142 ****
+--- 146,159 ----
+          if pid is None:
+              return False
+  
++         if platform.system() == "SunOS":
++             cmd = ['/usr/bin/pargs', '-l', pid]
++             try:
++                 exec_out = utils.execute(cmd)
++             except RuntimeError:
++                 return False
++             return self.uuid in exec_out
++ 
+          cmdline = '/proc/%s/cmdline' % pid
+          try:
+              with open(cmdline, "r") as f:
+*** neutron-8.0.0/neutron/agent/linux/utils.py	2016-04-07 00:44:35.000000000 -0700
+--- new/neutron/agent/linux/utils.py	2016-05-22 15:59:48.178297663 -0700
+***************
+*** 17,22 ****
+--- 17,23 ----
+  import glob
   import grp
-  import httplib
   import os
 + import platform
   import pwd
   import shlex
   import socket
 ***************
-*** 31,37 ****
+*** 30,36 ****
+  from eventlet import greenthread
   from oslo_config import cfg
   from oslo_log import log as logging
-  from oslo_log import loggers
 ! from oslo_rootwrap import client
   from oslo_utils import excutils
-  
-  from neutron.agent.common import config
---- 32,41 ----
+  import six
+  from six.moves import http_client as httplib
+--- 31,40 ----
+  from eventlet import greenthread
   from oslo_config import cfg
   from oslo_log import log as logging
-  from oslo_log import loggers
 ! try:
 !     from oslo_rootwrap import client
 ! except:
 !     pass
   from oslo_utils import excutils
-  
-  from neutron.agent.common import config
+  import six
+  from six.moves import http_client as httplib
 ***************
-*** 175,182 ****
+*** 184,191 ****
       """Retrieve a list of the pids of child processes of the given pid."""
   
       try:
@@ -110,7 +118,7 @@
       except RuntimeError as e:
           # Unexpected errors are the responsibility of the caller
           with excutils.save_and_reraise_exception() as ctxt:
---- 179,190 ----
+--- 188,199 ----
       """Retrieve a list of the pids of child processes of the given pid."""
   
       try:
@@ -123,31 +131,65 @@
       except RuntimeError as e:
           # Unexpected errors are the responsibility of the caller
           with excutils.save_and_reraise_exception() as ctxt:
-*** neutron-2015.1.2/neutron/agent/dhcp_agent.py	2015-10-13 10:35:16.000000000 -0700
---- new/neutron/agent/dhcp_agent.py	2016-05-14 07:45:04.012214835 -0700
+*** neutron-8.0.0/neutron/agent/dhcp_agent.py	2016-04-07 00:44:25.000000000 -0700
+--- new/neutron/agent/dhcp_agent.py	2016-05-24 10:43:14.037828809 -0700
 ***************
-*** 17,27 ****
---- 17,29 ----
-  import sys
-  
-  from oslo_config import cfg
-+ from oslo_utils import importutils
-  
-  from neutron.agent.common import config
+*** 23,28 ****
+--- 23,29 ----
   from neutron.agent.dhcp import config as dhcp_config
   from neutron.agent.linux import interface
   from neutron.agent.metadata import config as metadata_config
 + from neutron.agent.solaris import interface as solaris_interface
   from neutron.common import config as common_config
   from neutron.common import topics
-  from neutron.openstack.common import service
+  from neutron import service as neutron_service
 ***************
-*** 38,43 ****
---- 40,46 ----
-      cfg.CONF.register_opts(metadata_config.DRIVER_OPTS)
-      cfg.CONF.register_opts(metadata_config.SHARED_OPTS)
-      cfg.CONF.register_opts(interface.OPTS)
-+     cfg.CONF.register_opts(solaris_interface.OPTS)
+*** 37,43 ****
+      conf.register_opts(dhcp_config.DNSMASQ_OPTS)
+      conf.register_opts(metadata_config.DRIVER_OPTS)
+      conf.register_opts(metadata_config.SHARED_OPTS)
+!     conf.register_opts(interface.OPTS)
+  
+  
+  def main():
+--- 38,45 ----
+      conf.register_opts(dhcp_config.DNSMASQ_OPTS)
+      conf.register_opts(metadata_config.DRIVER_OPTS)
+      conf.register_opts(metadata_config.SHARED_OPTS)
+!     cfg.CONF.register_opts(interface.OPTS)
+!     cfg.CONF.register_opts(solaris_interface.OPTS)
   
   
   def main():
+*** neutron-8.0.0/neutron/agent/linux/dhcp.py	2016-04-07 00:44:35.000000000 -0700
+--- new/neutron/agent/linux/dhcp.py	2016-05-24 10:42:44.985389698 -0700
+***************
+*** 16,21 ****
+--- 16,22 ----
+  import abc
+  import collections
+  import os
++ import platform
+  import re
+  import shutil
+  import time
+***************
+*** 108,114 ****
+      def __init__(self, d):
+          super(NetModel, self).__init__(d)
+  
+!         self._ns_name = "%s%s" % (NS_PREFIX, self.id)
+  
+      @property
+      def namespace(self):
+--- 109,118 ----
+      def __init__(self, d):
+          super(NetModel, self).__init__(d)
+  
+!         if platform.system() == "SunOS":
+!             self._ns_name = None
+!         else:
+!             self._ns_name = "%s%s" % (NS_PREFIX, self.id)
+  
+      @property
+      def namespace(self):
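Two Solaris adaptations recur in the 01-dhcp-agent patch above: run commands directly (optionally through /usr/bin/env) rather than wrapping them in ip netns when no namespace is configured, and test process liveness with pargs(1) because Solaris has no /proc/<pid>/cmdline text file. A minimal standalone sketch of that liveness check follows; the function name and the direct use of subprocess are illustrative only and are not part of the patched ProcessManager class.

    import platform
    import subprocess

    def process_matches_uuid(pid, uuid):
        # Solaris: ask pargs(1) for the argument list and look for the
        # resource UUID in it, as the patched ProcessManager.active() does.
        if platform.system() == "SunOS":
            try:
                out = subprocess.check_output(["/usr/bin/pargs", "-l", str(pid)])
            except (OSError, subprocess.CalledProcessError):
                return False
            return uuid in out.decode("utf-8", "replace")
        # Linux: read the kernel-provided command line, as the unpatched code does.
        try:
            with open("/proc/%s/cmdline" % pid) as f:
                return uuid in f.readline()
        except IOError:
            return False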
--- a/components/openstack/neutron/patches/02-l3-agent-add-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/patches/02-l3-agent-add-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -1,9 +1,19 @@
 Changes to the Neutron L3 agent to port it to Solaris. These changes
 will eventually be proposed upstream.
 
---- neutron-2015.1.2/neutron/agent/linux/daemon.py.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/neutron/agent/linux/daemon.py	2016-01-28 23:07:42.234372590 -0800
-@@ -18,12 +18,14 @@ import grp
+--- neutron-8.1.2/neutron/agent/l3/config.py	2016-06-09 18:45:29.000000000 -0700
++++ new/neutron/agent/l3/config.py	2016-08-01 16:27:05.166039913 -0700
+@@ -46,7 +46,6 @@
+                help=_("Send this many gratuitous ARPs for HA setup, if "
+                       "less than or equal to 0, the feature is disabled")),
+     cfg.StrOpt('router_id', default='',
+-               deprecated_for_removal=True,
+                help=_("If non-empty, the l3 agent can only configure a router "
+                       "that has the matching router ID.")),
+     cfg.BoolOpt('handle_internal_only_routers',
+--- neutron-8.1.2/neutron/agent/linux/daemon.py.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/neutron/agent/linux/daemon.py	2016-07-04 16:19:23.339906615 -0700
+@@ -18,6 +18,7 @@ import grp
  import logging as std_logging
  from logging import handlers
  import os
@@ -11,14 +21,15 @@
  import pwd
  import signal
  import sys
+@@ -26,6 +27,7 @@ from oslo_log import log as logging
  
- from oslo_log import log as logging
- 
+ from neutron._i18n import _, _LE, _LI
+ from neutron.common import exceptions
 +from neutron.agent.linux import utils
- from neutron.common import exceptions
- from neutron.i18n import _LE, _LI
  
-@@ -140,6 +142,15 @@ class Pidfile(object):
+ LOG = logging.getLogger(__name__)
+ 
+@@ -152,6 +154,15 @@ class Pidfile(object):
          if not pid:
              return False
  
@@ -34,8 +45,8 @@
          cmdline = '/proc/%s/cmdline' % pid
          try:
              with open(cmdline, "r") as f:
---- neutron-2015.1.2/neutron/common/ipv6_utils.py.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/neutron/common/ipv6_utils.py	2016-01-28 23:28:34.771113032 -0800
+--- neutron-8.1.2/neutron/common/ipv6_utils.py.~1~	2016-06-09 18:45:36.000000000 -0700
++++ neutron-8.1.2/neutron/common/ipv6_utils.py	2016-07-04 16:19:23.340616655 -0700
 @@ -17,6 +17,7 @@
  IPv6-related utilities and helper functions.
  """
@@ -56,61 +67,96 @@
      if _IS_IPV6_ENABLED is None:
          disabled_ipv6_path = "/proc/sys/net/ipv6/conf/default/disable_ipv6"
          if os.path.exists(disabled_ipv6_path):
-*** neutron-2015.1.2/neutron/agent/l3_agent.py	2015-10-13 10:35:16.000000000 -0700
---- new/neutron/agent/l3_agent.py	2016-05-14 07:44:53.695396597 -0700
-***************
-*** 14,19 ****
---- 14,20 ----
-  #    License for the specific language governing permissions and limitations
-  #    under the License.
-  
-+ import platform
-  import sys
-  
-  from oslo_config import cfg
-***************
-*** 24,29 ****
---- 25,31 ----
-  from neutron.agent.linux import external_process
-  from neutron.agent.linux import interface
-  from neutron.agent.metadata import config as metadata_config
-+ from neutron.agent.solaris import interface as solaris_interface
-  from neutron.common import config as common_config
-  from neutron.common import topics
-  from neutron.openstack.common import service
-***************
-*** 39,44 ****
---- 41,47 ----
-      config.register_use_namespaces_opts_helper(conf)
-      config.register_agent_state_opts_helper(conf)
-      conf.register_opts(interface.OPTS)
-+     conf.register_opts(solaris_interface.OPTS)
-      conf.register_opts(external_process.OPTS)
-  
-  
-***************
-*** 46,51 ****
---- 49,56 ----
-      register_opts(cfg.CONF)
-      common_config.init(sys.argv[1:])
-      config.setup_logging()
-+     if platform.system() == "SunOS":
-+         manager = 'neutron.agent.l3.solaris_agent.L3NATAgent'
-      server = neutron_service.Service.create(
-          binary='neutron-l3-agent',
-          topic=topics.L3_AGENT,
---- neutron-2015.1.2/neutron/agent/l3/agent.py.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/neutron/agent/l3/agent.py	2016-08-19 20:29:49.000000000 -0700
-@@ -523,6 +523,12 @@
-             else:
-                 routers = self.plugin_rpc.get_routers(context,
-                                                       [self.conf.router_id])
+--- neutron-8.1.2/neutron/agent/l3/agent.py.~1~	2016-08-30 13:29:12.113143750 -0700
++++ neutron-8.1.2/neutron/agent/l3/agent.py	2016-08-30 13:20:22.455764906 -0700
+@@ -13,6 +13,9 @@
+ #    under the License.
+ #
+ 
++import os
++import platform
++
+ import eventlet
+ import netaddr
+ from oslo_config import cfg
+@@ -32,12 +35,18 @@
+ from neutron.agent.l3 import ha
+ from neutron.agent.l3 import ha_router
+ from neutron.agent.l3 import legacy_router
+-from neutron.agent.l3 import namespace_manager
++if platform.system() == "SunOS":
++    from neutron.agent.solaris import namespace_manager
++else:
++    from neutron.agent.l3 import namespace_manager
+ from neutron.agent.l3 import namespaces
+ from neutron.agent.l3 import router_processing_queue as queue
+ from neutron.agent.linux import external_process
+ from neutron.agent.linux import ip_lib
+-from neutron.agent.linux import pd
++if platform.system() == "SunOS":
++    from neutron.agent.solaris import pd
++else:
++    from neutron.agent.linux import pd
+ from neutron.agent.metadata import driver as metadata_driver
+ from neutron.agent import rpc as agent_rpc
+ from neutron.callbacks import events
+@@ -268,6 +277,11 @@
+                             "default value is 'br-ex' so it must be "
+                             "explicitly set to a blank value."))
+ 
++        if not self.conf.router_id:
++            msg = _LE('Router id is required if not using namespaces.')
++            LOG.error(msg)
++            raise SystemExit(1)
++
+         if self.conf.ipv6_gateway:
+             # ipv6_gateway configured. Check for valid v6 link-local address.
+             try:
+@@ -559,6 +573,12 @@
+             for i in range(0, len(router_ids), self.sync_routers_chunk_size):
+                 routers = self.plugin_rpc.get_routers(
+                     context, router_ids[i:i + self.sync_routers_chunk_size])
 +                if not routers:
 +                    LOG.error(_LE('Server failed to return info for router '
-+                                  'with id: %s. Make sure the correct '
-+                                  'router_id is specified in l3_agent.ini'),
++                                  'with id %s. Make sure the correct router_id'
++                                  ' is specified in l3_agent.ini'),
 +                              self.conf.router_id)
-+                    raise SystemExit(1)
++                    os._exit(1)
+                 LOG.debug('Processing :%r', routers)
+                 for r in routers:
+                     curr_router_ids.add(r['id'])
+--- neutron-8.1.2/neutron/agent/l3_agent.py.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/neutron/agent/l3_agent.py	2016-07-04 16:19:23.341146110 -0700
+@@ -14,6 +14,7 @@
+ #    License for the specific language governing permissions and limitations
+ #    under the License.
+ 
++import platform
+ import sys
  
-         except oslo_messaging.MessagingException:
-             LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
+ from oslo_config import cfg
+@@ -27,6 +28,7 @@ from neutron.agent.linux import interfac
+ from neutron.agent.linux import pd
+ from neutron.agent.linux import ra
+ from neutron.agent.metadata import config as metadata_config
++from neutron.agent.solaris import interface as solaris_interface
+ from neutron.common import config as common_config
+ from neutron.common import topics
+ from neutron import service as neutron_service
+@@ -40,6 +42,7 @@ def register_opts(conf):
+     config.register_interface_driver_opts_helper(conf)
+     config.register_agent_state_opts_helper(conf)
+     conf.register_opts(interface.OPTS)
++    conf.register_opts(solaris_interface.OPTS)
+     conf.register_opts(external_process.OPTS)
+     conf.register_opts(pd.OPTS)
+     conf.register_opts(ra.OPTS)
+@@ -50,6 +53,8 @@ def main(manager='neutron.agent.l3.agent
+     register_opts(cfg.CONF)
+     common_config.init(sys.argv[1:])
+     config.setup_logging()
++    if platform.system() == "SunOS":
++        manager = 'neutron.agent.l3.solaris_agent.L3NATAgent'
+     server = neutron_service.Service.create(
+         binary='neutron-l3-agent',
+         topic=topics.L3_AGENT,
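The 02-l3-agent patch above uses one dispatch pattern throughout: check platform.system() and, on SunOS, import the neutron.agent.solaris counterparts of namespace_manager and pd and point the service at the Solaris manager class; it also makes router_id mandatory since namespaces are not used. A self-contained sketch of the manager selection, assuming the caller passes in whatever Linux default main() already uses:

    import platform

    def select_l3_manager(default_manager):
        # Leave the Linux default untouched; only override the manager class
        # when running on Solaris, mirroring the module-level platform checks.
        if platform.system() == "SunOS":
            return 'neutron.agent.l3.solaris_agent.L3NATAgent'
        return default_manager

    if __name__ == "__main__":
        # 'linux-default-manager' is a placeholder, not a real entry point.
        print(select_l3_manager('linux-default-manager'))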
--- a/components/openstack/neutron/patches/03-metadata-driver-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/patches/03-metadata-driver-solaris.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -1,17 +1,17 @@
 Changes to the Neutron Metadata agent to port it to Solaris. These
 changes will eventually be proposed upstream.
 
---- neutron-2015.1.0/neutron/agent/metadata/driver.py	2015-04-30 05:52:24.000000000 -0700
-+++ new/neutron/agent/metadata/driver.py	2015-10-08 16:01:14.949791035 -0700
+--- neutron-8.0.0/neutron/agent/metadata/driver.py.~1~	2016-04-07 00:44:25.000000000 -0700
++++ neutron-8.0.0/neutron/agent/metadata/driver.py	2016-05-08 22:58:35.936392260 -0700
 @@ -14,6 +14,7 @@
  #    under the License.
  
  import os
 +import platform
  
- from oslo_log import log as logging
- 
-@@ -38,10 +39,16 @@
+ from neutron.agent.common import config
+ from neutron.agent.l3 import ha_router
+@@ -36,10 +37,16 @@ class MetadataDriver(object):
      def __init__(self, l3_agent):
          self.metadata_port = l3_agent.conf.metadata_port
          self.metadata_access_mark = l3_agent.conf.metadata_access_mark
@@ -32,7 +32,7 @@
  
      @classmethod
      def metadata_filter_rules(cls, port, mark):
-@@ -95,14 +102,18 @@
+@@ -96,14 +103,18 @@ class MetadataDriver(object):
              metadata_proxy_socket = conf.metadata_proxy_socket
              user, group, watch_log = (
                  cls._get_metadata_proxy_user_group_watchlog(conf))
@@ -59,40 +59,16 @@
              proxy_cmd.extend(config.get_log_args(
                  conf, 'neutron-ns-metadata-proxy-%s.log' % uuid,
                  metadata_proxy_watch_log=watch_log))
-@@ -141,7 +152,7 @@
-     router = kwargs['router']
-     proxy = l3_agent.metadata_driver
-     for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
--                                           proxy.metadata_access_mark):
-+                                            proxy.metadata_access_mark):
-         router.iptables_manager.ipv4['filter'].add_rule(c, r)
-     for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark):
-         router.iptables_manager.ipv4['mangle'].add_rule(c, r)
-@@ -162,7 +173,7 @@
-     router = kwargs['router']
-     proxy = l3_agent.metadata_driver
-     for c, r in proxy.metadata_filter_rules(proxy.metadata_port,
--                                           proxy.metadata_access_mark):
-+                                            proxy.metadata_access_mark):
-         router.iptables_manager.ipv4['filter'].remove_rule(c, r)
-     for c, r in proxy.metadata_mangle_rules(proxy.metadata_access_mark):
-         router.iptables_manager.ipv4['mangle'].remove_rule(c, r)
-@@ -171,6 +182,28 @@
-     router.iptables_manager.apply()
- 
+@@ -168,3 +179,25 @@ def before_router_removed(resource, even
      proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor,
--                                          router.router['id'],
--                                          router.ns_name,
--                                          l3_agent.conf)
-+                                           router.router['id'],
-+                                           router.ns_name,
-+                                           l3_agent.conf)
+                                           router.router['id'],
+                                           l3_agent.conf)
 +
 +
 +def after_router_added_solaris(resource, event, l3_agent, **kwargs):
 +    router = kwargs['router']
 +    proxy = l3_agent.metadata_driver
-+    if not router.is_ha:
++    if not router.router['ha']:
 +        proxy.spawn_monitored_metadata_proxy(
 +            l3_agent.process_monitor,
 +            router.ns_name,
@@ -104,7 +80,7 @@
 +def before_router_removed_solaris(resource, event, l3_agent, **kwargs):
 +    router = kwargs['router']
 +    proxy = l3_agent.metadata_driver
-+    if not router.is_ha:
++    if not router.router['ha']:
 +        proxy.destroy_monitored_metadata_proxy(l3_agent.process_monitor,
 +                                               router.router['id'],
 +                                               router.ns_name,
--- a/components/openstack/neutron/patches/04-requirements.patch	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/patches/04-requirements.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -1,57 +1,25 @@
 In-house patch to remove unnecessary dependencies from Neutron's
 requirements files. The specific reasons are as follows:
 
-jsonrpclib	Not applicable
-
 oslo.rootwrap	Not applicable to Solaris
 
 Paste		Not applicable
 
---- neutron-2015.1.2/neutron.egg-info/requires.txt.~1~	2015-10-13 10:36:16.000000000 -0700
-+++ neutron-2015.1.2/neutron.egg-info/requires.txt	2016-01-28 22:52:36.925598086 -0800
-@@ -1,12 +1,10 @@
- pbr!=0.7,<1.0,>=0.6
--Paste
- PasteDeploy>=1.5.0
- Routes!=2.0,>=1.12.3
- eventlet!=0.17.0,>=0.16.1
- greenlet>=0.3.2
- httplib2>=0.7.5
- requests!=2.4.0,>=2.2.0
--jsonrpclib
- Jinja2>=2.6 # BSD License3 clause
- keystonemiddleware<1.6.0,>=1.5.0
- netaddr>=0.7.12
-@@ -26,7 +24,6 @@ oslo.i18n<1.6.0,>=1.5.0 # Apache-2.0
- oslo.log<1.1.0,>=1.0.0 # Apache-2.0
- oslo.messaging<1.9.0,>=1.8.0 # Apache-2.0
- oslo.middleware<1.1.0,>=1.0.0 # Apache-2.0
--oslo.rootwrap<1.7.0,>=1.6.0 # Apache-2.0
- oslo.serialization<1.5.0,>=1.4.0 # Apache-2.0
- oslo.utils!=1.4.1,<1.5.0,>=1.4.0 # Apache-2.0
- python-novaclient<2.24.0,>=2.22.0
---- neutron-2015.1.2/requirements.txt.~1~	2015-10-13 10:35:16.000000000 -0700
-+++ neutron-2015.1.2/requirements.txt	2016-01-28 22:52:54.449783471 -0800
-@@ -3,14 +3,12 @@
+--- neutron-8.1.2/requirements.txt.~1~	2016-06-09 18:45:36.000000000 -0700
++++ neutron-8.1.2/requirements.txt	2016-07-04 17:40:16.125858350 -0700
+@@ -3,7 +3,6 @@
  # process, which may cause wedges in the gate later.
- pbr!=0.7,<1.0,>=0.6
+ pbr>=1.6 # Apache-2.0
  
--Paste
- PasteDeploy>=1.5.0
- Routes!=2.0,>=1.12.3
- eventlet!=0.17.0,>=0.16.1
- greenlet>=0.3.2
- httplib2>=0.7.5
- requests!=2.4.0,>=2.2.0
--jsonrpclib
- Jinja2>=2.6 # BSD License (3 clause)
- keystonemiddleware<1.6.0,>=1.5.0
- netaddr>=0.7.12
-@@ -30,7 +28,6 @@ oslo.i18n<1.6.0,>=1.5.0 # Apache-2.0
- oslo.log<1.1.0,>=1.0.0 # Apache-2.0
- oslo.messaging<1.9.0,>=1.8.0 # Apache-2.0
- oslo.middleware<1.1.0,>=1.0.0 # Apache-2.0
--oslo.rootwrap<1.7.0,>=1.6.0 # Apache-2.0
- oslo.serialization<1.5.0,>=1.4.0 # Apache-2.0
- oslo.utils!=1.4.1,<1.5.0,>=1.4.0 # Apache-2.0
- 
+-Paste # MIT
+ PasteDeploy>=1.5.0 # MIT
+ Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
+ Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
+@@ -37,7 +36,6 @@ oslo.messaging>=4.0.0 # Apache-2.0
+ oslo.middleware>=3.0.0 # Apache-2.0
+ oslo.policy>=0.5.0 # Apache-2.0
+ oslo.reports>=0.6.0 # Apache-2.0
+-oslo.rootwrap>=2.0.0 # Apache-2.0
+ oslo.serialization>=1.10.0 # Apache-2.0
+ oslo.service>=1.0.0 # Apache-2.0
+ oslo.utils>=3.5.0 # Apache-2.0
--- a/components/openstack/neutron/patches/05-alembic-migrations.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,266 +0,0 @@
-In-house patch to skip over unnecessary database migrations for Neutron.
-Juno database tables for Neutron do not need these migrations when upgrading
-to Kilo.
-
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/agent_init_ops.py.orig	2016-04-22 23:23:15.523526779 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/agent_init_ops.py	2016-04-22 23:25:39.337181119 -0700
-@@ -23,7 +23,10 @@
- 
- 
- def upgrade():
--    op.create_table(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    if 'agents' not in insp.get_table_names():
-+        op.create_table(
-         'agents',
-         sa.Column('id', sa.String(length=36), nullable=False),
-         sa.Column('agent_type', sa.String(length=255), nullable=False),
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/core_init_ops.py.orig	2016-04-22 23:25:50.350653015 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/core_init_ops.py	2016-04-22 23:30:15.715403373 -0700
-@@ -19,7 +19,11 @@
- 
- 
- def upgrade():
--    op.create_table(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    table_names = insp.get_table_names()
-+    if 'networks' not in table_names:
-+        op.create_table(
-         'networks',
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
-         sa.Column('id', sa.String(length=36), nullable=False),
-@@ -29,7 +33,8 @@
-         sa.Column('shared', sa.Boolean(), nullable=True),
-         sa.PrimaryKeyConstraint('id'))
- 
--    op.create_table(
-+    if 'ports' not in table_names:
-+        op.create_table(
-         'ports',
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
-         sa.Column('id', sa.String(length=36), nullable=False),
-@@ -43,7 +48,8 @@
-         sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
-         sa.PrimaryKeyConstraint('id'))
- 
--    op.create_table(
-+    if 'subnets' not in table_names:
-+        op.create_table(
-         'subnets',
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
-         sa.Column('id', sa.String(length=36), nullable=False),
-@@ -57,7 +63,8 @@
-         sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
-         sa.PrimaryKeyConstraint('id'))
- 
--    op.create_table(
-+    if 'dnsnameservers' not in table_names:
-+        op.create_table(
-         'dnsnameservers',
-         sa.Column('address', sa.String(length=128), nullable=False),
-         sa.Column('subnet_id', sa.String(length=36), nullable=False),
-@@ -65,7 +72,8 @@
-                                 ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('address', 'subnet_id'))
- 
--    op.create_table(
-+    if 'ipallocationpools' not in table_names:
-+        op.create_table(
-         'ipallocationpools',
-         sa.Column('id', sa.String(length=36), nullable=False),
-         sa.Column('subnet_id', sa.String(length=36), nullable=True),
-@@ -75,7 +83,8 @@
-                                 ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('id'))
- 
--    op.create_table(
-+    if 'subnetroutes' not in table_names:
-+        op.create_table(
-         'subnetroutes',
-         sa.Column('destination', sa.String(length=64), nullable=False),
-         sa.Column('nexthop', sa.String(length=64), nullable=False),
-@@ -84,7 +93,8 @@
-                                 ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('destination', 'nexthop', 'subnet_id'))
- 
--    op.create_table(
-+    if 'ipallocations' not in table_names:
-+        op.create_table(
-         'ipallocations',
-         sa.Column('port_id', sa.String(length=36), nullable=True),
-         sa.Column('ip_address', sa.String(length=64), nullable=False),
-@@ -97,7 +107,8 @@
-                                 ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('ip_address', 'subnet_id', 'network_id'))
- 
--    op.create_table(
-+    if 'ipavailabilityranges' not in table_names:
-+        op.create_table(
-         'ipavailabilityranges',
-         sa.Column('allocation_pool_id', sa.String(length=36), nullable=False),
-         sa.Column('first_ip', sa.String(length=64), nullable=False),
-@@ -106,7 +117,8 @@
-                                 ['ipallocationpools.id'], ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'))
- 
--    op.create_table(
-+    if 'networkdhcpagentbindings' not in table_names:
-+        op.create_table(
-         'networkdhcpagentbindings',
-         sa.Column('network_id', sa.String(length=36), nullable=False),
-         sa.Column('dhcp_agent_id', sa.String(length=36), nullable=False),
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/l3_init_ops.py.orig	2016-04-22 23:35:15.205163303 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/l3_init_ops.py	2016-04-22 23:33:36.741262443 -0700
-@@ -31,14 +31,20 @@
- 
- 
- def upgrade():
--    op.create_table(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    table_names = insp.get_table_names()
-+
-+    if 'externalnetworks' not in table_names:
-+        op.create_table(
-         'externalnetworks',
-         sa.Column('network_id', sa.String(length=36), nullable=False),
-         sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
-                                 ondelete='CASCADE'),
-         sa.PrimaryKeyConstraint('network_id'))
- 
--    op.create_table(
-+    if 'routers' not in table_names:
-+        op.create_table(
-         'routers',
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
-         sa.Column('id', sa.String(length=36), nullable=False),
-@@ -51,7 +57,8 @@
-         sa.ForeignKeyConstraint(['gw_port_id'], ['ports.id'], ),
-         sa.PrimaryKeyConstraint('id'))
- 
--    op.create_table(
-+    if 'floatingips' not in table_names:
-+        op.create_table(
-         'floatingips',
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
-         sa.Column('id', sa.String(length=36), nullable=False),
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py.orig	2016-04-22 23:35:35.724592904 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/other_extensions_init_ops.py	2016-04-22 23:37:11.709485583 -0700
-@@ -34,7 +34,10 @@
-         sa.PrimaryKeyConstraint('provider_name', 'resource_id'),
-         sa.UniqueConstraint('resource_id'))
- 
--    op.create_table(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    if 'quotas' not in insp.get_table_names():
-+        op.create_table(
-         'quotas',
-         sa.Column('id', sa.String(length=36), nullable=False),
-         sa.Column('tenant_id', sa.String(length=255), nullable=True),
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py.orig 2016-04-23 01:59:50.554268246 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py      2016-04-23 02:01:10.767910540 -0700
-@@ -26,6 +26,7 @@
- down_revision = 'e197124d4b9'
-
- from alembic import op
-+import sqlalchemy as sa
-
- from neutron.db import migration
-
-@@ -41,7 +42,14 @@
-         # configured plugin did not create the agents table.
-         return
-
--    op.create_unique_constraint(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    u_cons_list = insp.get_unique_constraints(TABLE_NAME)
-+    u_cons = []
-+    for c in u_cons_list:
-+        u_cons.append(c['name'])
-+    if UC_NAME not in u_cons:
-+        op.create_unique_constraint(
-         name=UC_NAME,
-         source=TABLE_NAME,
-         local_cols=['agent_type', 'host']
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py.orig	2016-04-22 23:41:17.060181778 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2eeaf963a447_floatingip_status.py	2016-04-22 23:57:26.134124564 -0700
-@@ -38,11 +38,20 @@
-         # did not create the floatingips table.
-         return
- 
--    op.add_column('floatingips',
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    col_names_list = insp.get_columns('floatingips')
-+    col_names = []
-+    for c in col_names_list:
-+        col_names.append(c['name'])
-+
-+    if 'last_known_router_id' not in col_names:
-+        op.add_column('floatingips',
-                   sa.Column('last_known_router_id',
-                             sa.String(length=36),
-                             nullable=True))
--    op.add_column('floatingips',
-+    if 'status' not in col_names:
-+        op.add_column('floatingips',
-                   sa.Column('status',
-                             sa.String(length=16),
-                             nullable=True))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py.orig	2016-04-22 23:42:16.956052992 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2447ad0e9585_add_ipv6_mode_props.py	2016-04-22 23:54:33.628120696 -0700
-@@ -37,7 +37,15 @@
-                    % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless'))
-         op.execute("CREATE TYPE ipv6_address_modes AS ENUM ('%s', '%s', '%s')"
-                    % ('slaac', 'dhcpv6-stateful', 'dhcpv6-stateless'))
--    op.add_column('subnets',
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    col_names_list = insp.get_columns('subnets')
-+    col_names = []
-+    for c in col_names_list:
-+        col_names.append(c['name'])
-+
-+    if 'ipv6_ra_mode' not in col_names:
-+        op.add_column('subnets',
-                   sa.Column('ipv6_ra_mode',
-                             sa.Enum('slaac',
-                                     'dhcpv6-stateful',
-@@ -45,7 +53,8 @@
-                                     name='ipv6_ra_modes'),
-                             nullable=True)
-                   )
--    op.add_column('subnets',
-+    if 'ipv6_address_mode' not in col_names:
-+        op.add_column('subnets',
-                   sa.Column('ipv6_address_mode',
-                             sa.Enum('slaac',
-                                     'dhcpv6-stateful',
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/544673ac99ab_add_router_port_table.py.orig	2016-04-22 23:43:11.770727387 -0700
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/544673ac99ab_add_router_port_table.py	2016-04-22 23:45:53.016161819 -0700
-@@ -40,7 +40,10 @@
- 
- 
- def upgrade():
--    op.create_table(
-+    bind = op.get_bind()
-+    insp = sa.engine.reflection.Inspector.from_engine(bind)
-+    if 'routerports' not in insp.get_table_names():
-+        op.create_table(
-         'routerports',
-         sa.Column('router_id', sa.String(length=36), nullable=False),
-         sa.Column('port_id', sa.String(length=36), nullable=False),
-@@ -59,6 +59,6 @@
-             ['ports.id'],
-             ondelete='CASCADE'
-         ),
--    )
-+        )
-
--    op.execute(SQL_STATEMENT)
-+        op.execute(SQL_STATEMENT)
-
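The deleted 05-alembic-migrations.patch above guarded the Kilo-era create_table and add_column operations with a SQLAlchemy Inspector lookup, so rerunning those migrations against tables already delivered by Juno was a no-op; the guards are dropped here along with the patch. The pattern it used reduces to a sketch like the following, trimmed to the 'agents' table and its 'id' column only:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        bind = op.get_bind()
        insp = sa.engine.reflection.Inspector.from_engine(bind)
        # Skip creation when an earlier release already delivered the table.
        if 'agents' not in insp.get_table_names():
            op.create_table(
                'agents',
                sa.Column('id', sa.String(length=36), nullable=False),
                sa.PrimaryKeyConstraint('id'))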
--- a/components/openstack/neutron/patches/06-ml2-ovs-support.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,590 +0,0 @@
-Changes to Neutron Open vSwitch agent to port it to Solaris. These changes
-will eventually be proposed upstream.
-  
-*** neutron-2015.1.2/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py	2015-10-13 10:35:16.000000000 -0700
---- new/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py	2016-05-14 07:44:21.485893158 -0700
-***************
-*** 15,20 ****
---- 15,21 ----
-  #    under the License.
-  
-  import hashlib
-+ import platform
-  import signal
-  import sys
-  import time
-***************
-*** 33,42 ****
---- 34,45 ----
-  from neutron.agent.linux import ip_lib
-  from neutron.agent import rpc as agent_rpc
-  from neutron.agent import securitygroups_rpc as sg_rpc
-+ from neutron.agent.solaris import net_lib
-  from neutron.api.rpc.handlers import dvr_rpc
-  from neutron.common import config as common_config
-  from neutron.common import constants as q_const
-  from neutron.common import exceptions
-+ from neutron.common import log
-  from neutron.common import topics
-  from neutron.common import utils as q_utils
-  from neutron import context
-***************
-*** 53,58 ****
---- 56,70 ----
-  # A placeholder for dead vlans.
-  DEAD_VLAN_TAG = q_const.MAX_VLAN_TAG + 1
-  
-+ # Solaris specific additional OpenFlow tables to steer packets to/from VNICs
-+ # on top of VXLAN datalink.
-+ LEARN_FROM_PORTS = 2
-+ # Broadcast/Unknown Unicast/Multicast (BUM) tables
-+ OUTBOUND_UCAST_BUM_TABLE = 3
-+ INBOUND_UCAST_BUM_TABLE = 11
-+ INBOUND_UCAST_TABLE = 12
-+ INBOUND_BUM_TABLE = 13
-+ 
-  
-  class DeviceListRetrievalError(exceptions.NeutronException):
-      message = _("Unable to retrieve port details for devices: %(devices)s "
-***************
-*** 1685,1690 ****
---- 1697,2190 ----
-                                 "Agent and Server side."))
-  
-  
-+ class SolarisOVSNeutronAgent(OVSNeutronAgent):
-+     """Solaris implementation of OVS L2 Agent"""
-+ 
-+     def __init__(self, integ_br, tun_br, local_ip,
-+                  bridge_mappings, polling_interval, tunnel_types=None,
-+                  veth_mtu=None, l2_population=False,
-+                  enable_distributed_routing=False,
-+                  minimize_polling=False,
-+                  ovsdb_monitor_respawn_interval=(
-+                      constants.DEFAULT_OVSDBMON_RESPAWN),
-+                  arp_responder=False,
-+                  prevent_arp_spoofing=True,
-+                  use_veth_interconnection=False,
-+                  quitting_rpc_timeout=None):
-+         '''Please see the Base Class' constructor for parameters info
-+         '''
-+         self.tun_ofport = None
-+         # mapping of VNIC's OpenFlow Port Number (ofport) to
-+         # VXLAN segmentation id.
-+         self.br_port_segid = {}
-+         # mapping of VXLAN sgementation id to set of ports on that segment.
-+         # The port is a ovs_lib.VifPort object.
-+         self.br_segid_ports = {}
-+         # mapping of Neutron port UUID to ovs_lib.VifPort object.
-+         self.vif_ports = {}
-+         super(SolarisOVSNeutronAgent, self).\
-+             __init__(integ_br, tun_br,
-+                      local_ip, bridge_mappings, polling_interval,
-+                      tunnel_types, veth_mtu, l2_population,
-+                      enable_distributed_routing, minimize_polling,
-+                      ovsdb_monitor_respawn_interval, arp_responder,
-+                      prevent_arp_spoofing, use_veth_interconnection,
-+                      quitting_rpc_timeout)
-+ 
-+     def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
-+         LOG.debug(_("Setting up tunnel(%s) for remote_ip: %s") %
-+                   (tunnel_type, remote_ip))
-+         if tunnel_type != p_const.TYPE_VXLAN:
-+             return
-+         self.tun_br_ofports[tunnel_type][remote_ip] = remote_ip
-+         remote_ips = self.tun_br_ofports[tunnel_type].values()
-+         LOG.debug(_("current list of remote_ips: %s"), remote_ips)
-+         for ofport, segmentation_id in self.br_port_segid.iteritems():
-+             flood_local_ofports = self.br_segid_ports[segmentation_id]
-+             self._mod_flood_to_tun_flows(ofport, remote_ips, segmentation_id,
-+                                          flood_local_ofports - set([ofport]))
-+ 
-+     def cleanup_tunnel_port(self, br, remote_ip, tunnel_type):
-+         LOG.debug(_("Cleaning up tunnel(%s) for remote_ip: %s") %
-+                   (tunnel_type, remote_ip))
-+         if tunnel_type != p_const.TYPE_VXLAN:
-+             return
-+         self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
-+         remote_ips = self.tun_br_ofports[tunnel_type].values()
-+         for ofport, segmentation_id in self.br_port_segid.iteritems():
-+             flood_local_ofports = self.br_segid_ports[segmentation_id]
-+             self._mod_flood_to_tun_flows(ofport, remote_ips, segmentation_id,
-+                                          flood_local_ofports - set([ofport]))
-+ 
-+     # The following methods are called through RPC.
-+     #     add_fdb_entries(), remove_fdb_entries(), update_fdb_entries()
-+     # These methods are overridden from L2populationRpcCallBackMixin class.
-+     @log.log
-+     def add_fdb_entries(self, context, fdb_entries, host=None):
-+         # Needed for L2 Population support. Will be added later
-+         pass
-+ 
-+     @log.log
-+     def remove_fdb_entries(self, context, fdb_entries, host=None):
-+         # Needed for L2 Population support. Will be added later
-+         pass
-+ 
-+     @log.log
-+     def update_fdb_entries(self, context, fdb_entries, host=None):
-+         # Needed for L2 Population support. Will be added later
-+         pass
-+ 
-+     def setup_integration_br(self):
-+         '''Setup the integration bridge and remove all existing flows.'''
-+ 
-+         # Ensure the integration bridge is created.
-+         # ovs_lib.OVSBridge.create() will run
-+         #   ovs-vsctl -- --may-exist add-br BRIDGE_NAME
-+         # which does nothing if bridge already exists.
-+         self.int_br.create()
-+         self.int_br.set_secure_mode()
-+ 
-+         self.int_br.remove_all_flows()
-+         # Switch all traffic using normal-mode OVS only if tunneling
-+         # is disabled. Otherwise, we will need to add various OpenFlow tables
-+         # and flows to switch traffic.
-+         if not self.enable_tunneling:
-+             self.int_br.add_flow(priority=1, actions="normal")
-+         # Add a canary flow to int_br to track OVS restarts
-+         self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0,
-+                              actions="drop")
-+ 
-+     def setup_physical_bridges(self, bridge_mappings):
-+         '''Makes sure that the uplink port for a given physical network
-+         exists in the integration bridge.
-+         '''
-+         self.phys_brs = {}
-+         # We do not use either int_ofports or phys_ofports below, however
-+         # we need to initialize them to empty values since it is used in
-+         # the common code which is mostly no-op for us.
-+         self.int_ofports = {}
-+         self.phys_ofports = {}
-+         ovs = ovs_lib.BaseOVS()
-+         for physical_network, uplink_port in bridge_mappings.iteritems():
-+             LOG.info(_LI("Mapping physical network %(physical_network)s to "
-+                          "uplink port %(uplink_port)s"),
-+                      {'physical_network': physical_network,
-+                       'uplink_port': uplink_port})
-+             if not ovs.port_exists(uplink_port):
-+                 LOG.error(_LE("Uplink port %(uplink_port)s for physical "
-+                               "network %(physical_network)s does not exist. "
-+                               "Agent terminated!"),
-+                           {'physical_network': physical_network,
-+                            'uplink_port': uplink_port})
-+                 sys.exit(1)
-+             self.phys_brs[physical_network] = uplink_port
-+ 
-+     def setup_ancillary_bridges(self, integ_br, tun_br):
-+         '''Setup ancillary bridges - for example br-ex.'''
-+         ovs = ovs_lib.BaseOVS()
-+         ovs_bridges = set(ovs.get_bridges())
-+         # Remove all known bridges
-+         ovs_bridges.remove(integ_br)
-+ 
-+         # Filter list of bridges to those that have external
-+         # bridge-id's configured
-+         br_names = []
-+         for bridge in ovs_bridges:
-+             bridge_id = ovs.get_bridge_external_bridge_id(bridge)
-+             if bridge_id != bridge:
-+                 br_names.append(bridge)
-+         ovs_bridges.difference_update(br_names)
-+         ancillary_bridges = []
-+         for bridge in ovs_bridges:
-+             br = ovs_lib.OVSBridge(bridge)
-+             LOG.info(_LI('Adding %s to list of bridges.'), bridge)
-+             ancillary_bridges.append(br)
-+         return ancillary_bridges
-+ 
-+     def reset_tunnel_br(self, tun_br_name=None):
-+         '''(re)initialize the tunnel bridge.
-+ 
-+         :param tun_br_name: the name of the tunnel bridge.
-+         '''
-+         # Solaris doesn't have a separate tunnel bridge, instead we
-+         # re-use the integration bridge itself.
-+         if self.tun_br is None:
-+             self.tun_br = self.int_br
-+ 
-+         # create ovs.vxlan1 datalink and add it to integration bridge
-+         if not self.local_ip:
-+             LOG.error(_LE("local_ip parameter is not set. Cannot have "
-+                           "tunneling enabled without it. Agent terminated!"))
-+             exit(1)
-+         if not net_lib.Datalink.datalink_exists("ovs.vxlan1"):
-+             # create the required vxlan
-+             cmd = ['/usr/sbin/dladm', 'create-vxlan', '-t', '-p',
-+                    'addr=%s,vni=flow' % (self.local_ip), 'ovs.vxlan1']
-+             try:
-+                 utils.execute(cmd)
-+             except Exception as e:
-+                 LOG.error(_LE("failed to create VXLAN tunnel end point "
-+                               "ovs.vxlan1: %s. Agent terminated!") % (e))
-+                 exit(1)
-+         # set openvswitch property to on
-+         try:
-+             cmd = ['/usr/sbin/dladm', 'show-linkprop', '-p',
-+                    'openvswitch', '-co', 'value', 'ovs.vxlan1']
-+             stdout = utils.execute(cmd)
-+             if stdout.strip() == 'off':
-+                 cmd = ['/usr/sbin/dladm', 'set-linkprop', '-t', '-p',
-+                        'openvswitch=on', 'ovs.vxlan1']
-+                 utils.execute(cmd)
-+         except Exception as e:
-+             LOG.error(_LE("failed to set 'openvswitch' property on "
-+                           "ovs.vxlan1: %s. Agent terminated!") % (e))
-+             exit(1)
-+ 
-+         attrs = [('type', 'vxlan'),
-+                  ('options', {'remote_ip': 'flow'}),
-+                  ('options', {'key': 'flow'})]
-+         self.tun_br.replace_port('ovs.vxlan1', *attrs)
-+         self.tun_ofport = self.tun_br.get_port_ofport('ovs.vxlan1')
-+         if self.tun_ofport == constants.OFPORT_INVALID:
-+             LOG.error(_LE("Failed to add ovs.vxlan1 to integration bridge. "
-+                           "Cannot have tunneling enabled on this agent. "
-+                           "Agent terminated!"))
-+             exit(1)
-+ 
-+     def setup_tunnel_br(self):
-+         '''Setup the tunnel bridge
-+ 
-+         Add all flows to the tunnel bridge.
-+         '''
-+ 
-+         #
-+         # Add flows for inbound packets
-+         #
-+ 
-+         # Table 0 (default) will sort incoming traffic depending on in_port.
-+         # Forward all the packets coming in from all the ports of the bridge
-+         # to respective learning tables (LEARN_FROM_TUN or LEARN_FROM_PORTS).
-+         self.tun_br.add_flow(priority=1,
-+                              in_port=self.tun_ofport,
-+                              actions="resubmit(,%s)" %
-+                              constants.LEARN_FROM_TUN)
-+         self.tun_br.add_flow(priority=0, actions="drop")
-+ 
-+         # LEARN_FROM_TUN table will have a single flow using a learn action to
-+         # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac
-+         # addresses
-+         learned_flow = ("table=%s,"
-+                         "priority=1,"
-+                         "hard_timeout=300,"
-+                         "NXM_NX_TUN_ID[],"
-+                         "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
-+                         "load:NXM_NX_TUN_IPV4_SRC[]->NXM_NX_TUN_IPV4_DST[],"
-+                         "output:NXM_OF_IN_PORT[]" %
-+                         constants.UCAST_TO_TUN)
-+ 
-+         # Once remote mac addresses are learned, packets are sent to
-+         # INBOUND_UCAST_BUM_TABLE where the packets are triaged based on
-+         # whether they are unicast or broadcast/multicast and sent to
-+         # respective tables either for forwarding or flooding
-+         self.tun_br.add_flow(table=constants.LEARN_FROM_TUN,
-+                              priority=1,
-+                              actions="learn(%s),resubmit(,%s)" %
-+                              (learned_flow, INBOUND_UCAST_BUM_TABLE))
-+ 
-+         # INBOUND_UCAST_TABLE handles forwarding the packet to the right port
-+         self.tun_br.add_flow(table=INBOUND_UCAST_BUM_TABLE,
-+                              priority=0,
-+                              dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
-+                              actions="resubmit(,%s)" % INBOUND_UCAST_TABLE)
-+ 
-+         # INBOUND_BUM_TABLE handles flooding for broadcast/unknown-unicast/
-+         # multicast packets
-+         self.tun_br.add_flow(table=INBOUND_UCAST_BUM_TABLE,
-+                              priority=0,
-+                              dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
-+                              actions="resubmit(,%s)" % INBOUND_BUM_TABLE)
-+ 
-+         # INBOUND_UCAST_TABLE has flows dynamically added by learn action of
-+         # a flow in LEARN_FROM_PORTS table. These flows forward a packet to a
-+         # port that matches the destination MAC address. If no flow matches,
-+         # then the packet will be resubmitted to INBOUND_BUM_TABLE for
-+         # flooding.
-+         self.tun_br.add_flow(table=INBOUND_UCAST_TABLE,
-+                              priority=0,
-+                              actions="resubmit(,%s)" %
-+                              INBOUND_BUM_TABLE)
-+         self.tun_br.add_flow(table=INBOUND_BUM_TABLE,
-+                              priority=0,
-+                              actions="drop")
-+ 
-+         # Egress unicast will be handled in table UCAST_TO_TUN, where remote
-+         # mac addresses will be learned. For now, just add a default flow that
-+         # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
-+         # as broadcasts/multicasts
-+         self.tun_br.add_flow(table=constants.UCAST_TO_TUN,
-+                              priority=0,
-+                              actions="resubmit(,%s)" %
-+                              constants.FLOOD_TO_TUN)
-+ 
-+         # FLOOD_TO_TUN will handle flooding to tunnels based on segmentation
-+         # id. For now, add a default drop action
-+         self.tun_br.add_flow(table=constants.FLOOD_TO_TUN,
-+                              priority=0,
-+                              actions="drop")
-+ 
-+         #
-+         # add flows for outbound packets
-+         #
-+ 
-+         # LEARN_FROM_PORTS table will have a single flow using two learn
-+         # actions to dynamically set-up flows in INBOUND_UCAST_TABLE and
-+         # UCAST_TO_TUN corresponding to local mac addresses
-+         learned_flow = ("table=%s,"
-+                         "priority=1,"
-+                         "hard_timeout=300,"
-+                         "NXM_NX_TUN_ID[],"
-+                         "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
-+                         "output:NXM_OF_IN_PORT[]")
-+         self.tun_br.add_flow(table=LEARN_FROM_PORTS,
-+                              priority=1,
-+                              actions="learn(%s),learn(%s),resubmit(,%s)" %
-+                              (learned_flow % INBOUND_UCAST_TABLE,
-+                               learned_flow % constants.UCAST_TO_TUN,
-+                               OUTBOUND_UCAST_BUM_TABLE))
-+ 
-+         # Once local MAC addresses are learned, packets are sent to
-+         # OUTBOUND_UCAST_BUM_TABLE where the packet is triaged based on whether
-+         # they are unicast or broadcast/multicast and sent to respective tables
-+         # either for forwarding or for flooding
-+         self.tun_br.add_flow(table=OUTBOUND_UCAST_BUM_TABLE,
-+                              priority=0,
-+                              dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
-+                              actions="resubmit(,%s)" % constants.UCAST_TO_TUN)
-+         # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
-+         self.tun_br.add_flow(table=OUTBOUND_UCAST_BUM_TABLE,
-+                              priority=0,
-+                              dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
-+                              actions="resubmit(,%s)" % constants.FLOOD_TO_TUN)
-+ 
-+     def check_changed_vlans(self, registered_ports):
-+         # Not applicable to Solaris
-+         return []
-+ 
-+     def _mod_flood_to_tun_flows(self, ofport, remote_ips, segmentation_id,
-+                                 local_ofports):
-+         LOG.debug(_("Modifying flooding for %s to all %s for VNI %s on %s") %
-+                   (ofport, remote_ips, segmentation_id, local_ofports))
-+         if not local_ofports and not remote_ips:
-+             return
-+         action_prefix = ""
-+         if local_ofports:
-+             action_prefix = "output:%s" % _ofport_set_to_str(local_ofports)
-+         if not remote_ips:
-+             assert local_ofports
-+             self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
-+                                  in_port="%s" % ofport,
-+                                  actions="%s" % action_prefix)
-+             return
-+         action_str = ""
-+         if action_prefix:
-+             action_str = "%s," % action_prefix
-+         action_str += "set_tunnel:%s" % segmentation_id
-+         # for each of the remote_ip
-+         for remote_ip in remote_ips:
-+             action_str += ",set_field:%s->tun_dst,output:%s" % \
-+                 (remote_ip, self.tun_ofport)
-+ 
-+         self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
-+                              in_port="%s" % ofport,
-+                              actions="%s" % action_str)
-+ 
-+     def port_bound(self, port, net_uuid,
-+                    network_type, physical_network,
-+                    segmentation_id, fixed_ips, device_owner,
-+                    ovs_restarted):
-+         '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
-+         to vm.
-+ 
-+         :param port: a ovslib.VifPort object.
-+         :param net_uuid: the net_uuid this port is to be associated with.
-+         :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
-+         :param physical_network: the physical network for 'vlan' or 'flat'
-+         :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
-+         :param fixed_ips: the ip addresses assigned to this port
-+         :param device_owner: the string indicative of owner of this port
-+         :param ovs_restarted: indicates if this is called for an OVS restart.
-+         '''
-+ 
-+         LOG.info(_LI("Setting up datapath for port: %s connected to "
-+                      "network: %s of type: %s") % (port.vif_id, net_uuid,
-+                                                    network_type))
-+ 
-+         if network_type in constants.TUNNEL_NETWORK_TYPES:
-+             if self.enable_tunneling:
-+                 remote_ips = self.tun_br_ofports[network_type].values()
-+                 # add a flow to flood Broadcast/Unknown Unicast/Multicast
-+                 # packets from this port to all the remote_ips and local
-+                 # ports on port's segmentation_id.
-+                 self._mod_flood_to_tun_flows(port.ofport, remote_ips,
-+                     segmentation_id, self.br_segid_ports.get(segmentation_id))
-+ 
-+                 # add segmentation id for all the packets from this port and
-+                 # send it to table 2 for learning.
-+                 self.tun_br.add_flow(priority=1,
-+                                      in_port="%s" % (port.ofport),
-+                                      actions="set_tunnel:%s,"
-+                                      "resubmit(,%s)" %
-+                                      (segmentation_id, LEARN_FROM_PORTS))
-+ 
-+                 # update flow that steers inbound broadcast/unknown/multicast
-+                 # packets on this segmentation id to all of the ports
-+                 # (including this port)
-+                 self.br_port_segid[port.ofport] = segmentation_id
-+                 if self.br_segid_ports.get(segmentation_id):
-+                     self.br_segid_ports[segmentation_id].add(port.ofport)
-+                 else:
-+                     self.br_segid_ports[segmentation_id] = set([port.ofport])
-+                 ofports_str = \
-+                     _ofport_set_to_str(self.br_segid_ports[segmentation_id])
-+                 self.tun_br.mod_flow(table=INBOUND_BUM_TABLE,
-+                                      tun_id=segmentation_id,
-+                                      actions="output:%s" % ofports_str)
-+                 # we need to modify flows for other ports that are part of
-+                 # this segmentation ID
-+                 ofports = self.br_segid_ports[segmentation_id]
-+                 for ofport in ofports:
-+                     if ofport == port.ofport:
-+                         continue
-+                     self._mod_flood_to_tun_flows(ofport, remote_ips,
-+                                                  segmentation_id,
-+                                                  ofports - set([ofport]))
-+                 self.vif_ports[port.vif_id] = port
-+             else:
-+                 LOG.error(_LE("Cannot provision %(network_type)s network for "
-+                               "net-id=%(net_uuid)s - tunneling disabled"),
-+                           {'network_type': network_type,
-+                            'net_uuid': net_uuid})
-+         elif network_type == p_const.TYPE_FLAT:
-+             if physical_network not in self.phys_brs:
-+                 LOG.error(_LE("Cannot provision flat network for "
-+                               "net-id=%(net_uuid)s - no uplink port for "
-+                               "physical_network %(physical_network)s"),
-+                           {'net_uuid': net_uuid,
-+                            'physical_network': physical_network})
-+         elif network_type == p_const.TYPE_VLAN:
-+             if physical_network in self.phys_brs:
-+                 # Do not bind a port if it's already bound
-+                 cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag")
-+                 if cur_tag != segmentation_id:
-+                     self.int_br.set_db_attribute("Port", port.port_name, "tag",
-+                                                  segmentation_id)
-+                 if port.ofport != -1:
-+                     self.int_br.delete_flows(in_port=port.ofport)
-+             else:
-+                 LOG.error(_LE("Cannot provision VLAN network for "
-+                               "net-id=%(net_uuid)s - no uplink-port for "
-+                               "physical_network %(physical_network)s"),
-+                           {'net_uuid': net_uuid,
-+                            'physical_network': physical_network})
-+         else:
-+             LOG.error(_LE("Cannot provision unknown network type "
-+                           "%(network_type)s for net-id=%(net_uuid)s"),
-+                       {'network_type': network_type, 'net_uuid': net_uuid})
-+ 
-+     def port_unbound(self, vif_id, net_uuid=None):
-+         '''Unbind port.
-+ 
-+         Removes all the OpenFlow rules associated with the port going away.
-+ 
-+         :param vif_id: the id of the vif
-+         :param net_uuid: the net_uuid this port is associated with.
-+         '''
-+         LOG.info(_LI("Removing flows for port: %s" % (vif_id)))
-+         if self.enable_tunneling:
-+             port = self.vif_ports.pop(vif_id, None)
-+             if port is None:
-+                 return
-+             # remove all the OpenFlows that we have added for this port
-+             # across all the tables.
-+             self.tun_br.delete_flows(in_port=port.ofport)
-+             self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN,
-+                                      in_port=port.ofport)
-+             segid = self.br_port_segid.pop(port.ofport, None)
-+             if segid is None:
-+                 return
-+             self.tun_br.delete_flows(table=INBOUND_UCAST_TABLE,
-+                                      tun_id=segid, dl_dst=port.vif_mac)
-+             self.tun_br.delete_flows(table=constants.UCAST_TO_TUN,
-+                                      tun_id=segid, dl_dst=port.vif_mac)
-+             if self.br_segid_ports.get(segid) is None:
-+                 return
-+             self.br_segid_ports[segid].discard(port.ofport)
-+             ofports = self.br_segid_ports[segid]
-+             if ofports:
-+                 # update brodcast/multicast table to not to include this port
-+                 ofportstr = _ofport_set_to_str(ofports)
-+                 self.tun_br.mod_flow(table=INBOUND_BUM_TABLE, tun_id=segid,
-+                                      actions="output:%s" % ofportstr)
-+                 for ofport in ofports:
-+                     remote_ips = \
-+                         self.tun_br_ofports[p_const.TYPE_VXLAN].values()
-+                     self._mod_flood_to_tun_flows(ofport, remote_ips, segid,
-+                                                  ofports - set([ofport]))
-+             else:
-+                 # if this was the last port for that segmentation ID, then
-+                 # remove all associated flows from broadcast/multicast table
-+                 self.tun_br.delete_flows(table=INBOUND_BUM_TABLE, tun_id=segid)
-+ 
-+     def port_dead(self, port, log_errors=True):
-+         # Not required for Solaris
-+         pass
-+ 
-+     def update_stale_ofport_rules(self):
-+         # Not required for Solaris since we don't support ARP spoofing
-+         # protection yet
-+         pass
-+ 
-+ 
-  def _ofport_set_to_str(ofport_set):
-      return ",".join(map(str, ofport_set))
-  
-***************
-*** 1696,1702 ****
-      :returns: a map of agent configuration parameters
-      """
-      try:
-!         bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings)
-      except ValueError as e:
-          raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
-  
---- 2196,2210 ----
-      :returns: a map of agent configuration parameters
-      """
-      try:
-!         if platform.system() == "SunOS":
-!             # In case of Solaris, we want to allow multiple physical networks
-!             # to share the same uplink-port
-!             bridge_mappings = \
-!                 q_utils.parse_mappings(config.OVS.bridge_mappings,
-!                                        unique_values=False)
-!         else:
-!             bridge_mappings = \
-!                 q_utils.parse_mappings(config.OVS.bridge_mappings)
-      except ValueError as e:
-          raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
-  
-***************
-*** 1749,1755 ****
-          cfg.CONF.set_default('ip_lib_force_root', True)
-  
-      try:
-!         agent = OVSNeutronAgent(**agent_config)
-      except RuntimeError as e:
-          LOG.error(_LE("%s Agent terminated!"), e)
-          sys.exit(1)
---- 2257,2266 ----
-          cfg.CONF.set_default('ip_lib_force_root', True)
-  
-      try:
-!         if platform.system() == "SunOS":
-!             agent = SolarisOVSNeutronAgent(**agent_config)
-!         else:
-!             agent = OVSNeutronAgent(**agent_config)
-      except RuntimeError as e:
-          LOG.error(_LE("%s Agent terminated!"), e)
-          sys.exit(1)
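Both the Kilo-era patch removed above and its Mitaka replacement below hook into the agent's main() the same way: a runtime platform check instantiates SolarisOVSNeutronAgent instead of OVSNeutronAgent on SunOS. A self-contained sketch of that dispatch, with stand-in classes in place of the real agent classes from the patched module:

    # Illustrative only; the real classes live in the patched ovs_neutron_agent.py.
    import platform

    class OVSNeutronAgent(object):
        def __init__(self, bridge_classes, conf):
            self.bridge_classes = bridge_classes
            self.conf = conf

    class SolarisOVSNeutronAgent(OVSNeutronAgent):
        """Stand-in for the Solaris subclass added by the patch."""

    def create_agent(bridge_classes, conf):
        # Pick the Solaris subclass on SunOS, the stock agent elsewhere.
        cls = (SolarisOVSNeutronAgent if platform.system() == "SunOS"
               else OVSNeutronAgent)
        return cls(bridge_classes, conf)

    print(type(create_agent({}, None)).__name__)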
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/06-opts.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,77 @@
+In-house patch to further adjust Neutron's oslo-config-generator files
+to account for which modules are actually imported by each service and
+to include the Solaris-specific Neutron options in the corresponding
+configuration files. This patch could potentially be sent upstream.
+
+--- neutron-8.1.2/etc/oslo-config-generator/dhcp_agent.ini.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/etc/oslo-config-generator/dhcp_agent.ini	2016-07-20 13:31:54.054132810 -0700
+@@ -5,3 +5,6 @@ wrap_width = 79
+ namespace = neutron.base.agent
+ namespace = neutron.dhcp.agent
+ namespace = oslo.log
++namespace = oslo.concurrency
++namespace = oslo.messaging
++namespace = oslo.service.service
+--- neutron-8.1.2/etc/oslo-config-generator/l3_agent.ini.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/etc/oslo-config-generator/l3_agent.ini	2016-07-20 13:32:06.220585515 -0700
+@@ -5,3 +5,7 @@ wrap_width = 79
+ namespace = neutron.base.agent
+ namespace = neutron.l3.agent
+ namespace = oslo.log
++namespace = oslo.concurrency
++namespace = oslo.messaging
++namespace = oslo.service.periodic_task
++namespace = oslo.service.service
+--- neutron-8.1.2/etc/oslo-config-generator/metadata_agent.ini.~1~	2016-06-09 18:45:36.000000000 -0700
++++ neutron-8.1.2/etc/oslo-config-generator/metadata_agent.ini	2016-07-20 17:21:11.247524910 -0700
+@@ -4,3 +4,5 @@ wrap_width = 79
+ 
+ namespace = neutron.metadata.agent
+ namespace = oslo.log
++namespace = oslo.concurrency
++namespace = oslo.messaging
+--- neutron-8.1.2/etc/oslo-config-generator/neutron.conf.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/etc/oslo-config-generator/neutron.conf	2016-07-20 00:31:17.805667675 -0700
+@@ -13,7 +13,11 @@ namespace = oslo.db
+ namespace = oslo.policy
+ namespace = oslo.concurrency
+ namespace = oslo.messaging
+-namespace = oslo.middleware.cors
++namespace = oslo.middleware
+ namespace = oslo.service.sslutils
+ namespace = oslo.service.wsgi
+ namespace = keystonemiddleware.auth_token
++namespace = oslo.reports
++namespace = oslo.service.periodic_task
++namespace = oslo.service.service
++namespace = oslo.versionedobjects
+--- neutron-8.1.2/neutron/opts.py.~1~	2016-06-09 18:45:36.000000000 -0700
++++ neutron-8.1.2/neutron/opts.py	2016-07-19 15:10:18.760175240 -0700
+@@ -23,12 +23,15 @@ import neutron.agent.dhcp.config
+ import neutron.agent.l2.extensions.manager
+ import neutron.agent.l3.config
+ import neutron.agent.l3.ha
++import neutron.agent.l3.solaris_agent
+ import neutron.agent.linux.interface
+ import neutron.agent.linux.pd
+ import neutron.agent.linux.ra
+ import neutron.agent.metadata.config
+ import neutron.agent.ovsdb.api
+ import neutron.agent.securitygroups_rpc
++import neutron.agent.solaris.interface
++import neutron.agent.solaris.ra
+ import neutron.db.agents_db
+ import neutron.db.agentschedulers_db
+ import neutron.db.dvr_mac_db
+@@ -195,7 +198,10 @@ def list_l3_agent_opts():
+              neutron.service.service_opts,
+              neutron.agent.l3.ha.OPTS,
+              neutron.agent.linux.pd.OPTS,
+-             neutron.agent.linux.ra.OPTS)
++             neutron.agent.linux.ra.OPTS,
++             neutron.agent.l3.solaris_agent.L3NATAgent.OPTS,
++             neutron.agent.solaris.interface.OPTS,
++             neutron.agent.solaris.ra.OPTS)
+          )
+     ]
+ 
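Each "namespace =" line added above names an entry point in the oslo.config.opts group; oslo-config-generator calls the entry point's function and renders the (group, options) pairs it returns into the sample configuration file. A minimal sketch of such a function, with an illustrative option rather than the actual Solaris options registered in neutron/opts.py:

    # Hypothetical list_opts module for a made-up namespace.
    import itertools

    from oslo_config import cfg

    _solaris_example_opts = [
        cfg.StrOpt('example_datalink', default='net0',
                   help='Illustrative option; not an actual Neutron option.'),
    ]

    def list_example_opts():
        # Referenced from setup.cfg, e.g.:
        #   [entry_points]
        #   oslo.config.opts =
        #       neutron.example = neutron.opts:list_example_opts
        return [('DEFAULT', itertools.chain(_solaris_example_opts))]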
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/07-ml2-ovs-support.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,552 @@
+Changes to the Neutron Open vSwitch agent to port it to Solaris. These changes
+will eventually be proposed upstream.
+
+--- neutron-8.1.2/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py	2016-06-09 18:45:36.000000000 -0700
++++ new/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py	2016-08-18 13:23:57.331696500 -0700
+@@ -15,12 +15,14 @@
+ 
+ import collections
+ import functools
++import platform
+ import signal
+ import sys
+ import time
+ 
+ import netaddr
+ from oslo_config import cfg
++from oslo_log import helpers as log_helpers
+ from oslo_log import log as logging
+ import oslo_messaging
+ from oslo_service import loopingcall
+@@ -36,6 +38,7 @@
+ from neutron.agent.l2.extensions import manager as ext_manager
+ from neutron.agent import rpc as agent_rpc
+ from neutron.agent import securitygroups_rpc as sg_rpc
++from neutron.agent.solaris import net_lib
+ from neutron.api.rpc.callbacks import resources
+ from neutron.api.rpc.handlers import dvr_rpc
+ from neutron.common import config
+@@ -145,10 +148,16 @@
+ 
+         self.fullsync = False
+         # init bridge classes with configured datapath type.
+-        self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
+-            functools.partial(bridge_classes[b],
+-                              datapath_type=ovs_conf.datapath_type)
+-            for b in ('br_int', 'br_phys', 'br_tun'))
++        if platform.system() == "SunOS":
++            self.br_int_cls = functools.partial(bridge_classes['br_int'],
++                datapath_type=ovs_conf.datapath_type)
++            self.br_phys_cls = None
++            self.br_tun_cls = None
++        else:
++            self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
++                functools.partial(bridge_classes[b],
++                datapath_type=ovs_conf.datapath_type)
++                for b in ('br_int', 'br_phys', 'br_tun'))
+ 
+         self.use_veth_interconnection = ovs_conf.use_veth_interconnection
+         self.veth_mtu = agent_conf.veth_mtu
+@@ -2081,9 +2090,489 @@
+                                "in both the Agent and Server side."))
+ 
+ 
++class SolarisOVSNeutronAgent(OVSNeutronAgent):
++    """Solaris implementation of OVS L2 Agent"""
++
++    def __init__(self, bridge_classes, conf=None):
++        '''Constructor.
++
++        :param bridge_classes: a dict for bridge classes.
++        :param conf: an instance of ConfigOpts
++        '''
++        self.tun_ofport = None
++        # mapping of VNIC's OpenFlow Port Number (ofport) to
++        # VXLAN segmentation id.
++        self.br_port_segid = {}
++        # mapping of VXLAN segmentation id to set of ports on that segment.
++        # The port is an ovs_lib.VifPort object.
++        self.br_segid_ports = {}
++        # mapping of Neutron port UUID to ovs_lib.VifPort object.
++        self.vif_ports = {}
++        super(SolarisOVSNeutronAgent, self).__init__(bridge_classes, conf)
++
++    def _parse_bridge_mappings(self, bridge_mappings):
++        try:
++            return n_utils.parse_mappings(bridge_mappings, unique_values=False)
++        except ValueError as e:
++            raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
++
++    def check_changed_vlans(self):
++        # Not applicable to Solaris
++        return []
++
++    def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
++        LOG.debug(_("Setting up tunnel(%s) for remote_ip: %s") %
++                  (tunnel_type, remote_ip))
++        if tunnel_type != p_const.TYPE_VXLAN:
++            return
++        self.tun_br_ofports[tunnel_type][remote_ip] = remote_ip
++        remote_ips = self.tun_br_ofports[tunnel_type].values()
++        LOG.debug(_("current list of remote_ips: %s"), remote_ips)
++        for ofport, segmentation_id in self.br_port_segid.iteritems():
++            flood_local_ofports = self.br_segid_ports[segmentation_id]
++            self._mod_flood_to_tun_flows(ofport, remote_ips, segmentation_id,
++                                         flood_local_ofports - set([ofport]))
++
++    def cleanup_tunnel_port(self, br, remote_ip, tunnel_type):
++        LOG.debug(_("Cleaning up tunnel(%s) for remote_ip: %s") %
++                  (tunnel_type, remote_ip))
++        if tunnel_type != p_const.TYPE_VXLAN:
++            return
++        self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
++        remote_ips = self.tun_br_ofports[tunnel_type].values()
++        for ofport, segmentation_id in self.br_port_segid.iteritems():
++            flood_local_ofports = self.br_segid_ports[segmentation_id]
++            self._mod_flood_to_tun_flows(ofport, remote_ips, segmentation_id,
++                                         flood_local_ofports - set([ofport]))
++
++    # The following methods are called through RPC.
++    #     add_fdb_entries(), remove_fdb_entries(), update_fdb_entries()
++    # These methods are overridden from L2populationRpcCallBackMixin class.
++    @log_helpers.log_method_call
++    def add_fdb_entries(self, context, fdb_entries, host=None):
++        # Needed for L2 Population support. Will be added later
++        pass
++
++    @log_helpers.log_method_call
++    def remove_fdb_entries(self, context, fdb_entries, host=None):
++        # Needed for L2 Population support. Will be added later
++        pass
++
++    @log_helpers.log_method_call
++    def update_fdb_entries(self, context, fdb_entries, host=None):
++        # Needed for L2 Population support. Will be added later
++        pass
++
++    def port_dead(self, port, log_errors=True):
++        '''Once a port has no binding or it is administratively disabled,
++           add a flow to drop packets coming from that port.
++        '''
++        cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
++                                         log_errors=log_errors)
++        if cur_tag:
++            self.int_br.clear_db_attribute("Port", port.port_name, "tag")
++        if port.ofport != -1:
++            self.int_br.drop_port(in_port=port.ofport)
++
++    def setup_integration_br(self):
++        '''Setup the integration bridge and remove all existing flows.'''
++
++        # Ensure the integration bridge is created.
++        # ovs_lib.OVSBridge.create() will run
++        #   ovs-vsctl -- --may-exist add-br BRIDGE_NAME
++        # which does nothing if bridge already exists.
++        self.int_br.create()
++        self.int_br.set_secure_mode()
++        self.int_br.setup_controllers(self.conf)
++
++        if self.conf.AGENT.drop_flows_on_start:
++            self.int_br.delete_flows()
++        # Switch all traffic using normal-mode OVS only if tunneling
++        # is disabled. Otherwise, we will need to add various OpenFlow tables
++        # and flows to switch traffic.
++        if not self.enable_tunneling:
++            self.int_br.install_normal()
++        # Add a canary flow to int_br to track OVS restarts
++        self.int_br.setup_canary_table()
++
++    def setup_physical_bridges(self, bridge_mappings):
++        '''Makes sure that the uplink port for a given physical network
++        exists in the integration bridge.
++        '''
++        self.phys_brs = {}
++        # We do not use either int_ofports or phys_ofports below; however,
++        # we need to initialize them to empty values since they are used in
++        # the common code, which is mostly a no-op for us.
++        self.int_ofports = {}
++        self.phys_ofports = {}
++        ovs = ovs_lib.BaseOVS()
++        for physical_network, uplink_port in bridge_mappings.iteritems():
++            LOG.info(_LI("Mapping physical network %(physical_network)s to "
++                         "uplink port %(uplink_port)s"),
++                     {'physical_network': physical_network,
++                      'uplink_port': uplink_port})
++            if not ovs.port_exists(uplink_port):
++                LOG.error(_LE("Uplink port %(uplink_port)s for physical "
++                              "network %(physical_network)s does not exist. "
++                              "Agent terminated!"),
++                          {'physical_network': physical_network,
++                           'uplink_port': uplink_port})
++                sys.exit(1)
++            self.phys_brs[physical_network] = uplink_port
++
++    def setup_ancillary_bridges(self, integ_br, tun_br):
++        '''Setup ancillary bridges - for example br-ex.'''
++        ovs = ovs_lib.BaseOVS()
++        ovs_bridges = set(ovs.get_bridges())
++        # Remove all known bridges
++        ovs_bridges.remove(integ_br)
++
++        # Filter list of bridges to those that have external
++        # bridge-id's configured
++        br_names = []
++        for bridge in ovs_bridges:
++            bridge_id = ovs.get_bridge_external_bridge_id(bridge)
++            if bridge_id != bridge:
++                br_names.append(bridge)
++        ovs_bridges.difference_update(br_names)
++        ancillary_bridges = []
++        for bridge in ovs_bridges:
++            br = ovs_lib.OVSBridge(bridge)
++            LOG.info(_LI('Adding %s to list of bridges.'), bridge)
++            ancillary_bridges.append(br)
++        return ancillary_bridges
++
++    def setup_tunnel_br(self, tun_br_name=None):
++        '''(re)initialize the tunnel bridge.
++
++        :param tun_br_name: the name of the tunnel bridge.
++        '''
++        # Solaris doesn't have a separate tunnel bridge; instead we
++        # re-use the integration bridge itself.
++        if self.tun_br is None:
++            self.tun_br = self.int_br
++
++        # create ovs.vxlan1 datalink and add it to integration bridge
++        if not self.local_ip:
++            LOG.error(_LE("local_ip parameter is not set. Cannot have "
++                          "tunneling enabled without it. Agent terminated!"))
++            exit(1)
++        if not net_lib.Datalink.datalink_exists("ovs.vxlan1"):
++            # create the required vxlan
++            cmd = ['/usr/sbin/dladm', 'create-vxlan', '-t', '-p',
++                   'addr=%s,vni=flow' % (self.local_ip), 'ovs.vxlan1']
++            try:
++                utils.execute(cmd)
++            except Exception as e:
++                LOG.error(_LE("failed to create VXLAN tunnel end point "
++                              "ovs.vxlan1: %s. Agent terminated!") % (e))
++                exit(1)
++        # set openvswitch property to on
++        try:
++            cmd = ['/usr/sbin/dladm', 'show-linkprop', '-p',
++                   'openvswitch', '-co', 'value', 'ovs.vxlan1']
++            stdout = utils.execute(cmd)
++            if stdout.strip() == 'off':
++                cmd = ['/usr/sbin/dladm', 'set-linkprop', '-t', '-p',
++                       'openvswitch=on', 'ovs.vxlan1']
++                utils.execute(cmd)
++        except Exception as e:
++            LOG.error(_LE("failed to set 'openvswitch' property on "
++                          "ovs.vxlan1: %s. Agent terminated!") % (e))
++            exit(1)
++
++        attrs = [('type', 'vxlan'),
++                 ('options', {'remote_ip': 'flow'}),
++                 ('options', {'key': 'flow'})]
++        self.tun_br.replace_port('ovs.vxlan1', *attrs)
++        self.tun_ofport = self.tun_br.get_port_ofport('ovs.vxlan1')
++        if self.tun_ofport == constants.OFPORT_INVALID:
++            LOG.error(_LE("Failed to add ovs.vxlan1 to integration bridge. "
++                          "Cannot have tunneling enabled on this agent. "
++                          "Agent terminated!"))
++            exit(1)
++
++    def setup_tunnel_br_flows(self):
++        '''Setup the tunnel bridge
++
++        Add all flows to the tunnel bridge.
++        '''
++        self.tun_br.setup_default_tunnel_table(self.tun_ofport,
++                                               self.arp_responder_enabled)
++
++    def _mod_flood_to_tun_flows(self, ofport, remote_ips, segmentation_id,
++                                local_ofports):
++        LOG.debug(_("Modifying flooding for %s to all %s for VNI %s on %s") %
++                  (ofport, remote_ips, segmentation_id, local_ofports))
++        if not local_ofports and not remote_ips:
++            return
++        action_prefix = ""
++        if local_ofports:
++            action_prefix = ("output:%s" %
++                             self.tun_br._ofport_set_to_str(local_ofports))
++        if not remote_ips:
++            assert local_ofports
++            self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
++                                 in_port="%s" % ofport,
++                                 actions="%s" % action_prefix)
++            return
++        action_str = ""
++        if action_prefix:
++            action_str = "%s," % action_prefix
++        action_str += "set_tunnel:%s" % segmentation_id
++        # for each of the remote_ip
++        for remote_ip in remote_ips:
++            action_str += ",set_field:%s->tun_dst,output:%s" % \
++                (remote_ip, self.tun_ofport)
++
++        self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN,
++                             in_port="%s" % ofport,
++                             actions="%s" % action_str)
++
++    def port_bound(self, port, net_uuid,
++                   network_type, physical_network,
++                   segmentation_id, fixed_ips, device_owner,
++                   ovs_restarted):
++        '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
++        to vm.
++
++        :param port: a ovslib.VifPort object.
++        :param net_uuid: the net_uuid this port is to be associated with.
++        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
++        :param physical_network: the physical network for 'vlan' or 'flat'
++        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
++        :param fixed_ips: the ip addresses assigned to this port
++        :param device_owner: the string indicative of owner of this port
++        :param ovs_restarted: indicates if this is called for an OVS restart.
++        '''
++
++        LOG.info(_LI("Setting up datapath for port: %s connected to "
++                     "network: %s of type: %s") % (port.vif_id, net_uuid,
++                                                   network_type))
++
++        if network_type in constants.TUNNEL_NETWORK_TYPES:
++            if self.enable_tunneling:
++                # delete any drop flows
++                self.int_br.delete_flows(in_port=port.ofport)
++                remote_ips = self.tun_br_ofports[network_type].values()
++                # add a flow to flood Broadcast/Unknown Unicast/Multicast
++                # packets from this port to all the remote_ips and local
++                # ports on port's segmentation_id.
++                self._mod_flood_to_tun_flows(port.ofport, remote_ips,
++                    segmentation_id, self.br_segid_ports.get(segmentation_id))
++
++                # add segmentation id for all the packets from this port and
++                # send it to table 2 for learning.
++                self.tun_br.add_flow(priority=1,
++                                     in_port="%s" % (port.ofport),
++                                     actions="set_tunnel:%s,"
++                                     "resubmit(,%s)" %
++                                     (segmentation_id,
++                                      constants.LEARN_FROM_PORTS))
++
++                # update flow that steers inbound broadcast/unknown/multicast
++                # packets on this segmentation id to all of the ports
++                # (including this port)
++                self.br_port_segid[port.ofport] = segmentation_id
++                if self.br_segid_ports.get(segmentation_id):
++                    self.br_segid_ports[segmentation_id].add(port.ofport)
++                else:
++                    self.br_segid_ports[segmentation_id] = set([port.ofport])
++                ofports_str = self.tun_br._ofport_set_to_str(
++                    self.br_segid_ports[segmentation_id])
++                self.tun_br.mod_flow(table=constants.INBOUND_BUM_TABLE,
++                                     tun_id=segmentation_id,
++                                     actions="output:%s" % ofports_str)
++                # we need to modify flows for other ports that are part of
++                # this segmentation ID
++                ofports = self.br_segid_ports[segmentation_id]
++                for ofport in ofports:
++                    if ofport == port.ofport:
++                        continue
++                    self._mod_flood_to_tun_flows(ofport, remote_ips,
++                                                 segmentation_id,
++                                                 ofports - set([ofport]))
++                self.vif_ports[port.vif_id] = port
++            else:
++                LOG.error(_LE("Cannot provision %(network_type)s network for "
++                              "net-id=%(net_uuid)s - tunneling disabled"),
++                          {'network_type': network_type,
++                           'net_uuid': net_uuid})
++                return False
++        elif network_type == p_const.TYPE_FLAT:
++            if physical_network not in self.phys_brs:
++                LOG.error(_LE("Cannot provision flat network for "
++                              "net-id=%(net_uuid)s - no uplink port for "
++                              "physical_network %(physical_network)s"),
++                          {'net_uuid': net_uuid,
++                           'physical_network': physical_network})
++                return False
++            self.vif_ports[port.vif_id] = port
++            # delete any drop flows
++            self.int_br.delete_flows(in_port=port.ofport)
++        elif network_type == p_const.TYPE_VLAN:
++            if physical_network in self.phys_brs:
++                # Do not bind a port if it's already bound
++                cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag")
++                if cur_tag != segmentation_id:
++                    self.int_br.set_db_attribute("Port", port.port_name, "tag",
++                                                 segmentation_id)
++                if port.ofport != -1:
++                    self.int_br.delete_flows(in_port=port.ofport)
++                self.vif_ports[port.vif_id] = port
++            else:
++                LOG.error(_LE("Cannot provision VLAN network for "
++                              "net-id=%(net_uuid)s - no uplink-port for "
++                              "physical_network %(physical_network)s"),
++                          {'net_uuid': net_uuid,
++                           'physical_network': physical_network})
++                return False
++        else:
++            LOG.error(_LE("Cannot provision unknown network type "
++                          "%(network_type)s for net-id=%(net_uuid)s"),
++                      {'network_type': network_type, 'net_uuid': net_uuid})
++            return False
++        return True
++
++    def _add_port_tag_info(self, need_binding_ports):
++        pass
++
++    def _bind_devices(self, need_binding_ports):
++        devices_up = []
++        devices_down = []
++        failed_devices = []
++        for port_detail in need_binding_ports:
++            device = port_detail['device']
++            # update plugin about port status
++            # FIXME(salv-orlando): Failures while updating device status
++            # must be handled appropriately. Otherwise this might prevent
++            # neutron server from sending network-vif-* events to the nova
++            # API server, thus possibly preventing instance spawn.
++            if port_detail.get('admin_state_up'):
++                LOG.debug("Setting status for %s to UP", device)
++                devices_up.append(device)
++            else:
++                LOG.debug("Setting status for %s to DOWN", device)
++                devices_down.append(device)
++        if devices_up or devices_down:
++            devices_set = self.plugin_rpc.update_device_list(
++                self.context, devices_up, devices_down, self.agent_id,
++                self.conf.host)
++            failed_devices = (devices_set.get('failed_devices_up') +
++                devices_set.get('failed_devices_down'))
++            if failed_devices:
++                LOG.error(_LE("Configuration for devices %s failed!"),
++                          failed_devices)
++        LOG.info(_LI("Configuration for devices up %(up)s and devices "
++                     "down %(down)s completed."),
++                 {'up': devices_up, 'down': devices_down})
++        return set(failed_devices)
++
++    def port_unbound(self, vif_id, net_uuid=None):
++        '''Unbind port.
++
++        Removes all the OpenFlow rules associated with the port going away.
++
++        :param vif_id: the id of the vif
++        :param net_uuid: the net_uuid this port is associated with.
++        '''
++        LOG.info(_LI("Removing flows for port: %s" % (vif_id)))
++        port = self.vif_ports.pop(vif_id, None)
++        if port is None:
++            return
++        if self.enable_tunneling:
++            # remove all the OpenFlows that we have added for this port
++            # across all the tables.
++            self.tun_br.delete_flows(in_port=port.ofport)
++            self.tun_br.delete_flows(table=constants.FLOOD_TO_TUN,
++                                     in_port=port.ofport)
++            segid = self.br_port_segid.pop(port.ofport, None)
++            if segid is None:
++                return
++            self.tun_br.delete_flows(table=constants.INBOUND_UCAST_TABLE,
++                                     tun_id=segid, dl_dst=port.vif_mac)
++            self.tun_br.delete_flows(table=constants.UCAST_TO_TUN,
++                                     tun_id=segid, dl_dst=port.vif_mac)
++            if self.br_segid_ports.get(segid) is None:
++                return
++            self.br_segid_ports[segid].discard(port.ofport)
++            ofports = self.br_segid_ports[segid]
++            if ofports:
++                # update broadcast/multicast table to not include this port
++                ofportstr = self.tun_br._ofport_set_to_str(ofports)
++                self.tun_br.mod_flow(table=constants.INBOUND_BUM_TABLE,
++                                     tun_id=segid,
++                                     actions="output:%s" % ofportstr)
++                for ofport in ofports:
++                    remote_ips = \
++                        self.tun_br_ofports[p_const.TYPE_VXLAN].values()
++                    self._mod_flood_to_tun_flows(ofport, remote_ips, segid,
++                                                 ofports - set([ofport]))
++            else:
++                # if this was the last port for that segmentation ID, then
++                # remove all associated flows from broadcast/multicast table
++                self.tun_br.delete_flows(table=constants.INBOUND_BUM_TABLE,
++                                         tun_id=segid)
++        else:
++            self.int_br.delete_flows(in_port=port.ofport)
++
++    def update_stale_ofport_rules(self):
++        # Not required for Solaris since we don't support ARP spoofing
++        # protection yet
++        pass
++
++    def _rewire_zones_anet(self):
++        port_names = self.int_br.get_port_name_list()
++        for port_name in port_names:
++            if '/' not in port_name:
++                continue
++            cmd = ['/usr/sbin/dladm', 'show-linkprop', '-p', 'ofport',
++                   '-co', 'value', port_name]
++            try:
++                stdout = utils.execute(cmd, log_fail_as_error=False)
++            except:
++                continue
++            if stdout.strip() != '0':
++                continue
++
++            LOG.debug(_LE("Zone's anet '%s' was rebooted from within the zone,"
++                          " so we need to delete and add the corresponding"
++                          " OVS port") % (port_name))
++            # needs re-wiring. So delete and add the port
++            external_ids = self.int_br.db_get_val('Interface', port_name,
++                                                  'external_ids')
++            self.int_br.delete_port(port_name)
++            self.int_br.add_port(port_name, ('external_ids', external_ids))
++
++    def _agent_has_updates(self, polling_manager):
++        # check if any anet ports on the OVS bridge require re-wiring. This
++        # is needed if a user reboots the zone from inside the zone. This
++        # workaround is needed until OVS is integrated with Zones and is only
++        # needed on nova-compute.
++        cmd = ['/usr/bin/pgrep', 'nova-compute']
++        try:
++            stdout = utils.execute(cmd, log_fail_as_error=False)
++        except:
++            stdout = ""
++        if stdout:
++            self._rewire_zones_anet()
++
++        return super(SolarisOVSNeutronAgent, self)._agent_has_updates(
++            polling_manager)
++
++    def cleanup_stale_flows(self):
++        LOG.info(_LI("Cleaning stale %s flows"), self.int_br.br_name)
++        self.int_br.cleanup_flows()
++
++
+ def validate_local_ip(local_ip):
+     """Verify if the ip exists on the agent's host."""
+-    if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
++    if platform.system() == "SunOS":
++        local_ip_valid = net_lib.IPInterface.ipaddr_exists(local_ip)
++    else:
++        local_ip_valid = ip_lib.IPWrapper().get_device_by_ip(local_ip)
++
++    if not local_ip_valid:
+         LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
+                       " IP couldn't be found on this host's interfaces."),
+                   local_ip)
+@@ -2116,7 +2605,10 @@
+     validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip)
+ 
+     try:
+-        agent = OVSNeutronAgent(bridge_classes, cfg.CONF)
++        if platform.system() == "SunOS":
++            agent = SolarisOVSNeutronAgent(bridge_classes, cfg.CONF)
++        else:
++            agent = OVSNeutronAgent(bridge_classes, cfg.CONF)
+     except (RuntimeError, ValueError) as e:
+         LOG.error(_LE("%s Agent terminated!"), e)
+         sys.exit(1)
--- a/components/openstack/neutron/patches/07-ovs-agent-monitor-assertion-fix.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-In-house patch to fix an issue wherein we send two kill events, from different
-contexts, to halt green threads and as a result trip an assert in the Neutron
-OVS agent. It is tracked by
-
-  https://bugs.launchpad.net/neutron/+bug/1350903 (Ovs agent fails to kill ovsdb
-  monitor properly)
-
-and is still open.
-
-
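The guard the removed hunk below applied is easier to read outside the
context-diff markers; restated as a minimal plain-Python sketch, assuming the
AsyncProcess internals the hunk touches (_kill_event, _kill_process, pid):

    def _kill(self, *args, **kwargs):
        # Sketch only, mirroring the removed hunk; not a drop-in replacement.
        self.data_received = False

        # Nothing to do if the monitor was never started or is already
        # fully killed.
        if not self._kill_event:
            return

        # Send the kill event only if it has not been sent already; sending
        # it a second time is what tripped the assert this patch avoided.
        if not self._kill_event.ready():
            self._kill_event.send()

        pid = self.pid
        if pid:
            self._kill_process(pid)

        if not kwargs.get('respawning'):
            # Clear the event so the process can be started again later.
            self._kill_event = None
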
-*** neutron-2015.1.2/neutron/agent/linux/ovsdb_monitor.py	Tue Oct 13 10:35:16 2015
---- neutron-2015.1.2/neutron/agent/linux/ovsdb_monitor.py	Wed Mar 23 12:20:07 2016
-***************
-*** 94,101 ****
-                      eventlet.sleep()
-  
-      def _kill(self, *args, **kwargs):
-          self.data_received = False
-!         super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)
-  
-      def _read_stdout(self):
-          data = super(SimpleInterfaceMonitor, self)._read_stdout()
---- 94,123 ----
-                      eventlet.sleep()
-  
-      def _kill(self, *args, **kwargs):
-+         """Override async_process method.
-+ 
-+         Kill the process and the associated watcher greenthreads.
-+         :param respawning: Optional, whether respawn will be subsequently
-+                attempted.
-+         """
-          self.data_received = False
-! 
-!         if not self._kill_event:
-!             return
-! 
-!         # Halt the greenthreads
-!         if not self._kill_event.ready():
-!             self._kill_event.send()
-! 
-!         pid = self.pid
-!         if pid:
-!             self._kill_process(pid)
-! 
-!         respawning = kwargs.get('respawning')
-!         if not respawning:
-!             # Clear the kill event to ensure the process can be
-!             # explicitly started again.
-!             self._kill_event = None
-  
-      def _read_stdout(self):
-          data = super(SimpleInterfaceMonitor, self)._read_stdout()
--- a/components/openstack/neutron/patches/08-ovs-binding-failed-fix.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,284 +0,0 @@
-Patches to recover from an OVS port binding_failed error by correcting the
-configuration and/or restarting neutron-openvswitch-agent. Without this patch,
-the port would have to be deleted and recreated to recover from the error.
-  https://bugs.launchpad.net/neutron/+bug/1399249
-  https://review.openstack.org/#/c/212854/
-
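The replacement logic in the plugin.py hunk below restructures binding into a
bounded retry loop. A condensed sketch of that control flow, with the helper
bodies elided (method names as in the hunk, everything else illustrative):

    def _bind_port_if_needed(self, context, allow_notify=False,
                             need_notify=False):
        # Condensed sketch of the retry loop; helpers are defined in the hunk.
        for count in range(1, MAX_BIND_TRIES + 1):
            if count > 1:
                greenthread.sleep(0)  # yield so other workers can progress

            bind_context, need_notify, try_again = self._attempt_binding(
                context, need_notify)

            if count == MAX_BIND_TRIES or not try_again:
                if self._should_bind_port(context):
                    # Commit the final state; the commit itself may report
                    # try_again if the port changed concurrently.
                    context, need_notify, try_again = self._commit_port_binding(
                        context, bind_context, need_notify, try_again)
                else:
                    context = bind_context

            if not try_again:
                if allow_notify and need_notify:
                    self._notify_port_updated(context)
                return context

        LOG.error("Failed to commit binding results after %s tries",
                  MAX_BIND_TRIES)
        return context
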
-*** neutron-2015.1.2/neutron/plugins/ml2/plugin.py	Tue Oct 13 10:35:16 2015
---- new/neutron/plugins/ml2/plugin.py	Wed May 25 17:49:35 2016
-***************
-*** 246,313 ****
-  
-      def _bind_port_if_needed(self, context, allow_notify=False,
-                               need_notify=False):
-!         plugin_context = context._plugin_context
-!         port_id = context._port['id']
-  
-!         # Since the mechanism driver bind_port() calls must be made
-!         # outside a DB transaction locking the port state, it is
-!         # possible (but unlikely) that the port's state could change
-!         # concurrently while these calls are being made. If another
-!         # thread or process succeeds in binding the port before this
-!         # thread commits its results, the already committed results are
-!         # used. If attributes such as binding:host_id,
-!         # binding:profile, or binding:vnic_type are updated
-!         # concurrently, this loop retries binding using the new
-!         # values.
-!         count = 0
-!         while True:
-!             # First, determine whether it is necessary and possible to
-!             # bind the port.
-!             binding = context._binding
-!             if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
-!                 or not binding.host):
-!                 # We either don't need to bind the port, or can't, so
-!                 # notify if needed and return.
-                  if allow_notify and need_notify:
-                      self._notify_port_updated(context)
-                  return context
-  
-!             # Limit binding attempts to avoid any possibility of
-!             # infinite looping and to ensure an error is logged
-!             # instead. This does not need to be tunable because no
-!             # more than a couple attempts should ever be required in
-!             # normal operation. Log at info level if not 1st attempt.
-!             count += 1
-!             if count > MAX_BIND_TRIES:
-!                 LOG.error(_LE("Failed to commit binding results for %(port)s "
-!                               "after %(max)s tries"),
-!                           {'port': port_id, 'max': MAX_BIND_TRIES})
-!                 return context
-!             if count > 1:
-!                 greenthread.sleep(0)  # yield
-!                 LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
-!                          {'count': count, 'port': port_id})
-  
-!             # The port isn't already bound and the necessary
-!             # information is available, so attempt to bind the port.
-              bind_context = self._bind_port(context)
-  
-!             # Now try to commit result of attempting to bind the port.
-!             new_context, did_commit = self._commit_port_binding(
-!                 plugin_context, port_id, binding, bind_context)
-!             if not new_context:
-!                 # The port has been deleted concurrently, so just
-!                 # return the unbound result from the initial
-!                 # transaction that completed before the deletion.
-!                 LOG.debug("Port %s has been deleted concurrently",
-!                           port_id)
-!                 return context
-!             # Need to notify if we succeed and our results were
-!             # committed.
-!             if did_commit and (new_context._binding.vif_type !=
-!                                portbindings.VIF_TYPE_BINDING_FAILED):
-                  need_notify = True
-!             context = new_context
-  
-      def _bind_port(self, orig_context):
-          # Construct a new PortContext from the one from the previous
---- 246,308 ----
-  
-      def _bind_port_if_needed(self, context, allow_notify=False,
-                               need_notify=False):
-!         for count in range(1, MAX_BIND_TRIES + 1):
-!             if count > 1:
-!                 # yield for binding retries so that we give other threads a
-!                 # chance to do their work
-!                 greenthread.sleep(0)
-! 
-!                 # multiple attempts shouldn't happen very often so we log each
-!                 # attempt after the 1st.
-!                 LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
-!                          {'count': count, 'port': context._port['id']})
-! 
-!             bind_context, need_notify, try_again = self._attempt_binding(
-!                 context, need_notify)
-! 
-!             if count == MAX_BIND_TRIES or not try_again:
-!                 if self._should_bind_port(context):
-!                     # At this point, we attempted to bind a port and reached
-!                     # its final binding state. Binding either succeeded or
-!                     # exhausted all attempts, thus no need to try again.
-!                     # Now, the port and its binding state should be committed.
-!                     context, need_notify, try_again = (
-!                         self._commit_port_binding(context, bind_context,
-!                                                   need_notify, try_again))
-!                 else:
-!                     context = bind_context
-  
-!             if not try_again:
-                  if allow_notify and need_notify:
-                      self._notify_port_updated(context)
-                  return context
-  
-!         LOG.error(_LE("Failed to commit binding results for %(port)s "
-!                       "after %(max)s tries"),
-!                   {'port': context._port['id'], 'max': MAX_BIND_TRIES})
-!         return context
-! 
-!     def _should_bind_port(self, context):
-!         return (context._binding.host and context._binding.vif_type
-!                 in (portbindings.VIF_TYPE_UNBOUND,
-!                     portbindings.VIF_TYPE_BINDING_FAILED))
-! 
-!     def _attempt_binding(self, context, need_notify):
-!         try_again = False
-  
-!         if self._should_bind_port(context):
-              bind_context = self._bind_port(context)
-  
-!             if bind_context._binding.vif_type != \
-!                     portbindings.VIF_TYPE_BINDING_FAILED:
-!                 # Binding succeeded. Suggest notifying of successful binding.
-                  need_notify = True
-!             else:
-!                 # Current attempt binding failed, try to bind again.
-!                 try_again = True
-!             context = bind_context
-! 
-!         return context, need_notify, try_again
-  
-      def _bind_port(self, orig_context):
-          # Construct a new PortContext from the one from the previous
-***************
-*** 331,362 ****
-          self.mechanism_manager.bind_port(new_context)
-          return new_context
-  
-!     def _commit_port_binding(self, plugin_context, port_id, orig_binding,
-!                              new_context):
-          session = plugin_context.session
-!         new_binding = new_context._binding
-  
-          # After we've attempted to bind the port, we begin a
-          # transaction, get the current port state, and decide whether
-          # to commit the binding results.
-!         #
-!         # REVISIT: Serialize this operation with a semaphore to
-!         # prevent deadlock waiting to acquire a DB lock held by
-!         # another thread in the same process, leading to 'lock wait
-!         # timeout' errors.
-!         with contextlib.nested(lockutils.lock('db-access'),
-!                                session.begin(subtransactions=True)):
-              # Get the current port state and build a new PortContext
-              # reflecting this state as original state for subsequent
-              # mechanism driver update_port_*commit() calls.
-              port_db, cur_binding = db.get_locked_port_and_binding(session,
-                                                                    port_id)
-              if not port_db:
-!                 # The port has been deleted concurrently.
-!                 return (None, None)
-              oport = self._make_port_dict(port_db)
-              port = self._make_port_dict(port_db)
-!             network = new_context.network.current
-              if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                  # REVISIT(rkukura): The PortBinding instance from the
-                  # ml2_port_bindings table, returned as cur_binding
---- 326,366 ----
-          self.mechanism_manager.bind_port(new_context)
-          return new_context
-  
-!     def _commit_port_binding(self, orig_context, bind_context,
-!                              need_notify, try_again):
-!         port_id = orig_context._port['id']
-!         plugin_context = orig_context._plugin_context
-          session = plugin_context.session
-!         orig_binding = orig_context._binding
-!         new_binding = bind_context._binding
-  
-          # After we've attempted to bind the port, we begin a
-          # transaction, get the current port state, and decide whether
-          # to commit the binding results.
-!         with session.begin(subtransactions=True):
-              # Get the current port state and build a new PortContext
-              # reflecting this state as original state for subsequent
-              # mechanism driver update_port_*commit() calls.
-              port_db, cur_binding = db.get_locked_port_and_binding(session,
-                                                                    port_id)
-+             # Since the mechanism driver bind_port() calls must be made
-+             # outside a DB transaction locking the port state, it is
-+             # possible (but unlikely) that the port's state could change
-+             # concurrently while these calls are being made. If another
-+             # thread or process succeeds in binding the port before this
-+             # thread commits its results, the already committed results are
-+             # used. If attributes such as binding:host_id, binding:profile,
-+             # or binding:vnic_type are updated concurrently, the try_again
-+             # flag is returned to indicate that the commit was unsuccessful.
-              if not port_db:
-!                 # The port has been deleted concurrently, so just
-!                 # return the unbound result from the initial
-!                 # transaction that completed before the deletion.
-!                 LOG.debug("Port %s has been deleted concurrently", port_id)
-!                 return orig_context, False, False
-              oport = self._make_port_dict(port_db)
-              port = self._make_port_dict(port_db)
-!             network = bind_context.network.current
-              if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
-                  # REVISIT(rkukura): The PortBinding instance from the
-                  # ml2_port_bindings table, returned as cur_binding
-***************
-*** 400,428 ****
-                  cur_binding.vif_type = new_binding.vif_type
-                  cur_binding.vif_details = new_binding.vif_details
-                  db.clear_binding_levels(session, port_id, cur_binding.host)
-!                 db.set_binding_levels(session, new_context._binding_levels)
-!                 cur_context._binding_levels = new_context._binding_levels
-  
-                  # Update PortContext's port dictionary to reflect the
-                  # updated binding state.
-                  self._update_port_dict_binding(port, cur_binding)
-  
-                  # Update the port status if requested by the bound driver.
-!                 if (new_context._binding_levels and
-!                     new_context._new_port_status):
-!                     port_db.status = new_context._new_port_status
-!                     port['status'] = new_context._new_port_status
-  
-                  # Call the mechanism driver precommit methods, commit
-                  # the results, and call the postcommit methods.
-                  self.mechanism_manager.update_port_precommit(cur_context)
-          if commit:
-              self.mechanism_manager.update_port_postcommit(cur_context)
-  
-!         # Continue, using the port state as of the transaction that
-!         # just finished, whether that transaction committed new
-!         # results or discovered concurrent port state changes.
-!         return (cur_context, commit)
-  
-      def _update_port_dict_binding(self, port, binding):
-          port[portbindings.HOST_ID] = binding.host
---- 404,437 ----
-                  cur_binding.vif_type = new_binding.vif_type
-                  cur_binding.vif_details = new_binding.vif_details
-                  db.clear_binding_levels(session, port_id, cur_binding.host)
-!                 db.set_binding_levels(session, bind_context._binding_levels)
-!                 cur_context._binding_levels = bind_context._binding_levels
-  
-                  # Update PortContext's port dictionary to reflect the
-                  # updated binding state.
-                  self._update_port_dict_binding(port, cur_binding)
-  
-                  # Update the port status if requested by the bound driver.
-!                 if (bind_context._binding_levels and
-!                     bind_context._new_port_status):
-!                     port_db.status = bind_context._new_port_status
-!                     port['status'] = bind_context._new_port_status
-  
-                  # Call the mechanism driver precommit methods, commit
-                  # the results, and call the postcommit methods.
-                  self.mechanism_manager.update_port_precommit(cur_context)
-          if commit:
-+             # Continue, using the port state as of the transaction that
-+             # just finished, whether that transaction committed new
-+             # results or discovered concurrent port state changes.
-+             # Also, Trigger notification for successful binding commit.
-              self.mechanism_manager.update_port_postcommit(cur_context)
-+             need_notify = True
-+             try_again = False
-+         else:
-+             try_again = True
-  
-!         return cur_context, need_notify, try_again
-  
-      def _update_port_dict_binding(self, port, binding):
-          port[portbindings.HOST_ID] = binding.host
--- a/components/openstack/neutron/patches/09-dhcp-agent-warning-fix.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-In-house patch to remove a confusing namespace warning message from the log file.
-
-*** neutron-2015.1.2/neutron/agent/common/config.py	Tue Oct 13 10:35:16 2015
---- new/neutron/agent/common/config.py	Fri Jun  3 13:25:51 2016
-***************
-*** 54,61 ****
-  USE_NAMESPACES_OPTS = [
-      cfg.BoolOpt('use_namespaces', default=True,
-                  help=_("Allow overlapping IP. This option is deprecated and "
-!                        "will be removed in a future release."),
-!                 deprecated_for_removal=True),
-  ]
-  
-  IPTABLES_OPTS = [
---- 54,60 ----
-  USE_NAMESPACES_OPTS = [
-      cfg.BoolOpt('use_namespaces', default=True,
-                  help=_("Allow overlapping IP. This option is deprecated and "
-!                        "will be removed in a future release.")),
-  ]
-  
-  IPTABLES_OPTS = [
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/09-ml2-ovs-agent-misc.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,244 @@
+Changes to Neutron Open vSwitch agent support code to port it to
+Solaris. These changes will eventually be proposed upstream.
+
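The ovsdb_monitor.py and main.py hunks below follow the same pattern: check
platform.system() at run time, select the Solaris-specific behaviour, and fall
back to the existing Linux defaults otherwise. A minimal sketch of that
dispatch (values taken from the hunks, not a complete module):

    import platform

    if platform.system() == "SunOS":
        # Solaris: single integration bridge class; the OVSDB monitor is not
        # run through the root wrapper.
        run_as_root = False
        bridge_classes = {'br_int': br_int.SolarisOVSIntegrationBridge}
    else:
        run_as_root = True
        bridge_classes = {
            'br_int': br_int.OVSIntegrationBridge,
            'br_phys': br_phys.OVSPhysicalBridge,
            'br_tun': br_tun.OVSTunnelBridge,
        }
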
+--- neutron-8.1.2/neutron/agent/linux/ovsdb_monitor.py.~1~	2016-06-09 18:45:29.000000000 -0700
++++ neutron-8.1.2/neutron/agent/linux/ovsdb_monitor.py	2016-07-04 16:19:23.483846060 -0700
+@@ -12,6 +12,8 @@
+ #    License for the specific language governing permissions and limitations
+ #    under the License.
+ 
++import platform
++
+ import eventlet
+ from oslo_log import log as logging
+ from oslo_serialization import jsonutils
+@@ -40,7 +42,8 @@ class OvsdbMonitor(async_process.AsyncPr
+             cmd.append(','.join(columns))
+         if format:
+             cmd.append('--format=%s' % format)
+-        super(OvsdbMonitor, self).__init__(cmd, run_as_root=True,
++        run_as_root = (False if platform.system() == "SunOS" else True)
++        super(OvsdbMonitor, self).__init__(cmd, run_as_root=run_as_root,
+                                            respawn_interval=respawn_interval,
+                                            log_output=True,
+                                            die_on_error=True)
+--- neutron-8.1.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py.~1~	2016-06-09 18:45:30.000000000 -0700
++++ neutron-8.1.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/main.py	2016-07-04 16:19:23.482403960 -0700
+@@ -14,6 +14,8 @@
+ #    License for the specific language governing permissions and limitations
+ #    under the License.
+ 
++import platform
++
+ from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+     import br_int
+ from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
+@@ -28,9 +30,14 @@ def init_config():
+ 
+ 
+ def main():
+-    bridge_classes = {
+-        'br_int': br_int.OVSIntegrationBridge,
+-        'br_phys': br_phys.OVSPhysicalBridge,
+-        'br_tun': br_tun.OVSTunnelBridge,
+-    }
++    if platform.system() == "SunOS":
++        bridge_classes = {
++            'br_int': br_int.SolarisOVSIntegrationBridge
++        }
++    else:
++        bridge_classes = {
++            'br_int': br_int.OVSIntegrationBridge,
++            'br_phys': br_phys.OVSPhysicalBridge,
++            'br_tun': br_tun.OVSTunnelBridge,
++        }
+     ovs_neutron_agent.main(bridge_classes)
+--- neutron-8.1.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py	2016-06-09 18:45:36.000000000 -0700
++++ new/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/ovs_ofctl/br_int.py	2016-07-11 10:56:56.936780790 -0700
+@@ -185,3 +185,168 @@
+     def delete_arp_spoofing_allow_rules(self, port):
+         self.delete_flows(table_id=constants.ARP_SPOOF_TABLE,
+                           in_port=port)
++
++
++class SolarisOVSIntegrationBridge(OVSIntegrationBridge):
++    """Solaris openvswitch agent br_int0 specific logic."""
++
++    def setup_default_table(self):
++        self.install_normal()
++        self.setup_canary_table()
++
++    def setup_default_tunnel_table(self, tun_ofport, arp_responder_enabled):
++        #
++        # Add flows for inbound packets
++        #
++
++        # Table 0 (default) will sort incoming traffic depending on in_port.
++        # Forward all the packets coming in from all the ports of the bridge
++        # to respective learning tables (LEARN_FROM_TUN or LEARN_FROM_PORTS).
++        self.add_flow(priority=1,
++                      in_port=tun_ofport,
++                      actions="resubmit(,%s)" %
++                      constants.LEARN_FROM_TUN)
++        self.add_flow(priority=0, actions="drop")
++
++        # LEARN_FROM_TUN table will have a single flow using a learn action to
++        # dynamically set up flows in UCAST_TO_TUN corresponding to remote mac
++        # addresses
++        learned_flow = ("table=%s,"
++                        "priority=1,"
++                        "hard_timeout=300,"
++                        "NXM_NX_TUN_ID[],"
++                        "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
++                        "load:NXM_NX_TUN_IPV4_SRC[]->NXM_NX_TUN_IPV4_DST[],"
++                        "output:NXM_OF_IN_PORT[]" %
++                        constants.UCAST_TO_TUN)
++
++        # Once remote mac addresses are learned, packets are sent to
++        # INBOUND_UCAST_BUM_TABLE where the packets are triaged based on
++        # whether they are unicast or broadcast/multicast and sent to
++        # respective tables either for forwarding or flooding
++        self.add_flow(table=constants.LEARN_FROM_TUN,
++                      priority=1,
++                      actions="learn(%s),resubmit(,%s)" %
++                      (learned_flow, constants.INBOUND_UCAST_BUM_TABLE))
++
++        # INBOUND_UCAST_TABLE handles forwarding the packet to the right port
++        self.add_flow(table=constants.INBOUND_UCAST_BUM_TABLE,
++                      priority=0,
++                      dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
++                      actions="resubmit(,%s)" % constants.INBOUND_UCAST_TABLE)
++
++        # INBOUND_BUM_TABLE handles flooding for broadcast/unknown-unicast/
++        # multicast packets
++        self.add_flow(table=constants.INBOUND_UCAST_BUM_TABLE,
++                      priority=0,
++                      dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
++                      actions="resubmit(,%s)" % constants.INBOUND_BUM_TABLE)
++
++        # INBOUND_UCAST_TABLE has flows dynamically added by learn action of
++        # a flow in LEARN_FROM_PORTS table. These flows forward a packet to a
++        # port that matches the destination MAC address. If no flow matches,
++        # then the packet will be resubmitted to INBOUND_BUM_TABLE for
++        # flooding.
++        self.add_flow(table=constants.INBOUND_UCAST_TABLE,
++                      priority=0,
++                      actions="resubmit(,%s)" % constants.INBOUND_BUM_TABLE)
++        self.add_flow(table=constants.INBOUND_BUM_TABLE,
++                      priority=0,
++                      actions="drop")
++
++        # Egress unicast will be handled in table UCAST_TO_TUN, where remote
++        # mac addresses will be learned. For now, just add a default flow that
++        # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
++        # as broadcasts/multicasts
++        self.add_flow(table=constants.UCAST_TO_TUN,
++                      priority=0,
++                      actions="resubmit(,%s)" %
++                      constants.FLOOD_TO_TUN)
++
++        # FLOOD_TO_TUN will handle flooding to tunnels based on segmentation
++        # id. For now, add a default drop action
++        self.add_flow(table=constants.FLOOD_TO_TUN,
++                      priority=0, actions="drop")
++
++        #
++        # add flows for outbound packets
++        #
++
++        # LEARN_FROM_PORTS table will have a single flow using two learn
++        # actions to dynamically set up flows in INBOUND_UCAST_TABLE and
++        # UCAST_TO_TUN corresponding to local mac addresses
++        learned_flow = ("table=%s,"
++                        "priority=1,"
++                        "hard_timeout=300,"
++                        "NXM_NX_TUN_ID[],"
++                        "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
++                        "output:NXM_OF_IN_PORT[]")
++        self.add_flow(table=constants.LEARN_FROM_PORTS,
++                      priority=1,
++                      actions="learn(%s),learn(%s),resubmit(,%s)" %
++                      (learned_flow % constants.INBOUND_UCAST_TABLE,
++                       learned_flow % constants.UCAST_TO_TUN,
++                        constants.OUTBOUND_UCAST_BUM_TABLE))
++
++        # Once local MAC addresses are learned, packets are sent to
++        # OUTBOUND_UCAST_BUM_TABLE where the packets are triaged based on whether
++        # they are unicast or broadcast/multicast and sent to respective tables
++        # either for forwarding or for flooding
++        self.add_flow(table=constants.OUTBOUND_UCAST_BUM_TABLE,
++                      priority=0,
++                      dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
++                      actions="resubmit(,%s)" % constants.UCAST_TO_TUN)
++        # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
++        self.add_flow(table=constants.OUTBOUND_UCAST_BUM_TABLE,
++                      priority=0,
++                      dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
++                      actions="resubmit(,%s)" % constants.FLOOD_TO_TUN)
++
++    @staticmethod
++    def _ofport_set_to_str(ports_set):
++        return ",".join(map(str, ports_set))
++
++    def provision_local_vlan(self, port, lvid, segmentation_id):
++        pass
++
++    def reclaim_local_vlan(self, port, segmentation_id):
++        pass
++
++    @staticmethod
++    def _dvr_to_src_mac_table_id(network_type):
++        pass
++
++    def install_dvr_to_src_mac(self, network_type, vlan_tag, gateway_mac,
++                               dst_mac, dst_port):
++        pass
++
++    def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
++        pass
++
++    def add_dvr_mac_vlan(self, mac, port):
++        pass
++
++    def remove_dvr_mac_vlan(self, mac):
++        pass
++
++    def add_dvr_mac_tun(self, mac, port):
++        pass
++
++    def remove_dvr_mac_tun(self, mac, port):
++        pass
++
++    def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
++        pass
++
++    def install_arp_spoofing_protection(self, port, ip_addresses):
++        pass
++
++    def delete_arp_spoofing_protection(self, port):
++        pass
++
++    def delete_arp_spoofing_allow_rules(self, port):
++        pass
++
++    def set_allowed_macs_for_port(self, port, mac_addresses=None,
++                                  allow_all=False):
++        pass
+--- neutron-8.1.2/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py	2016-06-09 18:45:30.000000000 -0700
++++ new/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py	2016-07-11 13:50:42.542106187 -0700
+@@ -77,6 +77,15 @@
+ VXLAN_TUN_TO_LV = 4
+ GENEVE_TUN_TO_LV = 6
+ 
++# Solaris-specific additional OpenFlow tables to steer packets to/from VNICs
++# on top of a VXLAN datalink.
++LEARN_FROM_PORTS = 2
++# Broadcast/Unknown Unicast/Multicast (BUM) tables
++OUTBOUND_UCAST_BUM_TABLE = 3
++INBOUND_UCAST_BUM_TABLE = 11
++INBOUND_UCAST_TABLE = 12
++INBOUND_BUM_TABLE = 13
++
+ DVR_NOT_LEARN = 9
+ LEARN_FROM_TUN = 10
+ UCAST_TO_TUN = 20
--- a/components/openstack/neutron/patches/10-floatingip-remove-port-on-failed-create.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-Patch to call the plugin's delete_port() when floating IP creation fails
-after the plugin has created the port. These changes will eventually be
-proposed upstream.
-
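The fix relies on oslo_utils.excutils.save_and_reraise_exception so the
original error still propagates after the orphaned port is cleaned up. A
stand-alone sketch of that pattern, with the plugin and context objects as
placeholders:

    from oslo_utils import excutils

    def _associate_or_cleanup(plugin, context, fip, floatingip_db, external_port):
        """Sketch: undo the external port creation if association fails."""
        try:
            plugin._update_fip_assoc(context, fip, floatingip_db, external_port)
        except Exception:
            # The context manager re-raises the original exception when the
            # block exits, after the port created for the floating IP has
            # been deleted so it is not left orphaned.
            with excutils.save_and_reraise_exception():
                plugin._core_plugin.delete_port(context.elevated(),
                                                external_port['id'],
                                                l3_port_check=False)
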
-*** neutron-2015.1.2/neutron/db/l3_db.py	Tue Oct 13 10:35:16 2015
---- new/neutron/db/l3_db.py	Wed Jul 13 23:51:38 2016
-***************
-*** 954,963 ****
-                  floating_ip_address=floating_ip_address,
-                  floating_port_id=external_port['id'])
-              fip['tenant_id'] = tenant_id
-!             # Update association with internal port
-!             # and define external IP address
-!             self._update_fip_assoc(context, fip,
-!                                    floatingip_db, external_port)
-              context.session.add(floatingip_db)
-  
-          return self._make_floatingip_dict(floatingip_db)
---- 954,969 ----
-                  floating_ip_address=floating_ip_address,
-                  floating_port_id=external_port['id'])
-              fip['tenant_id'] = tenant_id
-!             try:
-!                 # Update association with internal port
-!                 # and define external IP address
-!                 self._update_fip_assoc(context, fip,
-!                                        floatingip_db, external_port)
-!             except:
-!                 with excutils.save_and_reraise_exception():
-!                     self._core_plugin.delete_port(context.elevated(),
-!                                                   external_port['id'],
-!                                                   l3_port_check=False)
-              context.session.add(floatingip_db)
-  
-          return self._make_floatingip_dict(floatingip_db)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/10-interface-driver-entry-point.patch	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,22 @@
+The Solaris OVS Interface driver needs to be added to the list of entry points
+in order to be discovered and loaded by stevedore.
+
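At run time the agents resolve the configured interface_driver alias through
stevedore; a rough, hypothetical sketch of that lookup (the namespace string
and helper name are assumptions, only the entry-point target comes from the
hunk below):

    from stevedore import driver

    def load_interface_driver(conf, name='openvswitch',
                              namespace='neutron.interface_drivers'):
        # Hypothetical helper: resolves the alias configured as
        # interface_driver in dhcp_agent.ini / l3_agent.ini.
        manager = driver.DriverManager(namespace=namespace, name=name,
                                       invoke_on_load=False)
        # With this patch the 'openvswitch' alias maps to
        # neutron.agent.solaris.interface:OVSInterfaceDriver.
        return manager.driver(conf)
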
+*** neutron-8.0.0/setup.cfg	2016-04-07 00:47:18.000000000 -0700
+--- new/setup.cfg	2016-05-24 13:31:12.688314864 -0700
+***************
+*** 155,161 ****
+  	ivs = neutron.agent.linux.interface:IVSInterfaceDriver
+  	linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
+  	null = neutron.agent.linux.interface:NullDriver
+! 	openvswitch = neutron.agent.linux.interface:OVSInterfaceDriver
+  neutron.agent.firewall_drivers = 
+  	noop = neutron.agent.firewall:NoopFirewallDriver
+  	iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver
+--- 155,161 ----
+  	ivs = neutron.agent.linux.interface:IVSInterfaceDriver
+  	linuxbridge = neutron.agent.linux.interface:BridgeInterfaceDriver
+  	null = neutron.agent.linux.interface:NullDriver
+! 	openvswitch = neutron.agent.solaris.interface:OVSInterfaceDriver
+  neutron.agent.firewall_drivers = 
+  	noop = neutron.agent.firewall:NoopFirewallDriver
+  	iptables = neutron.agent.linux.iptables_firewall:IptablesFirewallDriver
--- a/components/openstack/neutron/patches/11-mysql_cluster_support.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1172 +0,0 @@
-This patchset is for bug:
-
-22726251 - Neutron needs to support MySQL Cluster
-
-This fixes the following aspects of Neutron:
-1. Implementation of an oslo.db configuration parameter to specify the MySQL
-   storage engine (mysql_storage_engine).
-2. Replacement of hardcoded SQL statements that set the engine to "InnoDB"
-   with the above configuration value.
-3. Logic to handle SQL differences between MySQL InnoDB and MySQL Cluster (NDB).
-   This includes column lengths, constraints, foreign keys, and indexes.
-
-This has not been committed upstream, but has been filed in launchpad:
-
-https://bugs.launchpad.net/neutron/+bug/1564110
-
-
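Aspect (1) amounts to registering a new oslo.config option in the [database]
group and consulting it wherever "InnoDB" used to be hardcoded. A minimal
sketch of just that wiring (the removed patch registers the option in the
migration CLI with an empty default; the default below is illustrative):

    from oslo_config import cfg

    database_opts = [
        cfg.StrOpt('mysql_storage_engine',
                   default='InnoDB',
                   help='MySQL storage engine, e.g. InnoDB or NDBCLUSTER'),
    ]
    cfg.CONF.register_opts(database_opts, group='database')

    def table_args():
        # Used in model __table_args__ instead of a hardcoded 'InnoDB'.
        return {'mysql_engine': cfg.CONF.database.mysql_storage_engine}
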
---- neutron-2015.1.2/neutron/db/model_base.py.orig	2016-07-29 13:15:07.909806102 -0600
-+++ neutron-2015.1.2/neutron/db/model_base.py	2016-07-29 13:27:41.817140969 -0600
-@@ -13,15 +13,24 @@
- # See the License for the specific language governing permissions and
- # limitations under the License.
- 
-+from oslo_config import cfg
- from oslo_db.sqlalchemy import models
- from sqlalchemy.ext import declarative
- from sqlalchemy import orm
- 
-+# Attempt to determine the context that this module is being used.
-+# If via neutron-db-manage and cli.py, import global variable. If not,
-+# use oslo_config.
-+try:
-+     from neutron.db.migration.cli import mysql_storage_eng_type
-+except ImportError:
-+     CONF = cfg.CONF
-+     mysql_storage_eng_type = CONF.database.mysql_storage_engine
- 
- class NeutronBase(models.ModelBase):
-     """Base class for Neutron Models."""
- 
--    __table_args__ = {'mysql_engine': 'InnoDB'}
-+    __table_args__ = {'mysql_engine': mysql_storage_eng_type}
- 
-     def __iter__(self):
-         self._i = iter(orm.object_mapper(self).columns)
---- neutron-2015.1.2/neutron/db/api.py.orig	2016-07-29 13:15:14.380329905 -0600
-+++ neutron-2015.1.2/neutron/db/api.py	2016-07-28 14:15:31.186601900 -0600
-@@ -19,7 +19,6 @@ from oslo_config import cfg
- from oslo_db.sqlalchemy import session
- from sqlalchemy import exc
- 
--
- _FACADE = None
- 
- MAX_RETRIES = 10
-@@ -50,10 +49,30 @@ def get_session(autocommit=True, expire_
- @contextlib.contextmanager
- def autonested_transaction(sess):
-     """This is a convenience method to not bother with 'nested' parameter."""
-+    # Attempt to determine the context that this module is being used.
-+    # If via neutron-db-manage and cli.py, import global variable. If not,
-+    # use oslo_config.
-     try:
--        session_context = sess.begin_nested()
--    except exc.InvalidRequestError:
--        session_context = sess.begin(subtransactions=True)
--    finally:
--        with session_context as tx:
--            yield tx
-+        from neutron.db.migration.cli import mysql_storage_eng_type
-+    except ImportError:
-+        CONF = cfg.CONF
-+        mysql_storage_eng_type = CONF.database.mysql_storage_engine
-+
-+    # MySQL Cluster NDB does not support nested transactions
-+    # TODO (oorgeron) Look into making this workable.
-+    if mysql_storage_eng_type == "NDBCLUSTER":
-+        try:
-+            session_context = sess.begin(subtransactions=True, nested=False)
-+        except exc.InvalidRequestError:
-+            session_context = sess.begin(subtransactions=False, nested=False)
-+        finally:
-+            with session_context as tx:
-+                yield tx
-+    else:
-+        try:
-+            session_context = sess.begin_nested()
-+        except exc.InvalidRequestError:
-+            session_context = sess.begin(subtransactions=True)
-+        finally:
-+            with session_context as tx:
-+                yield tx
---- neutron-2015.1.2/neutron/db/db_base_plugin_v2.py.orig	2016-07-29 13:15:20.388338380 -0600
-+++ neutron-2015.1.2/neutron/db/db_base_plugin_v2.py	2016-07-28 14:15:31.187095505 -0600
-@@ -46,6 +46,15 @@ from neutron.plugins.common import const
- 
- LOG = logging.getLogger(__name__)
- 
-+# Attempt to determine the context that this module is being used.
-+# If via neutron-db-manage and cli.py, import global variable. If not,
-+# use oslo_config.
-+try:
-+     from neutron.db.migration.cli import mysql_storage_eng_type
-+except ImportError:
-+     CONF = cfg.CONF
-+     mysql_storage_eng_type = CONF.database.mysql_storage_engine
-+
- # Ports with the following 'device_owner' values will not prevent
- # network deletion.  If delete_network() finds that all ports on a
- # network have these owners, it will explicitly delete each port
-@@ -1443,16 +1452,26 @@ class NeutronDbPluginV2(neutron_plugin_b
-                                                    port_id=port['id'],
-                                                    ip_address=ip_address,
-                                                    subnet_id=subnet['id'])
--                try:
--                    # Do the insertion of each IP allocation entry within
--                    # the context of a nested transaction, so that the entry
--                    # is rolled back independently of other entries whenever
--                    # the corresponding port has been deleted.
--                    with context.session.begin_nested():
--                        context.session.add(allocated)
--                except db_exc.DBReferenceError:
--                    LOG.debug("Port %s was deleted while updating it with an "
--                              "IPv6 auto-address. Ignoring.", port['id'])
-+                # MySQL Cluster NDB does not support nested transactions
-+                # TODO (oorgeron) Look into making this workable.
-+                if mysql_storage_eng_type == "NDBCLUSTER":
-+                    try:
-+                        with context.session.begin(subtransactions=True):
-+                            context.session.add(allocated)
-+                    except db_exc.DBReferenceError:
-+                        LOG.debug("Port %s was deleted while updating it with an "
-+                                  "IPv6 auto-address. Ignoring.", port['id'])
-+                else:
-+                    try:
-+                        # Do the insertion of each IP allocation entry within
-+                        # the context of a nested transaction, so that the entry
-+                        # is rolled back independently of other entries whenever
-+                        # the corresponding port has been deleted.
-+                        with context.session.begin_nested():
-+                            context.session.add(allocated)
-+                    except db_exc.DBReferenceError:
-+                        LOG.debug("Port %s was deleted while updating it with an "
-+                                  "IPv6 auto-address. Ignoring.", port['id'])
- 
-     def _update_subnet_dns_nameservers(self, context, id, s):
-         old_dns_list = self._get_dns_by_subnet(context, id)
-@@ -1797,8 +1816,16 @@ class NeutronDbPluginV2(neutron_plugin_b
-                 # within a transaction, so that it can be rolled back to the
-                 # point before its failure while maintaining the enclosing
-                 # transaction
--                return self._create_port_with_mac(
--                    context, network_id, port_data, mac, nested=True)
-+
-+                # MySQL Cluster NDB does not support nested transactions
-+                # TODO (oorgeron) Look into making this workable.
-+                if mysql_storage_eng_type == "NDBCLUSTER":
-+                    return self._create_port_with_mac(
-+                        context, network_id, port_data, mac, nested=False)
-+                else:
-+                    return self._create_port_with_mac(
-+                        context, network_id, port_data, mac, nested=True)
-+
-             except n_exc.MacAddressInUse:
-                 LOG.debug('Generated mac %(mac_address)s exists on '
-                           'network %(network_id)s',
---- neutron-2015.1.2/neutron/db/migration/cli.py.orig	2016-07-29 13:15:27.041205822 -0600
-+++ neutron-2015.1.2/neutron/db/migration/cli.py	2016-07-28 14:15:31.187312338 -0600
-@@ -59,6 +59,9 @@ _db_opts = [
-     cfg.StrOpt('engine',
-                default='',
-                help=_('Database engine')),
-+    cfg.StrOpt('mysql_storage_engine',
-+               default='',
-+               help=_('MySQL Storage Engine')),
- ]
- 
- CONF = cfg.ConfigOpts()
-@@ -234,5 +237,11 @@ def main():
-     config = get_alembic_config()
-     config.neutron_config = CONF
- 
-+    # Make global variable available for modules that will be called
-+    # by neutron-db-manage. This will prevent namespace collisions
-+    # between oslo_config and albemic_config
-+    global mysql_storage_eng_type
-+    mysql_storage_eng_type = CONF.database.mysql_storage_engine
-+
-     #TODO(gongysh) enable logging
-     CONF.command.func(config, CONF.command.name)
---- neutron-2015.1.2/neutron/db/migration/models/frozen.py.orig	2016-07-29 13:15:33.940472370 -0600
-+++ neutron-2015.1.2/neutron/db/migration/models/frozen.py	2016-07-29 13:29:33.819709706 -0600
-@@ -22,7 +22,8 @@ Based on this comparison database can be
- Current HEAD commit is 59da928e945ec58836d34fd561d30a8a446e2728
- """
- 
--
-+from alembic import context
-+from oslo_config import cfg
- import sqlalchemy as sa
- from sqlalchemy.ext import declarative
- from sqlalchemy.ext.orderinglist import ordering_list
-@@ -32,6 +33,8 @@ from sqlalchemy import schema
- from neutron.db import model_base
- from neutron.openstack.common import uuidutils
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- # Dictionary of all tables that was renamed:
- # {new_table_name: old_table_name}
-@@ -77,6 +80,10 @@ DHCPV6_STATELESS = 'dhcpv6-stateless'
- 
- BASEV2 = declarative.declarative_base(cls=model_base.NeutronBaseV2)
- 
-+if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+   db_string_length = 128
-+else:
-+   db_string_length = 255
- 
- #neutron/db/models_v2.py
- class HasTenant(object):
-@@ -218,17 +225,30 @@ class Agent(BASEV2, HasId):
-                             name='uniq_agents0agent_type0host'),
-     )
- 
--    agent_type = sa.Column(sa.String(255), nullable=False)
--    binary = sa.Column(sa.String(255), nullable=False)
--    topic = sa.Column(sa.String(255), nullable=False)
--    host = sa.Column(sa.String(255), nullable=False)
--    admin_state_up = sa.Column(sa.Boolean, default=True,
--                               server_default=sa.sql.true(), nullable=False)
--    created_at = sa.Column(sa.DateTime, nullable=False)
--    started_at = sa.Column(sa.DateTime, nullable=False)
--    heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
--    description = sa.Column(sa.String(255))
--    configurations = sa.Column(sa.String(4095), nullable=False)
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        agent_type = sa.Column(sa.String(db_string_length), nullable=False)
-+        binary = sa.Column(sa.String(db_string_length), nullable=False)
-+        topic = sa.Column(sa.String(db_string_length), nullable=False)
-+        host = sa.Column(sa.String(db_string_length), nullable=False)
-+        admin_state_up = sa.Column(sa.Boolean, default=True,
-+                                   server_default=sa.sql.true(), nullable=False)
-+        created_at = sa.Column(sa.DateTime, nullable=False)
-+        started_at = sa.Column(sa.DateTime, nullable=False)
-+        heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
-+        description = sa.Column(sa.String(db_string_length))
-+        configurations = sa.Column(sa.Text(4095), nullable=False)
-+    else:
-+        agent_type = sa.Column(sa.String(db_string_length), nullable=False)
-+        binary = sa.Column(sa.String(db_string_length), nullable=False)
-+        topic = sa.Column(sa.String(db_string_length), nullable=False)
-+        host = sa.Column(sa.String(db_string_length), nullable=False)
-+        admin_state_up = sa.Column(sa.Boolean, default=True,
-+                                   server_default=sa.sql.true(), nullable=False)
-+        created_at = sa.Column(sa.DateTime, nullable=False)
-+        started_at = sa.Column(sa.DateTime, nullable=False)
-+        heartbeat_timestamp = sa.Column(sa.DateTime, nullable=False)
-+        description = sa.Column(sa.String(db_string_length))
-+        configurations = sa.Column(sa.String(4095), nullable=False)
- 
- 
- #neutron/db/agentschedulers_db.py
-@@ -431,10 +451,15 @@ class Vip(BASEV2, HasId, HasTenant, HasS
- 
- #neutron/db/loadbalancer/loadbalancer_db.py
- class Member(BASEV2, HasId, HasTenant, HasStatusDescription):
--    __table_args__ = (
--        sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
--                                   name='uniq_member0pool_id0address0port'),
--    )
-+    
-+    # MySQL Cluster NDB does not support this constraint.
-+    # TODO (oorgeron) Look into making this workable.
-+    if CONF.database.mysql_storage_engine != "NDBCLUSTER":
-+        __table_args__ = (
-+            sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port',
-+                                       name='uniq_member0pool_id0address0port'),
-+        )
-+
-     pool_id = sa.Column(sa.String(36), sa.ForeignKey("pools.id"),
-                         nullable=False)
-     address = sa.Column(sa.String(64), nullable=False)
-@@ -1229,8 +1254,15 @@ class PortBinding(BASEV2):
-     profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False,
-                         default='', server_default='')
-     vif_type = sa.Column(sa.String(64), nullable=False)
--    vif_details = sa.Column(sa.String(4095), nullable=False, default='',
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        vif_details = sa.Column(sa.Text(4095), nullable=False)
-+        profile = sa.Column(sa.Text(length=4095),nullable=False)
-+    else:
-+        vif_details = sa.Column(sa.String(4095), nullable=False,
-+                                server_default='')
-+        profile = sa.Column(sa.String(length=4095),nullable=False,
-                             server_default='')
-+
-     driver = sa.Column(sa.String(64))
-     segment = sa.Column(sa.String(36),
-                         sa.ForeignKey('ml2_network_segments.id',
-@@ -1827,10 +1859,17 @@ class PoolPort(BASEV2):
- class IdentifierMap(BASEV2, HasTenant):
-     __tablename__ = 'cisco_csr_identifier_map'
- 
--    ipsec_site_conn_id = sa.Column(sa.String(64),
--                                   sa.ForeignKey('ipsec_site_connections.id',
--                                                 ondelete="CASCADE"),
--                                   primary_key=True)
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        ipsec_site_conn_id = sa.Column(sa.String(36),
-+                                       sa.ForeignKey('ipsec_site_connections.id',
-+                                                     ondelete="CASCADE"),
-+                                       primary_key=True)
-+    else:
-+        ipsec_site_conn_id = sa.Column(sa.String(64),
-+                                       sa.ForeignKey('ipsec_site_connections.id',
-+                                                     ondelete="CASCADE"),
-+                                       primary_key=True)
-+
-     csr_tunnel_id = sa.Column(sa.Integer, nullable=False)
-     csr_ike_policy_id = sa.Column(sa.Integer, nullable=False)
-     csr_ipsec_policy_id = sa.Column(sa.Integer, nullable=False)
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/agent_init_ops.py.orig	2016-07-29 13:15:57.799898995 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/agent_init_ops.py	2016-07-29 13:30:30.011137004 -0600
-@@ -17,26 +17,49 @@
- # This module only manages the 'agents' table. Binding tables are created
- # in the modules for relevant resources
- 
--
-+from alembic import context
- from alembic import op
- import sqlalchemy as sa
- 
-+config = context.config
-+CONF = config.neutron_config
-+
-+if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+   db_string_length = 128
-+else:
-+   db_string_length = 255
- 
- def upgrade():
-     bind = op.get_bind()
-     insp = sa.engine.reflection.Inspector.from_engine(bind)
-     if 'agents' not in insp.get_table_names():
--        op.create_table(
--        'agents',
--        sa.Column('id', sa.String(length=36), nullable=False),
--        sa.Column('agent_type', sa.String(length=255), nullable=False),
--        sa.Column('binary', sa.String(length=255), nullable=False),
--        sa.Column('topic', sa.String(length=255), nullable=False),
--        sa.Column('host', sa.String(length=255), nullable=False),
--        sa.Column('admin_state_up', sa.Boolean(), nullable=False),
--        sa.Column('created_at', sa.DateTime(), nullable=False),
--        sa.Column('started_at', sa.DateTime(), nullable=False),
--        sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
--        sa.Column('description', sa.String(length=255), nullable=True),
--        sa.Column('configurations', sa.String(length=4095), nullable=False),
--        sa.PrimaryKeyConstraint('id'))
-+        if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+            op.create_table(
-+                'agents',
-+                sa.Column('id', sa.String(length=36), nullable=False),
-+                sa.Column('agent_type', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('binary', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('topic', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('host', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-+                sa.Column('created_at', sa.DateTime(), nullable=False),
-+                sa.Column('started_at', sa.DateTime(), nullable=False),
-+                sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
-+                sa.Column('description', sa.String(length=db_string_length), nullable=True),
-+                sa.Column('configurations', sa.Text(length=4095), nullable=False),
-+                sa.PrimaryKeyConstraint('id'))
-+        else:
-+            op.create_table(
-+                'agents',
-+                sa.Column('id', sa.String(length=36), nullable=False),
-+                sa.Column('agent_type', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('binary', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('topic', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('host', sa.String(length=db_string_length), nullable=False),
-+                sa.Column('admin_state_up', sa.Boolean(), nullable=False),
-+                sa.Column('created_at', sa.DateTime(), nullable=False),
-+                sa.Column('started_at', sa.DateTime(), nullable=False),
-+                sa.Column('heartbeat_timestamp', sa.DateTime(), nullable=False),
-+                sa.Column('description', sa.String(length=db_string_length), nullable=True),
-+                sa.Column('configurations', sa.String(length=4095), nullable=False),
-+                sa.PrimaryKeyConstraint('id'))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/heal_script.py.orig	2016-07-29 13:16:06.570595984 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/heal_script.py	2016-07-29 13:31:36.373228770 -0600
-@@ -19,6 +19,7 @@ import alembic
- from alembic import autogenerate as autogen
- from alembic import context
- from alembic import op
-+from oslo_config import cfg
- 
- import sqlalchemy
- from sqlalchemy import schema as sa_schema
-@@ -29,6 +30,9 @@ from sqlalchemy import types
- from neutron.db.migration.models import frozen as frozen_models
- from neutron.i18n import _LI, _LW
- 
-+config = context.config
-+CONF = config.neutron_config
-+
- LOG = logging.getLogger(__name__)
- 
- METHODS = {}
-@@ -70,7 +74,10 @@ def heal():
-         'compare_server_default': _compare_server_default,
-     }
-     mc = alembic.migration.MigrationContext.configure(op.get_bind(), opts=opts)
--    set_storage_engine(op.get_bind(), "InnoDB")
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        set_storage_engine(op.get_bind(), 'NDBCLUSTER')
-+    else:
-+        set_storage_engine(op.get_bind(), 'InnoDB')
-     diff = autogen.compare_metadata(mc, models_metadata)
-     for el in diff:
-         execute_alembic_command(el)
-@@ -286,4 +293,6 @@ def set_storage_engine(bind, engine):
-     if bind.dialect.name == 'mysql':
-         for table in insp.get_table_names():
-             if insp.get_table_options(table)['mysql_engine'] != engine:
--                op.execute("ALTER TABLE %s ENGINE=%s" % (table, engine))
-+                op.execute("ALTER TABLE %(db_table)s Engine=%(mysql_storage_engine)s"
-+                           % dict(db_table=table,
-+                           mysql_storage_engine=CONF.database.mysql_storage_engine))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py.orig	2016-07-29 13:16:13.971296279 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/157a5d299379_ml2_binding_profile.py	2016-07-29 13:32:20.859598698 -0600
-@@ -25,14 +25,24 @@ Create Date: 2014-02-13 23:48:25.147279
- revision = '157a5d299379'
- down_revision = '50d5ba354c23'
- 
-+from alembic import context
- from alembic import op
- import sqlalchemy as sa
- 
- from neutron.db import migration
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- def upgrade():
-     if migration.schema_has_table('ml2_port_bindings'):
--        op.add_column('ml2_port_bindings',
--                      sa.Column('profile', sa.String(length=4095),
--                                nullable=False, server_default=''))
-+        # MySQL Cluster (NDB) does not support rows longer than 14000.
-+        # This configures the profile column as TEXT to keep the row size down.
-+        if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+            op.add_column('ml2_port_bindings',
-+                          sa.Column('profile', sa.Text(length=4095),
-+                                    nullable=False))
-+        else:
-+            op.add_column('ml2_port_bindings',
-+                          sa.Column('profile', sa.String(length=4095),
-+                                    nullable=False, server_default=''))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py.orig	2016-07-29 13:16:20.121107758 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/50d5ba354c23_ml2_binding_vif_details.py	2016-07-29 13:32:42.870041087 -0600
-@@ -25,11 +25,14 @@ Create Date: 2014-02-11 23:21:59.577972
- revision = '50d5ba354c23'
- down_revision = '27cc183af192'
- 
-+from alembic import context
- from alembic import op
- import sqlalchemy as sa
- 
- from neutron.db import migration
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- def upgrade():
- 
-@@ -38,9 +41,17 @@ def upgrade():
-         # did not create the ml2_port_bindings table.
-         return
- 
--    op.add_column('ml2_port_bindings',
--                  sa.Column('vif_details', sa.String(length=4095),
--                            nullable=False, server_default=''))
-+    # MySQL Cluster (NDB) does not support rows longer than 14000.
-+    # This converts vif_details to TEXT to keep the row size down.
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        op.add_column('ml2_port_bindings',
-+                      sa.Column('vif_details', sa.Text(length=4095),
-+                                nullable=False))
-+    else:
-+        op.add_column('ml2_port_bindings',
-+                      sa.Column('vif_details', sa.String(length=4095),
-+                                nullable=False, server_default=''))
-+
-     if op.get_bind().engine.name == 'ibm_db_sa':
-         op.execute(
-             "UPDATE ml2_port_bindings SET"
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py.orig	2016-07-29 13:16:26.541620109 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/e197124d4b9_add_unique_constrain.py	2016-07-29 13:33:26.620005420 -0600
-@@ -25,16 +25,27 @@ Create Date: 2013-11-17 10:09:37.728903
- revision = 'e197124d4b9'
- down_revision = 'havana'
- 
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
- 
- from neutron.db import migration
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- CONSTRAINT_NAME = 'uniq_member0pool_id0address0port'
- TABLE_NAME = 'members'
- 
- 
- def upgrade():
-+
-+    # MySQL Cluster, a.k.a. NDB, does not support this migration step.
-+    # This test will skip this migration.
-+    # TODO (oorgeron) Look into making this workable.
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        return
-+
-     if migration.schema_has_table(TABLE_NAME):
-         op.create_unique_constraint(
-             name=CONSTRAINT_NAME,
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/86d6d9776e2b_cisco_apic_driver_update_l3.py.orig	2016-07-29 13:16:33.520954213 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/86d6d9776e2b_cisco_apic_driver_update_l3.py	2016-07-29 13:34:14.398075900 -0600
-@@ -25,19 +25,31 @@ Create Date: 2014-04-23 09:27:08.177021
- revision = '86d6d9776e2b'
- down_revision = '236b90af57ab'
- 
--
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
- import sqlalchemy as sa
- 
-+config = context.config
-+CONF = config.neutron_config
-+
- 
- def upgrade():
- 
-     op.drop_table('cisco_ml2_apic_contracts')
-     op.drop_table('cisco_ml2_apic_epgs')
- 
--    op.create_table(
--        'cisco_ml2_apic_contracts',
--        sa.Column('tenant_id', sa.String(length=255)),
--        sa.Column('router_id', sa.String(length=64), nullable=False),
--        sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
--        sa.PrimaryKeyConstraint('router_id'))
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        op.create_table(
-+            'cisco_ml2_apic_contracts',
-+            sa.Column('tenant_id', sa.String(length=255)),
-+            sa.Column('router_id', sa.String(length=36), nullable=False),
-+            sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
-+            sa.PrimaryKeyConstraint('router_id'))
-+    else:
-+        op.create_table(
-+            'cisco_ml2_apic_contracts',
-+            sa.Column('tenant_id', sa.String(length=255)),
-+            sa.Column('router_id', sa.String(length=64), nullable=False),
-+            sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
-+            sa.PrimaryKeyConstraint('router_id'))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py.orig	2016-07-29 13:16:40.087477384 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/24c7ea5160d7_cisco_csr_vpnaas.py	2016-07-29 13:34:49.694936881 -0600
-@@ -23,25 +23,43 @@
- revision = '24c7ea5160d7'
- down_revision = '492a106273f8'
- 
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
- import sqlalchemy as sa
- 
- from neutron.db import migration
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- def upgrade():
-     if not migration.schema_has_table('ipsec_site_connections'):
-         # The vpnaas service plugin was not configured.
-         return
--    op.create_table(
--        'cisco_csr_identifier_map',
--        sa.Column('tenant_id', sa.String(length=255), nullable=True),
--        sa.Column('ipsec_site_conn_id', sa.String(length=64),
--                  primary_key=True),
--        sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
--        sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
--        sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
--        sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
--                                ['ipsec_site_connections.id'],
--                                ondelete='CASCADE')
--    )
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        op.create_table(
-+            'cisco_csr_identifier_map',
-+            sa.Column('tenant_id', sa.String(length=255), nullable=True),
-+            sa.Column('ipsec_site_conn_id', sa.String(length=36),
-+                      primary_key=True),
-+            sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
-+            sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
-+            sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
-+            sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
-+                                    ['ipsec_site_connections.id'],
-+                                    ondelete='CASCADE')
-+        )
-+    else:  
-+        op.create_table(
-+            'cisco_csr_identifier_map',
-+            sa.Column('tenant_id', sa.String(length=255), nullable=True),
-+            sa.Column('ipsec_site_conn_id', sa.String(length=64),
-+                      primary_key=True),
-+            sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
-+            sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
-+            sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
-+            sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
-+                                    ['ipsec_site_connections.id'],
-+                                    ondelete='CASCADE')
-+        )
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py.orig	2016-07-29 13:16:46.865282929 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/16cdf118d31d_extra_dhcp_options_ipv6_support.py	2016-07-29 13:35:19.078239546 -0600
-@@ -25,11 +25,16 @@ Create Date: 2014-10-23 17:04:19.796731
- revision = '16cdf118d31d'
- down_revision = '14be42f3d0a5'
- 
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
- import sqlalchemy as sa
- 
- from neutron.db import migration
- 
-+config = context.config
-+CONF = config.neutron_config
-+
- CONSTRAINT_NAME_OLD = 'uidx_portid_optname'
- CONSTRAINT_NAME_NEW = 'uniq_extradhcpopts0portid0optname0ipversion'
- TABLE_NAME = 'extradhcpopts'
-@@ -47,8 +52,11 @@ def upgrade():
-                   server_default='4', nullable=False))
-         op.execute("UPDATE extradhcpopts SET ip_version = 4")
- 
--    op.create_unique_constraint(
--        name=CONSTRAINT_NAME_NEW,
--        source='extradhcpopts',
--        local_cols=['port_id', 'opt_name', 'ip_version']
--    )
-+    # MySQL Cluster NDB does not support this constraint.
-+    # TODO (oorgeron) Look into making this workable.
-+    if CONF.database.mysql_storage_engine != "NDBCLUSTER":
-+        op.create_unique_constraint(
-+            name=CONSTRAINT_NAME_NEW,
-+            source='extradhcpopts',
-+            local_cols=['port_id', 'opt_name', 'ip_version']
-+        )
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/327ee5fde2c7_set_innodb_engine.py.orig	2016-07-29 13:16:54.825004264 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/327ee5fde2c7_set_innodb_engine.py	2016-07-29 13:35:50.605636987 -0600
-@@ -26,7 +26,12 @@ revision = '327ee5fde2c7'
- down_revision = '4eba2f05c2f4'
- 
- 
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
-+
-+config = context.config
-+CONF = config.neutron_config
- 
- # This list contain tables that could be deployed before change that converts
- # all tables to InnoDB appeared
-@@ -37,4 +42,6 @@ TABLES = ['router_extra_attributes', 'dv
- def upgrade():
-     if op.get_bind().dialect.name == 'mysql':
-         for table in TABLES:
--            op.execute("ALTER TABLE %s ENGINE=InnoDB" % table)
-+            op.execute("ALTER TABLE %(db_table)s ENGINE=%(mysql_storage_engine)s"
-+                       % dict(db_table=table,
-+                       mysql_storage_engine=CONF.database.mysql_storage_engine))
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/20c469a5f920_add_index_for_port.py.orig	2016-07-29 13:17:00.753891761 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/20c469a5f920_add_index_for_port.py	2016-07-29 13:36:19.221516843 -0600
-@@ -25,11 +25,16 @@ Create Date: 2015-04-01 04:12:49.898443
- revision = '20c469a5f920'
- down_revision = '28a09af858a8'
- 
-+from alembic import context
- from alembic import op
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- def upgrade():
--    op.create_index(op.f('ix_ports_network_id_device_owner'),
--                    'ports', ['network_id', 'device_owner'], unique=False)
--    op.create_index(op.f('ix_ports_network_id_mac_address'),
--                    'ports', ['network_id', 'mac_address'], unique=False)
-+    # MySQL Cluster (NDB) does not support these indexes via alembic
-+    if CONF.database.mysql_storage_engine != "NDBCLUSTER":
-+        op.create_index(op.f('ix_ports_network_id_device_owner'),
-+                        'ports', ['network_id', 'device_owner'], unique=False)
-+        op.create_index(op.f('ix_ports_network_id_mac_address'),
-+                        'ports', ['network_id', 'mac_address'], unique=False)
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py.orig	2016-07-29 13:17:07.106004816 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2a1ee2fb59e0_add_mac_address_unique_constraint.py	2016-07-29 13:36:52.862356573 -0600
-@@ -25,15 +25,26 @@ Create Date: 2015-01-10 11:44:27.550349
- revision = '2a1ee2fb59e0'
- down_revision = '41662e32bce2'
- 
-+from alembic import context
- from alembic import op
-+from oslo_config import cfg
-+
-+config = context.config
-+CONF = config.neutron_config
- 
- TABLE_NAME = 'ports'
- CONSTRAINT_NAME = 'uniq_ports0network_id0mac_address'
- 
- 
- def upgrade():
--    op.create_unique_constraint(
--        name=CONSTRAINT_NAME,
--        source=TABLE_NAME,
--        local_cols=['network_id', 'mac_address']
--    )
-+
-+    # MySQL Cluster NDB does not support this constraint.
-+    # TODO (oorgeron) Look into making this workable.
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        return
-+    else:
-+        op.create_unique_constraint(
-+            name=CONSTRAINT_NAME,
-+            source=TABLE_NAME,
-+            local_cols=['network_id', 'mac_address']
-+        )
---- neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py.orig	2016-07-29 13:17:16.553516359 -0600
-+++ neutron-2015.1.2/neutron/db/migration/alembic_migrations/versions/2026156eab2f_l2_dvr_models.py	2016-07-29 13:37:31.406649583 -0600
-@@ -26,37 +26,73 @@ revision = '2026156eab2f'
- down_revision = '3927f7f7c456'
- 
- 
-+from alembic import context
- from alembic import op
- import sqlalchemy as sa
- 
-+config = context.config
-+CONF = config.neutron_config
- 
- def upgrade():
--    op.create_table(
--        'dvr_host_macs',
--        sa.Column('host', sa.String(length=255), nullable=False),
--        sa.Column('mac_address', sa.String(length=32),
--                  nullable=False, unique=True),
--        sa.PrimaryKeyConstraint('host')
--    )
--    op.create_table(
--        'ml2_dvr_port_bindings',
--        sa.Column('port_id', sa.String(length=36), nullable=False),
--        sa.Column('host', sa.String(length=255), nullable=False),
--        sa.Column('router_id', sa.String(length=36), nullable=True),
--        sa.Column('vif_type', sa.String(length=64), nullable=False),
--        sa.Column('vif_details', sa.String(length=4095),
--                  nullable=False, server_default=''),
--        sa.Column('vnic_type', sa.String(length=64),
--                  nullable=False, server_default='normal'),
--        sa.Column('profile', sa.String(length=4095),
--                  nullable=False, server_default=''),
--        sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
--        sa.Column('driver', sa.String(length=64), nullable=True),
--        sa.Column('segment', sa.String(length=36), nullable=True),
--        sa.Column(u'status', sa.String(16), nullable=False),
--        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
--                                ondelete='CASCADE'),
--        sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
--                                ondelete='SET NULL'),
--        sa.PrimaryKeyConstraint('port_id', 'host')
--    )
-+    # MySQL Cluster (NDB) does not support rows longer than 14000.
-+    # This reduces the size of columns or converts them to TEXT to keep the row size down.
-+    if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+        op.create_table(
-+            'dvr_host_macs',
-+            sa.Column('host', sa.String(length=128), nullable=False),
-+            sa.Column('mac_address', sa.String(length=32),
-+                      nullable=False, unique=True),
-+            sa.PrimaryKeyConstraint('host')
-+        )
-+        op.create_table(
-+            'ml2_dvr_port_bindings',
-+            sa.Column('port_id', sa.String(length=36), nullable=False),
-+            sa.Column('host', sa.String(length=128), nullable=False),
-+            sa.Column('router_id', sa.String(length=36), nullable=True),
-+            sa.Column('vif_type', sa.String(length=64), nullable=False),
-+            sa.Column('vif_details', sa.Text(length=4095),
-+                      nullable=False, server_default=''),
-+            sa.Column('vnic_type', sa.String(length=64),
-+                      nullable=False, server_default='normal'),
-+            sa.Column('profile', sa.Text(length=4095),
-+                      nullable=False, server_default=''),
-+            sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
-+            sa.Column('driver', sa.String(length=64), nullable=True),
-+            sa.Column('segment', sa.String(length=36), nullable=True),
-+            sa.Column(u'status', sa.String(16), nullable=False),
-+            sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-+                                    ondelete='CASCADE'),
-+            sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
-+                                    ondelete='SET NULL'),
-+            sa.PrimaryKeyConstraint('port_id', 'host')
-+        )
-+    else:
-+        op.create_table(
-+            'dvr_host_macs',
-+            sa.Column('host', sa.String(length=255), nullable=False),
-+            sa.Column('mac_address', sa.String(length=32),
-+                      nullable=False, unique=True),
-+            sa.PrimaryKeyConstraint('host')
-+        )
-+        op.create_table(
-+            'ml2_dvr_port_bindings',
-+            sa.Column('port_id', sa.String(length=36), nullable=False),
-+            sa.Column('host', sa.String(length=255), nullable=False),
-+            sa.Column('router_id', sa.String(length=36), nullable=True),
-+            sa.Column('vif_type', sa.String(length=64), nullable=False),
-+            sa.Column('vif_details', sa.String(length=4095),
-+                      nullable=False, server_default=''),
-+            sa.Column('vnic_type', sa.String(length=64),
-+                      nullable=False, server_default='normal'),
-+            sa.Column('profile', sa.String(length=4095),
-+                      nullable=False, server_default=''),
-+            sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
-+            sa.Column('driver', sa.String(length=64), nullable=True),
-+            sa.Column('segment', sa.String(length=36), nullable=True),
-+            sa.Column(u'status', sa.String(16), nullable=False),
-+            sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
-+                                    ondelete='CASCADE'),
-+            sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
-+                                    ondelete='SET NULL'),
-+            sa.PrimaryKeyConstraint('port_id', 'host')
-+        )
---- neutron-2015.1.2/neutron/db/extradhcpopt_db.py.orig	2016-07-29 13:17:23.118174498 -0600
-+++ neutron-2015.1.2/neutron/db/extradhcpopt_db.py	2016-07-29 13:38:57.088142872 -0600
-@@ -13,6 +13,7 @@
- #    License for the specific language governing permissions and limitations
- #    under the License.
- 
-+from oslo_config import cfg
- from oslo_log import log as logging
- import sqlalchemy as sa
- from sqlalchemy import orm
-@@ -23,36 +24,67 @@ from neutron.db import model_base
- from neutron.db import models_v2
- from neutron.extensions import extra_dhcp_opt as edo_ext
- 
-+# Attempt to determine the context that this module is being used.
-+# If via neutron-db-manage and cli.py, import global variable. If not,
-+# use oslo_config.
-+try:
-+     from neutron.db.migration.cli import mysql_storage_eng_type
-+except ImportError:
-+     CONF = cfg.CONF
-+     mysql_storage_eng_type = CONF.database.mysql_storage_engine
- 
- LOG = logging.getLogger(__name__)
- 
--
--class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId):
--    """Represent a generic concept of extra options associated to a port.
--
--    Each port may have none to many dhcp opts associated to it that can
--    define specifically different or extra options to DHCP clients.
--    These will be written to the <network_id>/opts files, and each option's
--    tag will be referenced in the <network_id>/host file.
--    """
--    port_id = sa.Column(sa.String(36),
--                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
--                        nullable=False)
--    opt_name = sa.Column(sa.String(64), nullable=False)
--    opt_value = sa.Column(sa.String(255), nullable=False)
--    ip_version = sa.Column(sa.Integer, server_default='4', nullable=False)
--    __table_args__ = (sa.UniqueConstraint(
--        'port_id',
--        'opt_name',
--        'ip_version',
--        name='uniq_extradhcpopts0portid0optname0ipversion'),
--                      model_base.BASEV2.__table_args__,)
--
--    # Add a relationship to the Port model in order to instruct SQLAlchemy to
--    # eagerly load extra_dhcp_opts bindings
--    ports = orm.relationship(
--        models_v2.Port,
--        backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
-+# MySQL Cluster NDB does not support this constraint.
-+# TODO (oorgeron) Look into making this workable.
-+if mysql_storage_eng_type == "NDBCLUSTER":
-+    class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId):
-+        """Represent a generic concept of extra options associated to a port.
-+
-+        Each port may have none to many dhcp opts associated to it that can
-+        define specifically different or extra options to DHCP clients.
-+        These will be written to the <network_id>/opts files, and each option's
-+        tag will be referenced in the <network_id>/host file.
-+        """
-+        port_id = sa.Column(sa.String(36),
-+                            sa.ForeignKey('ports.id', ondelete="CASCADE"),
-+                            nullable=False)
-+        opt_name = sa.Column(sa.String(64), nullable=False)
-+        opt_value = sa.Column(sa.String(255), nullable=False)
-+        ip_version = sa.Column(sa.Integer, server_default='4', nullable=False)
-+
-+        # Add a relationship to the Port model in order to instruct SQLAlchemy to
-+        # eagerly load extra_dhcp_opts bindings
-+        ports = orm.relationship(
-+            models_v2.Port,
-+            backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
-+else:
-+    class ExtraDhcpOpt(model_base.BASEV2, models_v2.HasId):
-+        """Represent a generic concept of extra options associated to a port.
-+
-+        Each port may have none to many dhcp opts associated to it that can
-+        define specifically different or extra options to DHCP clients.
-+        These will be written to the <network_id>/opts files, and each option's
-+        tag will be referenced in the <network_id>/host file.
-+        """
-+        port_id = sa.Column(sa.String(36),
-+                            sa.ForeignKey('ports.id', ondelete="CASCADE"),
-+                            nullable=False)
-+        opt_name = sa.Column(sa.String(64), nullable=False)
-+        opt_value = sa.Column(sa.String(255), nullable=False)
-+        ip_version = sa.Column(sa.Integer, server_default='4', nullable=False)
-+        __table_args__ = (sa.UniqueConstraint(
-+            'port_id',
-+            'opt_name',
-+            'ip_version',
-+            name='uniq_extradhcpopts0portid0optname0ipversion'),
-+                          model_base.BASEV2.__table_args__,)
-+
-+        # Add a relationship to the Port model in order to instruct SQLAlchemy to
-+        # eagerly load extra_dhcp_opts bindings
-+        ports = orm.relationship(
-+            models_v2.Port,
-+            backref=orm.backref("dhcp_opts", lazy='joined', cascade='delete'))
- 
- 
- class ExtraDhcpOptMixin(object):
---- neutron-2015.1.2/neutron/db/models_v2.py.orig	2016-07-29 13:17:29.114096786 -0600
-+++ neutron-2015.1.2/neutron/db/models_v2.py	2016-07-29 13:40:10.416011760 -0600
-@@ -13,6 +13,7 @@
- #    License for the specific language governing permissions and limitations
- #    under the License.
- 
-+from oslo_config import cfg
- import sqlalchemy as sa
- from sqlalchemy import orm
- 
-@@ -21,6 +22,14 @@ from neutron.common import constants
- from neutron.db import model_base
- from neutron.openstack.common import uuidutils
- 
-+# Attempt to determine the context that this module is being used.
-+# If via neutron-db-manage and cli.py, import global variable. If not,
-+# use oslo_config.
-+try:
-+     from neutron.db.migration.cli import mysql_storage_eng_type
-+except ImportError:
-+     CONF = cfg.CONF
-+     mysql_storage_eng_type = CONF.database.mysql_storage_engine
- 
- class HasTenant(object):
-     """Tenant mixin, add to subclasses that have a tenant."""
-@@ -126,47 +135,89 @@ class SubnetRoute(model_base.BASEV2, Rou
-                           primary_key=True)
- 
- 
--class Port(model_base.BASEV2, HasId, HasTenant):
--    """Represents a port on a Neutron v2 network."""
-+# MySQL Cluster NDB does not support this constraint.
-+# TODO (oorgeron) Look into making this workable.
-+if mysql_storage_eng_type == "NDBCLUSTER":
-+    class Port(model_base.BASEV2, HasId, HasTenant):
-+        """Represents a port on a Neutron v2 network."""
-+
-+        name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-+        network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
-+                               nullable=False)
-+        fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined')
-+        mac_address = sa.Column(sa.String(32), nullable=False)
-+        admin_state_up = sa.Column(sa.Boolean(), nullable=False)
-+        status = sa.Column(sa.String(16), nullable=False)
-+        device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
-+        device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
-+                                 nullable=False)
-+        __table_args__ = (
-+            sa.Index(
-+                'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
-+            sa.Index(
-+                'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
-+            model_base.BASEV2.__table_args__
-+        )
-+
-+        def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
-+                     mac_address=None, admin_state_up=None, status=None,
-+                     device_id=None, device_owner=None, fixed_ips=None):
-+            self.id = id
-+            self.tenant_id = tenant_id
-+            self.name = name
-+            self.network_id = network_id
-+            self.mac_address = mac_address
-+            self.admin_state_up = admin_state_up
-+            self.device_owner = device_owner
-+            self.device_id = device_id
-+            # Since this is a relationship only set it if one is passed in.
-+            if fixed_ips:
-+                self.fixed_ips = fixed_ips
-+
-+            # NOTE(arosen): status must be set last as an event is triggered on!
-+            self.status = status            
-+else:
-+    class Port(model_base.BASEV2, HasId, HasTenant):
-+        """Represents a port on a Neutron v2 network."""
-+
-+        name = sa.Column(sa.String(attr.NAME_MAX_LEN))
-+        network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
-+                               nullable=False)
-+        fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined')
-+        mac_address = sa.Column(sa.String(32), nullable=False)
-+        admin_state_up = sa.Column(sa.Boolean(), nullable=False)
-+        status = sa.Column(sa.String(16), nullable=False)
-+        device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
-+        device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
-+                                 nullable=False)
-+        __table_args__ = (
-+            sa.Index(
-+                'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
-+            sa.Index(
-+                'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
-+            sa.UniqueConstraint(
-+                network_id, mac_address,
-+                name='uniq_ports0network_id0mac_address'),
-+            model_base.BASEV2.__table_args__
-+        )
-+
-+        def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
-+                     mac_address=None, admin_state_up=None, status=None,
-+                     device_id=None, device_owner=None, fixed_ips=None):
-+            self.id = id
-+            self.tenant_id = tenant_id
-+            self.name = name
-+            self.network_id = network_id
-+            self.mac_address = mac_address
-+            self.admin_state_up = admin_state_up
-+            self.device_owner = device_owner
-+            self.device_id = device_id
-+            # Since this is a relationship only set it if one is passed in.
-+            if fixed_ips:
-+                self.fixed_ips = fixed_ips
- 
--    name = sa.Column(sa.String(attr.NAME_MAX_LEN))
--    network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
--                           nullable=False)
--    fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy='joined')
--    mac_address = sa.Column(sa.String(32), nullable=False)
--    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
--    status = sa.Column(sa.String(16), nullable=False)
--    device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False)
--    device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN),
--                             nullable=False)
--    __table_args__ = (
--        sa.Index(
--            'ix_ports_network_id_mac_address', 'network_id', 'mac_address'),
--        sa.Index(
--            'ix_ports_network_id_device_owner', 'network_id', 'device_owner'),
--        sa.UniqueConstraint(
--            network_id, mac_address,
--            name='uniq_ports0network_id0mac_address'),
--        model_base.BASEV2.__table_args__
--    )
--
--    def __init__(self, id=None, tenant_id=None, name=None, network_id=None,
--                 mac_address=None, admin_state_up=None, status=None,
--                 device_id=None, device_owner=None, fixed_ips=None):
--        self.id = id
--        self.tenant_id = tenant_id
--        self.name = name
--        self.network_id = network_id
--        self.mac_address = mac_address
--        self.admin_state_up = admin_state_up
--        self.device_owner = device_owner
--        self.device_id = device_id
--        # Since this is a relationship only set it if one is passed in.
--        if fixed_ips:
--            self.fixed_ips = fixed_ips
--
--        # NOTE(arosen): status must be set last as an event is triggered on!
--        self.status = status
-+            # NOTE(arosen): status must be set last as an event is triggered on!
-+            self.status = status
- 
- 
- class DNSNameServer(model_base.BASEV2):
---- neutron-2015.1.2/neutron/tests/functional/db/test_migrations.py.orig	2016-07-29 13:17:35.264552054 -0600
-+++ neutron-2015.1.2/neutron/tests/functional/db/test_migrations.py	2016-07-29 13:41:12.657034516 -0600
-@@ -19,6 +19,7 @@ import pprint
- import alembic
- import alembic.autogenerate
- import alembic.migration
-+from alembic import context
- from alembic import script as alembic_script
- import mock
- from oslo_config import cfg
-@@ -35,6 +36,9 @@ LOG = logging.getLogger(__name__)
- 
- cfg.CONF.import_opt('core_plugin', 'neutron.common.config')
- 
-+config = context.config
-+CONF = config.neutron_config
-+
- CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- 
- # These tables are still in the neutron database, but their models have moved
-@@ -195,7 +199,7 @@ class _TestModelsMigrations(test_migrati
-         self.assertTrue(len(tables) > 0,
-                         "No tables found. Wrong schema?")
-         noninnodb = [table for table in tables if
--                     insp.get_table_options(table)['mysql_engine'] != 'InnoDB'
-+                     insp.get_table_options(table)['mysql_engine'] != CONF.database.mysql_storage_engine
-                      and table != 'alembic_version']
-         self.assertEqual(0, len(noninnodb), "%s non InnoDB tables created" %
-                                             noninnodb)
---- neutron-2015.1.2/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py.orig	2016-07-29 13:17:41.409730731 -0600
-+++ neutron-2015.1.2/neutron/plugins/ml2/drivers/cisco/apic/apic_model.py	2016-07-29 13:41:57.002278572 -0600
-@@ -13,6 +13,7 @@
- #    License for the specific language governing permissions and limitations
- #    under the License.
- 
-+from alembic import context
- import sqlalchemy as sa
- from sqlalchemy import orm
- 
-@@ -22,6 +23,13 @@ from neutron.db import model_base
- from neutron.db import models_v2
- from neutron.plugins.ml2 import models as models_ml2
- 
-+config = context.config
-+CONF = config.neutron_config
-+
-+if CONF.database.mysql_storage_engine == "NDBCLUSTER":
-+    router_string_length = 64
-+else:
-+    router_string_length = 36
- 
- class RouterContract(model_base.BASEV2, models_v2.HasTenant):
- 
-@@ -34,8 +42,8 @@ class RouterContract(model_base.BASEV2,
- 
-     __tablename__ = 'cisco_ml2_apic_contracts'
- 
--    router_id = sa.Column(sa.String(64), sa.ForeignKey('routers.id',
--                                                       ondelete='CASCADE'),
-+    router_id = sa.Column(sa.String(router_string_length), 
-+                          sa.ForeignKey('routers.id', ondelete='CASCADE'),
-                           primary_key=True)
- 
- 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/vpnaas-01-vpn_db.patch_1	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,28 @@
+This works around an l3-agent bug which causes an exception when the
+l3-agent is started and no VPNs have been defined: migration_callback()
+is then invoked without the expected entries in kwargs.
+
+This patch is Solaris-specific and not suitable for upstream
+contribution.
+
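+As an illustration only (not part of the change below), the unpatched
+callback assumes both keys are always present, so a call with empty
+kwargs raises KeyError:
+
+    def migration_callback(resource, event, trigger, **kwargs):
+        context = kwargs['context']   # KeyError when kwargs is empty
+        router = kwargs['router']
+        ...
+
+The hunk below wraps the lookups in try/except and skips the
+check_router_in_use() call when no context was supplied.
+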
+--- neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/vpn_db.py.~1~	2016-04-07 00:44:22.000000000 -0700
++++ neutron-vpnaas-8.0.0/neutron_vpnaas/db/vpn/vpn_db.py	2016-05-08 21:39:11.200481880 -0700
+@@ -721,11 +721,15 @@ def vpn_callback(resource, event, trigge
+ 
+ 
+ def migration_callback(resource, event, trigger, **kwargs):
+-    context = kwargs['context']
+-    router = kwargs['router']
++    try:
++        context = kwargs['context']
++        router = kwargs['router']
++    except KeyError:
++        context = []
++        pass
+     vpn_plugin = manager.NeutronManager.get_service_plugins().get(
+         p_constants.VPN)
+-    if vpn_plugin:
++    if vpn_plugin and context:
+         vpn_plugin.check_router_in_use(context, router['id'])
+     return True
+ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/vpnaas-02-opts.patch_1	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,38 @@
+In-house patch to include the (public) Solaris VPNaaS options in the
+corresponding configuration files. This patch could potentially be
+sent upstream.
+
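+For reference, a minimal vpn_agent.ini fragment selecting the Solaris
+driver added to the default driver list below (assuming the usual
+vpn_device_driver option name registered under the [vpnagent] group):
+
+    [vpnagent]
+    vpn_device_driver = neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec.SolarisIPsecDriver
+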
+--- neutron-vpnaas-8.0.0/neutron_vpnaas/opts.py.~1~	2016-04-07 00:44:22.000000000 -0700
++++ neutron-vpnaas-8.0.0/neutron_vpnaas/opts.py	2016-06-19 23:06:51.215920180 -0700
+@@ -15,6 +15,7 @@ import neutron.services.provider_configu
+ import neutron_vpnaas.services.vpn.agent
+ import neutron_vpnaas.services.vpn.device_drivers.ipsec
+ import neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec
++import neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec
+ 
+ 
+ def list_agent_opts():
+@@ -27,7 +28,9 @@ def list_agent_opts():
+          neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.
+          strongswan_opts),
+         ('pluto',
+-         neutron_vpnaas.services.vpn.device_drivers.ipsec.pluto_opts)
++         neutron_vpnaas.services.vpn.device_drivers.ipsec.pluto_opts),
++        ('solaris',
++         neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec.solaris_opts)
+     ]
+ 
+ 
+--- neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/agent.py.~1~	2016-04-07 00:44:22.000000000 -0700
++++ neutron-vpnaas-8.0.0/neutron_vpnaas/services/vpn/agent.py	2016-06-19 23:14:24.505511035 -0700
+@@ -37,7 +37,9 @@ vpn_agent_opts = [
+                        'neutron_vpnaas.services.vpn.device_drivers.'
+                        'fedora_strongswan_ipsec.FedoraStrongSwanDriver, '
+                        'neutron_vpnaas.services.vpn.device_drivers.'
+-                       'libreswan_ipsec.LibreSwanDriver'],
++                       'libreswan_ipsec.LibreSwanDriver, '
++                       'neutron_vpnaas.services.vpn.device_drivers.'
++                       'solaris_ipsec.SolarisIPsecDriver'],
+         help=_("The vpn device drivers Neutron will use")),
+ ]
+ cfg.CONF.register_opts(vpn_agent_opts, 'vpnagent')
--- a/components/openstack/neutron/vpnaas_patches/01-vpn_db_add_solaris.patch	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-This works around a l3-agent bug which causes an exception if l3-agent is
-started and no VPN's have been defined. This function gets called with
-kwargs being NULL.
-
-This patch is Solaris-specific and not suitable for upstream
-contribution.
-
---- vendor-packages/neutron_vpnaas/db/vpn/vpn_db.py	Fri Dec 11 09:16:02 2015
-+++ new/usr/lib/python2.7/vendor-packages/neutron_vpnaas/db/vpn/vpn_db.py	Fri Dec 11 09:13:02 2015
-@@ -699,15 +699,20 @@
-             resource_id = kwargs.get('subnet_id')
-         check_func(context, resource_id)
- 
- 
- def migration_callback(resource, event, trigger, **kwargs):
--    context = kwargs['context']
--    router = kwargs['router']
-+    try:
-+        context = kwargs['context']
-+        router = kwargs['router']
-+    except:
-+        context = []
-+        pass
-+
-     vpnservice = manager.NeutronManager.get_service_plugins().get(
-         constants.VPN)
--    if vpnservice:
-+    if vpnservice and context:
-         vpnservice.check_router_in_use(context, router['id'])
-     return True
- 
- 
- def subscribe():