PSARC 2016/268 Neutron EVS Plugin EOF
author Laszlo Peter <laszlo.peter@oracle.com>
Wed, 07 Sep 2016 14:48:41 -0700
changeset 6848 8e252a37ed0d
parent 6847 57069587975f
child 6849 f9a2279efa0d
PSARC 2016/268 Neutron EVS Plugin EOF 24465835 Update Neutron for the Mitaka release 22271305 EOF monolithic neutron plugin for Openstack 18734794 port-create --fixed-ip accepts invalid argument, creates port with wrong IP
components/openstack/neutron/Makefile
components/openstack/neutron/files/agent/l3/solaris_agent.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/files/agent/solaris/interface.py
components/openstack/neutron/files/agent/solaris/namespace_manager.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/agent/solaris/pd.py
components/openstack/neutron/files/bgp_dragent.ini
components/openstack/neutron/files/dhcp_agent.ini
components/openstack/neutron/files/evs/migrate/__init__.py
components/openstack/neutron/files/evs/migrate/evs-neutron-migration.py
components/openstack/neutron/files/evs/migrate/havana_api.py
components/openstack/neutron/files/evs/migrate/migrate-evs-to-ovs
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/evs_plugin.ini
components/openstack/neutron/files/l3_agent.ini
components/openstack/neutron/files/metadata_agent.ini
components/openstack/neutron/files/metering_agent.ini
components/openstack/neutron/files/ml2_conf.ini
components/openstack/neutron/files/neutron-dhcp-agent
components/openstack/neutron/files/neutron-l3-agent
components/openstack/neutron/files/neutron-openvswitch-agent.xml
components/openstack/neutron/files/neutron-server
components/openstack/neutron/files/neutron-upgrade
components/openstack/neutron/files/neutron.conf
components/openstack/neutron/files/neutron_vpnaas.conf
components/openstack/neutron/files/ovs_neutron_plugin.ini
components/openstack/neutron/files/plugins/ml2/linuxbridge_agent.ini
components/openstack/neutron/files/plugins/ml2/macvtap_agent.ini
components/openstack/neutron/files/plugins/ml2/ml2_conf.ini
components/openstack/neutron/files/plugins/ml2/ml2_conf_sriov.ini
components/openstack/neutron/files/plugins/ml2/openvswitch_agent.ini
components/openstack/neutron/files/plugins/ml2/sriov_agent.ini
components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py
components/openstack/neutron/files/vpn_agent.ini
components/openstack/neutron/neutron.p5m
components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch
components/openstack/neutron/patches/02-l3-agent-add-solaris.patch
components/openstack/neutron/patches/03-metadata-driver-solaris.patch
components/openstack/neutron/patches/04-requirements.patch
components/openstack/neutron/patches/05-alembic-migrations.patch
components/openstack/neutron/patches/06-ml2-ovs-support.patch
components/openstack/neutron/patches/06-opts.patch
components/openstack/neutron/patches/07-ml2-ovs-support.patch
components/openstack/neutron/patches/07-ovs-agent-monitor-assertion-fix.patch
components/openstack/neutron/patches/08-ovs-binding-failed-fix.patch
components/openstack/neutron/patches/09-dhcp-agent-warning-fix.patch
components/openstack/neutron/patches/09-ml2-ovs-agent-misc.patch
components/openstack/neutron/patches/10-floatingip-remove-port-on-failed-create.patch
components/openstack/neutron/patches/10-interface-driver-entry-point.patch
components/openstack/neutron/patches/11-mysql_cluster_support.patch
components/openstack/neutron/patches/vpnaas-01-vpn_db.patch_1
components/openstack/neutron/patches/vpnaas-02-opts.patch_1
components/openstack/neutron/vpnaas_patches/01-vpn_db_add_solaris.patch
--- a/components/openstack/neutron/Makefile	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/Makefile	Wed Sep 07 14:48:41 2016 -0700
@@ -27,36 +27,36 @@
 include ../../../make-rules/shared-targets.mk
 
 COMPONENT_NAME=		neutron
-COMPONENT_CODENAME=	kilo
-COMPONENT_VERSION=	2015.1.2
-COMPONENT_BE_VERSION=	2015.1
+COMPONENT_CODENAME=	mitaka
+COMPONENT_VERSION=	8.1.2
+COMPONENT_BE_VERSION=	2016.1
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:3ff282d75e86ea25f64e97e24b2960e7ffaeef7cf4a69c16d20ffe18065d0ef0
-COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
+    sha256:a3fdeed1421e1586bbdabd046474f1060bff4751257eacd90489f9e1b6eeff9d
+COMPONENT_ARCHIVE_URL=	https://tarballs.openstack.org/$(COMPONENT_NAME)/$(COMPONENT_ARCHIVE)
 COMPONENT_SIG_URL=	$(COMPONENT_ARCHIVE_URL).asc
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/neutron
-IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
 IPS_PKG_NAME=		cloud/openstack/neutron
 
-TPNO=			25791
-TPNO_VPNAAS=		27275
-
-NUM_EXTRA_ARCHIVES=	1
+TPNO=			30361
+TPNO_VPNAAS=		30362
 
 COMPONENT_NAME_1=	neutron-vpnaas
-COMPONENT_SRC_1=	$(COMPONENT_NAME_1)-$(COMPONENT_VERSION)
+COMPONENT_VERSION_1=	8.1.2
+COMPONENT_SRC_1=	$(COMPONENT_NAME_1)-$(COMPONENT_VERSION_1)
 COMPONENT_ARCHIVE_1=	$(COMPONENT_SRC_1).tar.gz
 COMPONENT_ARCHIVE_HASH_1=	\
-    sha256:969d0d098db2d5df33d3008d3139821330bafcc7d7e684472db8b4c23b2126e6
-COMPONENT_ARCHIVE_URL_1=	http://launchpad.net/neutron/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE_1)
+    sha256:3852d8bf27c49c1beb0813a327d10e50b185f26e4479ad168498c4a2c6c97dd2
+COMPONENT_ARCHIVE_URL_1=https://tarballs.openstack.org/$(COMPONENT_NAME_1)/$(COMPONENT_ARCHIVE_1)
 COMPONENT_SIG_URL_1=	$(COMPONENT_ARCHIVE_URL_1).asc
 SOURCE_DIR_1=		$(COMPONENT_DIR)/$(COMPONENT_SRC_1)
+
 DEVICE_DRIVERS=		neutron_vpnaas/services/vpn/device_drivers
 DEVICE_TEMPLATE=	$(DEVICE_DRIVERS)/template/solaris
 
+PKG_PROTO_DIRS =	$(SOURCE_DIR_1)
 PKG_VARS +=		COMPONENT_BE_VERSION
 
 include $(WS_MAKE_RULES)/prep.mk
@@ -80,10 +80,25 @@
 PKG_MACROS +=		PYVER=$(PYTHON_VERSIONS)
 PKG_MACROS +=		PYV=$(shell echo $(PYTHON_VERSIONS) | tr -d .)
 
+install-vpnaas: $(SOURCE_DIR_1)/.installed
+
+$(SOURCE_DIR_1)/.installed:
+	(cd $(SOURCE_DIR_1); \
+	 $(ENV) \
+	     HOME=$(BUILD_DIR)/config-$* \
+	     PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
+	     $(COMPONENT_BUILD_ENV) \
+	     $(PYTHON.$(BITS)) ./setup.py build \
+		 --build-base $(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%); \
+	 $(ENV) \
+	     HOME=$(BUILD_DIR)/config-$* \
+	     PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
+	     $(COMPONENT_BUILD_ENV) \
+	     $(PYTHON.$(BITS)) ./setup.py install $(COMPONENT_INSTALL_ARGS))
+	$(TOUCH) $(SOURCE_DIR_1)/.installed
+
 # move all the proper files into place and construct .pyc files for them
 COMPONENT_POST_BUILD_ACTION += \
-    $(GPATCH) -d $(SOURCE_DIR_1) $(GPATCH_FLAGS) \
-	< vpnaas_patches/01-vpn_db_add_solaris.patch; \
     (cd $(SOURCE_DIR_1) ; \
 	$(ENV) PROTO_DIR=$(PYTHON_VERSIONS:%=$(BUILD_DIR)/$(MACH)-%) \
 	    HOME=$(BUILD_DIR)/config-$* $(COMPONENT_BUILD_ENV) \
@@ -102,10 +117,6 @@
 	 files/neutron-server.xml \
 	 files/neutron-upgrade.xml \
 	 $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
-    $(MKDIR) $(PROTO_DIR)/usr/lib/neutron; \
-    $(CP) files/evs/migrate/evs-neutron-migration.py \
-	 $(PROTO_DIR)/usr/lib/neutron/evs-neutron-migration; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent; \
     $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/l3; \
     $(CP) files/agent/l3/solaris_agent.py \
 	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/l3; \
@@ -114,27 +125,21 @@
 	 files/agent/solaris/__init__.py \
 	 files/agent/solaris/dhcp.py \
 	 files/agent/solaris/interface.py \
+	 files/agent/solaris/namespace_manager.py \
 	 files/agent/solaris/net_lib.py \
 	 files/agent/solaris/packetfilter.py \
+	 files/agent/solaris/pd.py \
 	 files/agent/solaris/ra.py \
 	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/agent/solaris; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs; \
-    $(TOUCH) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/__init__.py; \
-    $(CP) files/evs/plugin.py $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs; \
-    $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/migrate; \
-    $(CP) \
-	 files/evs/migrate/__init__.py \
-	 files/evs/migrate/havana_api.py \
-	 $(PROTO_DIR)/$(PYTHON_LIB)/neutron/plugins/evs/migrate; \
     $(MKDIR) $(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_TEMPLATE); \
     $(CP) files/services/vpn/device_drivers/solaris_ipsec.py \
 	$(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_DRIVERS); \
     $(CP) \
+	files/services/vpn/device_drivers/template/solaris/ike.secret.template \
+	files/services/vpn/device_drivers/template/solaris/ike.template \
 	files/services/vpn/device_drivers/template/solaris/ikev2.secret.template \
-	files/services/vpn/device_drivers/template/solaris/ike.template \
 	files/services/vpn/device_drivers/template/solaris/ikev2.template \
 	files/services/vpn/device_drivers/template/solaris/ipsecinit.conf.template \
-	files/services/vpn/device_drivers/template/solaris/ike.secret.template \
 	$(PROTO_DIR)/$(PYTHON_LIB)/$(DEVICE_TEMPLATE)
 
 COMPONENT_POST_INSTALL_ACTION += \
@@ -143,30 +148,24 @@
 # common targets
 build:		$(BUILD_NO_ARCH)
 
-install:	$(INSTALL_NO_ARCH)
+install:	$(INSTALL_NO_ARCH) install-vpnaas
 
 test:		$(NO_TESTS)
 
-system-test:    $(NO_TESTS)
-
+system-test:	$(NO_TESTS)
 
 REQUIRED_PACKAGES += cloud/openstack/openstack-common
-REQUIRED_PACKAGES += library/python/alembic-27
 REQUIRED_PACKAGES += library/python/eventlet-27
-REQUIRED_PACKAGES += library/python/iniparse-27
 REQUIRED_PACKAGES += library/python/netaddr-27
-REQUIRED_PACKAGES += library/python/netifaces-27
-REQUIRED_PACKAGES += library/python/neutronclient-27
 REQUIRED_PACKAGES += library/python/oslo.config-27
-REQUIRED_PACKAGES += library/python/oslo.db-27
+REQUIRED_PACKAGES += library/python/oslo.log-27
+REQUIRED_PACKAGES += library/python/oslo.messaging-27
+REQUIRED_PACKAGES += library/python/oslo.utils-27
 REQUIRED_PACKAGES += library/python/simplejson-27
 REQUIRED_PACKAGES += library/python/six-27
-REQUIRED_PACKAGES += library/python/sqlalchemy-27
 REQUIRED_PACKAGES += network/arping
 REQUIRED_PACKAGES += network/firewall
 REQUIRED_PACKAGES += service/network/dnsmasq
-REQUIRED_PACKAGES += service/network/evs
 REQUIRED_PACKAGES += service/network/openvswitch
 REQUIRED_PACKAGES += system/core-os
-REQUIRED_PACKAGES += system/management/rad/client/rad-python
 REQUIRED_PACKAGES += system/network
--- a/components/openstack/neutron/files/agent/l3/solaris_agent.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/l3/solaris_agent.py	Wed Sep 07 14:48:41 2016 -0700
@@ -18,15 +18,16 @@
 #
 
 """
-Based off generic l3_agent (neutron/agent/l3_agent) code
+Based off of generic l3_agent (neutron/agent/l3/agent.py) code
 """
 
 import errno
 import netaddr
 
-from oslo.config import cfg
+from oslo_config import cfg
 from oslo_log import log as logging
 
+from neutron._i18n import _, _LE, _LW
 from neutron.agent.common import ovs_lib
 from neutron.agent.l3 import agent as l3_agent
 from neutron.agent.l3 import router_info as router
@@ -40,6 +41,7 @@
 from neutron.callbacks import resources
 from neutron.common import constants as l3_constants
 from neutron.common import exceptions as n_exc
+from neutron.common import ipv6_utils
 from neutron.common import utils as common_utils
 
 from neutron_vpnaas.services.vpn import vpn_service
@@ -60,6 +62,8 @@
         self.pf = packetfilter.PacketFilter("_auto/neutron:l3:agent")
         self.iptables_manager = None
         self.remove_route = False
+        self.router_namespace = None
+        self.ns_name = None
         self.ipnet_gwportname = dict()
         self.tenant_subnets = dict()
         self.tenant_subnets['all_tenants'] = set()
@@ -93,14 +97,141 @@
         dname += '_0'
         return dname.replace('-', '_')
 
-    def routes_updated(self):
-        pass
+    def update_routing_table(self, operation, route):
+        if operation == 'replace':
+            operation = 'change'
+            cmd = ['/usr/sbin/route', 'get', route['destination']]
+            try:
+                utils.execute(cmd, log_fail_as_error=False)
+            except:
+                operation = 'add'
+            cmd = ['/usr/sbin/route', operation, route['destination'],
+                   route['nexthop']]
+            utils.execute(cmd)
+        else:
+            assert operation == 'delete'
+            cmd = ['/usr/sbin/route', 'delete', route['destination'],
+                   route['nexthop']]
+            utils.execute(cmd)
+
+    def _add_floating_ip_rules(self, interface_name, fip, fip_statuses):
+        fixed_ip = fip['fixed_ip_address']
+        fip_ip = fip['floating_ip_address']
+        for ipnet, gwportname in self.ipnet_gwportname.iteritems():
+            if fixed_ip in ipnet:
+                break
+        else:
+            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
+            LOG.warn(_("Unable to configure IP address for floating IP(%s)"
+                       " '%s' for '%s'") % (fip['id'], fip_ip, fixed_ip))
+            return False
+
+        label = 'fip_%s' % str(fip_ip)
+        fip_rules = ['pass out quick from %s to any nat-to %s static-port '
+                     'label %s_out reply-to %s@%s' % (fixed_ip, fip_ip, label,
+                                                      fixed_ip,  gwportname)]
+        fip_rules.append('pass in quick from any to %s rdr-to %s label %s_in '
+                         'route-to %s@%s' % (fip_ip, fixed_ip, label,
+                                             fixed_ip, gwportname))
+        self.pf.add_rules(fip_rules, [interface_name, fip_ip])
+        return True
+
+    def process_floating_ip_addresses(self, interface_name):
+        """Configure IP addresses on router's external gateway interface.
+
+        Ensures addresses for existing floating IPs and cleans up
+        those that should not longer be configured.
+        """
+
+        fip_statuses = {}
+        if interface_name is None:
+            LOG.debug('No Interface for floating IPs router: %s',
+                      self.router['id'])
+            return fip_statuses
+
+        ipintf = net_lib.IPInterface(interface_name)
+        ipaddr_list = ipintf.ipaddr_list()['static']
+
+        existing_cidrs = set(ipaddr_list)
+        new_cidrs = set()
 
-    def _get_existing_devices(self):
-        return net_lib.Datalink.show_link()
+        floating_ips = self.get_floating_ips()
+        # Loop once to ensure that floating ips are configured.
+        for fip in floating_ips:
+            fixed_ip = fip['fixed_ip_address']
+            fip_ip = fip['floating_ip_address']
+            fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
+            new_cidrs.add(fip_cidr)
+            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
+            if fip_cidr not in existing_cidrs:
+                try:
+                    ipintf.create_address(fip_cidr, ifcheck=False,
+                                          addrcheck=False)
+                    if not self._add_floating_ip_rules(interface_name, fip,
+                                                       fip_statuses):
+                        continue
+                    net_lib.send_ip_addr_adv_notif(interface_name,
+                                                   fip['floating_ip_address'],
+                                                   self.agent_conf)
+                except Exception as err:
+                    # any exception occurred here should cause the floating IP
+                    # to be set in error state
+                    fip_statuses[fip['id']] = (
+                        l3_constants.FLOATINGIP_STATUS_ERROR)
+                    LOG.warn(_("Unable to configure IP address for "
+                               "floating IP: %s: %s") % (fip['id'], err))
+                    # remove the fip_cidr address if it was added
+                    try:
+                        ipintf.delete_address(fip_cidr)
+                    except:
+                        pass
+                    continue
+            else:
+                existing_anchor_rules = self.pf.list_anchor_rules(
+                    [interface_name, fip_ip])
+                # check if existing fip has been reassigned
+                fip_reassigned = any([fixed_ip not in rule for rule in
+                                      existing_anchor_rules])
+                if fip_reassigned:
+                    LOG.debug("Floating ip '%s' reassigned to '%s'",
+                              fip_ip, fixed_ip)
+                    # flush rules associated with old fixed_ip and add
+                    # new rules for the new fixed_ip
+                    self.pf.remove_anchor([interface_name, fip_ip])
+                    if not self._add_floating_ip_rules(interface_name, fip,
+                                                       fip_statuses):
+                        continue
+                elif fip_statuses[fip['id']] == fip['status']:
+                    # mark the status as not changed. we can't remove it
+                    # because that's how the caller determines that it was
+                    # removed (TODO(gmoodalb): check this)
+                    fip_statuses[fip['id']] = router.FLOATINGIP_STATUS_NOCHANGE
+
+            LOG.debug("Floating ip %(id)s added, status %(status)s",
+                      {'id': fip['id'],
+                       'status': fip_statuses.get(fip['id'])})
+
+        # Clean up addresses that no longer belong on the gateway interface and
+        # remove the binat-to PF rule associated with them
+        for ip_cidr in existing_cidrs - new_cidrs:
+            if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
+                LOG.debug("Removing floating ip %s from interface %s",
+                          ip_cidr, ipintf)
+                self.pf.remove_anchor([interface_name, ip_cidr.split('/')[0]])
+                ipintf.delete_address(ip_cidr, addrcheck=False)
+        return fip_statuses
+
+    def delete(self, agent):
+        self.router['gw_port'] = None
+        self.router[l3_constants.INTERFACE_KEY] = []
+        self.router[l3_constants.FLOATINGIP_KEY] = []
+        self.process_delete(agent)
+        self.disable_radvd()
 
     def internal_network_added(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
+        LOG.debug("adding internal network: port(%s), interface(%s)",
+                  port['id'], internal_dlname)
         # driver just returns if datalink and IP interface already exists
         self.driver.plug(port['tenant_id'], port['network_id'], port['id'],
                          internal_dlname, port['mac_address'],
@@ -121,9 +252,8 @@
         # listening at self.agent_conf.metadata_port
         if self.agent_conf.enable_metadata_proxy and ipversion == 4:
             rules.append('pass in quick proto tcp to 169.254.169.254/32 '
-                         'port 80 rdr-to 127.0.0.1 port %s label metadata_%s '
-                         'reply-to %s' % (self.agent_conf.metadata_port,
-                          internal_dlname, internal_dlname))
+                         'port 80 rdr-to 127.0.0.1 port %s label metadata_%s'
+                         % (self.agent_conf.metadata_port, internal_dlname))
 
         # Since we support shared router model, we need to block the new
         # internal port from reaching other tenant's ports. However, if
@@ -173,6 +303,8 @@
 
     def internal_network_removed(self, port):
         internal_dlname = self.get_internal_device_name(port['id'])
+        LOG.debug("removing internal network: port(%s) interface(%s)",
+                  port['id'], internal_dlname)
         # remove the anchor and tables associated with this internal port
         self.pf.remove_anchor_recursively([internal_dlname])
         if self.ex_gw_port and self._snat_enabled:
@@ -181,9 +313,14 @@
             self.pf.remove_anchor_recursively([external_dlname,
                                                internal_dlname])
         if net_lib.Datalink.datalink_exists(internal_dlname):
-            self.driver.fini_l3(internal_dlname)
             self.driver.unplug(internal_dlname)
 
+    def _get_existing_devices(self):
+        return net_lib.Datalink.show_link()
+
+    def internal_network_updated(self, interface_name, ip_cidrs):
+        pass
+
     def _apply_common_rules(self, all_subnets, internal_ports):
         v4_subnets = [subnet for subnet in all_subnets
                       if netaddr.IPNetwork(subnet).version == 4]
@@ -289,7 +426,7 @@
                 self.pf.replace_table_entry(allow_tblname, list(allow_subnets),
                                             [internal_dlname, 'normal'])
 
-    def _process_internal_ports(self):
+    def _process_internal_ports(self, pd):
         existing_port_ids = set([p['id'] for p in self.internal_ports])
 
         internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
@@ -309,14 +446,26 @@
         enable_ra = False
         for p in new_ports:
             self.internal_network_added(p)
+            LOG.debug("appending port %s to internal_ports cache", p)
             self.internal_ports.append(p)
             enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+            for subnet in p['subnets']:
+                if ipv6_utils.is_ipv6_pd_enabled(subnet):
+                    interface_name = self.get_internal_device_name(p['id'])
+                    pd.enable_subnet(self.router_id, subnet['id'],
+                                     subnet['cidr'],
+                                     interface_name, p['mac_address'])
 
         for p in old_ports:
             self.internal_network_removed(p)
+            LOG.debug("removing port %s from internal_ports cache", p)
             self.internal_ports.remove(p)
             enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
+            for subnet in p['subnets']:
+                if ipv6_utils.is_ipv6_pd_enabled(subnet):
+                    pd.disable_subnet(self.router_id, subnet['id'])
 
+#         updated_cidres = []
 #         if updated_ports:
 #             for index, p in enumerate(internal_ports):
 #                 if not updated_ports.get(p['id']):
@@ -324,10 +473,27 @@
 #                 self.internal_ports[index] = updated_ports[p['id']]
 #                 interface_name = self.get_internal_device_name(p['id'])
 #                 ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
+#                 LOG.debug("updating internal network for port %s", p)
+#                 updated_cidrs += ip_cidrs
+
 #                 self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
 #                         namespace=self.ns_name)
 #                 enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
 
+#         # Check if there is any pd prefix update
+#         for p in internal_ports:
+#             if p['id'] in (set(current_port_ids) & set(existing_port_ids)):
+#                 for subnet in p.get('subnets', []):
+#                     if ipv6_utils.is_ipv6_pd_enabled(subnet):
+#                         old_prefix = pd.update_subnet(self.router_id,
+#                                                       subnet['id'],
+#                                                       subnet['cidr'])
+#                         if old_prefix:
+#                             self._internal_network_updated(p, subnet['id'],
+#                                                            subnet['cidr'],
+#                                                            old_prefix,
+#                                                            updated_cidrs)
+#                             enable_ra = True
         # Enable RA
         if enable_ra:
             self.radvd.enable(internal_ports)
@@ -342,110 +508,14 @@
         for stale_dev in stale_devs:
             LOG.debug(_('Deleting stale internal router device: %s'),
                       stale_dev)
-            self.driver.fini_l3(stale_dev)
+            pd.remove_stale_ri_ifname(self.router_id, stale_dev)
             self.driver.unplug(stale_dev)
 
-    def _add_floating_ip_rules(self, interface_name, fip, fip_statuses):
-        fixed_ip = fip['fixed_ip_address']
-        fip_ip = fip['floating_ip_address']
-        for ipnet, gwportname in self.ipnet_gwportname.iteritems():
-            if fixed_ip in ipnet:
-                break
-        else:
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
-            LOG.warn(_("Unable to configure IP address for floating IP(%s)"
-                       " '%s' for '%s'") % (fip['id'], fip_ip, fixed_ip))
-            return False
-
-        label = 'fip_%s' % str(fip_ip)
-        fip_rules = ['pass out quick from %s to any nat-to %s static-port '
-                     'label %s_out reply-to %s@%s' % (fixed_ip, fip_ip, label,
-                                                      fixed_ip,  gwportname)]
-        fip_rules.append('pass in quick from any to %s rdr-to %s label %s_in '
-                         'route-to %s@%s' % (fip_ip, fixed_ip, label,
-                                             fixed_ip, gwportname))
-        self.pf.add_rules(fip_rules, [interface_name, fip_ip])
-        return True
-
-    def process_floating_ip_addresses(self, interface_name):
-        """Configure IP addresses on router's external gateway interface.
-
-        Ensures addresses for existing floating IPs and cleans up
-        those that should not longer be configured.
-        """
-
-        fip_statuses = {}
-        if interface_name is None:
-            LOG.debug('No Interface for floating IPs router: %s',
-                      self.router['id'])
-            return fip_statuses
-
-        ipintf = net_lib.IPInterface(interface_name)
-        ipaddr_list = ipintf.ipaddr_list()['static']
-
-        existing_cidrs = set(ipaddr_list)
-        new_cidrs = set()
-
-        floating_ips = self.get_floating_ips()
-
-        # Loop once to ensure that floating ips are configured.
-        for fip in floating_ips:
-            fixed_ip = fip['fixed_ip_address']
-            fip_ip = fip['floating_ip_address']
-            fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
-            new_cidrs.add(fip_cidr)
-            if fip_cidr not in existing_cidrs:
-                try:
-                    ipintf.create_address(fip_cidr, ifcheck=False,
-                                          addrcheck=False)
-                    if not self._add_floating_ip_rules(interface_name, fip,
-                                                       fip_statuses):
-                        continue
-                    net_lib.send_ip_addr_adv_notif(interface_name,
-                                                   fip['floating_ip_address'],
-                                                   self.agent_conf)
-                except Exception as err:
-                    # any exception occurred here should cause the floating IP
-                    # to be set in error state
-                    fip_statuses[fip['id']] = (
-                        l3_constants.FLOATINGIP_STATUS_ERROR)
-                    LOG.warn(_("Unable to configure IP address for "
-                               "floating IP: %s: %s") % (fip['id'], err))
-                    # remove the fip_cidr address if it was added
-                    try:
-                        ipintf.delete_address(fip_cidr)
-                    except:
-                        pass
-                    continue
-            else:
-                existing_anchor_rules = self.pf.list_anchor_rules(
-                    [interface_name, fip_ip])
-                # check if existing fip has been reassigned
-                fip_reassigned = any([fixed_ip not in rule for rule in
-                                      existing_anchor_rules])
-                if fip_reassigned:
-                    LOG.debug("Floating ip '%s' reassigned to '%s'",
-                              fip_ip, fixed_ip)
-                    # flush rules associated with old fixed_ip and add
-                    # new rules for the new fixed_ip
-                    self.pf.remove_anchor([interface_name, fip_ip])
-                    if not self._add_floating_ip_rules(interface_name, fip,
-                                                       fip_statuses):
-                        continue
-            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
-            LOG.debug("Floating ip %(id)s added, status %(status)s",
-                      {'id': fip['id'], 'status': fip_statuses.get(fip['id'])})
-
-        # Clean up addresses that no longer belong on the gateway interface and
-        # remove the binat-to PF rule associated with them
-        for ip_cidr in existing_cidrs - new_cidrs:
-            if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
-                self.pf.remove_anchor([interface_name, ip_cidr.split('/')[0]])
-                ipintf.delete_address(ip_cidr, addrcheck=False)
-        return fip_statuses
-
     # TODO(gmoodalb): need to do more work on ipv6 gateway
     def external_gateway_added(self, ex_gw_port, external_dlname):
+        LOG.debug("External gateway added: port(%s), interface(%s)",
+                  ex_gw_port, external_dlname)
+        # TODO(gmoodalb): add MTU to plug()?
         self.driver.plug(ex_gw_port['tenant_id'], ex_gw_port['network_id'],
                          ex_gw_port['id'], external_dlname,
                          ex_gw_port['mac_address'],
@@ -493,6 +563,8 @@
         pass
 
     def external_gateway_removed(self, ex_gw_port, external_dlname):
+        LOG.debug("External gateway removed: port(%s), interface(%s)",
+                  ex_gw_port, external_dlname)
         # remove nested anchor rule first
         self.pf.remove_nested_anchor_rule(None, external_dlname)
 
@@ -509,11 +581,10 @@
                 utils.execute(cmd, check_exit_code=False)
 
         if net_lib.Datalink.datalink_exists(external_dlname):
-            self.driver.fini_l3(external_dlname)
             self.driver.unplug(external_dlname,
                                self.agent_conf.external_network_bridge)
 
-    def _process_external_gateway(self, ex_gw_port):
+    def _process_external_gateway(self, ex_gw_port, pd):
         # TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
         ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
                          self.ex_gw_port and self.ex_gw_port['id'])
@@ -523,24 +594,16 @@
         if ex_gw_port_id:
             interface_name = self.get_external_device_name(ex_gw_port_id)
         if ex_gw_port:
-            def _gateway_ports_equal(port1, port2):
-                def _get_filtered_dict(d, ignore):
-                    return dict((k, v) for k, v in d.iteritems()
-                                if k not in ignore)
-
-                keys_to_ignore = set(['binding:host_id'])
-                port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
-                port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
-                return port1_filtered == port2_filtered
-
             if not self.ex_gw_port:
                 self.external_gateway_added(ex_gw_port, interface_name)
+                pd.add_gw_interface(self.router['id'], interface_name)
                 ex_gw_port_status = 'added'
-            elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
+            elif not self._gateway_ports_equal(ex_gw_port, self.ex_gw_port):
                 self.external_gateway_updated(ex_gw_port, interface_name)
                 ex_gw_port_status = 'updated'
         elif not ex_gw_port and self.ex_gw_port:
             self.external_gateway_removed(self.ex_gw_port, interface_name)
+            pd.remove_gw_interface(self.router['id'])
             ex_gw_port_status = 'removed'
 
         # Remove any external stale router interfaces (i.e., l3e.. VNICs)
@@ -551,12 +614,12 @@
         for stale_dev in stale_devs:
             LOG.debug(_('Deleting stale external router device: %s'),
                       stale_dev)
-            self.driver.fini_l3(stale_dev)
             self.driver.unplug(stale_dev)
 
         # Process SNAT rules for external gateway
-        self.perform_snat_action(self._handle_router_snat_rules,
-                                 interface_name, ex_gw_port_status)
+        gw_port = self._router.get('gw_port')
+        self._handle_router_snat_rules(gw_port, interface_name,
+                                       ex_gw_port_status)
 
     def external_gateway_snat_rules(self, ex_gw_port_ip, external_dlname):
         rules = {}
@@ -573,7 +636,10 @@
         return rules
 
     def _handle_router_snat_rules(self, ex_gw_port, external_dlname,
-                                  ex_gw_port_status, action):
+                                  ex_gw_port_status):
+        # Todo(gmoodalb): need this when we support address_scope
+        # self.process_external_port_address_scope_routing(iptables_manager)
+
         # Remove all the old SNAT rules
         # This is safe because if use_namespaces is set as False
         # then the agent can only configure one router, otherwise
@@ -585,7 +651,7 @@
                     self.pf.remove_anchor(snat_anchor.split('/')[-2:])
 
         # And add them back if the action is add_rules
-        if action == 'add_rules' and ex_gw_port_status in ['added', 'updated']:
+        if ex_gw_port_status in ['added', 'updated']:
             # NAT rules are added only if ex_gw_port has an IPv4 address
             ex_gw_port_ip = ex_gw_port['fixed_ips'][0]['ip_address']
             if netaddr.IPAddress(ex_gw_port_ip).version != 4:
@@ -596,10 +662,10 @@
                 self.pf.add_rules(rules, [external_dlname, internal_dlname])
 
     def process_external(self, agent):
-        existing_floating_ips = self.floating_ips
+        fip_statuses = {}
         try:
             ex_gw_port = self.get_ex_gw_port()
-            self._process_external_gateway(ex_gw_port)
+            self._process_external_gateway(ex_gw_port, agent.pd)
             # TODO(Carl) Return after setting existing_floating_ips and
             # still call update_fip_statuses?
             if not ex_gw_port:
@@ -609,20 +675,22 @@
             # configure their addresses on the external gateway port
             interface_name = self.get_external_device_name(ex_gw_port['id'])
             fip_statuses = self.configure_fip_addresses(interface_name)
-        except (n_exc.FloatingIpSetupException,
-                n_exc.IpTablesApplyException) as e:
+        except n_exc.FloatingIpSetupException:
                 # All floating IPs must be put in error state
-                LOG.exception(e)
+                LOG.exception(_LE("Failed to process floating IPs."))
                 fip_statuses = self.put_fips_in_error_state()
+        finally:
+            self.update_fip_statuses(agent, fip_statuses)
 
-        agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
+    def process_external_port_address_scope_routing(self, iptables_manager):
+        pass
+
+    def process_address_scope(self):
+        pass
 
 
 class L3NATAgent(l3_agent.L3NATAgentWithStateReport):
     OPTS = [
-        cfg.StrOpt('external_network_datalink', default='net0',
-                   help=_("Name of the datalink that connects to "
-                          "an external network.")),
         cfg.BoolOpt('allow_forwarding_between_networks', default=False,
                     help=_("Allow forwarding of packets between tenant's "
                            "networks")),
@@ -636,6 +704,35 @@
         self.service = vpn_service.VPNService(self)
         self.device_drivers = self.service.load_device_drivers(host)
 
+    def _check_config_params(self):
+        """Check items in configuration files.
+
+        Check for required and invalid configuration items.
+        The actual values are not verified for correctness.
+        """
+        if not self.conf.interface_driver:
+            msg = _LE('An interface driver must be specified')
+            LOG.error(msg)
+            raise SystemExit(1)
+
+        if not self.conf.router_id:
+            msg = _LE('Router id (router_id) is required to be set.')
+            LOG.error(msg)
+            raise SystemExit(1)
+
+        if self.conf.ipv6_gateway:
+            # ipv6_gateway configured. Check for valid v6 link-local address.
+            try:
+                msg = _LE("%s used in config as ipv6_gateway is not a valid "
+                          "IPv6 link-local address.")
+                ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
+                if ip_addr.version != 6 or not ip_addr.is_link_local():
+                    LOG.error(msg, self.conf.ipv6_gateway)
+                    raise SystemExit(1)
+            except netaddr.AddrFormatError:
+                LOG.error(msg, self.conf.ipv6_gateway)
+                raise SystemExit(1)
+
     def _router_added(self, router_id, router):
         args = []
         kwargs = {
@@ -660,10 +757,9 @@
                       self.conf.external_network_bridge)
             return
 
-        # If namespaces are disabled, only process the router associated
+        # We don't support namespaces so only process the router associated
         # with the configured agent id.
-        if (not self.conf.use_namespaces and
-                router['id'] != self.conf.router_id):
+        if (router['id'] != self.conf.router_id):
             raise n_exc.RouterNotCompatibleWithAgent(router_id=router['id'])
 
         # Either ex_net_id or handle_internal_only_routers must be set
--- a/components/openstack/neutron/files/agent/solaris/dhcp.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/dhcp.py	Wed Sep 07 14:48:41 2016 -0700
@@ -19,18 +19,20 @@
 
 import abc
 import netaddr
+import os
 
-from oslo.config import cfg
+from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_utils import excutils
 
+from neutron._i18n import _, _LI, _LW, _LE
+from neutron.agent.linux import dhcp
 from neutron.agent.linux import utils
-from neutron.agent.linux import dhcp
 from neutron.agent.solaris import net_lib
 from neutron.common import constants
 from neutron.common import exceptions
 from neutron.common import ipv6_utils
 
-
 LOG = logging.getLogger(__name__)
 
 
@@ -43,11 +45,24 @@
                                       version, plugin)
         self.device_manager = DeviceManager(self.conf, plugin)
 
+    # overrides method in DhcpLocalProcess due to no namespace support
+    def _destroy_namespace_and_port(self):
+        try:
+            self.device_manager.destroy(self.network, self.interface_name)
+        except RuntimeError:
+            LOG.warning(_LW('Failed trying to delete interface: %s'),
+                        self.interface_name)
+
     def _build_cmdline_callback(self, pid_file):
+        # We ignore local resolv.conf if dns servers are specified
+        # or if local resolution is explicitly disabled.
+        _no_resolv = (
+            '--no-resolv' if self.conf.dnsmasq_dns_servers or
+            not self.conf.dnsmasq_local_resolv else '')
         cmd = [
             '/usr/lib/inet/dnsmasq',
             '--no-hosts',
-            '--no-resolv',
+            _no_resolv,
             '--strict-order',
             '--bind-interfaces',
             '--interface=%s' % self.interface_name,
@@ -56,7 +71,8 @@
             '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
             '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
             '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
-            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases')
+            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
+            '--dhcp-match=set:ipxe,175',
         ]
 
         possible_leases = 0
@@ -110,7 +126,7 @@
                 possible_leases += cidr.size
 
         if cfg.CONF.advertise_mtu:
-            mtu = self.network.mtu
+            mtu = getattr(self.network, 'mtu', 0)
             # Do not advertise unknown mtu
             if mtu > 0:
                 cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
@@ -132,12 +148,35 @@
         if self.conf.dhcp_broadcast_reply:
             cmd.append('--dhcp-broadcast')
 
+        if self.conf.dnsmasq_base_log_dir:
+            log_dir = os.path.join(
+                self.conf.dnsmasq_base_log_dir,
+                self.network.id)
+            try:
+                if not os.path.exists(log_dir):
+                    os.makedirs(log_dir)
+            except OSError:
+                LOG.error(_LE('Error while creating dnsmasq log dir: %s'),
+                          log_dir)
+            else:
+                log_filename = os.path.join(log_dir, 'dhcp_dns_log')
+                cmd.append('--log-queries')
+                cmd.append('--log-dhcp')
+                cmd.append('--log-facility=%s' % log_filename)
+
         return cmd
 
-    def _release_lease(self, mac_address, ip):
+    def _release_lease(self, mac_address, ip, client_id):
         """Release a DHCP lease."""
+        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
+            # Note(SridharG) dhcp_release is only supported for IPv4
+            # addresses. For more details, please refer to man page.
+            return
+
         cmd = ['/usr/lib/inet/dhcp_release', self.interface_name,
                ip, mac_address]
+        if client_id:
+            cmd.append(client_id)
         utils.execute(cmd)
 
     def _make_subnet_interface_ip_map(self):
@@ -157,105 +196,95 @@
     def __init__(self, conf, plugin):
         super(DeviceManager, self).__init__(conf, plugin)
 
-    def setup_dhcp_port(self, network):
-        """Create/update DHCP port for the host if needed and return port."""
+    def _set_default_route(self, network, device_name):
+        """Sets the default gateway for this dhcp namespace.
+
+        This method is idempotent and will only adjust the route if adjusting
+        it would change it from what it already is.  This makes it safe to call
+        and avoids unnecessary perturbation of the system.
+        """
+        pass
 
-        device_id = self.get_device_id(network)
-        subnets = {}
-        dhcp_enabled_subnet_ids = []
-        for subnet in network.subnets:
-            if subnet.enable_dhcp:
-                dhcp_enabled_subnet_ids.append(subnet.id)
-                subnets[subnet.id] = subnet
+    def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
+        """Set up the existing DHCP port, if there is one."""
 
-        dhcp_port = None
+        # To avoid pylint thinking that port might be undefined after
+        # the following loop...
+        port = None
+
+        # Look for an existing DHCP port for this network.
         for port in network.ports:
             port_device_id = getattr(port, 'device_id', None)
             port_device_owner = getattr(port, 'device_owner', None)
-
-            # if the agent is started on a different node, then the
-            # device_ids will be different since they are based off
-            # hostname.
             if (port_device_id == device_id or
-                    (port_device_owner == constants.DEVICE_OWNER_DHCP and
-                     port_device_id.startswith('dhcp'))):
-                port_fixed_ips = []
-                for fixed_ip in port.fixed_ips:
-                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
-                                           'ip_address': fixed_ip.ip_address})
-                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
-                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
+                (port_device_owner == constants.DEVICE_OWNER_DHCP and
+                 port_device_id.startswith('dhcp'))):
+                # If using gateway IPs on this port, we can skip the
+                # following code, whose purpose is just to review and
+                # update the Neutron-allocated IP addresses for the
+                # port.
+                if self.driver.use_gateway_ips:
+                    return port
+                # Otherwise break out, as we now have the DHCP port
+                # whose subnets and addresses we need to review.
+                break
+        else:
+            return None
 
-                # If there are dhcp_enabled_subnet_ids here that means that
-                # we need to add those to the port and call update.
-                if dhcp_enabled_subnet_ids:
-                    port_fixed_ips.extend(
-                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-                    dhcp_port = self.plugin.update_dhcp_port(
-                        port.id, {'port': {'network_id': network.id,
-                                           'fixed_ips': port_fixed_ips}})
-                    if not dhcp_port:
-                        raise exceptions.Conflict()
-                else:
-                    dhcp_port = port
-                # break since we found port that matches device_id
-                break
+        # Compare what the subnets should be against what is already
+        # on the port.
+        dhcp_enabled_subnet_ids = set(dhcp_subnets)
+        port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
 
-        # check for a reserved DHCP port
-        if dhcp_port is None:
-            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                      ' does not yet exist. Checking for a reserved port.',
-                      {'device_id': device_id, 'network_id': network.id})
-            for port in network.ports:
-                port_device_id = getattr(port, 'device_id', None)
-                if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
-                    dhcp_port = self.plugin.update_dhcp_port(
-                        port.id, {'port': {'network_id': network.id,
-                                           'device_id': device_id}})
-                    if dhcp_port:
-                        break
+        # If those differ, we need to call update.
+        if dhcp_enabled_subnet_ids != port_subnet_ids:
+            # Collect the subnets and fixed IPs that the port already
+            # has, for subnets that are still in the DHCP-enabled set.
+            wanted_fixed_ips = []
+            for fixed_ip in port.fixed_ips:
+                if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
+                    wanted_fixed_ips.append(
+                        {'subnet_id': fixed_ip.subnet_id,
+                         'ip_address': fixed_ip.ip_address})
 
-        # DHCP port has not yet been created.
-        if dhcp_port is None:
-            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
-                      ' does not yet exist.', {'device_id': device_id,
-                                               'network_id': network.id})
-            port_dict = dict(
-                name='',
-                admin_state_up=True,
-                device_id=device_id,
-                network_id=network.id,
-                tenant_id=network.tenant_id,
-                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
+            # Add subnet IDs for new DHCP-enabled subnets.
+            wanted_fixed_ips.extend(
+                dict(subnet_id=s)
+                for s in dhcp_enabled_subnet_ids - port_subnet_ids)
 
-        if not dhcp_port:
-            raise exceptions.Conflict()
+            # Update the port to have the calculated subnets and fixed
+            # IPs.  The Neutron server will allocate a fresh IP for
+            # each subnet that doesn't already have one.
+            port = self.plugin.update_dhcp_port(
+                port.id,
+                {'port': {'network_id': network.id,
+                          'fixed_ips': wanted_fixed_ips}})
+            if not port:
+                raise exceptions.Conflict()
 
-        # Convert subnet_id to subnet dict
-        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
-                          ip_address=fixed_ip.ip_address,
-                          subnet=subnets[fixed_ip.subnet_id])
-                     for fixed_ip in dhcp_port.fixed_ips]
-
-        ips = [dhcp.DictModel(item) if isinstance(item, dict) else item
-               for item in fixed_ips]
-        dhcp_port.fixed_ips = ips
-
-        return dhcp_port
+        return port
 
     def setup(self, network):
         """Create and initialize a device for network's DHCP on this host."""
         port = self.setup_dhcp_port(network)
+        self._update_dhcp_port(network, port)
         interface_name = self.get_interface_name(network, port)
 
         if net_lib.Datalink.datalink_exists(interface_name):
             LOG.debug('Reusing existing device: %s.', interface_name)
         else:
-            self.driver.plug(network.tenant_id, network.id,
-                             port.id, interface_name, port.mac_address,
-                             network=network,
-                             vif_type=getattr(port, 'binding:vif_type', None))
+            try:
+                self.driver.plug(network.tenant_id, network.id,
+                                 port.id, interface_name, port.mac_address,
+                                 network=network, mtu=network.get('mtu'),
+                                 vif_type=getattr(port, 'binding:vif_type',
+                                                  None))
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_LE('Unable to plug DHCP port for '
+                                      'network %s. Releasing port.'),
+                                  network.id)
+                    self.plugin.release_dhcp_port(network.id, port.device_id)
         ip_cidrs = []
         addrconf = False
         for fixed_ip in port.fixed_ips:
@@ -267,14 +296,17 @@
             else:
                 addrconf = True
 
+        if self.driver.use_gateway_ips:
+            # For each DHCP-enabled subnet, add that subnet's gateway
+            # IP address to the Linux device for the DHCP port.
+            for subnet in network.subnets:
+                if not subnet.enable_dhcp:
+                    continue
+                gateway = subnet.gateway_ip
+                if gateway:
+                    net = netaddr.IPNetwork(subnet.cidr)
+                    ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
+
         self.driver.init_l3(interface_name, ip_cidrs, addrconf=addrconf)
 
         return interface_name
-
-    def destroy(self, network, device_name):
-        """Destroy the device used for the network's DHCP on this host."""
-
-        self.driver.fini_l3(device_name)
-        self.driver.unplug(device_name)
-        self.plugin.release_dhcp_port(network.id,
-                                      self.get_device_id(network))
--- a/components/openstack/neutron/files/agent/solaris/interface.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/interface.py	Wed Sep 07 14:48:41 2016 -0700
@@ -15,9 +15,6 @@
 # @author: Girish Moodalbail, Oracle, Inc.
 
 from openstack_common import get_ovsdb_info
-import rad.client as radcli
-import rad.connect as radcon
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
 
 from oslo_config import cfg
 from oslo_log import log as logging
@@ -33,23 +30,32 @@
 LOG = logging.getLogger(__name__)
 
 OPTS = [
-    cfg.StrOpt('evs_controller', default='ssh://[email protected]',
-               help=_("An URI that specifies an EVS controller"))
+    cfg.StrOpt('admin_user',
+               help=_("Admin username")),
+    cfg.StrOpt('admin_password',
+               help=_("Admin password"),
+               secret=True),
+    cfg.StrOpt('admin_tenant_name',
+               help=_("Admin tenant name")),
+    cfg.StrOpt('auth_url',
+               help=_("Authentication URL")),
+    cfg.StrOpt('auth_strategy', default='keystone',
+               help=_("The type of authentication to use")),
+    cfg.StrOpt('auth_region',
+               help=_("Authentication region")),
+    cfg.StrOpt('endpoint_type',
+               default='publicURL',
+               help=_("Network service endpoint type to pull from "
+                      "the keystone catalog")),
 ]
 
 
-class EVSControllerError(exceptions.NeutronException):
-    message = _("EVS controller: %(errmsg)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
+class OVSInterfaceDriver(object):
+    """Driver used to manage Solaris OVS VNICs.
 
-
-class SolarisVNICDriver(object):
-    """Driver used to manage Solaris EVS VNICs.
-
-    This class provides methods to create/delete an EVS VNIC and
-    plumb/unplumb ab IP interface and addresses on the EVS VNIC.
+    This class provides methods to create/delete a Crossbow VNIC and
+    add it as a port of OVS bridge.
+    TODO(gmoodalb): More methods to implement here for MITAKA??
     """
 
     # TODO(gmoodalb): dnsmasq uses old style `ifreq', so 16 is the maximum
@@ -63,32 +69,23 @@
 
     def __init__(self, conf):
         self.conf = conf
-        try:
-            self.rad_uri = radcon.RadURI(conf.evs_controller)
-        except ValueError as err:
-            raise SystemExit(_("Specified evs_controller is invalid: %s"), err)
-
-        self._rad_connection = None
-        # set the controller property for this host
-        cmd = ['/usr/sbin/evsadm', 'show-prop', '-co', 'value', '-p',
-               'controller']
-        stdout = utils.execute(cmd)
-        if conf.evs_controller != stdout.strip():
-            cmd = ['/usr/sbin/evsadm', 'set-prop', '-p',
-                   'controller=%s' % (conf.evs_controller)]
-            utils.execute(cmd)
+        self._neutron_client = None
 
     @property
-    def rad_connection(self):
-        if (self._rad_connection is not None and
-                self._rad_connection._closed is None):
-            return self._rad_connection
-
-        LOG.debug(_("Connecting to EVS Controller at %s") %
-                  self.conf.evs_controller)
-
-        self._rad_connection = self.rad_uri.connect()
-        return self._rad_connection
+    def neutron_client(self):
+        if self._neutron_client:
+            return self._neutron_client
+        from neutronclient.v2_0 import client
+        self._neutron_client = client.Client(
+            username=self.conf.admin_user,
+            password=self.conf.admin_password,
+            tenant_name=self.conf.admin_tenant_name,
+            auth_url=self.conf.auth_url,
+            auth_strategy=self.conf.auth_strategy,
+            region_name=self.conf.auth_region,
+            endpoint_type=self.conf.endpoint_type
+        )
+        return self._neutron_client
 
     def fini_l3(self, device_name):
         ipif = net_lib.IPInterface(device_name)
@@ -113,120 +110,7 @@
 
     def plug(self, tenant_id, network_id, port_id, datalink_name, mac_address,
              network=None, bridge=None, namespace=None, prefix=None,
-             protection=False, vif_type=None):
-        """Plug in the interface."""
-
-        if net_lib.Datalink.datalink_exists(datalink_name):
-            LOG.info(_("Device %s already exists"), datalink_name)
-            return
-
-        if datalink_name.startswith('l3e'):
-            # verify external network parameter settings
-            dl = net_lib.Datalink(datalink_name)
-            # determine the network type of the external network
-            # TODO(gmoodalb): use EVS RAD APIs
-            evsname = network_id
-            cmd = ['/usr/sbin/evsadm', 'show-evs', '-co', 'l2type,vid',
-                   '-f', 'evs=%s' % evsname]
-            try:
-                stdout = utils.execute(cmd)
-            except Exception as err:
-                LOG.error(_("Failed to retrieve the network type for "
-                            "the external network, and it is required "
-                            "to create an external gateway port: %s") % err)
-                return
-            output = stdout.splitlines()[0].strip()
-            l2type, vid = output.split(':')
-            if l2type != 'flat' and l2type != 'vlan':
-                LOG.error(_("External network should be either Flat or "
-                            "VLAN based, and it is required to "
-                            "create an external gateway port"))
-                return
-            elif (l2type == 'vlan' and
-                  self.conf.get("external_network_datalink", None)):
-                LOG.warning(_("external_network_datalink is deprecated in "
-                              "Juno and will be removed in the next release "
-                              "of Solaris OpenStack. Please use the evsadm "
-                              "set-controlprop subcommand to setup the "
-                              "uplink-port for an external network"))
-                # proceed with the old-style of doing things
-                dl.create_vnic(self.conf.external_network_datalink,
-                               mac_address=mac_address, vid=vid)
-                return
-
-        try:
-            evsc = self.rad_connection.get_object(evsbind.EVSController())
-            vports_info = evsc.getVPortInfo("vport=%s" % (port_id))
-            if vports_info:
-                vport_info = vports_info[0]
-                # This is to handle HA when the 1st DHCP/L3 agent is down and
-                # the second DHCP/L3 agent tries to connect its VNIC to EVS, we
-                # will end up in "vport in use" error. So, we need to reset the
-                # vport before we connect the VNIC to EVS.
-                if vport_info.status == evsbind.VPortStatus.USED:
-                    LOG.debug(_("Retrieving EVS: %s"), vport_info.evsuuid)
-                    pat = radcli.ADRGlobPattern({'uuid': network_id,
-                                                 'tenant': tenant_id})
-                    evs_objs = self.rad_connection.list_objects(evsbind.EVS(),
-                                                                pat)
-                    if evs_objs:
-                        evs = self.rad_connection.get_object(evs_objs[0])
-                        evs.resetVPort(port_id, "force=yes")
-
-                if not protection:
-                    LOG.debug(_("Retrieving VPort: %s"), port_id)
-                    pat = radcli.ADRGlobPattern({'uuid': port_id,
-                                                 'tenant': tenant_id,
-                                                 'evsuuid': network_id})
-                    vport_objs = self.rad_connection.list_objects(
-                        evsbind.VPort(), pat)
-                    if vport_objs:
-                        vport = self.rad_connection.get_object(vport_objs[0])
-                        vport.setProperty("protection=none")
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-        dl = net_lib.Datalink(datalink_name)
-        evs_vport = "%s/%s" % (network_id, port_id)
-        dl.connect_vnic(evs_vport, tenant_id)
-
-    def unplug(self, device_name, namespace=None, prefix=None):
-        """Unplug the interface."""
-
-        dl = net_lib.Datalink(device_name)
-        dl.delete_vnic()
-
-
-class OVSInterfaceDriver(SolarisVNICDriver):
-    """Driver used to manage Solaris OVS VNICs.
-
-    This class provides methods to create/delete a Crossbow VNIC and
-    add it as a port of OVS bridge.
-    """
-
-    def __init__(self, conf):
-        self.conf = conf
-        self._neutron_client = None
-
-    @property
-    def neutron_client(self):
-        if self._neutron_client:
-            return self._neutron_client
-        from neutronclient.v2_0 import client
-        self._neutron_client = client.Client(
-            username=self.conf.admin_user,
-            password=self.conf.admin_password,
-            tenant_name=self.conf.admin_tenant_name,
-            auth_url=self.conf.auth_url,
-            auth_strategy=self.conf.auth_strategy,
-            region_name=self.conf.auth_region,
-            endpoint_type=self.conf.endpoint_type
-        )
-        return self._neutron_client
-
-    def plug(self, tenant_id, network_id, port_id, datalink_name, mac_address,
-             network=None, bridge=None, namespace=None, prefix=None,
-             protection=False, vif_type=None):
+             protection=False, mtu=None, vif_type=None):
         """Plug in the interface."""
 
         if net_lib.Datalink.datalink_exists(datalink_name):
@@ -312,6 +196,10 @@
     def unplug(self, datalink_name, bridge=None, namespace=None, prefix=None):
         """Unplug the interface."""
 
+        # remove any IP addresses on top of this datalink, otherwise we will
+        # get 'device busy' error while deleting the datalink
+        self.fini_l3(datalink_name)
+
         dl = net_lib.Datalink(datalink_name)
         dl.delete_vnic()
 
@@ -329,3 +217,43 @@
         except RuntimeError as err:
             LOG.error(_("Failed unplugging interface '%s': %s") %
                       (datalink_name, err))
+
+    @property
+    def use_gateway_ips(self):
+        """Whether to use gateway IPs instead of unique IP allocations.
+
+        In each place where the DHCP agent runs, and for each subnet for
+        which DHCP is handing out IP addresses, the DHCP port needs -
+        at the Linux level - to have an IP address within that subnet.
+        Generally this needs to be a unique Neutron-allocated IP
+        address, because the subnet's underlying L2 domain is bridged
+        across multiple compute hosts and network nodes, and for HA
+        there may be multiple DHCP agents running on that same bridged
+        L2 domain.
+
+        However, if the DHCP ports - on multiple compute/network nodes
+        but for the same network - are _not_ bridged to each other,
+        they do not need each to have a unique IP address.  Instead
+        they can all share the same address from the relevant subnet.
+        This works, without creating any ambiguity, because those
+        ports are not all present on the same L2 domain, and because
+        no data within the network is ever sent to that address.
+        (DHCP requests are broadcast, and it is the network's job to
+        ensure that such a broadcast will reach at least one of the
+        available DHCP servers.  DHCP responses will be sent _from_
+        the DHCP port address.)
+
+        Specifically, for networking backends where it makes sense,
+        the DHCP agent allows all DHCP ports to use the subnet's
+        gateway IP address, and thereby to completely avoid any unique
+        IP address allocation.  This behaviour is selected by running
+        the DHCP agent with a configured interface driver whose
+        'use_gateway_ips' property is True.
+
+        When an operator deploys Neutron with an interface driver that
+        makes use_gateway_ips True, they should also ensure that a
+        gateway IP address is defined for each DHCP-enabled subnet,
+        and that the gateway IP address doesn't change during the
+        subnet's lifetime.
+        """
+        return False
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/agent/solaris/namespace_manager.py	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+class NamespaceManager(object):
+    ''' Re-implements neutron.agent.l3.namespace_manager'''
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, value, traceback):
+        if exc_type:
+            # An exception occurred in the caller's with statement
+            return False
+        return True
+
+    def keep_router(self, router_id):
+        pass
+
+    def keep_ext_net(self, ext_net_id):
+        pass
+
+    def get_prefix_and_id(self, ns_name):
+        return None
+
+    def is_managed(self, ns_name):
+        return False
+
+    def list_all(self):
+        return set()
+
+    def ensure_router_cleanup(self, router_id):
+        pass
+
+    def _cleanup(self, ns_prefix, ns_id):
+        pass
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Sep 07 14:48:41 2016 -0700
@@ -56,8 +56,10 @@
         return True
 
     @classmethod
-    def ipaddr_exists(cls, ifname, ipaddr):
-        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addr', ifname]
+    def ipaddr_exists(cls, ipaddr, ifname=None):
+        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addr']
+        if ifname:
+            cmd.append(ifname)
         stdout = cls.execute(cmd)
 
         return ipaddr in stdout
@@ -86,7 +88,7 @@
             if temp:
                 cmd.append('-t')
             self.execute_with_pfexec(cmd)
-        elif addrcheck and self.ipaddr_exists(self._ifname, ipaddr):
+        elif addrcheck and self.ipaddr_exists(ipaddr, self._ifname):
             return
 
         # If an address is IPv6, then to create a static IPv6 address
@@ -99,8 +101,8 @@
             mac_addr = stdout.splitlines()[0].strip()
             ll_addr = netaddr.EUI(mac_addr).ipv6_link_local()
 
-            if addrcheck and not self.ipaddr_exists(self._ifname,
-                                                    str(ll_addr)):
+            if addrcheck and not self.ipaddr_exists(str(ll_addr),
+                                                    self._ifname):
                 # create a link-local address
                 cmd = ['/usr/sbin/ipadm', 'create-addr', '-T', 'static', '-a',
                        str(ll_addr), self._ifname]
@@ -135,7 +137,7 @@
         self.execute_with_pfexec(cmd)
 
     def delete_address(self, ipaddr, addrcheck=True):
-        if addrcheck and not self.ipaddr_exists(self._ifname, ipaddr):
+        if addrcheck and not self.ipaddr_exists(ipaddr, self._ifname):
             return
 
         cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'addrobj,addr',
@@ -179,19 +181,6 @@
             return False
         return True
 
-    def connect_vnic(self, evsvport, tenantname=None, temp=True):
-        if self.datalink_exists(self._dlname):
-            return
-
-        cmd = ['/usr/sbin/dladm', 'create-vnic', '-c', evsvport, self._dlname]
-        if temp:
-            cmd.append('-t')
-        if tenantname:
-            cmd.append('-T')
-            cmd.append(tenantname)
-
-        self.execute_with_pfexec(cmd)
-
     def create_vnic(self, lower_link, mac_address=None, vid=None, temp=True):
         if self.datalink_exists(self._dlname):
             return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/agent/solaris/pd.py	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from neutron.common import utils
+
+OPTS = []
+
+
+class PrefixDelegation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def enable_subnet(self, router_id, subnet_id, prefix, ri_ifname, mac):
+        return
+
+    @utils.synchronized("l3-agent-pd")
+    def disable_subnet(self, router_id, subnet_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def update_subnet(self, router_id, subnet_id, prefix):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def add_gw_interface(self, router_id, gw_ifname):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def delete_router_pd(self, router):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def remove_gw_interface(self, router_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def sync_router(self, router_id):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def remove_stale_ri_ifname(self, router_id, stale_ifname):
+        pass
+
+    @utils.synchronized("l3-agent-pd")
+    def process_prefix_update(self):
+        pass
+
+    def after_start(self):
+        pass
+
+
[email protected]("l3-agent-pd")
+def remove_router(resource, event, l3_agent, **kwargs):
+    pass
+
+
+def get_router_entry(ns_name):
+    return {'gw_interface': None,
+            'ns_name': None,
+            'subnets': {}}
+
+
[email protected]("l3-agent-pd")
+def add_router(resource, event, l3_agent, **kwargs):
+    pass
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/bgp_dragent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,151 @@
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
+
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+
+[BGP]
+
+#
+# From neutron.bgp.agent
+#
+
+# BGP speaker driver class to be instantiated. (string value)
+#bgp_speaker_driver = <None>
+
+# 32-bit BGP identifier, typically an IPv4 address owned by the system running
+# the BGP DrAgent. (string value)
+#bgp_router_id = <None>
--- a/components/openstack/neutron/files/dhcp_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/dhcp_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,102 +1,213 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+ovs_integration_bridge = br_int0
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
+
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
+
+# The driver used to manage the virtual interface. (string value)
+interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
+
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From neutron.dhcp.agent
+#
 
 # The DHCP agent will resync its state with Neutron to recover from any
-# transient notification or rpc errors. The interval is number of
-# seconds between attempts.
-# resync_interval = 5
-
-# The DHCP agent requires an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
-
-# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
-# BigSwitch/Floodlight)
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Interface driver for Solaris Open vSwitch
-# interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
-
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br_int0
+# transient notification or RPC errors. The interval is number of seconds
+# between attempts. (integer value)
+#resync_interval = 5
 
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
-
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Interface driver for Solaris Elastic Virtual Switch (EVS)
-interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
-
-# The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
-# no additional setup of the DHCP server.
+# The driver used to manage the DHCP server. (string value)
 dhcp_driver = neutron.agent.solaris.dhcp.Dnsmasq
 
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces). This option is deprecated and
-# will be removed in a future release, at which point the old behavior of
-# use_namespaces = True will be enforced.
-use_namespaces = False
-
 # The DHCP server can assist with providing metadata support on isolated
 # networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request. The metadata service will only
-# be activated when the subnet does not contain any router port. The guest
+# specific host routes to the DHCP request. The metadata service will only be
+# activated when the subnet does not contain any router port. The guest
 # instance must be configured to request host routes via DHCP (Option 121).
-# enable_isolated_metadata = False
+# This option doesn't have any effect when force_metadata is set to True.
+# (boolean value)
+#enable_isolated_metadata = false
 
-# Allows for serving metadata requests coming from a dedicated metadata
-# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Neutron router from which the VMs send metadata
-# request. In this case DHCP Option 121 will not be injected in VMs, as
-# they will be able to reach 169.254.169.254 through a router.
-# This option requires enable_isolated_metadata = True
-# enable_metadata_network = False
+# In some cases the Neutron router is not present to provide the metadata IP
+# but the DHCP server can be used to provide this info. Setting this value will
+# force the DHCP server to append specific host routes to the DHCP request. If
+# this option is set, then the metadata service will be activated for all the
+# networks. (boolean value)
+#force_metadata = false
+
+# Allows for serving metadata requests coming from a dedicated metadata access
+# network whose CIDR is 169.254.169.254/16 (or larger prefix), and is connected
+# to a Neutron router from which the VMs send metadata request. In this case
+# DHCP Option 121 will not be injected in VMs, as they will be able to reach
+# 169.254.169.254 through a router. This option requires
+# enable_isolated_metadata = True. (boolean value)
+#enable_metadata_network = false
 
 # Number of threads to use during sync process. Should not exceed connection
-# pool size configured on server.
-# num_sync_threads = 4
+# pool size configured on server. (integer value)
+#num_sync_threads = 4
+
+# Location to store DHCP server config files. (string value)
+#dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames. This option is deprecated. It has
+# been moved to neutron.conf as dns_domain. It will be removed in a future
+# release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file. (string value)
+#dnsmasq_config_file =
 
-# Location to store DHCP server config files
-# dhcp_confs = $state_path/dhcp
+# Comma-separated list of the DNS servers which will be used as forwarders.
+# (list value)
+# Deprecated group/name - [DEFAULT]/dnsmasq_dns_server
+#dnsmasq_dns_servers = <None>
+
+# Base log dir for dnsmasq logging. The log contains DHCP and DNS log
+# information and is useful for debugging issues with either DHCP or DNS. If
+# this section is null, disable dnsmasq log. (string value)
+#dnsmasq_base_log_dir = <None>
 
-# Domain to use for building the hostnames
-# dhcp_domain = openstacklocal
+# Enables the dnsmasq service to provide name resolution for instances via DNS
+# resolvers on the host running the DHCP agent. Effectively removes the '--no-
+# resolv' option from the dnsmasq process arguments. Adding custom DNS
+# resolvers to the 'dnsmasq_dns_servers' option disables this feature. (boolean
+# value)
+#dnsmasq_local_resolv = false
+
+# Limit number of leases to prevent a denial-of-service. (integer value)
+#dnsmasq_lease_max = 16777216
 
-# Override the default dnsmasq settings with this file
-# dnsmasq_config_file =
+# Use broadcast in DHCP replies. (boolean value)
+#dhcp_broadcast_reply = false
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
 
-# Comma-separated list of DNS servers which will be used by dnsmasq
-# as forwarders.
-# dnsmasq_dns_servers =
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
 
-# Limit number of leases to prevent a denial-of-service.
-# dnsmasq_lease_max = 16777216
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
 
-# Location to DHCP lease relay UNIX domain socket
-# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# Use broadcast in DHCP replies
-# dhcp_broadcast_reply = False
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
 
-# dhcp_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the dhcp agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a dhcp server is disabled.
-# dhcp_delete_namespaces = False
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
 
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
 
-# An URI that specifies an EVS controller. It is of the form
-# ssh://[email protected], where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://[email protected]
-# evs_controller = ssh://[email protected]
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- a/components/openstack/neutron/files/evs/migrate/__init__.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
--- a/components/openstack/neutron/files/evs/migrate/evs-neutron-migration.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,432 +0,0 @@
-#!/usr/bin/python2.7
-#
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# This script migrates the network, subnet and port information from EVS DB to
-# neutron-server DB. It also re-creates routers and floatingips tables with
-# Neutron's l3 schema. This script needs to be run for the proper upgrade of
-# Neutron from Havana to Juno release.
-#
-
-import ConfigParser
-import time
-
-from oslo.config import cfg
-from oslo.db import exception as excp
-from oslo.db import options as db_options
-import rad.bindings.com.oracle.solaris.rad.evscntl as evsc
-import rad.connect as radcon
-import sqlalchemy as sa
-from sqlalchemy import MetaData, sql
-from sqlalchemy.orm import sessionmaker
-from sqlalchemy.schema import DropConstraint
-
-from neutron import context as ctx
-from neutron.db import common_db_mixin, model_base
-from neutron.plugins.evs.migrate import havana_api
-
-
-def create_db_network(nw, engine, ext_ro):
-    ''' Method for creating networks table in the neutron-server DB
-        Input params:
-        @nw - Dictionary with values from EVS DB
-        @engine - SQL engine
-        @ext_ro - External router
-    '''
-    # Importing locally because these modules end up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    from neutron.db import external_net_db as ext_net
-    model_base.BASEV2.metadata.bind = engine
-    for _none in range(60):
-        try:
-            model_base.BASEV2.metadata.create_all(engine)
-            break
-        except sa.exc.OperationalError as err:
-            # mysql is not ready. sleep for 2 more seconds
-            time.sleep(2)
-    else:
-        print "Unable to connect to MySQL:  %s" % err
-        print ("Please verify MySQL is properly configured and online "
-               "before using svcadm(1M) to clear this service.")
-        raise RuntimeError
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    dup = False
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_network(inst, ctxt, nw)
-        print "\nnetwork=%s added" % nw['network']['name']
-        if ext_ro:
-            ext_nw = ext_net.ExternalNetwork(network_id=nw['network']['id'])
-            session = sessionmaker()
-            session.configure(bind=engine)
-            s = session()
-            s.add(ext_nw)
-            s.commit()
-    except excp.DBDuplicateEntry:
-        print "\nnetwork '%s' already exists" % nw['network']['name']
-        dup = True
-    return dup
-
-
-def create_db_subnet(sub):
-    ''' Method for creating subnets table in the neutron-server DB
-        Input params:
-        @sub - Dictionary with values from EVS DB
-    '''
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_subnet(inst, ctxt, sub)
-        print "\nsubnet=%s added" % sub['subnet']['id']
-    except excp.DBDuplicateEntry:
-        print "\nsubnet '%s' already exists" % sub['subnet']['id']
-
-
-def create_db_port(port):
-    ''' Method for creating ports table in the neutron-server DB
-        Input params:
-        @port - Dictionary with values from EVS DB
-    '''
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import db_base_plugin_v2
-    ctxt = ctx.get_admin_context()
-    inst = db_base_plugin_v2.NeutronDbPluginV2()
-    try:
-        db_base_plugin_v2.NeutronDbPluginV2.create_port(inst, ctxt, port)
-        print "\nport=%s added" % port['port']['id']
-    except excp.DBDuplicateEntry:
-        print "\nport '%s' already exists" % port['port']['id']
-
-
-def main():
-    print "Start Migration."
-
-    # Connect to EVS controller
-    config = ConfigParser.RawConfigParser()
-    config.readfp(open("/etc/neutron/plugins/evs/evs_plugin.ini"))
-    if config.has_option("EVS", 'evs_controller'):
-        config_suh = config.get("EVS", 'evs_controller')
-    else:
-        config_suh = 'ssh://[email protected]'
-    suh = config_suh.split('://')
-    if len(suh) != 2 or suh[0] != 'ssh' or not suh[1].strip():
-        raise SystemExit(_("Specified evs_controller is invalid"))
-    uh = suh[1].split('@')
-    if len(uh) != 2 or not uh[0].strip() or not uh[1].strip():
-        raise SystemExit(_("'user' and 'hostname' need to be specified "
-                           "for evs_controller"))
-    try:
-        rc = radcon.connect_ssh(uh[1], user=uh[0])
-    except:
-        raise SystemExit(_("Cannot connect to EVS Controller"))
-    try:
-        evs_contr = rc.get_object(evsc.EVSController())
-    except:
-        raise SystemExit(_("Could not retrieve EVS info from EVS Controller"))
-
-    config.readfp(open("/etc/neutron/neutron.conf"))
-    if config.has_option("database", 'connection'):
-        SQL_CONNECTION = config.get("database", 'connection')
-    else:
-        SQL_CONNECTION = 'sqlite:////var/lib/neutron/neutron.sqlite'
-
-    conf = cfg.CONF
-    db_options.set_defaults(cfg.CONF,
-                            connection=SQL_CONNECTION,
-                            sqlite_db='', max_pool_size=10,
-                            max_overflow=20, pool_timeout=10)
-
-    neutron_engine = sa.create_engine(SQL_CONNECTION)
-    router_port_ids = {}
-
-    evsinfo = evs_contr.getEVSInfo()
-    for e in evsinfo:
-        ext_ro = False
-        for p in e.props:
-            if p.name == 'OpenStack:router:external' and p.value == 'True':
-                ext_ro = True
-        # Populate networks table
-        n = {
-            'tenant_id': e.tenantname,
-            'id': e.uuid,
-            'name': e.name,
-            'status': 'ACTIVE',
-            'admin_state_up': True,
-            'shared': False
-            }
-        nw = {'network': n}
-        dup = create_db_network(nw, neutron_engine, ext_ro)
-        if dup:
-            continue  # No need to iterate over subnets and ports
-
-        # Populate subnets table
-        if not e.ipnets:
-            continue
-        for i in e.ipnets:
-            cidr = None
-            gateway_ip = None
-            enable_dhcp = None
-            dns = []
-            host = []
-            start = []
-            for p in i.props:
-                if p.name == 'subnet':
-                    cidr = p.value
-                elif p.name == 'defrouter':
-                    gateway_ip = p.value
-                elif p.name == 'OpenStack:enable_dhcp':
-                    enable_dhcp = p.value == 'True'
-                elif p.name == 'OpenStack:dns_nameservers':
-                    dns = p.value.split(',')
-                elif p.name == 'OpenStack:host_routes':
-                    hh = p.value.split(',')
-                    for h in range(0, len(hh), 2):
-                        d = {hh[h]: hh[h+1]}
-                        host.append(d)
-                elif p.name == 'pool':
-                    ss = p.value.split(',')
-                    for s in ss:
-                        if '-' in s:
-                            d = {'start': s.split('-')[0],
-                                 'end': s.split('-')[1]}
-                            start.append(d)
-                        else:
-                            d = {'start': s, 'end': s}
-                            start.append(d)
-            ip_version = 4 if i.ipvers == evsc.IPVersion.IPV4 else 6
-
-            if i.name.startswith(i.uuid[:8]):
-                # Skip autogenerated names
-                name = None
-            else:
-                name = i.name
-            s = {
-                'tenant_id': i.tenantname,
-                'id': i.uuid,
-                'name': name,
-                'network_id': e.uuid,
-                'ip_version': ip_version,
-                'cidr': cidr,
-                'gateway_ip': gateway_ip,
-                'enable_dhcp': enable_dhcp,
-                'shared': False,
-                'allocation_pools': start,
-                'dns_nameservers': dns,
-                'host_routes': host
-                }
-
-            sub = {'subnet': s}
-            create_db_subnet(sub)
-
-        # Populate ports table
-        if not e.vports:
-            continue
-        for j in e.vports:
-            device_owner = ''
-            device_id = ''
-            mac_address = None
-            ipaddr = None
-            for v in j.props:
-                if v.name == 'OpenStack:device_owner':
-                    device_owner = v.value
-                    if v.value in ('network:router_interface',
-                                   'network:router_gateway'):
-                        router_port_ids[j.uuid] = v.value
-                elif v.name == 'OpenStack:device_id':
-                    device_id = v.value
-                elif v.name == 'macaddr':
-                    mac_address = v.value
-                elif v.name == 'ipaddr':
-                    ipaddr = v.value.split('/')[0]
-            if j.name.startswith(j.uuid[:8]):
-                # Skip autogenerated names
-                name = None
-            else:
-                name = j.name
-
-            p = {
-                'tenant_id': j.tenantname,
-                'id': j.uuid,
-                'name': name,
-                'network_id': e.uuid,
-                'mac_address': mac_address,
-                'admin_state_up': True,
-                'status': 'ACTIVE',
-                'device_id': device_id,
-                'device_owner': device_owner,
-                'fixed_ips': [{'subnet_id': e.ipnets[0].uuid,
-                               'ip_address': ipaddr}]
-                }
-            port = {'port': p}
-            create_db_port(port)
-
-    # Change the schema of the floatingips and routers tables by doing
-    # the following:
-    #     Fetch the floatingip, router entry using EVS API,
-    #     Temporarily store the information,
-    #     Delete floatingip, router entry,
-    #     Remove floatingip, router as a constraint from existing tables,
-    #     Drop the routers, floatingips table,
-    #     Add router, floatingip entry using Neutron API
-
-    # Importing locally because this module ends up importing neutron.wsgi
-    # which causes RAD to hang
-    from neutron.db import l3_db
-    havana_api.configure_db()
-    session = havana_api.get_session()
-
-    # Fetch the floatingip entry using EVS API
-    query = session.query(havana_api.FloatingIP)
-    floatingips = query.all()
-    fl = []
-    if floatingips:
-        for f in floatingips:
-            fi = {
-                'id': f['id'],
-                'floating_ip_address': f['floating_ip_address'],
-                'floating_network_id': f['floating_network_id'],
-                'floating_port_id': f['floating_port_id'],
-                'fixed_port_id': f['fixed_port_id'],
-                'fixed_ip_address': f['fixed_ip_address'],
-                'tenant_id': f['tenant_id'],
-                'router_id': f['router_id'],
-                }
-            fl.append(fi)
-
-        # Delete floatingip entry
-        ctxt = ctx.get_admin_context()
-        ctxt = havana_api.get_evs_context(ctxt)
-        with ctxt.session.begin(subtransactions=True):
-            cm_db_inst = common_db_mixin.CommonDbMixin()
-            query = common_db_mixin.CommonDbMixin._model_query(cm_db_inst,
-                                                               ctxt,
-                                                               havana_api.
-                                                               FloatingIP)
-            for fip in query:
-                ctxt.session.delete(fip)
-
-    # Fetch the router entry using EVS API
-    query = session.query(havana_api.Router)
-    routers = []
-    try:
-        routers = query.all()
-    except sa.exc.OperationalError:
-        pass
-    if routers:
-        for r in routers:
-            router_id = r['id']
-            rt = {
-                'tenant_id': r['tenant_id'],
-                'id': r['id'],
-                'name': r['name'],
-                'admin_state_up': r['admin_state_up'],
-                'gw_port_id': r['gw_port_id'],
-                'status': 'ACTIVE'
-                }
-
-        # Delete router entry
-        ctxt = ctx.get_admin_context()
-        ctxt = havana_api.get_evs_context(ctxt)
-        with ctxt.session.begin(subtransactions=True):
-            cm_db_inst = common_db_mixin.CommonDbMixin()
-            query = common_db_mixin.CommonDbMixin._model_query(cm_db_inst,
-                                                               ctxt,
-                                                               havana_api.
-                                                               Router)
-            router = query.filter(havana_api.Router.id == router_id).one()
-            ctxt.session.delete(router)
-
-    engine = sa.create_engine(SQL_CONNECTION)
-    meta = MetaData()
-    conn = engine.connect()
-    trans = conn.begin()
-    meta.reflect(engine)
-
-    # Remove router as a constraint from existing tables,
-    # Drop the routers table to remove old schema
-    for t in meta.tables.values():
-        for fk in t.foreign_keys:
-            if fk.column.table.name == "routers":
-                if fk.constraint.name:
-                    engine.execute(DropConstraint(fk.constraint))
-    for t in meta.tables.values():
-        if t.name == "routers":
-            t.drop(bind=conn)
-
-    # Remove floatingip as a constraint from existing tables,
-    # Drop the floatingip table to remove old schema
-    for t in meta.tables.values():
-        for fk in t.foreign_keys:
-            if fk.column.table.name == "floatingips":
-                if fk.constraint.name:
-                    engine.execute(DropConstraint(fk.constraint))
-    for t in meta.tables.values():
-        if t.name == "floatingips":
-            t.drop(bind=conn)
-    conn.close()
-
-    # Add the routers and floatingips using the schema in l3_db.py
-
-    setattr(l3_db.Router, 'enable_snat', sa.Column(sa.Boolean,
-            default=True, server_default=sql.true(), nullable=False))
-    neutron_engine = sa.create_engine(SQL_CONNECTION)
-    model_base.BASEV2.metadata.bind = neutron_engine
-    model_base.BASEV2.metadata.create_all(neutron_engine)
-    if routers:
-        ctxt = ctx.get_admin_context()
-        with ctxt.session.begin(subtransactions=True):
-            router_db = l3_db.Router(id=router_id,
-                                     tenant_id=r['tenant_id'],
-                                     name=rt['name'],
-                                     admin_state_up=rt['admin_state_up'],
-                                     gw_port_id=rt['gw_port_id'],
-                                     status="ACTIVE")
-            ctxt.session.add(router_db)
-            print "\nrouter=%s updated" % rt['name']
-        with ctxt.session.begin(subtransactions=True):
-            for i, j in router_port_ids.iteritems():
-                router_port = l3_db.RouterPort(
-                    port_id=i,
-                    router_id=router_id,
-                    port_type=j)
-                ctxt.session.add(router_port)
-
-    if floatingips:
-        ctxt = ctx.get_admin_context()
-        with ctxt.session.begin(subtransactions=True):
-            for i in fl:
-                fl_db = l3_db.FloatingIP(
-                    id=i['id'],
-                    floating_ip_address=i['floating_ip_address'],
-                    floating_network_id=i['floating_network_id'],
-                    floating_port_id=i['floating_port_id'],
-                    fixed_port_id=i['fixed_port_id'],
-                    fixed_ip_address=i['fixed_ip_address'],
-                    router_id=i['router_id'],
-                    tenant_id=i['tenant_id'])
-                ctxt.session.add(fl_db)
-                print "\nfloatingip=%s updated" % i['floating_ip_address']
-
-    print "\nEnd Migration."
-
-
-if __name__ == '__main__':
-    main()
--- a/components/openstack/neutron/files/evs/migrate/havana_api.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-#
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import ConfigParser
-
-from sqlalchemy.ext import declarative
-from sqlalchemy import orm
-import sqlalchemy as sa
-
-from neutron.context import ContextBase
-from neutron.db import model_base
-from neutron.openstack.common import uuidutils
-
-EVS_DB_BASE = declarative.declarative_base(cls=model_base.NeutronBaseV2)
-EVS_DB_ENGINE = None
-EVS_DB_MAKER = None
-
-
-class EVSContext(ContextBase):
-    @property
-    def session(self):
-        return self._session
-
-    @session.setter
-    def session(self, session):
-        self._session = session
-
-
-def configure_db():
-    global EVS_DB_ENGINE
-    if not EVS_DB_ENGINE:
-        config = ConfigParser.RawConfigParser()
-        config.readfp(open("/etc/neutron/neutron.conf"))
-        if config.has_option("database", 'connection'):
-            sql_connection = config.get("database", 'connection')
-        else:
-            sql_connection = 'sqlite:////var/lib/neutron/neutron.sqlite'
-        EVS_DB_ENGINE = sa.create_engine(sql_connection, echo=False)
-        EVS_DB_BASE.metadata.create_all(EVS_DB_ENGINE)
-
-
-def get_session(autocommit=True, expire_on_commit=False):
-    global EVS_DB_ENGINE, EVS_DB_MAKER
-    assert EVS_DB_ENGINE
-    if not EVS_DB_MAKER:
-        EVS_DB_MAKER = orm.sessionmaker(bind=EVS_DB_ENGINE,
-                                        autocommit=autocommit,
-                                        expire_on_commit=expire_on_commit)
-    return EVS_DB_MAKER()
-
-
-def get_evs_context(context):
-    """Overrides the Neutron DB session with EVS DB session"""
-
-    evs_context = EVSContext.from_dict(context.to_dict())
-    evs_context.session = get_session()
-
-    return evs_context
-
-
-class Router(EVS_DB_BASE):
-    """Represents a v2 neutron router."""
-
-    id = sa.Column(sa.String(36), primary_key=True,
-                   default=uuidutils.generate_uuid)
-    name = sa.Column(sa.String(255))
-    status = sa.Column(sa.String(16))
-    admin_state_up = sa.Column(sa.Boolean)
-    tenant_id = sa.Column(sa.String(255))
-    gw_port_id = sa.Column(sa.String(36))
-    gw_port_network_id = sa.Column(sa.String(36))
-
-
-class FloatingIP(EVS_DB_BASE):
-    """Represents a floating IP address.
-
-    This IP address may or may not be allocated to a tenant, and may or
-    may not be associated with an internal port/ip address/router.
-    """
-
-    id = sa.Column(sa.String(36), primary_key=True,
-                   default=uuidutils.generate_uuid)
-    floating_ip_address = sa.Column(sa.String(64), nullable=False)
-    floating_network_id = sa.Column(sa.String(36), nullable=False)
-    floating_port_id = sa.Column(sa.String(36), nullable=False)
-    fixed_port_id = sa.Column(sa.String(36))
-    fixed_ip_address = sa.Column(sa.String(64))
-    router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'))
-    tenant_id = sa.Column(sa.String(255))
--- a/components/openstack/neutron/files/evs/migrate/migrate-evs-to-ovs	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1740 +0,0 @@
-#!/usr/bin/python2.7
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# There are four aspects to migrate an OpenStack cloud running Neutron EVS
-# to Neutron ML2 + OVS and they are enumerated below. This script needs to
-# be run on each of the nodes that is either -- compute, controller, or
-# network -- and the script infers the role of the node based on the SMF
-# services running and does one or more of the operations enumerated below.
-#
-# 1. Populate Neutron ML2 tables
-# ------------------------------
-# Neutron ML2 plugin uses a different set of tables to manage various
-# network types and mechanism drivers underneath it. The names of these
-# tables start with ml2_* and the content of these tables will need to be
-# inferred from other Neutron tables and from EVS controller
-#
-# 2. Update existing configuration files
-# --------------------------------------
-# Following files need to be updated for various Neutron services.
-#  - /etc/neutron/neutron.conf
-#   - change core_plugin option to neutron.plugins.ml2.plugin.Ml2Plugin
-#
-#  - /etc/neutron/dhcp_agent.ini
-#   - change interface_driver option to \
-#    neutron.agent.solaris.interface.SolarisOVSInterfaceDriver
-#   - set ovs_integration_bridge to br_int0
-#
-#  - /etc/neutron/l3_agent.ini
-#   - change interface_driver option to \
-#       neutron.agent.solaris.interface.SolarisOVSInterfaceDriver
-#   - set ovs_integration_bridge to br_int0
-#   - set external_network_bridge to br_ex0
-#   - add service tenant's neutron user credentials to communicate with
-#       neutron-server
-#
-# Following files need to be updated on every node where nova-compute runs.
-#  - /etc/nova/nova.conf
-#    The only change to this file is to add an ovs_bridge
-#    option set to 'br_int0' (default OVS bridge to which various VNICs
-#    (Neutron ports) are added)
-#
-# 3. Create new configuration files
-# ---------------------------------
-# Following new file needs to be created on the node running neutron-server.
-#  - /etc/neutron/plugins/ml2/ml2_conf.ini
-#
-# Following new file needs to be created on every node running either
-# nova-compute, neutron-dhcp-agent, or neutron-l3-agent.
-#  - /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
-#
-# The majority of the contents of the file is inferred from EVS controller
-#
-# 4. Migrate all the VMs from EVS to OVS
-# --------------------------------------
-# The anets of each VM, spawned in Neutron EVS cloud, has one global(tenant)
-# and two anet(evs and vport) properites that are EVS specific. We will need
-# to clear those properties. Before we do that, we will need to first fetch
-# the information (MAC address, lower-link, and such) from EVS controller
-# for a given anet which is uniquely identified by <tenant, evs, vport> and
-# explicitly set corresponding anet properties. This step needs to be
-# repeated for other EVS based anets, if any, in the VM.
-#
-
-import argparse
-from collections import OrderedDict
-from datetime import datetime
-import iniparse
-import netaddr as na
-import netifaces as ni
-import os
-import pwd
-import re
-from shutil import copy2, move
-import signal
-import socket
-import sqlalchemy as sa
-from subprocess import check_output, check_call, CalledProcessError, PIPE
-import sys
-import uuid
-
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evscntl
-import rad.bindings.com.oracle.solaris.rad.zonemgr_1 as zonemgr
-import rad.client as radcli
-import rad.connect as radcon
-
-from oslo_db.sqlalchemy import session
-from neutronclient.v2_0 import client as neutron_client
-from neutron.extensions import portbindings
-from neutron.openstack.common import uuidutils
-
-# SMF services
-SVC_NOVA_COMPUTE = 'nova-compute:default'
-SVC_NEUTRON_SERVER = 'neutron-server:default'
-SVC_DHCP_AGENT = 'neutron-dhcp-agent:default'
-SVC_L3_AGENT = 'neutron-l3-agent:default'
-SVC_METADATA_AGENT = 'neutron-metadata-agent:default'
-SVC_OVS_AGENT = 'neutron-openvswitch-agent:default'
-SVC_VSWITCH_SERVER = 'vswitch-server:default'
-SVC_OVSDB_SERVER = 'ovsdb-server:default'
-SVC_NEUTRON_UPGRADE = 'neutron-upgrade:default'
-
-
-ALL_SVCS = [SVC_NEUTRON_SERVER, SVC_DHCP_AGENT, SVC_L3_AGENT, SVC_NOVA_COMPUTE]
-curnode_svcs = []
-
-# conf files
-NEUTRON_CONF = '/etc/neutron/neutron.conf'
-ML2_INI = '/etc/neutron/plugins/ml2/ml2_conf.ini'
-OVS_INI = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
-EVS_INI = '/etc/neutron/plugins/evs/evs_plugin.ini'
-DHCP_INI = '/etc/neutron/dhcp_agent.ini'
-L3_INI = '/etc/neutron/l3_agent.ini'
-METADATA_INI = '/etc/neutron/metadata_agent.ini'
-NOVA_CONF = '/etc/nova/nova.conf'
-
-# constants
-ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
-OVS_INTFC_DRIVER = 'neutron.agent.solaris.interface.OVSInterfaceDriver'
-OVS_INT_BRIDGE = 'br_int0'
-OVS_EXT_BRIDGE = 'br_ex0'
-VXLAN_UPLINK_PORT = 'ovs.vxlan1'
-FLAT_PHYS_NET = 'flatnet'
-EXT_VLAN_PHYS_NET = 'extnet'
-RABBITMQ_DEFAULT_USERID = 'guest'
-RABBITMQ_DEFAULT_PASSWORD = 'guest'
-L2_TYPE_VLAN = 'vlan'
-L2_TYPE_VXLAN = 'vxlan'
-L2_TYPE_FLAT = 'flat'
-UID_NEUTRON = 84
-UID_NOVA = 85
-
-# file ownership
-file_owner = {
-    NEUTRON_CONF: UID_NEUTRON,
-    ML2_INI: UID_NEUTRON,
-    OVS_INI: UID_NEUTRON,
-    EVS_INI: UID_NEUTRON,
-    DHCP_INI: UID_NEUTRON,
-    L3_INI: UID_NEUTRON,
-    METADATA_INI: UID_NEUTRON,
-    NOVA_CONF: UID_NOVA
-}
-
-# LOGGING LEVELS
-LOG_DEBUG = 'DEBUG:'
-LOG_INFO = 'INFO:'
-LOG_WARN = 'WARN:'
-LOG_ERROR = 'ERROR:'
-
-HOSTNAME = socket.gethostname().split('.')[0]
-
-evsutil = None
-l2type = None
-external_network_datalink = None
-external_network_name = None
-external_network_vid = None
-bridge_mappings = {}
-neutron_conn = {}
-
-
-def log_msg(level, msg, oneliner=True):
-    if oneliner:
-        msg = msg.replace('\n', ' ')
-        msg = re.sub(r'\s\s+', ' ', msg)
-    print level, msg
-
-
-class ZoneConfig(object):
-    """ZoneConfig - context manager for access zone configurations.
-    Automatically opens the configuration for a zone and commits any changes
-    before exiting
-    """
-    def __init__(self, zone):
-        """zone is a zonemgr object representing either a kernel zone or
-        non-global zone.
-        """
-        self.zone = zone
-        self.editing = False
-
-    def __enter__(self):
-        """enables the editing of the zone."""
-        try:
-            self.zone.editConfig()
-            self.editing = True
-            return self
-        except:
-            raise
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        """looks for any kind of exception before exiting.  If one is found,
-        cancel any configuration changes and reraise the exception.  If not,
-        commit the new configuration.
-        """
-        if exc_type is not None and self.editing:
-            # We received some kind of exception.  Cancel the config and raise.
-            self.zone.cancelConfig()
-            raise
-        else:
-            # commit the config
-            try:
-                self.zone.commitConfig()
-            except:
-                raise
-
-    def get_resources(self, resource_type):
-        """Get list of resources of specified type
-        """
-        try:
-            return self.zone.getResources(zonemgr.Resource(resource_type))
-        except:
-            raise
-
-    def set_resource_prop(self, resource, prop, value, rsc_filter=None):
-        """sets a property for an existing resource.
-        """
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            self.zone.setResourceProperties(resource,
-                                            [zonemgr.Property(prop, value)])
-        except:
-            raise
-
-    def clear_resource_props(self, resource, props, rsc_filter=None):
-        """Clear property values of a given resource
-        """
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            self.zone.clearResourceProperties(resource, props)
-        except:
-            raise
-
-    def lookup_resource_property(self, resource, prop, rsc_filter=None):
-        """Lookup specified property from specified Solaris Zone resource."""
-        try:
-            if isinstance(resource, basestring):
-                resource = zonemgr.Resource(resource, rsc_filter)
-            val = self.zone.getResourceProperties(resource, [prop])
-        except radcli.ObjectError:
-            return None
-        except Exception:
-            raise
-        return val[0].value if val else None
-
-
-class ZoneUtil(object):
-    """Zone utility functions like getting list of zones, zone names etc.
-    """
-    def __init__(self):
-        self.rc = radcon.connect_unix()
-
-    def get_zone_by_name(self, name):
-            """Return a Solaris Zones object via RAD by name."""
-            try:
-                zone = self.rc.get_object(
-                    zonemgr.Zone(), radcli.ADRGlobPattern({'name': name}))
-            except radcli.NotFoundError:
-                return None
-            except Exception:
-                raise
-            return zone
-
-    def _get_zone_objects(self):
-        """Return a list of all Solaris Zones objects via RAD."""
-        return self.rc.list_objects(zonemgr.Zone())
-
-    def get_zone_names(self):
-        """Return the names of all the instances known to the virtualization
-        layer, as a list.
-        """
-        instances_list = []
-        for zone in self._get_zone_objects():
-            instances_list.append(self.rc.get_object(zone).name)
-        return instances_list
-
-
-class EVSUtil():
-    """Use to access EVS info.
-    """
-    def __init__(self):
-        ctl_locn = self._get_evs_controller()
-        try:
-            self.rad_uri = radcon.RadURI(ctl_locn)
-        except ValueError as err:
-            raise SystemExit(_("Specified evs_controller is invalid: %s"), err)
-        try:
-            self._rc = self.rad_uri.connect()
-        except:
-            raise SystemExit(_("Cannot connect to EVS Controller"))
-        try:
-            self._evs_contr = self._rc.get_object(evscntl.EVSController())
-        except:
-            raise SystemExit(_("Failed to get EVS Controller"))
-        self.l2type = self._evs_contr.getProperty('l2-type')[0].current_value
-        self._evsinfo = None
-        self._vportinfo = None
-        self._l2rangeinfo = None
-        self._evs_cache = {}
-        # _global_vlanrange_to_nw_uplink does not contain host specific entries
-        # and is of the form:
-        # {comma separated vlanrange strings: (physical n/w name, uplink port)}
-        self._global_vlanrange_to_nw_uplink = {}
-        # _local_vlanrange_to_uplink contains only this host specific entries
-        # and is of the form:
-        # {comma separated vlanrange strings: uplink port}
-        self._local_vlanrange_to_uplink = {}
-        # global uplink port for flatnet
-        self._global_flat_nw_uplink = None
-        # local uplink port for flatnet
-        self._local_flat_nw_uplink = None
-
-    def _get_evs_controller(self):
-        if (set(curnode_svcs) &
-                set([SVC_NOVA_COMPUTE, SVC_DHCP_AGENT, SVC_L3_AGENT])):
-            try:
-                evsc = check_output(['/usr/sbin/evsadm', 'show-prop', '-co',
-                                     'value', '-p', 'controller']).strip()
-            except:
-                raise SystemExit(_("Could not determine EVS Controller "
-                                   "RAD URI"))
-            return evsc.strip()
-
-        assert SVC_NEUTRON_SERVER in curnode_svcs
-        # get evs_controller from EVS_INI
-        config = iniparse.ConfigParser()
-        config.readfp(open(EVS_INI))
-        try:
-            evsc = config.get("EVS", "evs_controller")
-        except:
-            return 'ssh://[email protected]'
-        return evsc.strip()
-
-    @property
-    def evsinfo(self):
-        if not self._evsinfo:
-            self._evsinfo = self._evs_contr.getEVSInfo()
-        return self._evsinfo
-
-    @property
-    def vportinfo(self):
-        if not self._vportinfo:
-            self._vportinfo = self._evs_contr.getVPortInfo()
-        return self._vportinfo
-
-    @property
-    def l2rangeinfo(self):
-        if not self._l2rangeinfo:
-            self._l2rangeinfo = self._evs_contr.getL2TypeIdRange()
-        return self._l2rangeinfo
-
-    @property
-    def global_flat_nw_uplink(self):
-        if not self._global_flat_nw_uplink:
-            self.get_global_vlanrange_nw_uplink_map()
-        return self._global_flat_nw_uplink
-
-    @property
-    def local_flat_nw_uplink(self):
-        if not self._local_flat_nw_uplink:
-            self.get_local_vlanrange_uplink_map()
-        return self._local_flat_nw_uplink
-
-    def _get_vport(self, tenant_name, evs_uuid, vport_uuid):
-        pat = radcli.ADRGlobPattern({'tenant': tenant_name,
-                                     'evsuuid': evs_uuid,
-                                     'uuid': vport_uuid})
-        adrnames = self._rc.list_objects(evscntl.VPort(), pat)
-        if not adrnames:
-            return None
-        return self._rc.get_object(adrnames[0])
-
-    def get_macaddr(self, tenant_name, evs_uuid, vport_uuid):
-        vport = self._get_vport(tenant_name, evs_uuid, vport_uuid)
-        return vport.getProperty('macaddr')[0].current_value
-
-    def _get_evs(self, tenant_name, evs_uuid):
-        if evs_uuid in self._evs_cache:
-            return self._evs_cache[evs_uuid]
-        pat = radcli.ADRGlobPattern({'tenant': tenant_name,
-                                     'uuid': evs_uuid})
-        adrnames = self._rc.list_objects(evscntl.EVS(), pat)
-        if not adrnames:
-            return None
-        evs = self._rc.get_object(adrnames[0])
-        self._evs_cache[evs_uuid] = evs
-        return evs
-
-    def _vid_in_vidrange(self, vid, vidrange):
-        # vidrange is of the form 1-5,10-20,30-35
-        vlan_ranges = vidrange.split(',')
-        for vlan_range_str in vlan_ranges:
-            vlan_range = vlan_range_str.split("-")
-            vlan_start = int(vlan_range[0])
-            if len(vlan_range) == 2:
-                vlan_end = int(vlan_range[1]) + 1
-            else:
-                vlan_end = vlan_start + 1
-            if vid in xrange(vlan_start, vlan_end):
-                return True
-        return False
-
-    def get_global_vlanrange_nw_uplink_map(self):
-        if self._global_vlanrange_to_nw_uplink:
-            return self._global_vlanrange_to_nw_uplink
-        i = 1
-        extnet_found = False
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host or l2ri.name != 'uplink-port':
-                continue
-            uplink_port = l2ri.value
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vlan-range':
-                    if range_prop.name == 'flat-range':
-                        self._global_flat_nw_uplink = uplink_port
-                    continue
-                vlanrange = range_prop.value
-                phys_nw = ''
-                if external_network_vid and not extnet_found:
-                    extnet_found = self._vid_in_vidrange(external_network_vid,
-                                                         vlanrange)
-                    if extnet_found:
-                        phys_nw = EXT_VLAN_PHYS_NET
-                if not phys_nw:
-                    phys_nw = 'physnet' + str(i)
-                    i += 1
-                self._global_vlanrange_to_nw_uplink[vlanrange] = (phys_nw,
-                                                                  uplink_port)
-        return self._global_vlanrange_to_nw_uplink
-
-    def get_local_vlanrange_uplink_map(self):
-        if self._local_vlanrange_to_uplink:
-            return self._local_vlanrange_to_uplink
-        for l2ri in self.l2rangeinfo:
-            if not l2ri.host:
-                continue
-            l2ri_hostname = l2ri.host.split('.')[0]
-            if l2ri_hostname != HOSTNAME or l2ri.name != 'uplink-port':
-                continue
-            uplink_port = l2ri.value
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vlan-range':
-                    if range_prop.name == 'flat-range':
-                        self._local_flat_nw_uplink = uplink_port
-                    continue
-                vlanrange = range_prop.value
-                self._local_vlanrange_to_uplink[vlanrange] = uplink_port
-        return self._local_vlanrange_to_uplink
-
-    def _get_vlanrange_dict_val(self, vlanrangedict, vlanid):
-        """Each key in vlanrangedict is of the form
-        'vid_start_1-vid_end_1,vid_start_2-vid_end_2'..
-        This method parses the keys and finds the one which contains the
-        required vlanid and returns its corresponding dictionary value.
-        """
-        for vlan_ranges_str, value in vlanrangedict.iteritems():
-            if self._vid_in_vidrange(vlanid, vlan_ranges_str):
-                return value
-
-    def get_uplink_port(self, tenant_name, evs_uuid):
-        """ For VXLAN the uplink port is always ovs.vxlan1.
-        For flat, we can return local or global uplink port after executing
-        get_local_vlanrange_uplink_map() or get_global_vlanrange_uplink_map().
-        For vlan, to find we first find the vlan-id associated
-        with this evs. Then check which l2range object contains this vlan-id
-        for this host and get the corresponding uplink-port.
-        """
-        if l2type == L2_TYPE_VXLAN:
-            return VXLAN_UPLINK_PORT
-        elif l2type == L2_TYPE_FLAT:
-            if self.local_flat_nw_uplink:
-                return self.local_flat_nw_uplink
-            return self.global_flat_nw_uplink
-        assert l2type == L2_TYPE_VLAN
-        evs = self._get_evs(tenant_name, evs_uuid)
-        vlanid = int(evs.getProperty('vlanid')[0].current_value)
-        val = self._get_vlanrange_dict_val(
-            self.get_local_vlanrange_uplink_map(), vlanid)
-        if val:
-            return val
-        val = self._get_vlanrange_dict_val(
-            self.get_global_vlanrange_nw_uplink_map(), vlanid)[1]
-        return val
-
-    def get_vni_range_list(self):
-        vni_ranges_list = []
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host:
-                continue
-            for range_prop in l2ri.range:
-                if range_prop.name != 'vxlan-range':
-                    continue
-                vni_ranges_list += range_prop.value.split(',')
-        return vni_ranges_list
-
-    def get_vxlan_addrs_and_uplinks(self):
-        local_vxlan_addr, local_uplink_port = '', ''
-        global_vxlan_addr, global_uplink_port = '', ''
-        for l2ri in self.l2rangeinfo:
-            if l2ri.host:
-                if l2ri.host.split('.')[0] != HOSTNAME:
-                    # Don't care about other hosts' configurations
-                    continue
-                if l2ri.name == 'vxlan-addr':
-                    local_vxlan_addr = l2ri.value
-                    # if we found -h vxlan-addr, we don't need the other values
-                    break
-                elif l2ri.name == 'uplink-port':
-                    for range_prop in l2ri.range:
-                        if range_prop.name == 'vxlan-range':
-                            local_uplink_port = l2ri.value
-                            break
-            else:
-                if l2ri.name == 'vxlan_addr' and l2ri.value != '0.0.0.0':
-                    global_vxlan_addr = l2ri.value
-                else:
-                    for range_prop in l2ri.range:
-                        if range_prop.name == 'vxlan-range':
-                            global_uplink_port = l2ri.value
-                            break
-            if local_vxlan_addr and local_uplink_port and global_vxlan_addr \
-                    and global_uplink_port:
-                break
-        return (local_vxlan_addr, local_uplink_port, global_vxlan_addr,
-                global_uplink_port)
-
-
-def get_db_connection():
-    config = iniparse.ConfigParser()
-    config.readfp(open(NEUTRON_CONF))
-    if config.has_option('database', 'connection'):
-        connection = config.get('database', 'connection')
-    else:
-        raise SystemExit(_("Connection url for target DB not found."))
-    return connection
-
-
-class DBEVSToMl2(object):
-    def __init__(self):
-        self._table_names = ['ml2_network_segments', 'ml2_vxlan_allocations',
-                             'ml2_vlan_allocations', 'ml2_port_binding_levels',
-                             'ml2_port_bindings', 'router_extra_attributes']
-        self._vif_type = portbindings.VIF_TYPE_OVS
-        self._driver_type = 'openvswitch'
-        # _vlan_xrange_to_nw is a list of tuples to hold the mapping from
-        # vlan-id to physical_network. The tuple format is
-        # (xrange(vid_range_start, vid_range_end), physical_network).
-        self._vlan_xrange_to_nw = []
-
-    def __call__(self):
-        connection = get_db_connection()
-        engine = session.create_engine(connection)
-        metadata = sa.MetaData()
-        self._check_db_schema_version(engine, metadata)
-        # Autoload the ports table to ensure that foreign keys to it and
-        # the network table can be created for the new tables.
-        sa.Table('ports', metadata, autoload=True, autoload_with=engine)
-        metadata.create_all(engine)
-        self._clear_tables(engine, metadata)
-        self._get_vlanrange_mapping()
-        self._migrate_network_segments(engine, metadata)
-        self._migrate_vlan_allocations(engine)
-        self._migrate_vxlan_allocations(engine)
-        self._migrate_port_bindings(engine, metadata)
-        self._add_router_extra_attributes(engine, metadata)
-
-    def _check_db_schema_version(self, engine, metadata):
-        """Check that current version of the db schema is supported."""
-        supported_schema_version = 'kilo'
-        version_table = sa.Table(
-            'alembic_version', metadata, autoload=True, autoload_with=engine)
-        versions = [v[0] for v in engine.execute(version_table.select())]
-        if not versions:
-            raise ValueError(_("Missing version in alembic_versions table"))
-        elif len(versions) > 1:
-            raise ValueError(_("Multiple versions in alembic_versions table:"
-                               " %s") % versions)
-        current_version = versions[0]
-        if current_version != supported_schema_version:
-            raise SystemError(_("Unsupported database schema %(current)s. "
-                                "Please migrate your database to one of "
-                                " following versions: %(supported)s")
-                              % {'current': current_version,
-                                 'supported': supported_schema_version}
-                              )
-
-    def _clear_tables(self, engine, metadata):
-        for tbl_name in self._table_names:
-            sa.Table(tbl_name, metadata, autoload=True, autoload_with=engine)
-            tbl = metadata.tables[tbl_name]
-            engine.execute(tbl.delete())
-
-    def _get_vlanrange_mapping(self):
-        vlanrange_to_nw_uplink = evsutil.get_global_vlanrange_nw_uplink_map()
-        # mapping from vlan-id to physical_network
-        for vlan_ranges_str, (nw, _) in vlanrange_to_nw_uplink.iteritems():
-            vlan_ranges = vlan_ranges_str.split(',')
-            for vlan_range_str in vlan_ranges:
-                vlan_range = vlan_range_str.split("-")
-                vlan_start = int(vlan_range[0])
-                if len(vlan_range) == 2:
-                    vlan_end = int(vlan_range[1]) + 1
-                else:
-                    vlan_end = vlan_start + 1
-                self._vlan_xrange_to_nw.append((xrange(vlan_start, vlan_end),
-                                                nw))
-
-    def _get_phys_net(self, l2type, vid):
-        if l2type == L2_TYPE_VLAN:
-            for vid_range, phys in self._vlan_xrange_to_nw:
-                if vid in vid_range:
-                    return phys
-        elif l2type == L2_TYPE_FLAT:
-            return FLAT_PHYS_NET
-        return None
-
-    def _add_router_extra_attributes(self, engine, metadata):
-        routers = engine.execute("SELECT id FROM routers")
-        routers = list(routers)
-        records = []
-        for router in routers:
-            router_ext_attr = {}
-            router_ext_attr['router_id'] = router[0]
-            router_ext_attr['distributed'] = 0
-            router_ext_attr['service_router'] = 0
-            router_ext_attr['ha'] = 0
-            router_ext_attr['ha_vr_id'] = 0
-            records.append(router_ext_attr)
-
-        if records:
-            sa.Table('router_extra_attributes', metadata, autoload=True,
-                     autoload_with=engine)
-            router_ea = metadata.tables['router_extra_attributes']
-            engine.execute(router_ea.insert(), records)
-
-    def _migrate_network_segments(self, engine, metadata):
-        records = []
-        for evsinfo in evsutil.evsinfo:
-            segment = dict(id=uuidutils.generate_uuid())
-            segment['network_id'] = evsinfo.uuid
-            segment['segmentation_id'] = None
-            for prop in evsinfo.props:
-                if prop.name == 'l2-type':
-                    segment['network_type'] = prop.value
-                elif prop.name == 'vlanid' or prop.name == 'vni':
-                    segment['segmentation_id'] = int(prop.value)
-            phys_net = self._get_phys_net(segment['network_type'],
-                                          segment['segmentation_id'])
-            segment['physical_network'] = phys_net
-            records.append(segment)
-        if records:
-            sa.Table('ml2_network_segments', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_network_segments = metadata.tables['ml2_network_segments']
-            engine.execute(ml2_network_segments.insert(), records)
-
-    def _migrate_vxlan_allocations(self, engine):
-        vnis = []
-        for evsinfo in evsutil.evsinfo:
-            pdict = dict((prop.name, prop.value) for prop in evsinfo.props)
-            if L2_TYPE_VXLAN not in pdict.values():
-                continue
-            vnis.append(int(pdict['vni']))
-        records = [dict(vxlan_vni=vni, allocated=True) for vni in vnis]
-        if records:
-            metadata = sa.MetaData()
-            sa.Table('ml2_vxlan_allocations', metadata, autoload=True,
-                     autoload_with=engine)
-            vxlan_allocations = metadata.tables['ml2_vxlan_allocations']
-            engine.execute(vxlan_allocations.insert(), records)
-
-    def _migrate_vlan_allocations(self, engine):
-        vid_allocated_map = OrderedDict()
-        # initially set 'allocated' to False for all vids
-        for vid_range, _ in self._vlan_xrange_to_nw:
-            for vid in vid_range:
-                vid_allocated_map[vid] = False
-        for evsinfo in evsutil.evsinfo:
-            pdict = dict((prop.name, prop.value) for prop in evsinfo.props)
-            if L2_TYPE_VLAN not in pdict.values():
-                continue
-            vid = int(pdict['vlanid'])
-            vid_allocated_map[vid] = True
-        records = [
-            dict(physical_network=self._get_phys_net(L2_TYPE_VLAN, vid),
-                 vlan_id=vid, allocated=alloc)
-            for vid, alloc in vid_allocated_map.iteritems()
-        ]
-        if records:
-            metadata = sa.MetaData()
-            sa.Table('ml2_vlan_allocations', metadata, autoload=True,
-                     autoload_with=engine)
-            vlan_allocations = metadata.tables['ml2_vlan_allocations']
-            engine.execute(vlan_allocations.insert(), records)
-
-    def _get_port_segment_map(self, engine):
-        port_segments = engine.execute("""
-            SELECT ports_network.port_id, ml2_network_segments.id AS segment_id
-              FROM ml2_network_segments, (
-                SELECT ports.id AS port_id, ports.network_id
-                  FROM ports
-              ) AS ports_network
-              WHERE ml2_network_segments.network_id = ports_network.network_id
-        """)
-        return dict(x for x in port_segments)
-
-    def _migrate_port_bindings(self, engine, metadata):
-        ml2_bindings = []
-        ml2_binding_levels = []
-        port_segment_map = self._get_port_segment_map(engine)
-        metadata = sa.MetaData()
-        for vportinfo in evsutil.vportinfo:
-            binding = {}
-            binding['port_id'] = vportinfo.uuid
-            binding['host'] = vportinfo.hostname
-            if vportinfo.hostname:
-                binding['vif_type'] = self._vif_type
-                binding['vif_details'] = '{"port_filter": false, ' \
-                    '"ovs_hybrid_plug": false}'
-                ml2_bindings.append(binding)
-                binding_level = {}
-                binding_level['port_id'] = vportinfo.uuid
-                binding_level['host'] = vportinfo.hostname
-                binding_level['level'] = 0
-                binding_level['driver'] = self._driver_type
-                segment_id = port_segment_map.get(binding_level['port_id'])
-                if segment_id:
-                    binding_level['segment_id'] = segment_id
-                ml2_binding_levels.append(binding_level)
-            else:
-                binding['vif_type'] = 'unbound'
-                binding['vif_details'] = ''
-                ml2_bindings.append(binding)
-        if ml2_bindings:
-            sa.Table('ml2_port_bindings', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_port_bindings = metadata.tables['ml2_port_bindings']
-            engine.execute(ml2_port_bindings.insert(), ml2_bindings)
-        if ml2_binding_levels:
-            sa.Table('ml2_port_binding_levels', metadata, autoload=True,
-                     autoload_with=engine)
-            ml2_port_binding_lvls = metadata.tables['ml2_port_binding_levels']
-            engine.execute(ml2_port_binding_lvls.insert(), ml2_binding_levels)
-
-
-class NovaVmEVSToOVS(object):
-    def _zc_get_evs_vport_vals(self, zc, anet_rsc):
-        """Get mac-address and lower-link for this anet from evs.
-        """
-        mac_addr, uplink_port = None, None
-        tenant_name = zc.lookup_resource_property('global', 'tenant')
-        evs_uuid = zc.lookup_resource_property(anet_rsc, 'evs')
-        vport_uuid = zc.lookup_resource_property(anet_rsc, 'vport')
-        if not evs_uuid or not vport_uuid:
-            return mac_addr, uplink_port
-        mac_addr = evsutil.get_macaddr(tenant_name, evs_uuid, vport_uuid)
-        uplink_port = evsutil.get_uplink_port(tenant_name, evs_uuid)
-        return mac_addr, uplink_port
-
-    def migrate(self, zone):
-        """Update zonecfg by deleting evs-specific and adding ovs-specific conf
-        """
-        installed_port_uuids = []
-        with ZoneConfig(zone) as zc:
-            brand = zc.lookup_resource_property('global', 'brand')
-            anet_update_failed = False
-            for anet_rsc in zc.get_resources('anet'):
-                mac_addr, lower_link = self._zc_get_evs_vport_vals(zc,
-                                                                   anet_rsc)
-                if not mac_addr or not lower_link:
-                    anet_update_failed = True
-                    msg = "Failed to get ovs info for zone"
-                    log_msg(LOG_ERROR, msg)
-                    continue
-                if zone.state == 'installed':
-                    vport_uuid = zc.lookup_resource_property(anet_rsc, 'vport')
-                    if vport_uuid:
-                        installed_port_uuids.append(vport_uuid)
-                fname = 'id' if brand == 'solaris-kz' else 'linkname'
-                fvalue = zc.lookup_resource_property(anet_rsc, fname)
-                zc.clear_resource_props(anet_rsc, ['evs', 'vport'])
-                rsc_filter = [zonemgr.Property(fname, fvalue)]
-                zc.set_resource_prop('anet', 'mac-address', mac_addr,
-                                     rsc_filter)
-                zc.set_resource_prop('anet', 'lower-link', lower_link,
-                                     rsc_filter)
-
-            if not anet_update_failed:
-                zc.clear_resource_props('global', ['tenant'])
-        return installed_port_uuids
-
-
-class ConfigEVSToOVS():
-    def __init__(self):
-        # These are the configuration changes that are fixed, i.e., don't
-        # require extra computation. The data structure format is:
-        # _fixed = {config_file: [(section, param_name, param_value),]}
-        self._fixed = {
-            NEUTRON_CONF: [('DEFAULT', 'core_plugin', ML2_PLUGIN)],
-            ML2_INI: [('ml2_type_flat', 'flat_networks', 'flatnet')],
-            DHCP_INI: [('DEFAULT', 'interface_driver', OVS_INTFC_DRIVER),
-                       ('DEFAULT', 'ovs_integration_bridge', OVS_INT_BRIDGE)],
-            L3_INI: [('DEFAULT', 'interface_driver', OVS_INTFC_DRIVER),
-                     ('DEFAULT', 'ovs_integration_bridge', OVS_INT_BRIDGE),
-                     ('DEFAULT', 'external_network_bridge', OVS_EXT_BRIDGE)],
-            NOVA_CONF: [('neutron', 'ovs_bridge', OVS_INT_BRIDGE)]
-        }
-        # Config changes that are fixed depending on the l2-type
-        if l2type == L2_TYPE_VXLAN:
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'vxlan')]
-            self._fixed[OVS_INI] = [('ovs', 'enable_tunneling', 'True'),
-                                    ('agent', 'tunnel_types', 'vxlan')]
-        elif l2type == L2_TYPE_VLAN:
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'vlan')]
-        else:
-            assert l2type == L2_TYPE_FLAT
-            self._fixed[ML2_INI] += [('ml2', 'tenant_network_types', 'flat')]
-        self._vxlan_local_ip = None
-
-    def _read_config(self, conf_file):
-        config = iniparse.ConfigParser()
-        config.readfp(open(conf_file))
-        return config
-
-    def _write_config(self, conf_file, config):
-        with open(conf_file, 'wb+') as fp:
-            config.write(fp)
-
-    def _do_fixed(self, conf_file, config):
-        orig_conf_file = conf_file.replace('.migr', '')
-        if orig_conf_file not in self._fixed:
-            return
-        for sec, key, val in self._fixed[orig_conf_file]:
-            config.set(sec, key, val)
-
-    def _do_ml2_vlan_range(self, config):
-        vlanrange_to_nw_uplink = evsutil.get_global_vlanrange_nw_uplink_map()
-        nw_vlan_str_list = []
-        for vlan_ranges_str, (nw, _) in vlanrange_to_nw_uplink.iteritems():
-            vlan_ranges = vlan_ranges_str.split(',')
-            for vlan_range_str in vlan_ranges:
-                vlan_range = vlan_range_str.split("-")
-                vlan_start = vlan_end = vlan_range[0]
-                if len(vlan_range) == 2:
-                    vlan_end = vlan_range[1]
-                nw_vlan_str = nw + ":" + vlan_start + ":" + vlan_end
-                nw_vlan_str_list.append(nw_vlan_str)
-        nw_vlan_strs = ",".join(nw_vlan_str_list)
-        config.set('ml2_type_vlan', 'network_vlan_ranges', nw_vlan_strs)
-
-    def _do_ml2_vni_range(self, config):
-        vni_ranges_list = evsutil.get_vni_range_list()
-        vni_ranges_list = [vr.replace('-', ':') for vr in vni_ranges_list]
-        vni_ranges = ",".join(vni_ranges_list)
-        config.set('ml2_type_vxlan', 'vni_ranges', vni_ranges)
-
-    def _get_rabbit_host(self, conf_file):
-        config = self._read_config(conf_file)
-        host = 'localhost'
-        if config.has_option('DEFAULT', 'rabbit_host'):
-            host = config.get('DEFAULT', 'rabbit_host')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_host'):
-            host = config.get('oslo_messaging_rabbit', 'rabbit_host')
-
-        port = '5672'
-        if config.has_option('DEFAULT', 'rabbit_port'):
-            port = config.get('DEFAULT', 'rabbit_port')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_port'):
-            port = config.get('oslo_messaging_rabbit', 'rabbit_port')
-
-        hosts = ':'.join([host, port])
-        if config.has_option('DEFAULT', 'rabbit_hosts'):
-            hosts = config.get('DEFAULT', 'rabbit_hosts')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_hosts'):
-            hosts = config.get('oslo_messaging_rabbit', 'rabbit_hosts')
-
-        userid = RABBITMQ_DEFAULT_USERID
-        if config.has_option('DEFAULT', 'rabbit_userid'):
-            userid = config.get('DEFAULT', 'rabbit_userid')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_userid'):
-            userid = config.get('oslo_messaging_rabbit', 'rabbit_userid')
-
-        passwd = RABBITMQ_DEFAULT_PASSWORD
-        if config.has_option('DEFAULT', 'rabbit_password'):
-            passwd = config.get('DEFAULT', 'rabbit_password')
-        elif config.has_option('oslo_messaging_rabbit', 'rabbit_password'):
-            passwd = config.get('oslo_messaging_rabbit', 'rabbit_password')
-        passwd += '\n'
-
-        return (host, hosts, userid, passwd)
-
-    def _do_rabbit_host(self, config):
-        if SVC_NOVA_COMPUTE in curnode_svcs:
-            (host, hosts, userid, passwd) = self._get_rabbit_host(NOVA_CONF)
-        elif set([SVC_DHCP_AGENT, SVC_L3_AGENT]) & set(curnode_svcs):
-            (host, hosts, userid, passwd) = self._get_rabbit_host(NEUTRON_CONF)
-        else:
-            return
-        if not config.has_section('oslo_messaging_rabbit'):
-            config.add_section('oslo_messaging_rabbit')
-        config.set('oslo_messaging_rabbit', 'rabbit_host', host)
-        config.set('oslo_messaging_rabbit', 'rabbit_hosts', hosts)
-        config.set('oslo_messaging_rabbit', 'rabbit_userid', userid)
-        config.set('oslo_messaging_rabbit', 'rabbit_password', passwd)
-
-    def _get_local_ip(self, if_str='', subnet_str=''):
-        if not if_str and not subnet_str:
-            return None
-        for iface in ni.interfaces():
-            if if_str:
-                if iface != if_str:
-                    continue
-                # Only IPv4 addresses, not considering IPv6 since OVS
-                # doesn't support IPv6 VXLANs
-                for addrinfo in ni.ifaddresses(iface)[ni.AF_INET]:
-                    addr = addrinfo['addr']
-                    if subnet_str:
-                        if na.IPAddress(addr) in na.IPNetwork(subnet_str):
-                            return addr
-                    else:
-                        if addr != '127.0.0.1':
-                            return addr
-                break
-            else:
-                for addrinfo in ni.ifaddresses(iface)[ni.AF_INET]:
-                    addr = addrinfo['addr']
-                    if na.IPAddress(addr) in na.IPNetwork(subnet_str):
-                        return addr
-        return None
-
-    def _get_vxlan_local_ip(self):
-        """Returns the local_ip for vxlan_endpoint. It is found as follows:
-        1. If host specific vxlan-addr is present, use it.
-        2. If local uplink-port and global vxlan-addr(subnet) is present, use
-        the first IP address on that uplink-port which is in the subnet.
-        3. If local uplink-port, use the first IP on the uplink-port.
-        4. If global uplink-port and global vxlan-addr(subnet), use first
-        IP address on that uplink-port which is in the subnet.
-        5. If global vxlan-addr is configured only, use the first IP address
-        on any interface that is in the subnet of global vxlan-addr.
-        """
-        if self._vxlan_local_ip:
-            return self._vxlan_local_ip
-        (laddr, lup, gaddr, gup) = evsutil.get_vxlan_addrs_and_uplinks()
-        if laddr:
-            self._vxlan_local_ip = laddr
-        elif lup:
-            self._vxlan_local_ip = self._get_local_ip(lup, gaddr)
-        else:
-            self._vxlan_local_ip = self._get_local_ip(gup, gaddr)
-        return self._vxlan_local_ip
-
-    def _do_neutron_credentials(self, config, input_file, section):
-        neutron_cfg = self._read_config(input_file)
-        tenant = None
-        if neutron_cfg.has_option(section, 'admin_tenant_name'):
-            tenant = neutron_cfg.get(section, 'admin_tenant_name')
-            config.set('DEFAULT', 'admin_tenant_name', tenant)
-        user = None
-        if neutron_cfg.has_option(section, 'admin_user'):
-            user = neutron_cfg.get(section, 'admin_user')
-            config.set('DEFAULT', 'admin_user', user)
-        passwd = None
-        if neutron_cfg.has_option(section, 'admin_password'):
-            passwd = neutron_cfg.get(section, 'admin_password')
-            config.set('DEFAULT', 'admin_password', passwd)
-        auth_uri_option = ('auth_uri' if input_file == NEUTRON_CONF else
-                           'auth_url')
-        if neutron_cfg.has_option(section, auth_uri_option):
-            auth_url = neutron_cfg.get(section, auth_uri_option)
-            config.set('DEFAULT', 'auth_url', auth_url)
-        if neutron_cfg.has_option(section, 'auth_region'):
-            auth_region = neutron_cfg.get(section, 'auth_region')
-            config.set('DEFAULT', 'auth_region', auth_region)
-
-        if any('%SERVICE_' in val for val in [tenant, user, passwd]):
-            msg = "Neutron credentials are incomplete in %s" % L3_INI
-            log_msg(LOG_WARN, msg)
-
-    def _backup_file(self, orig_file):
-        today = datetime.now().strftime("%Y%m%d%H%M%S")
-        new_file = orig_file + '.' + today
-        try:
-            self._copy_file(orig_file, new_file)
-            msg = "Backed up current %s in %s" % (orig_file, new_file)
-            log_msg(LOG_DEBUG, msg)
-        except (IOError, OSError):
-            msg = "Unable to create a backup of %s" % orig_file
-            log_msg(LOG_WARN, msg)
-
-    def _copy_file(self, orig_file, new_file):
-        copy2(orig_file, new_file)
-        uid = file_owner[orig_file]
-        os.chown(new_file, uid, uid)
-
-    def update_neutron_conf(self):
-        self._backup_file(NEUTRON_CONF)
-        msg = "Updating %s" % NEUTRON_CONF
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(NEUTRON_CONF, NEUTRON_CONF + '.migr')
-        conf_file = NEUTRON_CONF + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        service_plugins = 'router'
-        if config.has_option('DEFAULT', 'service_plugins'):
-            service_plugins = config.get('DEFAULT', 'service_plugins')
-            if service_plugins:
-                service_plugins = 'router,' + service_plugins
-            else:
-                service_plugins = 'router'
-        config.set('DEFAULT', 'service_plugins', service_plugins)
-        self._write_config(conf_file, config)
-        move(conf_file, NEUTRON_CONF)
-
-    def update_ml2_conf_ini(self):
-        """
-        Reference target configuration state:
-        [ml2]
-        type_drivers = flat,vlan,vxlan
-        tenant_network_types = vlan
-        mechanism_drivers = openvswitch
-        [ml2_type_flat]
-        flat_networks = external
-        [ml2_type_vlan]
-        network_vlan_ranges = physnet1:300:400,extnet:240:240
-        [ml2_type_gre]
-        [ml2_type_vxlan]
-        [securitygroup]
-        enable_security_group = False
-        enable_ipset = False
-        """
-        self._backup_file(ML2_INI)
-        msg = "Updating %s" % ML2_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(ML2_INI, ML2_INI + '.migr')
-        conf_file = ML2_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        if l2type == L2_TYPE_VXLAN:
-            self._do_ml2_vni_range(config)
-        elif l2type == L2_TYPE_VLAN:
-            self._do_ml2_vlan_range(config)
-        self._write_config(conf_file, config)
-        move(conf_file, ML2_INI)
-
-    def update_ovs_neutron_plugin_ini(self, bmap_str):
-        """
-        Reference target configuration state:
-        [ovs]
-        integration_bridge = br_int0
-        bridge_mappings = physnet1:l3stub0 (for VLAN)
-        local_ip = A.B.C.D (for VXLAN)
-        enable_tunneling = True (for VXLAN)
-        [agent]
-        root_helper =
-        tunnel_types = vxlan (for VXLAN)
-        [securitygroup]
-        enable_security_group = False
-        """
-        self._backup_file(OVS_INI)
-        msg = "Updating %s" % OVS_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(OVS_INI, OVS_INI + '.migr')
-        conf_file = OVS_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        if l2type == L2_TYPE_VXLAN:
-            local_ip = self._get_vxlan_local_ip()
-            if local_ip:
-                config.set('ovs', 'local_ip', local_ip)
-            else:
-                msg = """Could not determine IP address for VXLAN endpoint.
-                Manually set the local_ip option in ovs_neutron_plugin.ini"""
-                log_msg(LOG_WARN, msg)
-        if bmap_str:
-            config.set('ovs', 'bridge_mappings', bmap_str)
-        self._do_rabbit_host(config)
-        self._write_config(conf_file, config)
-        move(conf_file, OVS_INI)
-
-    def update_dhcp_agent_ini(self):
-        self._backup_file(DHCP_INI)
-        msg = "Updating %s" % DHCP_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(DHCP_INI, DHCP_INI + '.migr')
-        conf_file = DHCP_INI + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        self._write_config(conf_file, config)
-        move(conf_file, DHCP_INI)
-
-    def update_l3_agent_ini(self):
-        self._backup_file(L3_INI)
-        msg = "Updating %s" % L3_INI
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(L3_INI, L3_INI + '.migr')
-        conf_file = L3_INI + '.migr'
-        config = self._read_config(conf_file)
-        if l2type == L2_TYPE_VLAN:
-            global external_network_datalink
-            if config.has_option('DEFAULT', 'external_network_datalink'):
-                external_network_datalink = \
-                    config.get('DEFAULT', 'external_network_datalink')
-                if not external_network_datalink:
-                    external_network_datalink = None
-            else:
-                external_network_datalink = 'net0'
-        self._do_fixed(conf_file, config)
-        if is_svc_online(SVC_METADATA_AGENT):
-            self._do_neutron_credentials(config, METADATA_INI, "DEFAULT")
-        else:
-            self._do_neutron_credentials(config, NEUTRON_CONF,
-                                         "keystone_authtoken")
-        self._write_config(conf_file, config)
-        move(conf_file, L3_INI)
-
-    def update_nova_conf(self):
-        self._backup_file(NOVA_CONF)
-        msg = "Updating %s" % NOVA_CONF
-        log_msg(LOG_DEBUG, msg)
-        self._copy_file(NOVA_CONF, NOVA_CONF + '.migr')
-        conf_file = NOVA_CONF + '.migr'
-        config = self._read_config(conf_file)
-        self._do_fixed(conf_file, config)
-        self._write_config(conf_file, config)
-        move(conf_file, NOVA_CONF)
-
-    def update_Open_vSwitch_other_config(self, bmap_str):
-        bm_str = "other_config:bridge_mappings=" + bmap_str
-        try:
-            check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', 'set',
-                        'Open_vSwitch', '.', bm_str])
-            msg = """Successfully set other_config column in Open_vSwitch table
-            with value %s.""" % bm_str
-            log_msg(LOG_DEBUG, msg)
-        except:
-            msg = """Failed to set other_config column in Open_vSwitch table
-            with value %s.""" % bm_str
-            log_msg(LOG_WARN, msg)
-
-
-def enable_svc(svcname, exit_on_fail=False):
-    msg = "Enabling service: %s" % svcname
-    log_msg(LOG_INFO, msg)
-    cmd = ['/usr/bin/pfexec', '/usr/sbin/svcadm', 'enable', '-s']
-    cmd.append(svcname)
-    try:
-        check_call(cmd, stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = """Failed to enable %s: %s.
-        Please verify "and manually enable the service""" % (svcname, err)
-        log_msg(LOG_ERROR, msg)
-        if exit_on_fail:
-            msg = "Exiting..."
-            log_msg(LOG_INFO, msg)
-            sys.exit(1)
-
-
-def disable_svc(svcname):
-    msg = "Disabling service: %s" % svcname
-    log_msg(LOG_INFO, msg)
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/svcadm', 'disable', '-s',
-                    svcname], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = "Failed to disable %s: %s." % (svcname, err)
-        log_msg(LOG_ERROR, msg)
-
-
-def nova_evs_to_ovs(migr_conf_obj):
-    # step-1: disable nova-compute
-    disable_svc(SVC_NOVA_COMPUTE)
-
-    # step-2: update zones' config
-    migr_vm = NovaVmEVSToOVS()
-    determine_neutron_conn_params()
-    zoneutil = ZoneUtil()
-    for name in zoneutil.get_zone_names():
-        zone = zoneutil.get_zone_by_name(name)
-        if not zone:
-            msg = "skipping EVS-OVS migration of VM %s; not found" % name
-            log_msg(LOG_DEBUG, msg)
-            continue
-        if zone.state == 'incomplete':
-            msg = """skipping EVS-OVS migration of VM %s; It is in 'incomplete'
-            state""" % name
-            log_msg(LOG_DEBUG, msg)
-            continue
-        with ZoneConfig(zone) as zc:
-            tenant_name = zc.lookup_resource_property('global', 'tenant')
-            if not tenant_name:
-                msg = """skipping EVS-OVS migration of non-openstack
-                managed VM %s""" % name
-                log_msg(LOG_DEBUG, msg)
-                continue
-            try:
-                uuid.UUID(tenant_name)
-            except:
-                msg = """skipping EVS-OVS migration of non-openstack
-                managed VM %s""" % name
-                log_msg(LOG_DEBUG, msg)
-                continue
-        msg = "Performing EVS-OVS migration of VM: %s" % name
-        log_msg(LOG_INFO, msg)
-
-        # step 2.1: migrate zone config
-        installed_port_uuids = migr_vm.migrate(zone)
-        # step 2.2: shutdown
-        if zone.state == 'running':
-            try:
-                msg = "Shutting down VM: %s, after modifying zone's config" % \
-                    name
-                log_msg(LOG_DEBUG, msg)
-                zone.shutdown()
-            except Exception as ex:
-                msg = """ Failed to shutdown instance %s. The zone's config
-                has been modified to OVS. Manually start the VM""" % name
-                log_msg(LOG_WARN, msg)
-        if installed_port_uuids:
-            nc = neutron_client.Client(
-                username=neutron_conn['username'],
-                password=neutron_conn['password'],
-                tenant_name=neutron_conn['tenant'],
-                auth_url=neutron_conn['auth_url'])
-            for vport_uuid in installed_port_uuids:
-                port_req_body = {'port': {'binding:host_id': HOSTNAME}}
-                nc.update_port(vport_uuid, port_req_body)
-
-    # step-3: change nova.conf
-    migr_conf_obj.update_nova_conf()
-
-    # we will enable the service later
-
-
-def dhcp_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-dhcp-agent
-    disable_svc(SVC_DHCP_AGENT)
-
-    # step-2: change dhcp_agent.ini
-    migr_conf_obj.update_dhcp_agent_ini()
-
-    # we will enable the service later
-
-
-def add_ovs_bridge(bridge_name):
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', '--',
-                    '--may-exist', 'add-br', bridge_name], stdout=PIPE,
-                   stderr=PIPE)
-        msg = "Created %s ovs bridge" % bridge_name
-        log_msg(LOG_DEBUG, msg)
-        if bridge_name == OVS_EXT_BRIDGE:
-            check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl',
-                        'br-set-external-id', OVS_EXT_BRIDGE, 'bridge-id',
-                        OVS_EXT_BRIDGE])
-    except CalledProcessError as err:
-        msg = "Failed to create %s ovs bridge: %s" % (bridge_name, err)
-        log_msg(LOG_ERROR, msg)
-
-
-def l3_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-l3-agent
-    disable_svc(SVC_L3_AGENT)
-
-    # step-2: change l3_agent.ini and ovs_neutron_plugin.ini
-    migr_conf_obj.update_l3_agent_ini()
-
-    # step-3: create external network bridge
-    add_ovs_bridge(OVS_EXT_BRIDGE)
-
-    # we will enable the service later
-
-
-def neutron_evs_to_ovs(migr_conf_obj):
-    # step-1: disable neutron-server
-    disable_svc(SVC_NEUTRON_SERVER)
-
-    # step-2: migrate DB to ml2
-    migr_ml2 = DBEVSToMl2()
-    migr_ml2()
-
-    # step-3: change ml2_conf.ini and neutron.conf
-    migr_conf_obj.update_ml2_conf_ini()
-    migr_conf_obj.update_neutron_conf()
-
-    # step-4: enable neutron-server
-    enable_svc(SVC_NEUTRON_SERVER)
-
-
-def is_svc_online(svc, exit_on_maintenance=False):
-    try:
-        state = check_output(['/usr/bin/svcs', '-H', '-o', 'state', svc],
-                             stderr=PIPE)
-    except:
-        return False
-    if exit_on_maintenance and state.strip() == 'maintenance':
-        msg = """Unable to perform EVS to OVS migration as %s is in maintenance
-            state. Please fix the errors and clear the svc before running
-            migration""" % svc
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-    return state.strip() == 'online'
-
-
-def create_backup_be():
-    msg = "Creating backup BE"
-    log_msg(LOG_INFO, msg)
-    boot_envs = check_output(['/usr/sbin/beadm', 'list', '-H'],
-                             stderr=PIPE)
-    for be in boot_envs.splitlines():
-        be_fields = be.split(';')
-        if 'N' in be_fields[2]:
-            curr_be = be_fields[0]
-            backup_be = curr_be + '-backup-ovs-upgrade'
-            break
-    msg = "Active BE is: %s" % curr_be
-    log_msg(LOG_DEBUG, msg)
-    try:
-        check_call(['/usr/sbin/beadm', 'create', backup_be], stdout=PIPE,
-                   stderr=PIPE)
-        msg = "Created backup BE: " + backup_be
-        log_msg(LOG_DEBUG, msg)
-    except:
-        msg = "Backup BE already exists: " + backup_be
-        log_msg(LOG_DEBUG, msg)
-
-
-def get_node_svcs():
-    global curnode_svcs
-    for svc in ALL_SVCS:
-        if is_svc_online(svc):
-            curnode_svcs.append(svc)
-
-
-def get_default_gateways():
-    def_gws = set()
-    routes = check_output(['/usr/bin/pfexec', '/usr/bin/netstat',
-                           '-arn']).splitlines()
-    for route in routes:
-        route = route.strip()
-        elems = route.split()
-        if elems and elems[0] == 'default':
-            def_gws.add(elems[1])
-    return def_gws
-
-
-def add_uplink_to_br(uplink, bridge):
-    def add_ips_and_gws_to_port(port):
-        if ips:
-            try:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'show-if',
-                            port], stdout=PIPE, stderr=PIPE)
-            except CalledProcessError:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'create-ip',
-                            port], stdout=PIPE)
-        aconf_configured = False
-        for ip in ips:
-            msg = "Adding IP %s to %s" % (ip, port)
-            log_msg(LOG_DEBUG, msg)
-            addrtype_addr = ip.split(':')
-            addrtype, addr = addrtype_addr[0], addrtype_addr[1]
-            if addrtype == 'static':
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                            'create-addr', '-T',  addrtype, '-a', addr, port],
-                           stdout=PIPE)
-            elif addrtype == 'addrconf':
-                if not aconf_configured:
-                    check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                                'create-addr', '-T', addrtype, port],
-                               stdout=PIPE)
-                    aconf_configured = True
-            else:
-                check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm',
-                            'create-addr', '-T', addrtype, port], stdout=PIPE)
-        new_gateways = get_default_gateways()
-        removed_gateways = old_gateways - new_gateways
-        for gw in removed_gateways:
-            # simple check for IPv6 address
-            if ':' in gw:
-                continue
-            msg = "Adding default gateway %s" % gw
-            log_msg(LOG_DEBUG, msg)
-            check_call(['/usr/bin/pfexec', '/usr/sbin/route', 'add', 'default',
-                        gw], stdout=PIPE)
-
-    msg = "Migrating %s link to OVS bridge: %s" % (uplink, bridge)
-    log_msg(LOG_DEBUG, msg)
-    # Store IP and gateway info
-    ips = []
-    old_gateways = get_default_gateways()
-    try:
-        ips = check_output(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'show-addr',
-                            '-po', 'type,addr',
-                            uplink], stderr=PIPE).splitlines()
-        check_call(['/usr/bin/pfexec', '/usr/sbin/ipadm', 'delete-ip',
-                    uplink], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        pass
-
-    try:
-        check_call(['/usr/bin/pfexec', '/usr/sbin/dladm', 'set-linkprop', '-p',
-                    'openvswitch=on', uplink], stdout=PIPE, stderr=PIPE)
-    except CalledProcessError as err:
-        msg = """Failed to set openvswitch property=on for %s - link is busy.
-        Follow the below steps to migrate link to OVS bridge manually.
-        1. Remove any flows, IP etc. so that link is unused.
-        2. dladm set-linkprop -p openvswitch=on %s
-        3. ovs-vsctl -- --may-exist add-port %s %s
-        4. Replumb IPs, if existed before on %s, on %s.""" % \
-            (uplink, uplink, bridge, uplink, uplink, bridge)
-        log_msg(LOG_ERROR, msg, oneliner=False)
-        return
-
-    # add uplink to bridge
-    check_call(['/usr/bin/pfexec', '/usr/sbin/ovs-vsctl', '--', '--may-exist',
-                'add-port', bridge, uplink])
-    try:
-        add_ips_and_gws_to_port(bridge)
-    except CalledProcessError as err:
-        msg = """Failed to configure the IPs(%s) on %s VNIC. Manually
-        configure the IPs and set default gateway""" % (ips, bridge)
-        log_msg(LOG_ERROR, msg)
-
-
-def get_uplink_ports_for_int_bridge():
-    int_uplinks = set(bridge_mappings.values())
-    int_uplinks.discard(external_network_datalink)
-    return int_uplinks
-
-
-def get_uplink_port_for_ext_bridge():
-    if l2type == L2_TYPE_VLAN and external_network_datalink is not None:
-        return external_network_datalink
-    return bridge_mappings.get(external_network_name)
-
-
-def determine_neutron_conn_params():
-        global neutron_conn
-        if neutron_conn:
-            return
-        config = iniparse.ConfigParser()
-        if SVC_NOVA_COMPUTE in curnode_svcs:
-            config.readfp(open(NOVA_CONF))
-            neutron_conn['username'] = config.get('neutron', 'admin_username')
-            neutron_conn['password'] = config.get('neutron', 'admin_password')
-            neutron_conn['tenant'] = config.get('neutron', 'admin_tenant_name')
-            neutron_conn['auth_url'] = \
-                config.get('keystone_authtoken', 'auth_uri')
-        else:
-            config.readfp(open(NEUTRON_CONF))
-            neutron_conn['username'] = \
-                config.get('keystone_authtoken', 'admin_user')
-            neutron_conn['password'] = \
-                config.get('keystone_authtoken', 'admin_password')
-            neutron_conn['tenant'] = \
-                config.get('keystone_authtoken', 'admin_tenant_name')
-            neutron_conn['auth_url'] = \
-                config.get('keystone_authtoken', 'auth_uri')
-
-
-def determine_external_network_name():
-    global external_network_name, external_network_vid
-    determine_neutron_conn_params()
-    nc = neutron_client.Client(username=neutron_conn['username'],
-                               password=neutron_conn['password'],
-                               tenant_name=neutron_conn['tenant'],
-                               auth_url=neutron_conn['auth_url'])
-    search_opts = {'router:external': True}
-    try:
-        external_network = nc.list_networks(**search_opts)['networks']
-    except:
-        msg = """Could not get external network information from
-        neutron-server. Make sure it is online."""
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-
-    if not external_network:
-        return
-    external_network = external_network[0]
-    nw_type = external_network['provider:network_type']
-    if nw_type == L2_TYPE_FLAT:
-        external_network_name = FLAT_PHYS_NET
-    else:
-        assert nw_type == L2_TYPE_VLAN
-        external_network_name = EXT_VLAN_PHYS_NET
-        external_network_vid = external_network['provider:segmentation_id']
-    msg = "External Network name is " + external_network_name
-    log_msg(LOG_DEBUG, msg)
-
-
-def determine_bridge_mappings():
-    global bridge_mappings, external_network_datalink
-    global_nw_uplink_map = evsutil.get_global_vlanrange_nw_uplink_map()
-    local_uplink_map = evsutil.get_local_vlanrange_uplink_map()
-    # Any local uplink ports should have the same vlan-range boundaries
-    # as the global ones. This is expected in an openstack deployment but
-    # is not enforced by evs itself. So we raise a warning if we encounter
-    # a local uplink-port for a vlan-range whose boundaries are different
-    # from any that are defined globally.
-    errs = set(local_uplink_map.keys()) - set(global_nw_uplink_map.keys())
-    if errs:
-        errs = ','.join(errs)
-        msg = """Found the following incorrect vlan_ranges that were not
-        added to bridge_mappings in ovs_neutron_plugin.ini. Please update
-        manually if necessary - %s""" % errs
-        log_msg(LOG_WARN, msg)
-    for vlanranges_str, (nw, uplink) in global_nw_uplink_map.iteritems():
-        uplink = local_uplink_map.get(vlanranges_str, uplink)
-        bridge_mappings[nw] = uplink
-    if evsutil.local_flat_nw_uplink:
-        bridge_mappings[FLAT_PHYS_NET] = evsutil.local_flat_nw_uplink
-    elif evsutil.global_flat_nw_uplink:
-        bridge_mappings[FLAT_PHYS_NET] = evsutil.global_flat_nw_uplink
-
-    external_network_datalink = bridge_mappings.get(external_network_name)
-    if external_network_datalink:
-        msg = "External Network datalink is " + external_network_datalink
-        log_msg(LOG_DEBUG, msg)
-    if bridge_mappings.values().count(external_network_datalink) > 1:
-        msg = """The external network datalink '%s' cannot be the uplink-port
-        of any physical network other than external network. Please satisfy
-        this condition before running migration.""" % external_network_datalink
-        log_msg(LOG_ERROR, msg)
-        sys.exit(1)
-
-    # Depending on l2type and whether l3-agent is running on this node,
-    # bridge_mappings should have the following:
-    # 1. l3-agent not in node and l2type = vxlan => no bridge mappings. This is
-    # already handled since determine_bridge_mappings() won't be called for
-    # this condition.
-    # 2. l3-agent not in node and l2type = vlan/flat => bridge mappings should
-    # not have mapping for external network.
-    # 3. l3-agent in node and l2type = vxlan => bridge mappings should have
-    # only the mapping for external network.
-    # 4. l3-agent in node and l2type = vlan/flat => bridge mappings should have
-    # all the orignial mappings.
-    if SVC_L3_AGENT not in curnode_svcs:
-        bridge_mappings.pop(external_network_name, None)
-    elif l2type == L2_TYPE_VXLAN:
-        bridge_mappings.clear()
-        if external_network_datalink:
-            bridge_mappings[external_network_name] = \
-                external_network_datalink
-
-
-def finish():
-    msg = "Migration Successful"
-    log_msg(LOG_INFO, msg)
-    check_call(['/usr/bin/pfexec', '/usr/sbin/svccfg', '-s',
-                SVC_NEUTRON_UPGRADE, 'setprop', 'config/evs2ovs', '=',
-                'astring:', 'done'], stdout=PIPE, stderr=PIPE)
-    check_call(['/usr/bin/pfexec', '/usr/sbin/svccfg', '-s',
-                SVC_NEUTRON_UPGRADE, 'refresh'], stdout=PIPE, stderr=PIPE)
-    msg = "Exiting..."
-    log_msg(LOG_INFO, msg)
-    sys.exit()
-
-
-def main():
-    # help text
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter, description='''
-    Migration script to migrate OpenStack Cloud based on EVS to an
-    OpenStack cloud based on OVS.
-
-    There are four steps to migration:
-        -- Populate Neutron ML2 tables
-        -- Replace EVS information in existing configuration files with OVS
-           (neutron.conf, dhcp_agent.ini, l3_agent.ini, and nova.conf)
-        -- Add OVS information to new configuration files
-           (ml2_conf.ini and ovs_neutron_agent.ini)
-        -- Clear EVS information in Zones and populate the anets for OVS
-
-    The nodes must be migrated in the following order:
-        -- controller node running neutron-server
-        -- all of the nodes running neutron-dhcp-agent or neutron-l3-agent
-        -- all of the compute nodes
-
-    It is advisable to run migration with nohup if using ssh over a link that
-    is also used by OpenStack.
-    ''')
-    parser.parse_args()
-
-    signal.signal(signal.SIGHUP, signal.SIG_IGN)
-    try:
-        out = check_output(['/usr/bin/pfexec', '/usr/bin/svcprop', '-p',
-                            'config/evs2ovs', SVC_NEUTRON_UPGRADE],
-                           stderr=PIPE)
-        if out.strip() == 'done':
-            msg = "Migration has already run on this node."
-            log_msg(LOG_INFO, msg)
-            return
-    except:
-        pass
-
-    # get the current node services
-    get_node_svcs()
-    if not curnode_svcs:
-        msg = "Nothing to migrate on this node. Quitting."
-        log_msg(LOG_INFO, msg)
-        return
-
-    msg = """The script has determined that following services - %s - are
-    online and the system will be migrated based on these services.""" % \
-        ', '.join(curnode_svcs)
-    log_msg(LOG_INFO, msg)
-
-    # Create backup BE
-    create_backup_be()
-
-    # Even if nova-compute is the only svc on this node, make sure neutron
-    # is also installed.
-    if not set(curnode_svcs) - set([SVC_NOVA_COMPUTE]):
-        try:
-            check_call(['pkg', 'info', 'neutron'], stdout=PIPE, stderr=PIPE)
-        except:
-            msg = "cloud/openstack/neutron pkg not found."
-            log_msg(LOG_ERROR, msg)
-            msg = """cloud/openstack/neutron pkg needs to be installed on this
-            node before migration."""
-            log_msg(LOG_INFO, msg)
-            return
-
-    # If nova-compute is running on this node, we can execute everything as
-    # root. Else, this is a network node and we can execute everything as
-    # neutron user.
-    if SVC_NOVA_COMPUTE not in curnode_svcs:
-        msg = "Changing user to neutron"
-        log_msg(LOG_DEBUG, msg)
-        os.setgid(UID_NEUTRON)
-        os.setuid(UID_NEUTRON)
-
-    global evsutil
-    evsutil = EVSUtil()
-    global l2type
-    l2type = evsutil.l2type
-    msg = "l2type = %s" % l2type
-    log_msg(LOG_DEBUG, msg)
-    migr_conf_obj = ConfigEVSToOVS()
-
-    # step-0: Determine bridge_mappings and ensure external network datalink
-    # is not serving as uplink port for other physical networks. This is only
-    # required if l2-type is VLAN or FLAT or if neutron-l3-agent is running on
-    # this node.
-    if l2type != L2_TYPE_VXLAN or SVC_L3_AGENT in curnode_svcs:
-        determine_external_network_name()
-        determine_bridge_mappings()
-
-    # step-1: Populate ML2 tables and update Neutron and ML2 config files.
-    if SVC_NEUTRON_SERVER in curnode_svcs:
-        msg = "Current migration based on svc: %s" % SVC_NEUTRON_SERVER
-        log_msg(LOG_INFO, msg)
-        neutron_evs_to_ovs(migr_conf_obj)
-        # We have already enabled neutron-server. There is nothing else to do
-        # wrt the service.
-        curnode_svcs.remove(SVC_NEUTRON_SERVER)
-
-    # We don't need to do anything else if neutron-server is the only service
-    # we are migrating on this node.
-    if not curnode_svcs:
-        finish()
-
-    # step-2: add ovs integration bridge and update conf for
-    # neutron-openvswitch-agent.
-    if not is_svc_online(SVC_OVSDB_SERVER, exit_on_maintenance=True):
-        enable_svc(SVC_OVSDB_SERVER, exit_on_fail=True)
-    if not is_svc_online(SVC_VSWITCH_SERVER, exit_on_maintenance=True):
-        enable_svc(SVC_VSWITCH_SERVER, exit_on_fail=True)
-    add_ovs_bridge(OVS_INT_BRIDGE)
-    bmap_str = ''
-    if bridge_mappings:
-        for nw, uplink in bridge_mappings.iteritems():
-            bmap_str += nw + ':' + uplink + ','
-        bmap_str = bmap_str.strip(',')
-    if bmap_str:
-        msg = "bridge_mappings = " + bmap_str
-        log_msg(LOG_DEBUG, msg)
-        migr_conf_obj.update_Open_vSwitch_other_config(bmap_str)
-    migr_conf_obj.update_ovs_neutron_plugin_ini(bmap_str)
-    # we will enable the OVS agent later
-
-    # step-3: migrate the other services.
-    svc_func_map = {
-        SVC_DHCP_AGENT: dhcp_evs_to_ovs,
-        SVC_L3_AGENT: l3_evs_to_ovs,
-        SVC_NOVA_COMPUTE: nova_evs_to_ovs
-    }
-
-    for svc in curnode_svcs:
-        msg = "Current migration based on svc: %s" % svc
-        log_msg(LOG_INFO, msg)
-        svc_func_map[svc](migr_conf_obj)
-
-    # At this point we have disabled all the services that we are interested
-    # in. Now we need to add the right uplink-port to the OVS bridges.
-    if l2type == L2_TYPE_VXLAN:
-        # check if there are any left over evs-vxlan datalinks
-        output = check_output(['/usr/sbin/dladm', 'show-vxlan', '-po', 'link'],
-                              stderr=PIPE)
-        if len(output.strip().splitlines()) != 0:
-            msg = """There are other VXLAN datalinks present and as a result
-            OVS agent will go into maintenance. Please remove these datalinks
-            and clear the OVS agent service."""
-            log_msg(LOG_WARN, msg)
-    else:
-        assert l2type == L2_TYPE_VLAN or l2type == L2_TYPE_FLAT
-        int_uplinks = get_uplink_ports_for_int_bridge()
-        # add the uplink-ports to integration bridge
-        for uplink in int_uplinks:
-            add_uplink_to_br(uplink, OVS_INT_BRIDGE)
-
-    # enable all services
-    enable_svc(SVC_OVS_AGENT)
-    for svc in curnode_svcs:
-        if svc == SVC_L3_AGENT:
-            # add the port to br_ex0
-            ext_uplink = get_uplink_port_for_ext_bridge()
-            if ext_uplink:
-                add_uplink_to_br(ext_uplink, OVS_EXT_BRIDGE)
-        enable_svc(svc)
-
-    finish()
-
-
-if __name__ == "__main__":
-    main()
--- a/components/openstack/neutron/files/evs/plugin.py	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,707 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Girish Moodalbail, Oracle, Inc.
-
-import rad.client as radcli
-import rad.connect as radcon
-import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
-
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
-from neutron.api.rpc.handlers import dhcp_rpc
-from neutron.api.rpc.handlers import l3_rpc
-from neutron.api.rpc.handlers import metadata_rpc
-from neutron.api.v2 import attributes
-from neutron.common import constants
-from neutron.common import exceptions
-from neutron.common import rpc as n_rpc
-from neutron.common import topics
-from neutron.db import agents_db
-from neutron.db import agentschedulers_db
-from neutron.db import api as db
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import l3_agentschedulers_db
-from neutron.db import l3_attrs_db
-from neutron.db import l3_gwmode_db
-from neutron.db import models_v2
-from neutron.db import portbindings_db
-from neutron.db import quota_db
-from neutron.db import securitygroups_db
-from neutron.extensions import external_net
-from neutron.extensions import providernet
-from neutron.plugins.common import constants as svc_constants
-from neutron.plugins.ml2 import models
-
-LOG = logging.getLogger(__name__)
-# Only import the vpn server code if it exists.
-try:
-    sp = cfg.CONF.service_plugins
-    vpns = 'vpnaas'
-    if vpns in sp:
-        try:
-            from neutron_vpnaas.db.vpn import vpn_db
-            LOG.debug("Loading VPNaaS service driver.")
-        except ImportError:
-            pass
-    else:
-        LOG.debug("vpnaas service_plugin not configured")
-except:
-    pass
-
-evs_controller_opts = [
-    cfg.StrOpt('evs_controller', default='ssh://[email protected]',
-               help=_("An URI that specifies an EVS controller"))
-]
-
-cfg.CONF.register_opts(evs_controller_opts, "EVS")
-
-
-class EVSControllerError(exceptions.NeutronException):
-    message = _("EVS controller: %(errmsg)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
-
-
-class EVSOpNotSupported(exceptions.NeutronException):
-    message = _("Operation not supported by EVS plugin: %(opname)s")
-
-    def __init__(self, evs_errmsg):
-        super(EVSOpNotSupported, self).__init__(opname=evs_errmsg)
-
-
-class EVSNotFound(exceptions.NeutronException):
-    message = _("Network %(net_id)s could not be found in EVS")
-
-
-class EVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
-                         agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                         external_net_db.External_net_db_mixin,
-                         l3_agentschedulers_db.L3AgentSchedulerDbMixin,
-                         l3_gwmode_db.L3_NAT_db_mixin):
-    """Implements v2 Neutron Plug-in API specification.
-
-    All the neutron API calls to create/delete/retrieve Network/Subnet/Port
-    are forwarded to EVS controller through Solaris RAD. The RAD connection
-    to EVS Controller is over SSH. In order that this plugin can communicate
-    with EVS Controller non-interactively and securely, one should setup SSH
-    authentication with pre-shared keys between the host running neutron-server
-    and the host running EVS controller.
-
-    The following table maps OpenStack Neutron resources and attributes to
-    Solaris Elastic Virtual Switch resources and attributes
-
-    |---------------------+------------------+------------------------------|
-    | OpenStack Neutron   | Solaris EVS      | Comments                     |
-    |---------------------+------------------+------------------------------|
-    | Network             | EVS              | Represents an isolated L2    |
-    | -- name             | -- name          | segment; implemented either  |
-    | -- id               | -- uuid          | through VLANs or VXLANs      |
-    | -- tenant_id        | -- tenant        |                              |
-    | -- shared           | Always False     |                              |
-    | -- admin_state_up   | Always True      |                              |
-    | -- status           | Always ACTIVE    |                              |
-    | -- provider:        |                  |                              |
-    |    network_type     |  -- l2-type      | (either VLAN or VXLAN)       |
-    | -- provider:        |                  |                              |
-    |    segmentation_id  |  -- vlanid/vni   |                              |
-    |                     |                  |                              |
-    |                     |                  |                              |
-    | Subnet              | IPnet            | An IP network represents     |
-    | -- name             | -- name          | a block of either IPv4       |
-    | -- id               | -- uuid          | or IPv6 addresses (subnet)   |
-    | -- network_id       | -- evs           | along with a default router  |
-    | -- tenant_id        | -- tenant        | for the block                |
-    | -- cidr             | -- subnet        |                              |
-    | -- gateway_ip       | -- defrouter     |                              |
-    | -- allocation_pools | -- start/stop    |                              |
-    | -- dns_nameservers  | -- OpenStack:\   |                              |
-    |                     | dns_nameservers  |                              |
-    | -- host_routes      | -- OpenStack:\   |                              |
-    |                     | host_routes      |                              |
-    | -- enable_dhcp      | -- OpenStack:\   |                              |
-    |                     | enable_dhcp      |                              |
-    | -- shared           | Always False     |                              |
-    |                     |                  |                              |
-    | Port                | VPort            | A VPort represents the point |
-    | -- name             | -- name          | of attachment between the    |
-    | -- id               | -- uuid          | VNIC and an EVS. It          |
-    | -- network_id       | -- evs           | encapsulates various network |
-    | -- tenant_id        | -- tenant        | configuration parameters (   |
-    | -- status           | -- status        | MAC addresses, IP addresses, |
-    | -- mac_address      | -- macaddr       | and SLAs)                    |
-    | -- fixed_ips        | -- ipaddr        |                              |
-    | -- device_id        | -- OpenStack:\   |                              |
-    |                     |    device_id     |                              |
-    | -- device_owner     | -- OpenStack:\   |                              |
-    |                     |    device_owner  |                              |
-    | -- security_groups  | -- Not Supported |                              |
-    | -- admin_state_up   | Always UP        |                              |
-    |---------------------+------------------+------------------------------|
-    """
-
-    _supported_extension_aliases = ["provider", "external-net", "router",
-                                    "ext-gw-mode", "quotas", "agent",
-                                    "l3_agent_scheduler",
-                                    "dhcp_agent_scheduler"]
-
-    def __init__(self):
-        self.network_scheduler = importutils.import_object(
-            cfg.CONF.network_scheduler_driver
-        )
-        self.router_scheduler = importutils.import_object(
-            cfg.CONF.router_scheduler_driver
-        )
-        self._setup_rpc()
-        self._rad_connection = None
-
-    @property
-    def rad_connection(self):
-        # Since there is no connect_uri() yet, we need to do
-        # parsing of ssh://[email protected] ourselves
-        suh = cfg.CONF.EVS.evs_controller.split('://')
-        if len(suh) != 2 or suh[0] != 'ssh' or not suh[1].strip():
-            raise SystemExit(_("Specified evs_controller is invalid"))
-        uh = suh[1].split('@')
-        if len(uh) != 2 or not uh[0].strip() or not uh[1].strip():
-            raise SystemExit(_("'user' and 'hostname' need to be specified "
-                               "for evs_controller"))
-
-        if (self._rad_connection is not None and
-                self._rad_connection._closed is None):
-            return self._rad_connection
-
-        LOG.debug(_("Connecting to EVS Controller at %s as %s") %
-                  (uh[1], uh[0]))
-        self._rad_connection = radcon.connect_ssh(uh[1], user=uh[0])
-        return self._rad_connection
-
-    def _setup_rpc(self):
-        # RPC support
-        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
-                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
-        self.conn = n_rpc.create_connection(new=True)
-        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
-                          l3_rpc.L3RpcCallback(),
-                          agents_db.AgentExtRpcCallback(),
-                          metadata_rpc.MetadataRpcCallback()]
-        for svc_topic in self.service_topics.values():
-            self.conn.create_consumer(svc_topic, self.endpoints, fanout=False)
-        # Consume from all consumers in a thread
-        self.conn.consume_in_threads()
-        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
-        self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
-
-        # needed by AgentSchedulerDbMixin()
-        self.agent_notifiers[constants.AGENT_TYPE_DHCP] = \
-            self.dhcp_agent_notifier
-        self.agent_notifiers[constants.AGENT_TYPE_L3] = \
-            self.l3_agent_notifier
-
-    @property
-    def supported_extension_aliases(self):
-        return self._supported_extension_aliases
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_addIPnet(self, tenantname, evsname, ipnetname,
-                                 propstr):
-        LOG.debug(_("Adding IPnet: %s with properties: %s for tenant: %s "
-                    "and for evs: %s") %
-                  (ipnetname, propstr, tenantname, evsname))
-
-        pat = radcli.ADRGlobPattern({'name': evsname,
-                                     'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            ipnet = evs.addIPnet(propstr, ipnetname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return ipnet
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_updateIPnet(self, ipnetuuid, propstr):
-        LOG.debug(_("Updating IPnet with id: %s with property string: %s") %
-                  (ipnetuuid, propstr))
-        pat = radcli.ADRGlobPattern({'uuid': ipnetuuid})
-        try:
-            ipnetlist = self.rad_connection.list_objects(evsbind.IPnet(), pat)
-            if not ipnetlist:
-                return
-            assert len(ipnetlist) == 1
-            ipnet = self.rad_connection.get_object(ipnetlist[0])
-            ipnet.setProperty(propstr)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def _subnet_pool_to_evs_pool(self, subnet):
-        poolstr = ""
-        # obtain the optional allocation pool
-        pools = subnet.get('allocation_pools')
-        if not pools or pools is attributes.ATTR_NOT_SPECIFIED:
-            return poolstr
-
-        for pool in pools:
-            if poolstr:
-                poolstr += ","
-            # if start and end address is same, EVS expects the address
-            # to be provided as-is instead of x.x.x.x-x.x.x.x
-            if pool['start'] == pool['end']:
-                poolstr += pool['start']
-            else:
-                poolstr += "%s-%s" % (pool['start'], pool['end'])
-        return poolstr
-
-    def create_subnet(self, context, subnet):
-        """Creates a subnet(IPnet) for a given network(EVS).
-
-         An IP network represents a block of either IPv4 or IPv6 addresses
-         (i.e., subnet) along with a default router for the block. Only one
-         IPnet can be associated with an EVS. All the zones/VNICs that
-         connect to the EVS, through a VPort, will get an IP address from the
-         IPnet associated with the EVS.
-        """
-
-        if (subnet['subnet']['host_routes'] is not
-                attributes.ATTR_NOT_SPECIFIED):
-            raise EVSOpNotSupported(_("setting --host-route for a subnet "
-                                      "not supported"))
-
-        poolstr = self._subnet_pool_to_evs_pool(subnet['subnet'])
-
-        with context.session.begin(subtransactions=True):
-            # create the subnet in the DB
-            db_subnet = super(EVSNeutronPluginV2, self).create_subnet(context,
-                                                                      subnet)
-            ipnetname = db_subnet['name']
-            if not ipnetname:
-                ipnetname = None
-            evsname = db_subnet['network_id']
-            tenantname = db_subnet['tenant_id']
-            proplist = ['subnet=%s' % db_subnet['cidr']]
-            defrouter = db_subnet['gateway_ip']
-            if not defrouter:
-                defrouter = '0.0.0.0' if db_subnet['ip_version'] == 4 else '::'
-            proplist.append('defrouter=%s' % defrouter)
-            proplist.append('uuid=%s' % db_subnet['id'])
-            if poolstr:
-                proplist.append('pool=%s' % (poolstr))
-            self._evs_controller_addIPnet(tenantname, evsname, ipnetname,
-                                          ",".join(proplist))
-
-        return db_subnet
-
-    def update_subnet(self, context, id, subnet):
-        LOG.debug(_("Updating Subnet: %s with %s") % (id, subnet))
-        if (set(subnet['subnet'].keys()) - set(('enable_dhcp',
-                                                'allocation_pools',
-                                                'dns_nameservers',
-                                                'ipv6_address_mode',
-                                                'ipv6_ra_mode'))):
-                raise EVSOpNotSupported(_("only following subnet attributes "
-                                          "enable-dhcp, allocation-pool, "
-                                          "dns-nameserver, ipv6-address-mode, "
-                                          "and ipv6-ra-mode can be updated"))
-
-        poolstr = self._subnet_pool_to_evs_pool(subnet['subnet'])
-
-        with context.session.begin(subtransactions=True):
-            # update subnet in DB
-            retval = super(EVSNeutronPluginV2, self).\
-                update_subnet(context, id, subnet)
-            # update EVS IPnet with allocation pool info
-            if poolstr:
-                self._evs_controller_updateIPnet(id, "pool=%s" % poolstr)
-
-        return retval
-
-    def get_subnet(self, context, id, fields=None):
-        LOG.debug(_("Getting subnet: %s"), id)
-        subnet = super(EVSNeutronPluginV2, self).get_subnet(context, id, None)
-        return self._fields(subnet, fields)
-
-    def get_subnets(self, context, filters=None, fields=None,
-                    sorts=None, limit=None, marker=None, page_reverse=False):
-        subnets = super(EVSNeutronPluginV2, self).\
-            get_subnets(context, filters, None, sorts, limit, marker,
-                        page_reverse)
-        return [self._fields(subnet, fields) for subnet in subnets]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_removeIPnet(self, tenantname, evsname, ipnetuuid,
-                                    auto_created_ports):
-        LOG.debug(_("Removing IPnet with id: %s for tenant: %s for evs: %s") %
-                  (ipnetuuid, tenantname, evsname))
-        pat = radcli.ADRGlobPattern({'name': evsname, 'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            if auto_created_ports:
-                LOG.debug(_("Need to remove following ports %s before "
-                            "removing the IPnet") % (auto_created_ports))
-                for port in auto_created_ports:
-                    try:
-                        evs.removeVPort(port['id'], "force=yes")
-                    except radcli.ObjectError as oe:
-                        # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
-                        if oe.get_payload().err == 43:
-                            LOG.debug(_("VPort %s could not be found") %
-                                      (port['id']))
-            evs.removeIPnet(ipnetuuid)
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '42' corresponds to EVS' EVS_ENOENT_IPNET error code
-            if oe.get_payload() is None or oe.get_payload().err == 42:
-                # EVS doesn't have that IPnet, return success to delete
-                # the IPnet from Neutron DB.
-                LOG.debug(_("IPnet could not be found in EVS."))
-                return
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_subnet(self, context, id):
-        subnet = self.get_subnet(context, id)
-        if not subnet:
-            return
-
-        with context.session.begin(subtransactions=True):
-            # get a list of ports automatically created by Neutron
-            auto_created_ports = context.session.query(models_v2.Port).\
-                filter(models_v2.Port.device_owner.
-                       in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)).all()
-            # delete subnet in DB
-            super(EVSNeutronPluginV2, self).delete_subnet(context, id)
-            self._evs_controller_removeIPnet(subnet['tenant_id'],
-                                             subnet['network_id'], id,
-                                             auto_created_ports)
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_createEVS(self, tenantname, evsname, propstr):
-        LOG.debug(_("Adding EVS: %s with properties: %s for tenant: %s") %
-                  (evsname, propstr, tenantname))
-        try:
-            evs = self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                createEVS(propstr, tenantname, evsname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return evs
-
-    def _extend_network_dict(self, network, evs):
-        for prop in evs.props:
-            if prop.name == 'l2-type':
-                network[providernet.NETWORK_TYPE] = prop.value
-            elif prop.name == 'vlanid' or prop.name == 'vni':
-                network[providernet.SEGMENTATION_ID] = int(prop.value)
-
-    def create_network(self, context, network):
-        """Creates a network(EVS) for a given tenant.
-
-        An Elastic Virtual Switch (EVS) is a virtual switch that spans
-        one or more servers (physical machines). It represents an isolated L2
-        segment, and the isolation is implemented either through VLANs or
-        VXLANs. An EVS provides network connectivity between the Virtual
-        Machines connected to it. There are two main resources associated with
-        an EVS: IPnet and VPort.
-        """
-
-        if network['network']['admin_state_up'] is False:
-            raise EVSOpNotSupported(_("setting admin_state_up=False for a "
-                                      "network not supported"))
-
-        if network['network']['shared'] is True:
-            raise EVSOpNotSupported(_("setting shared=True for a "
-                                      "network not supported"))
-
-        evsname = network['network']['name']
-        if not evsname:
-            evsname = None
-
-        tenantname = self._get_tenant_id_for_create(context,
-                                                    network['network'])
-        proplist = []
-        network_type = network['network'][providernet.NETWORK_TYPE]
-        if attributes.is_attr_set(network_type):
-            proplist.append('l2-type=%s' % network_type)
-
-        segment_id = network['network'][providernet.SEGMENTATION_ID]
-        if attributes.is_attr_set(segment_id):
-            if (not attributes.is_attr_set(network_type) or
-                    len(network_type) == 0):
-                raise EVSControllerError(_("provider:network_type must be "
-                                           "specified when provider:"
-                                           "segmentation_id is provided"))
-
-            if network_type == 'vxlan':
-                proplist.append('vni=%d' % segment_id)
-            elif network_type == 'vlan':
-                proplist.append('vlanid=%d' % segment_id)
-            else:
-                raise EVSControllerError(_("specified "
-                                           "provider:network_type '%s' not "
-                                           "supported") % network_type)
-
-        propstr = None
-        if proplist:
-            propstr = ",".join(proplist)
-
-        with context.session.begin(subtransactions=True):
-            # create the network in DB
-            net = super(EVSNeutronPluginV2, self).create_network(context,
-                                                                 network)
-            self._process_l3_create(context, net, network['network'])
-            # if --router:external is not set, the above function does
-            # not update net with router:external set to False
-            if net.get(external_net.EXTERNAL) is None:
-                net[external_net.EXTERNAL] = False
-
-            # create EVS on the EVS controller
-            if propstr:
-                propstr += ",uuid=%s" % net['id']
-            else:
-                propstr = "uuid=%s" % net['id']
-            evs = self._evs_controller_createEVS(tenantname, evsname, propstr)
-
-            # add provider information into net
-            self._extend_network_dict(net, evs)
-
-        return net
-
-    def update_network(self, context, id, network):
-        raise EVSOpNotSupported(_("net-update"))
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_getEVS(self, evsuuid):
-        LOG.debug(_("Getting EVS: %s"), evsuuid)
-        try:
-            evslist = self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                getEVSInfo('evs=%s' % evsuuid)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.getpayload().errmsg)
-        if not evslist:
-            LOG.error(_("EVS framework does not have Neutron network "
-                        "'%s' defined"), evsuuid)
-            return None
-        return evslist[0]
-
-    def get_network(self, context, id, fields=None):
-        with context.session.begin(subtransactions=True):
-            net = super(EVSNeutronPluginV2, self).get_network(context,
-                                                              id, None)
-            # call EVS controller to get provider network information
-            evs = self._evs_controller_getEVS(net['id'])
-            if evs:
-                self._extend_network_dict(net, evs)
-        return self._fields(net, fields)
-
-    def get_networks(self, context, filters=None, fields=None,
-                     sorts=None, limit=None, marker=None, page_reverse=False):
-
-        with context.session.begin(subtransactions=True):
-            nets = super(EVSNeutronPluginV2, self).\
-                get_networks(context, filters, None, sorts, limit, marker,
-                             page_reverse)
-            for net in nets:
-                evs = self._evs_controller_getEVS(net['id'])
-                if evs:
-                    self._extend_network_dict(net, evs)
-        return [self._fields(net, fields) for net in nets]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_deleteEVS(self, tenantname, evsuuid):
-        LOG.debug(_("Removing EVS with id: %s for tenant: %s") %
-                  (evsuuid, tenantname))
-        try:
-            self.rad_connection.\
-                get_object(evsbind.EVSController()).\
-                deleteEVS(evsuuid, tenantname)
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '41' corresponds to EVS' EVS_ENOENT_EVS error code
-            if oe.get_payload() is None or oe.get_payload().err == 41:
-                # EVS doesn't have that EVS, return success to delete
-                # the EVS from Neutron DB.
-                LOG.debug(_("EVS could not be found in EVS backend."))
-                return
-            raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_network(self, context, id):
-        with context.session.begin(subtransactions=True):
-            network = self._get_network(context, id)
-
-            qry_network_ports = context.session.query(models_v2.Port).\
-                filter_by(network_id=id).filter(models_v2.Port.device_owner.
-                                                in_(db_base_plugin_v2.
-                                                    AUTO_DELETE_PORT_OWNERS))
-
-            auto_created_ports = qry_network_ports.all()
-            qry_network_ports.delete(synchronize_session=False)
-
-            port_in_use = context.session.query(models_v2.Port).filter_by(
-                network_id=id).first()
-
-            if port_in_use:
-                raise exceptions.NetworkInUse(net_id=id)
-
-            # clean up subnets
-            subnets = self._get_subnets_by_network(context, id)
-            for subnet in subnets:
-                super(EVSNeutronPluginV2, self).delete_subnet(context,
-                                                              subnet['id'])
-                self._evs_controller_removeIPnet(subnet['tenant_id'],
-                                                 subnet['network_id'],
-                                                 subnet['id'],
-                                                 auto_created_ports)
-
-            context.session.delete(network)
-            self._evs_controller_deleteEVS(network['tenant_id'], id)
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_addVPort(self, tenantname, evsname, vportname,
-                                 propstr):
-        LOG.debug(_("Adding VPort: %s with properties: %s for tenant: %s "
-                    "and for evs: %s") %
-                  (vportname, propstr, tenantname, evsname))
-
-        try:
-            pat = radcli.ADRGlobPattern({'name': evsname,
-                                         'tenant': tenantname})
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            vport = evs.addVPort(propstr, vportname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-        return vport
-
-    @oslo_db_api.wrap_db_retry(max_retries=db.MAX_RETRIES,
-                               retry_on_request=True,
-                               retry_on_deadlock=True)
-    def create_port(self, context, port):
-        """Creates a port(VPort) for a given network(EVS).
-
-         A VPort represents the point of attachment between the VNIC and an
-         EVS. It encapsulates various network configuration parameters such as
-             -- SLAs (maxbw, cos, and priority)
-             -- IP address and
-             -- MAC address, et al
-         This configuration is inherited by the VNIC when it connects to the
-         VPort.
-        """
-        if port['port']['admin_state_up'] is False:
-            raise EVSOpNotSupported(_("setting admin_state_up=False for a "
-                                      "port not supported"))
-
-        with context.session.begin(subtransactions=True):
-            # for external gateway ports and floating ips, tenant_id
-            # is not set, but EVS does not like it.
-            tenant_id = self._get_tenant_id_for_create(context, port['port'])
-            if not tenant_id:
-                network = self._get_network(context,
-                                            port['port']['network_id'])
-                port['port']['tenant_id'] = network['tenant_id']
-            # create the port in the DB
-            db_port = super(EVSNeutronPluginV2, self).create_port(context,
-                                                                  port)
-            # Neutron allows to create a port on a network that doesn't
-            # yet have subnet associated with it, however EVS doesn't
-            # support this.
-            if not db_port['fixed_ips']:
-                raise EVSOpNotSupported(_("creating a port on a network that "
-                                          "does not yet have subnet "
-                                          "associated with it is not "
-                                          "supported"))
-            tenantname = db_port['tenant_id']
-            vportname = db_port['name']
-            if not vportname:
-                vportname = None
-            evs_id = db_port['network_id']
-            proplist = ['macaddr=%s' % db_port['mac_address']]
-            proplist.append('ipaddr=%s' %
-                            db_port['fixed_ips'][0].get('ip_address'))
-            proplist.append('uuid=%s' % db_port['id'])
-
-            self._evs_controller_addVPort(tenantname, evs_id, vportname,
-                                          ",".join(proplist))
-        return db_port
-
-    def update_port(self, context, id, port):
-        # EVS does not allow updating certain attributes, so check for it
-        state = port['port'].get('admin_state_up')
-        if state and state is False:
-            raise EVSOpNotSupported(_("updating port's admin_state_up to "
-                                      "False is not supported"))
-
-        # Get the original port and fail if any attempt is being made
-        # to change fixed_ips of the port since EVS doesn't support it
-        original_port = super(EVSNeutronPluginV2, self).get_port(context, id)
-        original_ips = original_port['fixed_ips']
-        update_ips = port['port'].get('fixed_ips')
-        if (update_ips and
-            (len(update_ips) != 1 or
-             update_ips[0]['subnet_id'] != original_ips[0]['subnet_id'] or
-             update_ips[0]['ip_address'] != original_ips[0]['ip_address'])):
-            raise EVSOpNotSupported(_("updating port's fixed_ips "
-                                      "is not supported"))
-        LOG.debug(_("Updating port %s with %s") % (id, port))
-        db_port = super(EVSNeutronPluginV2, self).update_port(context,
-                                                              id, port)
-        return db_port
-
-    def get_port(self, context, id, fields=None):
-        LOG.debug(_("Getting port: %s"), id)
-        port = super(EVSNeutronPluginV2, self).get_port(context, id, None)
-        return self._fields(port, fields)
-
-    def get_ports(self, context, filters=None, fields=None,
-                  sorts=None, limit=None, marker=None, page_reverse=False):
-        ports = super(EVSNeutronPluginV2, self).\
-            get_ports(context, filters, None, sorts, limit, marker,
-                      page_reverse)
-        return [self._fields(port, fields) for port in ports]
-
-    @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_removeVPort(self, tenantname, evsname, vportuuid):
-        LOG.debug(_("Removing VPort with id: %s for tenant: %s for evs: %s") %
-                  (vportuuid, tenantname, evsname))
-        pat = radcli.ADRGlobPattern({'name': evsname,
-                                     'tenant': tenantname})
-        try:
-            evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            evs.removeVPort(vportuuid, "force=yes")
-        except (radcli.NotFoundError, radcli.ObjectError) as oe:
-            # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
-            if oe.get_payload() is None or oe.get_payload().err == 43:
-                # EVS doesn't have that VPort, return success to delete
-                # the VPort from Neutron DB.
-                LOG.debug(_("VPort could not be found in EVS."))
-            else:
-                raise EVSControllerError(oe.get_payload().errmsg)
-
-    def delete_port(self, context, id, l3_port_check=True):
-        if l3_port_check:
-            self.prevent_l3_port_deletion(context, id)
-        self.disassociate_floatingips(context, id)
-        port = self.get_port(context, id)
-        if not port:
-            return
-        with context.session.begin(subtransactions=True):
-            super(EVSNeutronPluginV2, self).delete_port(context, id)
-            self._evs_controller_removeVPort(port['tenant_id'],
-                                             port['network_id'],
-                                             port['id'])
--- a/components/openstack/neutron/files/evs_plugin.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-[EVS]
-# An URI that specifies an EVS controller. It is of the form
-# ssh://[email protected], where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://[email protected]
-# evs_controller = ssh://[email protected]
--- a/components/openstack/neutron/files/l3_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/l3_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,164 +1,285 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = False
+
+#
+# From neutron.base.agent
+#
 
-# L3 requires that an interface driver be set. Choose the one that best
-# matches your plugin.
-# interface_driver =
+# Name of Open vSwitch bridge to use (string value)
+ovs_integration_bridge = br_int0
 
-# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
-# that supports L3 agent
-# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
-
-# Interface driver for Solaris Open vSwitch
-# interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. (boolean
+# value)
+#ovs_use_veth = false
 
-# Name of Open vSwitch bridge to use
-# ovs_integration_bridge = br_int0
-
-# Use veth for an OVS interface or not.
-# Support kernels with limited namespace support
-# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
-# ovs_use_veth = False
+# MTU setting for device. This option will be removed in Newton. Please use the
+# system-wide segment_mtu setting which the agents will take into account when
+# wiring VIFs. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#network_device_mtu = <None>
 
-# Example of interface_driver option for LinuxBridge
-# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
-
-# Interface driver for Solaris Elastic Virtual Switch (EVS)
-interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
+# The driver used to manage the virtual interface. (string value)
+interface_driver = neutron.agent.solaris.interface.OVSInterfaceDriver
 
-# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
-# iproute2 package that supports namespaces). This option is deprecated and
-# will be removed in a future release, at which point the old behavior of
-# use_namespaces = True will be enforced.
-use_namespaces = False
+# Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
+# commands will fail with ALARMCLOCK error. (integer value)
+#ovs_vsctl_timeout = 10
+
+#
+# From neutron.l3.agent
+#
 
-# If use_namespaces is set as False then the agent can only configure one
-# router.
-# This is done by setting the specific router_id.
-# router_id =
+# The working mode for the agent. Allowed modes are: 'legacy' - this preserves
+# the existing behavior where the L3 agent is deployed on a centralized
+# networking node to provide L3 services like DNAT, and SNAT. Use this mode if
+# you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality and
+# must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this
+# enables centralized SNAT support in conjunction with DVR.  This mode must be
+# used for an L3 agent running on a centralized node (or in single-host
+# deployments, e.g. devstack) (string value)
+# Allowed values: dvr, dvr_snat, legacy
+#agent_mode = legacy
 
-# When external_network_bridge is set, each L3 agent can be associated
-# with no more than one external network. This value should be set to the UUID
-# of that external network. To allow L3 agent support multiple external
-# networks, both the external_network_bridge and gateway_external_network_id
-# must be left empty.
-# gateway_external_network_id =
+# TCP Port used by Neutron metadata namespace proxy. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_port = 9697
 
-# With IPv6, the network used for the external gateway does not need
-# to have an associated subnet, since the automatically assigned
-# link-local address (LLA) can be used. However, an IPv6 gateway address
-# is needed for use as the next-hop for the default route. If no IPv6
-# gateway address is configured here, (and only then) the neutron router
-# will be configured to get its default route from router advertisements (RAs)
-# from the upstream router; in which case the upstream router must also be
-# configured to send these RAs.
-# The ipv6_gateway, when configured, should be the LLA of the interface
-# on the upstream router. If a next-hop using a global unique address (GUA)
-# is desired, it needs to be done via a subnet allocated to the network
-# and not through this parameter.
-# ipv6_gateway =
+# Send this many gratuitous ARPs for HA setup, if less than or equal to 0, the
+# feature is disabled (integer value)
+#send_arp_for_ha = 3
+
+# If non-empty, the l3 agent can only configure a router that has the matching
+# router ID. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#router_id =
 
-# Indicates that this L3 agent should also handle routers that do not have
-# an external network gateway configured.  This option should be True only
-# for a single agent in a Neutron deployment, and may be False for all agents
-# if all routers must have an external network gateway
-# handle_internal_only_routers = True
+# Indicates that this L3 agent should also handle routers that do not have an
+# external network gateway configured. This option should be True only for a
+# single agent in a Neutron deployment, and may be False for all agents if all
+# routers must have an external network gateway. (boolean value)
+#handle_internal_only_routers = true
 
-# Name of bridge used for external network traffic. This should be set to
-# empty value for the linux bridge. when this parameter is set, each L3 agent
-# can be associated with no more than one external network.
-external_network_bridge =
+# When external_network_bridge is set, each L3 agent can be associated with no
+# more than one external network. This value should be set to the UUID of that
+# external network. To allow L3 agent support multiple external networks, both
+# the external_network_bridge and gateway_external_network_id must be left
+# empty. (string value)
+#gateway_external_network_id =
 
-# TCP Port used by Neutron metadata server
-# metadata_port = 9697
-
-# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
-# to disable this feature.
-# send_arp_for_ha = 3
+# With IPv6, the network used for the external gateway does not need to have an
+# associated subnet, since the automatically assigned link-local address (LLA)
+# can be used. However, an IPv6 gateway address is needed for use as the next-
+# hop for the default route. If no IPv6 gateway address is configured here,
+# (and only then) the neutron router will be configured to get its default
+# route from router advertisements (RAs) from the upstream router; in which
+# case the upstream router must also be configured to send these RAs. The
+# ipv6_gateway, when configured, should be the LLA of the interface on the
+# upstream router. If a next-hop using a global unique address (GUA) is
+# desired, it needs to be done via a subnet allocated to the network and not
+# through this parameter.  (string value)
+#ipv6_gateway =
 
-# seconds between re-sync routers' data if needed
-# periodic_interval = 40
-
-# seconds to start to sync routers' data after
-# starting agent
-# periodic_fuzzy_delay = 5
-
-# enable_metadata_proxy, which is true by default, can be set to False
-# if the Nova metadata server is not available
-# enable_metadata_proxy = True
+# Driver used for ipv6 prefix delegation. This needs to be an entry point
+# defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for
+# entry points included with the neutron source. (string value)
+#prefix_delegation_driver = dibbler
 
-# Iptables mangle mark used to mark metadata valid requests
-# metadata_access_mark = 0x1
+# Allow running metadata proxy. (boolean value)
+#enable_metadata_proxy = true
 
-# Iptables mangle mark used to mark ingress from external network
-# external_ingress_mark = 0x2
+# Iptables mangle mark used to mark metadata valid requests. This mark will be
+# masked with 0xffff so that only the lower 16 bits will be used. (string
+# value)
+#metadata_access_mark = 0x1
 
-# router_delete_namespaces, which is false by default, can be set to True if
-# namespaces can be deleted cleanly on the host running the L3 agent.
-# Do not enable this until you understand the problem with the Linux iproute
-# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
-# you are sure that your version of iproute does not suffer from the problem.
-# If True, namespaces will be deleted when a router is destroyed.
-# router_delete_namespaces = False
+# Iptables mangle mark used to mark ingress from external network. This mark
+# will be masked with 0xffff so that only the lower 16 bits will be used.
+# (string value)
+#external_ingress_mark = 0x2
 
-# Timeout for ovs-vsctl commands.
-# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
-# ovs_vsctl_timeout = 10
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux Bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network. (string value)
+external_network_bridge = br_ex0
+
+# Seconds between running periodic tasks (integer value)
+#periodic_interval = 40
 
-# The working mode for the agent. Allowed values are:
-# - legacy: this preserves the existing behavior where the L3 agent is
-#   deployed on a centralized networking node to provide L3 services
-#   like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
-# - dvr: this mode enables DVR functionality, and must be used for an L3
-#   agent that runs on a compute host.
-# - dvr_snat: this enables centralized SNAT support in conjunction with
-#   DVR. This mode must be used for an L3 agent running on a centralized
-#   node (or in single-host deployments, e.g. devstack).
-# agent_mode = legacy
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+
+# Number of RPC worker processes for service (integer value)
+#rpc_workers = 1
 
-# Location to store keepalived and all HA configurations
-# ha_confs_path = $state_path/ha_confs
+# Number of RPC worker processes dedicated to state reports queue (integer
+# value)
+#rpc_state_report_workers = 1
 
-# VRRP authentication type AH/PASS
-# ha_vrrp_auth_type = PASS
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+# Location to store keepalived/conntrackd config files (string value)
+#ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type (string value)
+# Allowed values: AH, PASS
+#ha_vrrp_auth_type = PASS
 
-# VRRP authentication password
-# ha_vrrp_auth_password =
+# VRRP authentication password (string value)
+#ha_vrrp_auth_password = <None>
 
-# The advertisement interval in seconds
-# ha_vrrp_advert_int = 2
+# The advertisement interval in seconds (integer value)
+#ha_vrrp_advert_int = 2
 
-# Name of the datalink that connects to an external network. By default it's
-# set to net0.
-# external_network_datalink = net0
+# Service to handle DHCPv6 Prefix delegation. (string value)
+#pd_dhcp_driver = dibbler
 
-# Allow forwarding of packets between tenant's networks
-# allow_forwarding_between_networks = False
+# Location to store IPv6 RA config files (string value)
+#ra_confs = $state_path/ra
+
+# MinRtrAdvInterval setting for radvd.conf (integer value)
+#min_rtr_adv_interval = 30
 
-# An URI that specifies an EVS controller. It is of the form
-# ssh://[email protected], where user is the username to use to connect
-# to EVS controller specified by hostname. By default it's set to
-# ssh://[email protected]
-# evs_controller = ssh://[email protected]
+# MaxRtrAdvInterval setting for radvd.conf (integer value)
+#max_rtr_adv_interval = 100
 
-# Admin username
+# Allow forwarding of packets between tenant's networks (boolean value)
+#allow_forwarding_between_networks = false
+
+# Admin username (string value)
 admin_user = %SERVICE_USER%
 
-# Admin password
+# Admin password (string value)
 admin_password = %SERVICE_PASSWORD%
 
-# Admin tenant name
-admin_tenant_name = %SERVICE_PASSWORD%
+# Admin tenant name (string value)
+admin_tenant_name = %SERVICE_TENANT_NAME%
 
-# Authentication URL
+# Authentication URL (string value)
 auth_url = http://localhost:5000/v2.0
 
-# The type of authentication to use
-# auth_strategy = keystone
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+
+# Authentication region (string value)
+#auth_region = <None>
+
+# Network service endpoint type to pull from the keystone catalog (string
+# value)
+#endpoint_type = publicURL
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
 
-# Authentication region
-# auth_region = <None>
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
 
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = publicURL
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- a/components/openstack/neutron/files/metadata_agent.ini	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/metadata_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -1,68 +1,178 @@
 [DEFAULT]
-# Show debugging output in log (sets DEBUG log level output)
-# debug = True
+
+#
+# From neutron.metadata.agent
+#
 
-# The Neutron user information for accessing the Neutron API.
-auth_url = http://localhost:5000/v2.0
-auth_region = RegionOne
-# Turn off verification of the certificate for ssl
-# auth_insecure = False
-# Certificate Authority public key (CA cert) file for ssl
-# auth_ca_cert =
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
 
-# Network service endpoint type to pull from the keystone catalog
-# endpoint_type = adminURL
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
 
-# IP address used by Nova metadata server
-# nova_metadata_ip = 127.0.0.1
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
 
-# TCP Port used by Nova metadata server
-# nova_metadata_port = 8775
-
-# Which protocol to use for requests to Nova metadata server, http or https
-# nova_metadata_protocol = http
+# Certificate Authority public key (CA cert) file for ssl (string value)
+#auth_ca_cert = <None>
 
-# Whether insecure SSL connection should be accepted for Nova metadata server
-# requests
-# nova_metadata_insecure = False
+# IP address used by Nova metadata server. (string value)
+#nova_metadata_ip = 127.0.0.1
 
-# Client certificate for nova api, needed when nova api requires client
-# certificates
-# nova_client_cert =
-
-# Private key for nova client certificate
-# nova_client_priv_key =
+# TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
 
 # When proxying metadata requests, Neutron signs the Instance-ID header with a
-# shared secret to prevent spoofing.  You may select any string for a secret,
+# shared secret to prevent spoofing. You may select any string for a secret,
 # but it must match here and in the configuration used by the Nova Metadata
 # Server. NOTE: Nova uses the same config key, but in [neutron] section.
-# metadata_proxy_shared_secret =
+# (string value)
+#metadata_proxy_shared_secret =
+
+# Protocol to access nova metadata, http or https (string value)
+# Allowed values: http, https
+#nova_metadata_protocol = http
 
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
+# Allow to perform insecure SSL (https) requests to nova metadata (boolean
+# value)
+#nova_metadata_insecure = false
+
+# Client certificate for nova metadata api server. (string value)
+#nova_client_cert =
 
-# Metadata Proxy UNIX domain socket mode, 3 values allowed:
-# 'deduce': deduce mode from metadata_proxy_user/group values,
-# 'user': set metadata proxy socket mode to 0o644, to use when
-# metadata_proxy_user is agent effective user or root,
-# 'group': set metadata proxy socket mode to 0o664, to use when
-# metadata_proxy_group is agent effective group,
-# 'all': set metadata proxy socket mode to 0o666, to use otherwise.
-# metadata_proxy_socket_mode = deduce
+# Private key of client certificate. (string value)
+#nova_client_priv_key =
 
-# Number of separate worker processes for metadata server. Defaults to
-# half the number of CPU cores
+# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce
+# mode from metadata_proxy_user/group values, 'user': set metadata proxy socket
+# mode to 0o644, to use when metadata_proxy_user is agent effective user or
+# root, 'group': set metadata proxy socket mode to 0o664, to use when
+# metadata_proxy_group is agent effective group or root, 'all': set metadata
+# proxy socket mode to 0o666, to use otherwise. (string value)
+# Allowed values: deduce, user, group, all
+#metadata_proxy_socket_mode = deduce
+
+# Number of separate worker processes for metadata server (defaults to half of
+# the number of CPUs) (integer value)
 metadata_workers = 1
 
 # Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 4096
+# (integer value)
+#metadata_backlog = 4096
+
+# URL to connect to the cache back end. (string value)
+#cache_url = memory://
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# URL to connect to the cache backend.
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url = memory://?default_ttl=5
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[AGENT]
+
+#
+# From neutron.metadata.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/metering_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,112 @@
+[DEFAULT]
+
+#
+# From neutron.metering.agent
+#
+
+# Metering driver (string value)
+#driver = neutron.services.metering.drivers.noop.noop_driver.NoopMeteringDriver
+
+# Interval between two metering measures (integer value)
+#measure_interval = 30
+
+# Interval between two metering reports (integer value)
+#report_interval = 300
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
--- a/components/openstack/neutron/files/ml2_conf.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = flat,vlan,vxlan
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# In the case of Solaris, 'local' can be achieved by using 'flat' network
-# type and Solaris Etherstubs, so 'local' network type as such is not
-# supported.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = vlan
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = openvswitch
-
-# (ListOpt) Ordered list of extension driver entrypoints
-# to be loaded from the neutron.ml2.extension_drivers namespace.
-# extension_drivers =
-# Example: extension_drivers = anewextensiondriver
-
-# =========== items for MTU selection and advertisement =============
-# (IntOpt) Path MTU.  The maximum permissible size of an unfragmented
-# packet travelling from and to addresses where encapsulated Neutron
-# traffic is sent.  Drivers calculate maximum viable MTU for
-# validating tenant requests based on this value (typically,
-# path_mtu - max encap header size).  If <=0, the path MTU is
-# indeterminate and no calculation takes place.
-# path_mtu = 0
-
-# (IntOpt) Segment MTU.  The maximum permissible size of an
-# unfragmented packet travelling a L2 network segment.  If <=0,
-# the segment MTU is indeterminate and no calculation takes place.
-# segment_mtu = 0
-
-# (ListOpt) Physical network MTUs.  List of mappings of physical
-# network to MTU value.  The format of the mapping is
-# <physnet>:<mtu val>.  This mapping allows specifying a
-# physical network MTU value that differs from the default
-# segment_mtu value.
-# physical_network_mtus =
-# Example: physical_network_mtus = physnet1:1550, physnet2:1500
-# ======== end of items for MTU selection and advertisement =========
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-# flat_networks =
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-# network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-# tunnel_id_ranges =
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-# vni_ranges =
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-# vxlan_group =
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-enable_security_group = False
-
-# Use ipset to speed-up the iptables security groups. Enabling ipset support
-# requires that ipset is installed on L2 agent node.
-enable_ipset = False
--- a/components/openstack/neutron/files/neutron-dhcp-agent	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-dhcp-agent	Wed Sep 07 14:48:41 2016 -0700
@@ -17,13 +17,12 @@
 import ConfigParser
 import os
 import re
+from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 
-from openstack_common import is_ml2_plugin, kill_contract
+from openstack_common import kill_contract
 import smf_include
 
-from subprocess import CalledProcessError, Popen, PIPE, check_call
-
 
 def set_hostmodel(value):
     cmd = ["/usr/sbin/ipadm", "show-prop", "-p", "hostmodel",
@@ -58,9 +57,7 @@
     # 'dh', end with '_0', and in between they are hexadecimal digits.
     prog = re.compile('dh[0-9A-Fa-f\_]{11}_0')
     ret_code = smf_include.SMF_EXIT_OK
-    ovs_bridge = None
-    if is_ml2_plugin():
-        ovs_bridge = get_ovs_bridge()
+    ovs_bridge = get_ovs_bridge()
     for dlname in dlnames:
         if prog.search(dlname) is None:
             continue
@@ -110,7 +107,7 @@
 
 def get_ovs_bridge():
     parser = ConfigParser.ConfigParser()
-    parser.read("/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini")
+    parser.read("/etc/neutron/plugins/ml2/openvswitch_agent.ini")
     try:
         ovs_bridge = parser.get("ovs", "integration_bridge")
     except ConfigParser.NoOptionError:
--- a/components/openstack/neutron/files/neutron-l3-agent	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-l3-agent	Wed Sep 07 14:48:41 2016 -0700
@@ -20,15 +20,12 @@
 from subprocess import CalledProcessError, Popen, PIPE, check_call
 import sys
 
-import netaddr
-from openstack_common import is_ml2_plugin, kill_contract
+from openstack_common import kill_contract
 import smf_include
 
 from neutron.agent.solaris import packetfilter
 from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
-    get_vpn_interfaces
-from neutron_vpnaas.services.vpn.device_drivers.solaris_ipsec import \
-    shutdown_vpn
+    get_vpn_interfaces, shutdown_vpn
 
 
 def set_hostmodel(value):
@@ -65,7 +62,6 @@
     # hexadecimal digits.
     prog = re.compile('l3[ie][0-9A-Fa-f\_]{10}_0')
     retcode = smf_include.SMF_EXIT_OK
-    is_ml2 = is_ml2_plugin()
     for dlname in dlnames:
         if prog.search(dlname) is None:
             continue
@@ -82,11 +78,10 @@
             check_call(["/usr/bin/pfexec", "/usr/sbin/dladm", "delete-vnic",
                         dlname])
             # remove the OVS Port
-            if is_ml2:
-                ovs_bridge = get_ovs_bridge(dlname)
-                if ovs_bridge:
-                    check_call(["/usr/bin/pfexec", "/usr/sbin/ovs-vsctl", "--",
-                                "--if-exists", "del-port", ovs_bridge, dlname])
+            ovs_bridge = get_ovs_bridge(dlname)
+            if ovs_bridge:
+                check_call(["/usr/bin/pfexec", "/usr/sbin/ovs-vsctl", "--",
+                            "--if-exists", "del-port", ovs_bridge, dlname])
         except CalledProcessError as err:
             print "failed to remove datalink '%s' used by L3 agent: %s" % \
                 (dlname, err)
@@ -136,15 +131,9 @@
               "enabled before enabling neutron-l3-agent"
         return smf_include.SMF_EXIT_ERR_CONFIG
 
-    # remove any stale PF rules under _auto/neutron:l3:agent anchor
-    pf = packetfilter.PacketFilter('_auto/neutron:l3:agent')
-    pf.remove_anchor_recursively()
-
     cmd = "/usr/bin/pfexec /usr/lib/neutron/neutron-l3-agent " \
-        "--config-file %s --config-file %s --config-file %s" % \
-        tuple(sys.argv[2:5])
-    if is_ml2_plugin():
-        cmd += " --config-file %s" % sys.argv[5]
+        "--config-file %s --config-file %s --config-file %s " \
+        "--config-file %s" % tuple(sys.argv[2:6])
 
     # The VPNaaS shutdown should unplumb all IP tunnels it created. But
     # be paranoid and check for lingering tunnels created by OpenStack
@@ -173,7 +162,7 @@
 def get_ovs_bridge(dlname):
     # retrieve the right OVS bridge based on the interface name
     if dlname.startswith('l3i'):
-        config_file = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
+        config_file = '/etc/neutron/plugins/ml2/openvswitch_agent.ini'
         section = "ovs"
         option = "integration_bridge"
     else:
--- a/components/openstack/neutron/files/neutron-openvswitch-agent.xml	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-openvswitch-agent.xml	Wed Sep 07 14:48:41 2016 -0700
@@ -76,7 +76,7 @@
         <propval name='config_path' type='astring'
           value='/etc/neutron/neutron.conf'/>
         <propval name='ovs_config_path' type='astring'
-          value='/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'/>
+          value='/etc/neutron/plugins/ml2/openvswitch_agent.ini'/>
       </property_group>
     </instance>
 
--- a/components/openstack/neutron/files/neutron-server	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-server	Wed Sep 07 14:48:41 2016 -0700
@@ -15,19 +15,16 @@
 #    under the License.
 
 import os
+from subprocess import CalledProcessError, check_call
 import sys
 
-from openstack_common import is_ml2_plugin
 import smf_include
-from subprocess import CalledProcessError, check_call
 
 
 def start():
     cfg_files = sys.argv[2:3]
-    if is_ml2_plugin():
-        cfg_files.append("/etc/neutron/plugins/ml2/ml2_conf.ini")
-    else:
-        cfg_files.append("/etc/neutron/plugins/evs/evs_plugin.ini")
+    # It is the ML2 plugin for now, until another plugin is introduced
+    cfg_files.append("/etc/neutron/plugins/ml2/ml2_conf.ini")
 
     # verify paths are valid
     for f in cfg_files:
--- a/components/openstack/neutron/files/neutron-upgrade	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron-upgrade	Wed Sep 07 14:48:41 2016 -0700
@@ -20,23 +20,26 @@
 import sys
 import traceback
 
-import iniparse
 import smf_include
-import sqlalchemy
 
 from openstack_common import alter_mysql_tables, create_backups, modify_conf
 
 
 NEUTRON_CONF_MAPPINGS = {
-    # Deprecated group/name
-    ('DEFAULT', 'nova_api_insecure'): ('nova', 'insecure'),
-    ('DEFAULT', 'nova_ca_certificates_file'): ('nova', 'cafile'),
+    # Deprecated group/name for Liberty
+    ('DEFAULT', 'use_syslog'): (None, None),
+    ('DEFAULT', 'log_format'): (None, None),
+    ('DEFAULT', 'rpc_thread_pool_size'):
+        ('DEFAULT', 'executor_thread_pool_size'),
+    ('ml2_sriov', 'agent_required'): (None, None),
+    # Deprecated group/name for Mitaka
+    ('ml2', 'segment_mtu'): ('DEFAULT', 'global_physnet_mtu'),
     ('DEFAULT', 'nova_region_name'): ('nova', 'region_name'),
-    ('DEFAULT', 'max_request_body_size'):
-        ('oslo_middleware', 'max_request_body_size'),
-    ('DEFAULT', 'use-syslog'): (None, None),
-    ('DEFAULT', 'log-format'): (None, None),
-    ('DEFAULT', 'log_format'): (None, None),
+    ('DEFAULT', 'nova_admin_username'): ('nova', 'username'),
+    ('DEFAULT', 'nova_admin_tenant_id'): ('nova', 'tenant_id'),
+    ('DEFAULT', 'nova_admin_tenant_name'): ('nova', 'tenant_name'),
+    ('DEFAULT', 'nova_admin_password'): ('nova', 'password'),
+    ('DEFAULT', 'nova_admin_auth_url'): ('nova', 'auth_url'),
 }
 
 NEUTRON_CONF_EXCEPTIONS = [
@@ -59,7 +62,7 @@
     ('DEFAULT', 'ovs_integration_bridge'),
     ('DEFAULT', 'interface_driver'),
     ('DEFAULT', 'external_network_bridge'),
-    ('DEFAULT', 'evs_controller'),    
+    ('DEFAULT', 'evs_controller'),
 ]
 
 DHCP_AGENT_EXCEPTIONS = [
@@ -79,6 +82,21 @@
     ('DEFAULT', 'metadata_workers'),
 ]
 
+OPENVSWITCH_AGENT_EXCEPTIONS = [
+    ('ovs', 'integration_bridge'),
+    ('ovs', 'tunnel_bridge'),
+    ('securitygroup', 'enable_security_group'),
+    ('securitygroup', 'enable_ipset'),
+]
+
+ML2_CONF_EXCEPTION = [
+    ('ml2', 'type_drivers'),
+    ('ml2', 'tenant_network_types'),
+    ('ml2', 'mechanism_drivers'),
+    ('securitygroup', 'enable_security_group'),
+    ('securitygroup', 'enable_ipset'),
+]
+
 
 def start():
     # pull out the current version of config/upgrade-id
@@ -98,8 +116,10 @@
         # No need to upgrade
         sys.exit(smf_include.SMF_EXIT_OK)
 
+    # TODO: Kilo EVS check. If upgrade is from Kilo running EVS,
+    # fail the upgrade.
+
     # look for any .new files
-    db_connection = None
     if glob.glob('/etc/neutron/*.new'):
         # the versions are different, so perform an upgrade
         # modify the configuration files
@@ -116,19 +136,20 @@
         modify_conf('/etc/neutron/metadata_agent.ini', mapping=None,
                     exception_list=METADATA_AGENT_EXCEPTIONS)
 
-    config = iniparse.RawConfigParser()
-    config.read('/etc/neutron/neutron.conf')
-    if config.has_section('database'):
-        db_connection = config.get('database', 'connection')
-        engine = sqlalchemy.create_engine(db_connection)
-        if engine.url.username != '%SERVICE_USER%':
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'stamp', 'havana'])
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'upgrade', 'juno'])
-            check_call(['/usr/bin/neutron-db-manage', '--config-file',
-                        '/etc/neutron/neutron.conf', 'upgrade', 'kilo'])
+    # look for any .new files for ml2 plugin
+    if glob.glob('/etc/neutron/plugins/ml2/*.new'):
+        # modify the configuration files
+
+        # backup all the old configuration files
+        create_backups('/etc/neutron/plugins/ml2')
 
+        modify_conf('/etc/neutron/plugins/ml2/openvswitch_agent.ini',
+                    mapping=None,
+                    exception_list=OPENVSWITCH_AGENT_EXCEPTIONS)
+
+        modify_conf('/etc/neutron/plugins/ml2/ml2_conf.ini',
+                    mapping=None,
+                    exception_list=ML2_CONF_EXCEPTION)
 
     # update the current version
     check_call(['/usr/sbin/svccfg', '-s', os.environ['SMF_FMRI'], 'setprop',
--- a/components/openstack/neutron/files/neutron.conf	Wed Sep 07 14:48:41 2016 -0700
+++ b/components/openstack/neutron/files/neutron.conf	Wed Sep 07 14:48:41 2016 -0700
@@ -1,826 +1,1075 @@
 [DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-# verbose = False
+
+#
+# From neutron
+#
 
-# =========Start Global Config Option for Distributed L3 Router===============
-# Setting the "router_distributed" flag to "True" will default to the creation
-# of distributed tenant routers. The admin can override this flag by specifying
-# the type of the router on the create request (admin-only attribute). Default
-# value is "False" to support legacy mode (centralized) routers.
-#
-# router_distributed = False
-#
-# ===========End Global Config Option for Distributed L3 Router===============
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+
+# The host IP to bind to (string value)
+#bind_host = 0.0.0.0
 
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-# debug = False
-
-# Where to store Neutron state files.  This directory must be writable by the
-# user executing the agent.
-# state_path = /var/lib/neutron
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#bind_port = 9696
 
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+#api_extensions_path =
 
-# use_syslog                           -> syslog
-# log_file and log_dir                 -> log_dir/log_file
-# (not log_file) and log_dir           -> log_dir/{binary_name}.log
-# use_stderr                           -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors                       -> notification system
+# The type of authentication to use (string value)
+#auth_strategy = keystone
 
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
+# The core plugin Neutron will use (string value)
+#
+# The ML2 plugin provides support for heterogeneous networking technologies in
+# the cloud.
+core_plugin = ml2
 
-# publish_errors = False
-
-# Address to bind the API server to
-# bind_host = 0.0.0.0
-
-# Port the bind the API server to
-# bind_port = 9696
+# The service plugins Neutron will use (list value)
+#
+# This option must be set when the core_plugin is set to 'ml2' and the
+# supported values are 'router' and 'vpnaas'.
+service_plugins = router
 
-# Path to the extensions.  Note that this can be a colon-separated list of
-# paths.  For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
+
+# How many times Neutron will retry MAC generation (integer value)
+#mac_generation_retries = 16
 
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-# The ML2 plugin provides support for heterogenous networking technologies
-# in the cloud.
-#
-# core_plugin =
-# Example: core_plugin = ml2
-# core_plugin = ml2
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# Allow the usage of the pagination (boolean value)
+#allow_pagination = false
+
+# Allow the usage of the sorting (boolean value)
+#allow_sorting = false
+
+# The maximum number of items returned in a single response, value was
+# 'infinite' or negative integer means no limit (string value)
+#pagination_max_limit = -1
 
-# The EVSNeutronPluginV2 Neutron plugin connects to the Solaris Elastic
-# Virtual Switch framework to provide virtual networking between Solaris
-# Zones.
-core_plugin = neutron.plugins.evs.plugin.EVSNeutronPluginV2
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
 
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# This option must be set when the core_plugin is set to ML2 and the
-# supported values are router and vpnaas.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-# service_plugins = router
+# Maximum number of fixed ips per port. This option is deprecated and will be
+# removed in the N release. (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#max_fixed_ips_per_port = 5
 
-# Paste configuration file
-# api_paste_config = api-paste.ini
-
-# (StrOpt) Hostname to be used by the neutron server, agents and services
-# running on this machine. All the agents and services running on this machine
-# must use the same host value.
-# The default value is hostname of the machine.
-#
-# host =
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
+# Default IPv4 subnet pool to be used for automatic subnet CIDR allocation.
+# Specifies by UUID the pool to be used in case where creation of a subnet is
+# being called without a subnet pool ID. If not set then no pool will be used
+# unless passed explicitly to the subnet create. If no pool is used, then a
+# CIDR must be passed to create a subnet and that subnet will not be allocated
+# from any pool; it will be considered part of the tenant's private address
+# space. This option is deprecated for removal in the N release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#default_ipv4_subnet_pool = <None>
 
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
+# Default IPv6 subnet pool to be used for automatic subnet CIDR allocation.
+# Specifies by UUID the pool to be used in case where creation of a subnet is
+# being called without a subnet pool ID. See the description for
+# default_ipv4_subnet_pool for more information. This option is deprecated for
+# removal in the N release. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#default_ipv6_subnet_pool = <None>
 
-# DVR Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used.  The others will be randomly
-# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
-# avoid mixing them up with MAC's allocated for tenant ports.
-# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
-# The default is 3 octet
-# dvr_base_mac = fa:16:3f:00:00:00
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
 
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times. (integer value)
+# Deprecated group/name - [DEFAULT]/dhcp_lease_time
+#dhcp_lease_duration = 86400
 
-# DHCP Lease duration (in seconds).  Use -1 to
-# tell dnsmasq to use infinite lease times.
-# dhcp_lease_duration = 86400
+# Domain to use for building the hostnames (string value)
+#dns_domain = openstacklocal
 
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
+# Driver for external DNS integration. (string value)
+#external_dns_driver = <None>
 
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Neutron is
-# being used in conjunction with nova security groups
-# allow_overlapping_ips = False
+# Allow sending resource operation notification to DHCP agent (boolean value)
+#dhcp_agent_notification = true
+
+# Allow overlapping IP support in Neutron. Attention: the following parameter
+# MUST be set to False if Neutron is being used in conjunction with Nova
+# security groups. (boolean value)
+#allow_overlapping_ips = false
+
+# Hostname to be used by the Neutron server, agents and services running on
+# this machine. All the agents and services running on this machine must use
+# the same host value. (string value)
+#host = example.domain
+
 # Ensure that configured gateway is on subnet. For IPv6, validate only if
 # gateway is not a link local address. Deprecated, to be removed during the
-# K release, at which point the check will be mandatory.
-# force_gateway_on_subnet = True
+# Newton release, at which point the gateway will not be forced on to subnet.
+# (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#force_gateway_on_subnet = true
 
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# be greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
+# Send notification to nova when port status changes (boolean value)
+#notify_nova_on_port_status_changes = true
 
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
+# Send notification to nova when port data (fixed_ips/floatingip) changes so
+# nova can update its cache. (boolean value)
+#notify_nova_on_port_data_changes = true
 
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
+# Number of seconds between sending events to nova if there are any events to
+# send. (integer value)
+#send_events_interval = 2
 
-# Maximum number of routes per router
-# max_routes = 30
+# If True, advertise network MTU values if core plugin calculates them. MTU is
+# advertised to running instances via DHCP and RA MTU options. (boolean value)
+#advertise_mtu = true
 
-# Default Subnet Pool to be used for IPv4 subnet-allocation.
-# Specifies by UUID the pool to be used in case of subnet-create being called
-# without a subnet-pool ID.  The default of None means that no pool will be
-# used unless passed explicitly to subnet create.  If no pool is used, then a
-# CIDR must be passed to create a subnet and that subnet will not be allocated
-# from any pool; it will be considered part of the tenant's private address
-# space.
-# default_ipv4_subnet_pool =
+# Neutron IPAM (IP address management) driver to use. If ipam_driver is not set
+# (default behavior), no IPAM driver is used. In order to use the reference
+# implementation of Neutron IPAM driver, use 'internal'. (string value)
+#ipam_driver = <None>
 
-# Default Subnet Pool to be used for IPv6 subnet-allocation.
-# Specifies by UUID the pool to be used in case of subnet-create being
-# called without a subnet-pool ID.  Set to "prefix_delegation"
-# to enable IPv6 Prefix Delegation in a PD-capable environment.
-# See the description for default_ipv4_subnet_pool for more information.
-# default_ipv6_subnet_pool =
+# If True, then allow plugins that support it to create VLAN transparent
+# networks. (boolean value)
+#vlan_transparent = false
 
-# =========== items for MTU selection and advertisement =============
-# Advertise MTU.  If True, effort is made to advertise MTU
-# settings to VMs via network methods (ie. DHCP and RA MTU options)
-# when the network's preferred MTU is known.
-# advertise_mtu = False
-# ======== end of items for MTU selection and advertisement =========
+# This will choose the web framework in which to run the Neutron API server.
+# 'pecan' is a new experimental rewrite of the API server. (string value)
+# Allowed values: legacy, pecan
+#web_framework = legacy
 
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down; should be at least twice
-# report_interval, to be sure the agent is down for good
-# agent_down_time = 75
-# ===========  end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling a loadbalancer pool to an lbaas agent
-# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+# MTU of the underlying physical network. Neutron uses this value to calculate
+# MTU for all virtual network components. For flat and VLAN networks, neutron
+# uses this value without modification. For overlay networks such as VXLAN,
+# neutron automatically subtracts the overlay protocol overhead from this
+# value. Defaults to 1500, the standard value for Ethernet. Also consider
+# setting the path_mtu ml2 configuration value to the global_physnet_mtu value
+# when using the ml2 plug-in. Otherwise the global_physnet_mtu value might get
+# overridden by a smaller path_mtu value and hence have no effect on
+# overlay/tunnel networks but only flat and VLAN networks. (integer value)
+# Deprecated group/name - [ml2]/segment_mtu
+#global_physnet_mtu = 1500
 
-# (StrOpt) Representing the resource type whose load is being reported by
-# the agent.
-# This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
-# the server will extract particular load sent as part of its agent configuration object
-# from the agent report state, which is the number of resources being consumed, at
-# every report_interval.
-# dhcp_load_type can be used in combination with network_scheduler_driver =
-# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
-# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
-# be configured to represent the choice for the resource being balanced.
-# Example: dhcp_load_type = networks
-# Values:
-#   networks - number of networks hosted on the agent
-#   subnets -  number of subnets associated with the networks hosted on the agent
-#   ports   -  number of ports associated with the networks hosted on the agent
-# dhcp_load_type = networks
+# Number of backlog requests to configure the socket with (integer value)
+#backlog = 4096
+
+# Number of seconds to keep retrying to listen (integer value)
+#retry_until_window = 30
+
+# Enable SSL on the API server (boolean value)
+#use_ssl = false
+
+# Seconds between running periodic tasks (integer value)
+#periodic_interval = 40
 
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+api_workers = 1
 
-# Allow automatic rescheduling of routers from dead L3 agents with
-# admin_state_up set to True to alive agents.
-# allow_automatic_l3agent_failover = False
+# Number of RPC worker processes for service (integer value)
+#rpc_workers = 1
 
-# Allow automatic removal of networks from dead DHCP agents with
-# admin_state_up set to True.
-# Networks could then be rescheduled if network_auto_schedule is True
-# allow_automatic_dhcp_failover = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
+# Number of RPC worker processes dedicated to state reports queue (integer
+# value)
+#rpc_state_report_workers = 1
 
-# Enable services on agents with admin_state_up False.
-# If this option is False, when admin_state_up of an agent is turned to
-# False, services on it will be disabled. If this option is True, services
-# on agents with admin_state_up False keep available and manual scheduling
-# to such agents is available. Agents with admin_state_up False are not
-# selected for automatic scheduling regardless of this option.
-# enable_services_on_agents_with_admin_state_down = False
-
-# ===========  end of items for agent scheduler extension =====
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
 
-# =========== items for l3 extension ==============
-# Enable high availability for virtual routers.
-# l3_ha = False
 #
-# Maximum number of l3 agents which a HA router will be scheduled on. If it
-# is set to 0 the router will be scheduled on every agent.
-# max_l3_agents_per_router = 3
-#
-# Minimum number of l3 agents which a HA router will be scheduled on. The
-# default value is 2.
-# min_l3_agents_per_router = 2
-#
-# CIDR of the administrative network if HA mode is enabled
-# l3_ha_net_cidr = 169.254.192.0/18
+# From neutron.agent
 #
-# The network type to use when creating the HA network for an HA router.
-# By default or if empty, the first 'tenant_network_types'
-# is used. This is helpful when the VRRP traffic should use a specific
-# network which not the default one.
-# ha_network_type =
-# Example: ha_network_type = flat
-#
-# The physical network name with which the HA network can be created.
-# ha_network_physical_name =
-# Example: ha_network_physical_name = physnet1
-# =========== end of items for l3 extension =======
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
 
-# =========== items for metadata proxy configuration ==============
-# User (uid or name) running metadata proxy after its initialization
-# (if empty: agent effective user)
-# metadata_proxy_user =
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
 
-# Group (gid or name) running metadata proxy after its initialization
-# (if empty: agent effective group)
-# metadata_proxy_group =
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
 
-# Enable/Disable log watch by metadata proxy, it should be disabled when
+# Enable/Disable log watch by metadata proxy. It should be disabled when
 # metadata_proxy_user/group is not allowed to read/write its log file and
-# 'copytruncate' logrotate option must be used if logrotate is enabled on
+# copytruncate logrotate option must be used if logrotate is enabled on
 # metadata proxy log files. Option default value is deduced from
 # metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
-# effective user id/name.
-# metadata_proxy_watch_log =
+# effective user id/name. (boolean value)
+#metadata_proxy_watch_log = <None>
 
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-# =========== end of items for metadata proxy configuration ==============
+#
+# From neutron.db
+#
+
+# Seconds to regard the agent is down; should be at least twice
+# report_interval, to be sure the agent is down for good. (integer value)
+#agent_down_time = 75
 
-# ========== items for VLAN trunking networks ==========
-# Setting this flag to True will allow plugins that support it to
-# create VLAN transparent networks. This flag has no effect for
-# plugins that do not support VLAN transparent networks.
-# vlan_transparent = False
-# ========== end of items for VLAN trunking networks ==========
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn.  The default, 0, runs the
-# worker thread in the current process.  Greater than 0 launches that number of
-# child processes as workers.  The parent process manages them.
-# api_workers = 0
-
-# Number of separate RPC worker processes to spawn.  The default, 0, runs the
-# worker thread in the current process.  Greater than 0 launches that number of
-# child processes as RPC workers.  The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-# rpc_workers = 0
+# Representing the resource type whose load is being reported by the agent.
+# This can be "networks", "subnets" or "ports". When specified (Default is
+# networks), the server will extract particular load sent as part of its agent
+# configuration object from the agent report state, which is the number of
+# resources being consumed, at every report_interval. dhcp_load_type can be used
+# in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler When the
+# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured
+# to represent the choice for the resource being balanced. Example:
+# dhcp_load_type=networks (string value)
+# Allowed values: networks, subnets, ports
+#dhcp_load_type = networks
 
-# Timeout for client connections socket operations. If an
-# incoming connection is idle for this number of seconds it
-# will be closed. A value of '0' means wait forever. (integer
-# value)
-# client_socket_timeout = 900
+# Agent starts with admin_state_up=False when enable_new_agents=False. In that
+# case, user's resources will not be scheduled automatically to the agent until
+# admin changes admin_state_up to True. (boolean value)
+#enable_new_agents = true
 
-# wsgi keepalive option. Determines if connections are allowed to be held open
-# by clients after a request is fulfilled. A value of False will ensure that
-# the socket connection will be explicitly closed once a response has been
-# sent to the client.
-# wsgi_keep_alive = True
+# Maximum number of routes per router (integer value)
+#max_routes = 30
+
+# Define the default value of enable_snat if not provided in
+# external_gateway_info. (boolean value)
+#enable_snat_by_default = true
 
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
+# Driver to use for scheduling network to DHCP agent (string value)
+#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
 
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
+# Allow auto scheduling networks to DHCP agent. (boolean value)
+#network_auto_schedule = true
 
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
+# Automatically remove networks from offline DHCP agents. (boolean value)
+#allow_automatic_dhcp_failover = true
 
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
+# Number of DHCP agents scheduled to host a tenant network. If this number is
+# greater than 1, the scheduler automatically assigns multiple DHCP agents for
+# a given tenant network, providing high availability for DHCP service.
+# (integer value)
+#dhcp_agents_per_network = 1
 
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
+# Enable services on an agent with admin_state_up False. If this option is
+# False, when admin_state_up of an agent is turned False, services on it will
+# be disabled. Agents with admin_state_up False are not selected for automatic
+# scheduling regardless of this option. But manual scheduling to such agents is
+# available if this option is True. (boolean value)
+#enable_services_on_agents_with_admin_state_down = false
 
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-# notify_nova_on_port_status_changes = True
+# The base mac address used for unique DVR instances by Neutron. The first 3
+# octets will remain unchanged. If the 4th octet is not 00, it will also be
+# used. The others will be randomly generated. The 'dvr_base_mac' *must* be
+# different from 'base_mac' to avoid mixing them up with MAC's allocated for
+# tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00.
+# The default is 3 octet (string value)
+#dvr_base_mac = fa:16:3f:00:00:00
 
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update it's cache.
-# notify_nova_on_port_data_changes = True
+# System-wide flag to determine the type of router that tenants can create.
+# Only admin can override. (boolean value)
+#router_distributed = false
 
-# URL for connection to nova (Only supports one nova region currently).
-# nova_url = http://127.0.0.1:8774/v2
+# Driver to use for scheduling router to a default L3 agent (string value)
+#router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
 
-# Name of nova region to use. Useful if keystone manages more than one region
-# nova_region_name =
+# Allow auto scheduling of routers to L3 agent. (boolean value)
+#router_auto_schedule = true
 
-# Username for connection to nova in admin context
-# nova_admin_username =
-
-# The uuid of the admin nova tenant
-# nova_admin_tenant_id =
+# Automatically reschedule routers from offline L3 agents to online L3 agents.
+# (boolean value)
+#allow_automatic_l3agent_failover = false
 
-# The name of the admin nova tenant. If the uuid of the admin nova tenant
-# is set, this is optional.  Useful for cases where the uuid of the admin
-# nova tenant is not available when configuration is being done.
-# nova_admin_tenant_name =
+# Enable HA mode for virtual routers. (boolean value)
+#l3_ha = false
 
-# Password for connection to nova in admin context.
-# nova_admin_password =
+# Maximum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+#max_l3_agents_per_router = 3
 
-# Authorization URL for connection to nova in admin context.
-# nova_admin_auth_url = http://localhost:5000/v2.0
+# Minimum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+#min_l3_agents_per_router = 2
 
-# CA file for novaclient to verify server certificates
-# nova_ca_certificates_file =
+# Subnet used for the l3 HA admin network. (string value)
+#l3_ha_net_cidr = 169.254.192.0/18
 
-# Boolean to control ignoring SSL errors on the nova url
-# nova_api_insecure = False
+# The network type to use when creating the HA network for an HA router. By
+# default or if empty, the first 'tenant_network_types' is used. This is
+# helpful when the VRRP traffic should use a specific network which is not the
+# default one. (string value)
+#l3_ha_network_type =
 
-# Number of seconds between sending events to nova if there are any events to send
-# send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
+# The physical network name with which the HA network can be created. (string
+# value)
+#l3_ha_network_physical_name =
 
 #
-# Options defined in oslo.messaging
+# From neutron.extensions
+#
+
+# Maximum number of allowed address pairs (integer value)
+#max_allowed_address_pair = 10
+
+#
+# From neutron.qos
+#
+
+# Drivers list to use to send the update notification (list value)
+#notification_drivers = message_queue
+
+#
+# From oslo.log
 #
 
-# Use durable queues in amqp. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues=false
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
 
-# Auto-delete queues in amqp. (boolean value)
-# amqp_auto_delete=false
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
 
 # Size of RPC connection pool. (integer value)
-# rpc_conn_pool_size=30
-
-# Qpid broker hostname. (string value)
-# qpid_hostname=localhost
-
-# Qpid broker port. (integer value)
-# qpid_port=5672
-
-# Qpid HA cluster host:port pairs. (list value)
-# qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-# qpid_username=
-
-# Password for Qpid connection. (string value)
-# qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth.
-# (string value)
-# qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats. (integer
-# value)
-# qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-# qpid_protocol=tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-# qpid_tcp_nodelay=true
-
-# The qpid topology version to use.  Version 1 is what was
-# originally used by impl_qpid.  Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-# qpid_topology_version=1
-
-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
-# distributions. (string value)
-# kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled). (string value)
-# kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled). (string value)
-# kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL
-# enabled). (string value)
-# kombu_ssl_ca_certs=
+# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
+#rpc_conn_pool_size = 30
 
-# How long to wait before reconnecting in response to an AMQP
-# consumer cancel notification. (floating point value)
-# kombu_reconnect_delay=1.0
-
-# The RabbitMQ broker address where a single node is used.
-# (string value)
-# rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used.
-# (integer value)
-# rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs. (list value)
-# rabbit_hosts=$rabbit_host:$rabbit_port
-
-# Connect over SSL for RabbitMQ. (boolean value)
-# rabbit_use_ssl=false
-
-# The RabbitMQ userid. (string value)
-# rabbit_userid=guest
-
-# The RabbitMQ password. (string value)
-# rabbit_password=guest
-
-# the RabbitMQ login method (string value)
-# rabbit_login_method=AMQPLAIN
-
-# The RabbitMQ virtual host. (string value)
-# rabbit_virtual_host=/
-
-# How frequently to retry connecting with RabbitMQ. (integer
-# value)
-# rabbit_retry_interval=1
-
-# How long to backoff for between retries when connecting to
-# RabbitMQ. (integer value)
-# rabbit_retry_backoff=2
-
-# Maximum number of RabbitMQ connection retries. Default is 0
-# (infinite retry count). (integer value)
-# rabbit_max_retries=0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
-# this option, you must wipe the RabbitMQ database. (boolean
-# value)
-# rabbit_ha_queues=false
-
-# If passed, use a fake RabbitMQ provider. (boolean value)
-# fake_rabbit=false
-
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet
-# interface, or IP. The "host" option should point or resolve
-# to this address. (string value)
-# rpc_zmq_bind_address=*
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
 
 # MatchMaker driver. (string value)
-# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
 
-# ZeroMQ receiver listening port. (integer value)
-# rpc_zmq_port=9501
+# Type of concurrency used. Either "native" or "eventlet" (string value)
+#rpc_zmq_concurrency = eventlet
 
 # Number of ZeroMQ contexts, defaults to 1. (integer value)
-# rpc_zmq_contexts=1
+#rpc_zmq_contexts = 1
 
-# Maximum number of ingress messages to locally buffer per
-# topic. Default is unlimited. (integer value)
-# rpc_zmq_topic_backlog=
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
 
 # Directory for holding IPC sockets. (string value)
-# rpc_zmq_ipc_dir=/var/run/openstack
+#rpc_zmq_ipc_dir = /var/run/openstack
 
-# Name of this node. Must be a valid hostname, FQDN, or IP
-# address. Must match "host" option, if running Nova. (string
-# value)
-# rpc_zmq_host=oslo
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
 
-# Seconds to wait before a cast expires (TTL). Only supported
-# by impl_zmq. (integer value)
-# rpc_cast_timeout=30
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
 
-# Heartbeat frequency. (integer value)
-# matchmaker_heartbeat_freq=300
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
 
-# Heartbeat time-to-live. (integer value)
-# matchmaker_heartbeat_ttl=600
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
 
-# Size of RPC greenthread pool. (integer value)
-# rpc_thread_pool_size=64
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
 
-# Driver or drivers to handle sending notifications. (multi
-# valued)
-# notification_driver=
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
 
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-# notification_topics=notifications
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Size of executor thread pool. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
 
 # Seconds to wait for a response from a call. (integer value)
-# rpc_response_timeout=60
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration. If
+# not set, we fall back to the rpc_backend option and driver specific
+# configuration. (string value)
+#transport_url = <None>
+
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = neutron
+
+#
+# From oslo.service.wsgi
+#
+
+# File name for the paste.deploy config for api service (string value)
+#api_paste_config = api-paste.ini
+
+# A python format string that is used as the template to generate log lines.
+# The following values can be formatted into it: client_ip, date_time,
+# request_line, status_code, body_length, wall_seconds. (string value)
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s  len: %(body_length)s time: %(wall_seconds).7f
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not
+# supported on OS X. (integer value)
+#tcp_keepidle = 600
+
+# Size of the pool of greenthreads used by wsgi (integer value)
+#wsgi_default_pool_size = 100
+
+# Maximum line size of message headers to be accepted. max_header_line may need
+# to be increased when using large tokens (typically those generated when
+# keystone is configured to use PKI tokens with big service catalogs). (integer
+# value)
+#max_header_line = 16384
+
+# If False, closes the client socket connection explicitly. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for client connections' socket operations. If an incoming connection
+# is idle for this number of seconds it will be closed. A value of '0' means
+# wait forever. (integer value)
+#client_socket_timeout = 900
+
+
+[agent]
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+root_helper =
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+# Root helper daemon application to use when possible. (string value)
+#root_helper_daemon = <None>
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Action to be executed when a child process dies (string value)
+# Allowed values: respawn, exit
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
 
-# A URL representing the messaging driver to use and its full
-# configuration. If not set, we fall back to the rpc_backend
-# option and driver specific configuration. (string value)
-# transport_url=
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[cors.subdomain]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
+
+
+[database]
+
+#
+# From neutron.db
+#
+
+# Database engine for which script will be generated when using offline
+# migration. (string value)
+#engine =
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
+#sqlite_db = oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection = mysql://%SERVICE_USER%:%SERVICE_PASSWORD%@localhost/neutron
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
 
-# The messaging driver to use, defaults to rabbit. Other
-# drivers include qpid and zmq. (string value)
-# rpc_backend=rabbit
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+auth_uri = http://127.0.0.1:5000/v2.0/
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with Identity
+# API Server. (integer value)
+#http_request_max_retries = 3
+
+# Env key for the swift cache. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
 
-# The default exchange under which topics are scoped. May be
-# overridden by an exchange name specified in the
-# transport_url option. (string value)
-# control_exchange=openstack
+# Directory used to cache files related to PKI tokens. (string value)
+signing_dir = $state_path/keystone-signing
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set
+# to -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# Determines the frequency at which the list of revoked tokens is retrieved
+# from the Identity service (in seconds). A high number of revocation events
+# combined with a low cache duration may significantly reduce performance.
+# (integer value)
+#revocation_cache_time = 10
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Allowed values: None, MAC, ENCRYPT
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached
+# server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it
+# if not. "strict" like "permissive" but if the bind type is unknown the token
+# will be rejected. "required" any form of token binding is needed to be
+# allowed. Finally the name of a binding method that must be present in tokens.
+# (string value)
+#enforce_token_bind = permissive
+
+# If true, the revocation list will be checked for cached tokens. This requires
+# that PKI tokens are configured on the identity server. (boolean value)
+#check_revocations_for_cached = false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm
+# or multiple. The algorithms are those supported by Python standard
+# hashlib.new(). The hashes will be tried in the order given, so put the
+# preferred one first for performance. The result of the first hash will be
+# stored in the cache. This will typically be set to multiple values only while
+# migrating from a less secure algorithm to a more secure one. Once all the old
+# tokens are expired this option should be set to a single value for better
+# performance. (list value)
+#hash_algorithms = md5
+
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown value)
+#auth_section = <None>
+
+# Complete admin Identity API endpoint. This should specify the unversioned
+# root endpoint e.g. https://localhost:35357/ (string value)
+identity_uri = http://127.0.0.1:35357/
+
+# Service username. (string value)
+admin_user = %SERVICE_USER%
+
+# Service user password. (string value)
+admin_password = %SERVICE_PASSWORD%
+
+# Service tenant name. (string value)
+admin_tenant_name = %SERVICE_TENANT_NAME%
 
 
 [matchmaker_redis]
 
 #
-# Options defined in oslo.messaging
+# From oslo.messaging
 #
 
 # Host to locate redis. (string value)
-# host=127.0.0.1
+#host = 127.0.0.1
 
-# Use this port to connect to redis host. (integer value)
-# port=6379
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 6379
 
 # Password for Redis server (optional). (string value)
-# password=
+#password =
+
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
 
 
-[matchmaker_ring]
+[nova]
 
 #
-# Options defined in oslo.messaging
+# From neutron
+#
+
+# Name of nova region to use. Useful if keystone manages more than one region.
+# (string value)
+#region_name = <None>
+
+# Type of the nova endpoint to use.  This endpoint will be looked up in the
+# keystone catalog and should be one of public, internal or admin. (string
+# value)
+# Allowed values: public, admin, internal
+#endpoint_type = public
+
+#
+# From nova.auth
 #
 
-# Matchmaker ring file (JSON). (string value)
-# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-# ringfile=/etc/oslo/matchmaker_ring.json
-
-[quotas]
-# Default driver to use for quota checks
-# quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-# quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-# default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-# quota_network = 10
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-# quota_subnet = 10
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-# quota_port = 50
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-# quota_security_group = 10
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-# quota_security_group_rule = 100
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on Openstack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitor = -1
-
-# Number of loadbalancers allowed per tenant. A negative value means unlimited.
-# quota_loadbalancer = 10
-
-# Number of listeners allowed per tenant. A negative value means unlimited.
-# quota_listener = -1
-
-# Number of v2 health monitors allowed per tenant. A negative value means
-# unlimited. These health monitors exist under the lbaas v2 API
-# quota_healthmonitor = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-# Number of firewalls allowed per tenant. A negative value means unlimited.
-# quota_firewall = 1
-
-# Number of firewall policies allowed per tenant. A negative value means
-# unlimited.
-# quota_firewall_policy = 1
-
-# Number of firewall rules allowed per tenant. A negative value means
-# unlimited.
-# quota_firewall_rule = 100
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper =
-
-# Set to true to add comments to generated iptables rules that describe
-# each rule's purpose. (System must support the iptables comments module.)
-# comment_iptables_rules = True
-
-# Root helper daemon application to use when possible.
-# root_helper_daemon =
+# Authentication URL (unknown value)
+auth_url = http://127.0.0.1:5000/v2.0/
 
-# Use the root helper when listing the namespaces on a system. This may not
-# be required depending on the security configuration. If the root helper is
-# not required, set this to False for a performance improvement.
-# use_helper_for_ns_read = True
-
-# The interval to check external processes for failure in seconds (0=disabled)
-# check_child_processes_interval = 60
-
-# Action to take when an external process spawned by an agent dies
-# Values:
-#   respawn - Respawns the external process
-#   exit - Exits the agent
-# check_child_processes_action = respawn
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-# report_interval = 30
-
-# ===========  end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://127.0.0.1:5000/v2.0/
-identity_uri = http://127.0.0.1:35357/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
-signing_dir = $state_path/keystone-signing
-
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:[email protected]:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite://
-# NOTE: In deployment the [database] section and its connection attribute may
-# be set in the corresponding core plugin '.ini' file. However, it is suggested
-# to put the [database] section and its connection attribute in this
-# configuration file.
-connection = mysql://%SERVICE_USER%:%SERVICE_PASSWORD%@localhost/neutron
-
-# Database engine for which script will be generated when using offline
-# migration
-# engine =
-
-# The SQLAlchemy connection string used to connect to the slave database
-# slave_connection =
-
-# This configures the MySQL storage engine. This allows for OpenStack to
-# support different storage engines such as InnoDB, NDB, etc. By Default,
-# this value will be set to InnoDB. For MySQL Cluster, set to NDBCLUSTER.
-# Example: mysql_storage_engine=(string value)
-mysql_storage_engine = InnoDB
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-# max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-# retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size = 10
-
-# Timeout in seconds before idle sql connections are reaped
-# idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-# max_overflow = 20
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout = 10
-
-[nova]
-# Name of the plugin to load
-# auth_plugin =
-
-# Config Section from which to load plugin specific options
-# auth_section =
+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+auth_type = v2password
 
 # PEM encoded Certificate Authority to use when verifying HTTPs connections.
-# cafile =
+# (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used for both
+# the user and project domain in v3 and ignored in v2 authentication. (unknown
+# value)
+#default_domain_id = <None>
+
+# Optional domain name to use with v3 API and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2 authentication.
+# (unknown value)
+#default_domain_name = <None>
 
-# PEM encoded client certificate cert file
-# certfile =
+# Domain ID to scope to (unknown value)
+#domain_id = <None>
+
+# Domain name to scope to (unknown value)
+#domain_name = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
 
-# Verify HTTPS connections.
-# insecure = False
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# User's password (unknown value)
+password = %SERVICE_PASSWORD%
+
+# Domain ID containing project (unknown value)
+#project_domain_id = <None>
 
-# PEM encoded client certificate key file
-# keyfile =
+# Domain name containing project (unknown value)
+#project_domain_name = <None>
+
+# Project ID to scope to (unknown value)
+# Deprecated group/name - [DEFAULT]/tenant-id
+#project_id = <None>
+
+# Project name to scope to (unknown value)
+# Deprecated group/name - [DEFAULT]/tenant-name
+#project_name = <None>
+
+# Tenant ID (unknown value)
+#tenant_id = <None>
+
+# Tenant Name (unknown value)
+tenant_name = %SERVICE_TENANT_NAME%
 
-# Name of nova region to use. Useful if keystone manages more than one region.
-# region_name =
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Trust ID (unknown value)
+#trust_id = <None>
+
+# User's domain id (unknown value)
+#user_domain_id = <None>
 
-# Timeout value for http requests
-# timeout =
+# User's domain name (unknown value)
+#user_domain_name = <None>
+
+# User id (unknown value)
+#user_id = <None>
+
+# Username (unknown value)
+# Deprecated group/name - [DEFAULT]/user-name
+username = %SERVICE_USER%
+
 
 [oslo_concurrency]
 
-# Directory to use for lock files. For security, the specified directory should
-# only be writable by the user running the processes that need locking.
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+# Deprecated group/name - [DEFAULT]/disable_process_locking
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified directory
+# should only be writable by the user running the processes that need locking.
 # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
-# a lock path must be set.
+# a lock path must be set. (string value)
+# Deprecated group/name - [DEFAULT]/lock_path
 lock_path = $state_path/lock
 
-# Enables or disables inter-process locks.
-# disable_process_locking = False
-
-[oslo_policy]
-
-# The JSON file that defines policies.
-# policy_file = policy.json
-
-# Default rule. Enforced when a requested rule is not found.
-# policy_default_rule = default
-
-# Directories where policy configuration files are stored.
-# They can be relative to any directory in the search path defined by the
-# config_dir option, or absolute paths. The file defined by policy_file
-# must exist for these directories to be searched. Missing or empty
-# directories are ignored.
-# policy_dirs = policy.d
 
 [oslo_messaging_amqp]
 
@@ -828,115 +1077,91 @@
 # From oslo.messaging
 #
 
-# Address prefix used when sending to a specific server (string value)
+# address prefix used when sending to a specific server (string value)
 # Deprecated group/name - [amqp1]/server_request_prefix
-# server_request_prefix = exclusive
+#server_request_prefix = exclusive
 
-# Address prefix used when broadcasting to all servers (string value)
+# address prefix used when broadcasting to all servers (string value)
 # Deprecated group/name - [amqp1]/broadcast_prefix
-# broadcast_prefix = broadcast
+#broadcast_prefix = broadcast
 
-# Address prefix when sending to any server in group (string value)
+# address prefix when sending to any server in group (string value)
 # Deprecated group/name - [amqp1]/group_request_prefix
-# group_request_prefix = unicast
+#group_request_prefix = unicast
 
 # Name for the AMQP container (string value)
 # Deprecated group/name - [amqp1]/container_name
-# container_name =
+#container_name = <None>
 
 # Timeout for inactive connections (in seconds) (integer value)
 # Deprecated group/name - [amqp1]/idle_timeout
-# idle_timeout = 0
+#idle_timeout = 0
 
 # Debug: dump AMQP frames to stdout (boolean value)
 # Deprecated group/name - [amqp1]/trace
-# trace = false
+#trace = false
 
-# CA certificate PEM file for verifing server certificate (string value)
+# CA certificate PEM file to verify server certificate (string value)
 # Deprecated group/name - [amqp1]/ssl_ca_file
-# ssl_ca_file =
+#ssl_ca_file =
 
 # Identifying certificate PEM file to present to clients (string value)
 # Deprecated group/name - [amqp1]/ssl_cert_file
-# ssl_cert_file =
+#ssl_cert_file =
 
 # Private key PEM file used to sign cert_file certificate (string value)
 # Deprecated group/name - [amqp1]/ssl_key_file
-# ssl_key_file =
+#ssl_key_file =
 
 # Password for decrypting ssl_key_file (if encrypted) (string value)
 # Deprecated group/name - [amqp1]/ssl_key_password
-# ssl_key_password =
+#ssl_key_password = <None>
 
 # Accept clients using either SSL or plain TCP (boolean value)
 # Deprecated group/name - [amqp1]/allow_insecure_clients
-# allow_insecure_clients = false
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
+
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
+
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
 
 
-[oslo_messaging_qpid]
+[oslo_messaging_notifications]
 
 #
 # From oslo.messaging
 #
 
-# Use durable queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues = false
-
-# Auto-delete queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/amqp_auto_delete
-# amqp_auto_delete = false
-
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-# rpc_conn_pool_size = 30
-
-# Qpid broker hostname. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_hostname
-# qpid_hostname = localhost
-
-# Qpid broker port. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_port
-# qpid_port = 5672
-
-# Qpid HA cluster host:port pairs. (list value)
-# Deprecated group/name - [DEFAULT]/qpid_hosts
-# qpid_hosts = $qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_username
-# qpid_username =
+# The Driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
 
-# Password for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_password
-# qpid_password =
-
-# Space separated list of SASL mechanisms to use for auth. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
-# qpid_sasl_mechanisms =
-
-# Seconds between connection keepalive heartbeats. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_heartbeat
-# qpid_heartbeat = 60
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
 
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_protocol
-# qpid_protocol = tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
-# qpid_tcp_nodelay = true
-
-# The number of prefetched messages held by receiver. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
-# qpid_receiver_capacity = 1
-
-# The qpid topology version to use.  Version 1 is what was originally used by
-# impl_qpid.  Version 2 includes some backwards-incompatible changes that allow
-# broker federation to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_topology_version
-# qpid_topology_version = 1
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
 
 
 [oslo_messaging_rabbit]
@@ -946,90 +1171,337 @@
 #
 
 # Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
 # Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-# amqp_durable_queues = false
+#amqp_durable_queues = false
 
 # Auto-delete queues in AMQP. (boolean value)
 # Deprecated group/name - [DEFAULT]/amqp_auto_delete
-# amqp_auto_delete = false
-
-# Size of RPC connection pool. (integer value)
-# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
-# rpc_conn_pool_size = 30
+#amqp_auto_delete = false
 
 # SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
 # SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
 # distributions. (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_version
-# kombu_ssl_version =
+#kombu_ssl_version =
 
 # SSL key file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
-# kombu_ssl_keyfile =
+#kombu_ssl_keyfile =
 
 # SSL cert file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
-# kombu_ssl_certfile =
+#kombu_ssl_certfile =
 
 # SSL certification authority file (valid only if SSL enabled). (string value)
 # Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
-# kombu_ssl_ca_certs =
+#kombu_ssl_ca_certs =
 
 # How long to wait before reconnecting in response to an AMQP consumer cancel
 # notification. (floating point value)
 # Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
-# kombu_reconnect_delay = 1.0
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
+# be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning to send it its replies.
+# This value should not be longer than rpc_response_timeout. (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
 
 # The RabbitMQ broker address where a single node is used. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_host
-# rabbit_host = localhost
+#rabbit_host = localhost
 
-# The RabbitMQ broker port where a single node is used. (integer value)
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
 # Deprecated group/name - [DEFAULT]/rabbit_port
-# rabbit_port = 5672
+#rabbit_port = 5672
 
 # RabbitMQ HA cluster host:port pairs. (list value)
 # Deprecated group/name - [DEFAULT]/rabbit_hosts
-# rabbit_hosts = $rabbit_host:$rabbit_port
+#rabbit_hosts = $rabbit_host:$rabbit_port
 
 # Connect over SSL for RabbitMQ. (boolean value)
 # Deprecated group/name - [DEFAULT]/rabbit_use_ssl
-# rabbit_use_ssl = false
+#rabbit_use_ssl = false
 
 # The RabbitMQ userid. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_userid
-# rabbit_userid = guest
+#rabbit_userid = guest
 
 # The RabbitMQ password. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_password
-# rabbit_password = guest
+#rabbit_password = guest
 
 # The RabbitMQ login method. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_login_method
-# rabbit_login_method = AMQPLAIN
+#rabbit_login_method = AMQPLAIN
 
 # The RabbitMQ virtual host. (string value)
 # Deprecated group/name - [DEFAULT]/rabbit_virtual_host
-# rabbit_virtual_host = /
+#rabbit_virtual_host = /
 
 # How frequently to retry connecting with RabbitMQ. (integer value)
-# rabbit_retry_interval = 1
+#rabbit_retry_interval = 1
 
 # How long to backoff for between retries when connecting to RabbitMQ. (integer
 # value)
 # Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
-# rabbit_retry_backoff = 2
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
 
 # Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
 # count). (integer value)
 # Deprecated group/name - [DEFAULT]/rabbit_max_retries
-# rabbit_max_retries = 0
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except  those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
+#rabbit_ha_queues = false
 
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
-# must wipe the RabbitMQ database. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
-# rabbit_ha_queues = false
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How often times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
 
 # Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
 # Deprecated group/name - [DEFAULT]/fake_rabbit
-# fake_rabbit = false
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to be available (integer
+# value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc reply
+# listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If actual retry attempts is not 0 the rpc
+# request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Resource name(s) that are supported in quota features. This option is now
+# deprecated for removal. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 10
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_subnet = 10
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 50
+
+# Default driver to use for quota checks (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+
+# Keep track in the database of current resource quota usage. Plugins which
+# do not leverage the neutron database should set this flag to False (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
+
+
+[ssl]
+
+#
+# From oslo.service.sslutils
+#
+
+# CA certificate file to use to verify connecting clients. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_ca_file
+#ca_file = <None>
+
+# Certificate file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_cert_file
+#cert_file = <None>
+
+# Private key file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_key_file
+#key_file = <None>
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+#version = <None>
+
+# Sets the list of available ciphers. value should be a string in the OpenSSL
+# cipher list format. (string value)
+#ciphers = <None>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/neutron_vpnaas.conf	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,12 @@
+[DEFAULT]
+
+
+[service_providers]
+
+#
+# From neutron.vpnaas
+#
+
+# Defines providers for advanced services using the format:
+# <service_type>:<name>:<driver>[:default] (multi valued)
+service_provider = VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
--- a/components/openstack/neutron/files/ovs_neutron_plugin.ini	Wed Sep 07 14:48:41 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,159 +0,0 @@
-[ovs]
-# Do not change this parameter unless you have a good reason to.
-# This is the name of the OVS integration bridge. There is one per hypervisor.
-# The integration bridge acts as a virtual "patch bay". All VM VIFs are
-# attached to this bridge and then "patched" according to their network
-# connectivity.
-#
-integration_bridge = br_int0
-
-# Only used for the agent if tunnel_id_ranges is not empty for
-# the server.  In most cases, the default value should be fine.
-#
-# In the case of Solaris, the integration bridge and tunnel bridge must
-# be the same.
-#
-tunnel_bridge = br_int0
-
-# Peer patch port in integration bridge for tunnel bridge
-# int_peer_patch_port = patch-tun
-
-# Peer patch port in tunnel bridge for integration bridge
-# tun_peer_patch_port = patch-int
-
-# Uncomment this line for the agent if tunnel_id_ranges is not
-# empty for the server. Set local-ip to be the local IP address of
-# this hypervisor.
-#
-# local_ip =
-
-# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
-# mapping physical network names to the agent's node-specific OVS
-# bridge names to be used for flat and VLAN networks. The length of
-# bridge names should be no more than 11. Each bridge must
-# exist, and should have a physical network interface configured as a
-# port. All physical networks configured on the server should have
-# mappings to appropriate bridges on each agent.
-#
-# bridge_mappings =
-# Example: bridge_mappings = physnet1:br-eth1
-
-# (BoolOpt) Use veths instead of patch ports to interconnect the integration
-# bridge to physical networks. Support kernel without ovs patch port support
-# so long as it is set to True.
-# use_veth_interconnection = False
-
-# (StrOpt) Which OVSDB backend to use, defaults to 'vsctl'
-# vsctl - The backend based on executing ovs-vsctl
-# native - The backend based on using native OVSDB
-# ovsdb_interface = vsctl
-
-# (StrOpt) The connection string for the native OVSDB backend
-# To enable ovsdb-server to listen on port 6640:
-#   ovs-vsctl set-manager ptcp:6640:127.0.0.1
-# ovsdb_connection = tcp:127.0.0.1:6640
-
-[agent]
-# Agent's polling interval in seconds
-# polling_interval = 2
-
-# Minimize polling by monitoring ovsdb for interface changes
-# minimize_polling = True
-
-# When minimize_polling = True, the number of seconds to wait before
-# respawning the ovsdb monitor after losing communication with it
-# ovsdb_monitor_respawn_interval = 30
-
-# (ListOpt) The types of tenant network tunnels supported by the agent.
-# Setting this will enable tunneling support in the agent. This can be set to
-# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
-# disable tunneling support in the agent.
-# You can specify as many values here as your compute hosts supports.
-#
-# tunnel_types =
-# Example: tunnel_types = gre
-# Example: tunnel_types = vxlan
-# Example: tunnel_types = vxlan, gre
-
-# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
-# default, this will make use of the Open vSwitch default value of '4789' if
-# not specified.
-#
-# vxlan_udp_port =
-# Example: vxlan_udp_port = 8472
-
-# (IntOpt) This is the MTU size of veth interfaces.
-# Do not change unless you have a good reason to.
-# The default MTU size of veth interfaces is 1500.
-# This option has no effect if use_veth_interconnection is False
-# veth_mtu =
-# Example: veth_mtu = 1504
-
-# (BoolOpt) Flag to enable l2-population extension. This option should only be
-# used in conjunction with ml2 plugin and l2population mechanism driver. It'll
-# enable plugin to populate remote ports macs and IPs (using fdb_add/remove
-# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to
-# optimize tunnel management.
-#
-# l2_population = False
-
-# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
-# population ML2 MechanismDriver.
-#
-# arp_responder = False
-
-# Enable suppression of ARP responses that don't match an IP address that
-# belongs to the port from which they originate.
-# Note: This prevents the VMs attached to this agent from spoofing,
-# it doesn't protect them from other devices which have the capability to spoof
-# (e.g. bare metal or VMs attached to agents without this flag set to True).
-# Requires a version of OVS that can match ARP headers.
-#
-# prevent_arp_spoofing = False
-
-# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet
-# carrying GRE/VXLAN tunnel. The default value is True.
-#
-# dont_fragment = True
-
-# (BoolOpt) Set to True on L2 agents to enable support
-# for distributed virtual routing.
-#
-# enable_distributed_routing = False
-
-# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
-# SIGTERM. If value is set to 0, rpc timeout won't be changed"
-#
-# quitting_rpc_timeout = 10
-
-[securitygroup]
-# Firewall driver for realizing neutron security group function.
-# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
-# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
-
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
-enable_security_group = False
-
-#-----------------------------------------------------------------------------
-# Sample Configurations.
-#-----------------------------------------------------------------------------
-#
-# 1. With VLANs on eth1.
-# [ovs]
-# integration_bridge = br-int
-# bridge_mappings = default:br-eth1
-#
-# 2. With GRE tunneling.
-# [ovs]
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-#
-# 3. With VXLAN tunneling.
-# [ovs]
-# integration_bridge = br-int
-# tunnel_bridge = br-tun
-# local_ip = 10.0.0.3
-# [agent]
-# tunnel_types = vxlan
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/linuxbridge_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,197 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# Enable suppression of ARP responses that don't match an IP address that
+# belongs to the port from which they originate. Note: This prevents the VMs
+# attached to this agent from spoofing, it doesn't protect them from other
+# devices which have the capability to spoof (e.g. bare metal or VMs attached
+# to agents without this flag set to True). Spoofing rules will not be added to
+# any ports that have port security disabled. For LinuxBridge, this requires
+# ebtables. For OVS, it requires a version that supports matching ARP headers.
+# This option will be removed in Newton so the only way to disable protection
+# will be via the port security extension. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#prevent_arp_spoofing = true
+
+
+[linux_bridge]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Comma-separated list of <physical_network>:<physical_interface> tuples
+# mapping physical network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical networks
+# listed in network_vlan_ranges on the server should have mappings to
+# appropriate interfaces on each agent. (list value)
+#physical_interface_mappings =
+
+# List of <physical_network>:<physical_bridge> (list value)
+#bridge_mappings =
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+[vxlan]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin
+# using linuxbridge mechanism driver (boolean value)
+#enable_vxlan = true
+
+# TTL for vxlan interface protocol packets. (integer value)
+#ttl = <None>
+
+# TOS for vxlan interface protocol packets. (integer value)
+#tos = <None>
+
+# Multicast group(s) for vxlan interface. A range of group addresses may be
+# specified by using CIDR notation. Specifying a range allows different VNIs to
+# use different group addresses, reducing or eliminating spurious broadcast
+# traffic to the tunnel endpoints. To reserve a unique group for each possible
+# (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on
+# all the agents. (string value)
+#vxlan_group = 224.0.0.1
+
+# Local IP address of the VXLAN endpoints. (IP address value)
+#local_ip = <None>
+
+# Extension to use alongside ml2 plugin's l2population mechanism driver. It
+# enables the plugin to populate VXLAN forwarding table. (boolean value)
+#l2_population = false
+
+# Enable local ARP responder which provides local responses instead of
+# performing ARP broadcast into the overlay. Enabling local ARP responder is
+# not fully compatible with the allowed-address-pairs extension. (boolean value)
+#arp_responder = false
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/macvtap_agent.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,157 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+
+[agent]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# Enable suppression of ARP responses that don't match an IP address that
+# belongs to the port from which they originate. Note: This prevents the VMs
+# attached to this agent from spoofing, it doesn't protect them from other
+# devices which have the capability to spoof (e.g. bare metal or VMs attached
+# to agents without this flag set to True). Spoofing rules will not be added to
+# any ports that have port security disabled. For LinuxBridge, this requires
+# ebtables. For OVS, it requires a version that supports matching ARP headers.
+# This option will be removed in Newton so the only way to disable protection
+# will be via the port security extension. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#prevent_arp_spoofing = true
+
+
+[macvtap]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# Comma-separated list of <physical_network>:<physical_interface> tuples
+# mapping physical network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical networks
+# listed in network_vlan_ranges on the server should have mappings to
+# appropriate interfaces on each agent. (list value)
+#physical_interface_mappings =
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.macvtap.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/plugins/ml2/ml2_conf.ini	Wed Sep 07 14:48:41 2016 -0700
@@ -0,0 +1,240 @@
+[DEFAULT]
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+#debug = false
+
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#verbose = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instan