21978756 addrconf addresses must be created for stateless and slaac Neutron subnets
author Girish Moodalbail <Girish.Moodalbail@oracle.COM>
date Fri, 16 Oct 2015 15:53:02 -0700
changeset 4975 6445e44cfccd
parent 4974 134621002c51
child 4976 0224a36cbe09
21978756 addrconf addresses must be created for stateless and slaac Neutron subnets
21978743 ndpd.conf entries are incorrectly formatted for IPv6 subnets
21919000 neutron-dhcp-agent and neutron-server have timing issues
21918991 database times out when attempting various actions
21682493 Neutron fails due to mysql transaction locks when creating multiple instances
22024767 Remove annoying "Arguments dropped when creating context" logging
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/files/agent/solaris/interface.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/agent/solaris/ra.py
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/neutron.exec_attr
components/openstack/neutron/patches/06-dhcp-port-sched.patch
components/openstack/neutron/patches/07-launchpad-1255441.patch
--- a/components/openstack/neutron/files/agent/evs_l3_agent.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/agent/evs_l3_agent.py	Fri Oct 16 15:53:02 2015 -0700
@@ -79,7 +79,7 @@
         self.process_router(ri)
         if self.conf.enable_metadata_proxy:
             self._destroy_metadata_proxy(ri.router_id, ri.ns_name)
-
+        ra.disable_ipv6_ra(ri.router_id)
         del self.router_info[router_id]
 
     def _get_metadata_proxy_process_manager(self, router_id, ns_name):
@@ -368,32 +368,9 @@
                 dl.create_vnic(self.conf.external_network_datalink,
                                mac_address=mac_address, vid=vid)
             else:
-                # This is to handle HA by Solaris Cluster and is similar to
-                # the code we already have for the DHCP Agent. So, when
-                # the 1st L3 agent is down and the second L3 agent tries to
-                # connect its VNIC to EVS, we will end up in "vport in use"
-                # error. So, we need to reset the vport before we connect
-                # the VNIC to EVS.
-                cmd = ['/usr/sbin/evsadm', 'show-vport', '-f',
-                       'vport=%s' % ex_gw_port['id'], '-co',
-                       'evs,vport,status']
-                stdout = utils.execute(cmd)
-                evsname, vportname, status = stdout.strip().split(':')
-                tenant_id = ex_gw_port['tenant_id']
-                if status == 'used':
-                    cmd = ['/usr/sbin/evsadm', 'reset-vport', '-T', tenant_id,
-                           '%s/%s' % (evsname, vportname)]
-                    utils.execute(cmd)
-
-                # next remove protection setting on the VPort to allow
-                # multiple floating IPs to be configured on the l3e*
-                # interface
-                evsvport = "%s/%s" % (ex_gw_port['network_id'],
-                                      ex_gw_port['id'])
-                cmd = ['/usr/sbin/evsadm', 'set-vportprop', '-T',
-                       tenant_id, '-p', 'protection=none', evsvport]
-                utils.execute(cmd)
-                dl.connect_vnic(evsvport, tenant_id)
+                self.driver.plug(ex_gw_port['tenant_id'],
+                                 ex_gw_port['network_id'],
+                                 ex_gw_port['id'], external_dlname)
 
         self.driver.init_l3(external_dlname, [ex_gw_port['ip_cidr']])
 
@@ -442,16 +419,6 @@
             self.driver.fini_l3(external_dlname)
             self.driver.unplug(external_dlname)
 
-        # remove the EVS VPort associated with external network
-        cmd = ['/usr/sbin/evsadm', 'remove-vport',
-               '-T', ex_gw_port['tenant_id'],
-               '%s/%s' % (ex_gw_port['network_id'], ex_gw_port['id'])]
-        try:
-            utils.execute(cmd)
-        except Exception as err:
-            LOG.error(_("Failed to delete the EVS VPort associated with "
-                        "external network: %s") % err)
-
     def _get_ippool_name(self, mac_address, suffix=None):
         # Generate a unique-name for ippool(1m) from that last 3
         # bytes of mac-address. It is called pool name, but it is
@@ -596,14 +563,5 @@
             self.driver.fini_l3(internal_dlname)
             self.driver.unplug(internal_dlname)
 
-        # remove the EVS VPort associated with internal network
-        cmd = ['/usr/sbin/evsadm', 'remove-vport', '-T', port['tenant_id'],
-               '%s/%s' % (port['network_id'], port['id'])]
-        try:
-            utils.execute(cmd)
-        except Exception as err:
-            LOG.error(_("Failed to delete the EVS VPort associated with "
-                        "internal network: %s") % err)
-
     def routes_updated(self, ri):
         pass
--- a/components/openstack/neutron/files/agent/solaris/dhcp.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/agent/solaris/dhcp.py	Fri Oct 16 15:53:02 2015 -0700
@@ -36,6 +36,7 @@
 from neutron.agent.solaris import net_lib
 from neutron.common import constants
 from neutron.common import exceptions
+from neutron.common import ipv6_utils
 from neutron.openstack.common import importutils
 from neutron.openstack.common import jsonutils
 from neutron.openstack.common import log as logging
@@ -912,17 +913,21 @@
             self.driver.plug(network.tenant_id, network.id,
                              port.id, interface_name)
         ip_cidrs = []
+        addrconf = False
         for fixed_ip in port.fixed_ips:
             subnet = fixed_ip.subnet
-            net = netaddr.IPNetwork(subnet.cidr)
-            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
-            ip_cidrs.append(ip_cidr)
+            if not ipv6_utils.is_auto_address_subnet(subnet):
+                net = netaddr.IPNetwork(subnet.cidr)
+                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
+                ip_cidrs.append(ip_cidr)
+            else:
+                addrconf = True
 
         if (self.conf.enable_isolated_metadata and
                 self.conf.use_namespaces):
             ip_cidrs.append(METADATA_DEFAULT_CIDR)
 
-        self.driver.init_l3(interface_name, ip_cidrs)
+        self.driver.init_l3(interface_name, ip_cidrs, addrconf=addrconf)
 
         if self.conf.use_namespaces:
             self._set_default_route(network, interface_name)
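Note: the addrconf decision above hinges on ipv6_utils.is_auto_address_subnet(). A minimal sketch of that check, assuming the helper keeps its upstream Juno semantics (the real implementation lives in neutron/common/ipv6_utils.py; attribute access mirrors how dhcp.py's subnet objects are used):

    from neutron.common import constants

    def is_auto_address_subnet(subnet):
        # A subnet hands out addresses via addrconf when either its RA mode
        # or its address mode is SLAAC or DHCPv6 stateless.
        modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS]
        return (subnet.ipv6_address_mode in modes or
                subnet.ipv6_ra_mode in modes)

For any other subnet the loop above keeps building static 'address/prefixlen' entries exactly as before.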
--- a/components/openstack/neutron/files/agent/solaris/interface.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/agent/solaris/interface.py	Fri Oct 16 15:53:02 2015 -0700
@@ -14,18 +14,33 @@
 #
 # @author: Girish Moodalbail, Oracle, Inc.
 
+import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
+import rad.client as radcli
+import rad.connect as radcon
+
 from oslo.config import cfg
 
 from neutron.agent.linux import utils
 from neutron.agent.solaris import net_lib
+from neutron.common import exceptions
+from neutron.openstack.common import log as logging
 
 
+LOG = logging.getLogger(__name__)
+
 OPTS = [
     cfg.StrOpt('evs_controller', default='ssh://evsuser@localhost',
                help=_("An URI that specifies an EVS controller"))
 ]
 
 
+class EVSControllerError(exceptions.NeutronException):
+    message = _("EVS controller: %(errmsg)s")
+
+    def __init__(self, evs_errmsg):
+        super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
+
+
 class SolarisVNICDriver(object):
     """Driver used to manage Solaris EVS VNICs.
 
@@ -54,6 +69,9 @@
             raise SystemExit(_("'user' and 'hostname' need to be specified "
                                "for evs_controller"))
 
+        # save the user and EVS controller info
+        self.uh = uh
+        self._rad_connection = None
         # set the controller property for this host
         cmd = ['/usr/sbin/evsadm', 'show-prop', '-co', 'value', '-p',
                'controller']
@@ -63,17 +81,31 @@
                    'controller=%s' % (conf.evs_controller)]
             utils.execute(cmd)
 
+    @property
+    def rad_connection(self):
+        if (self._rad_connection is not None and
+                self._rad_connection._closed is None):
+            return self._rad_connection
+
+        LOG.debug(_("Connecting to EVS Controller at %s as %s") %
+                  (self.uh[1], self.uh[0]))
+
+        self._rad_connection = radcon.connect_ssh(self.uh[1], user=self.uh[0])
+        return self._rad_connection
+
     def fini_l3(self, device_name):
         ipif = net_lib.IPInterface(device_name)
         ipif.delete_ip()
 
-    def init_l3(self, device_name, ip_cidrs):
+    def init_l3(self, device_name, ip_cidrs, addrconf=False):
         """Set the L3 settings for the interface using data from the port.
            ip_cidrs: list of 'X.X.X.X/YY' strings
         """
         ipif = net_lib.IPInterface(device_name)
         for ip_cidr in ip_cidrs:
             ipif.create_address(ip_cidr)
+        if addrconf:
+            ipif.create_addrconf()
 
     # TODO(gmoodalb): - probably take PREFIX?? for L3
     def get_device_name(self, port):
@@ -86,27 +118,42 @@
              namespace=None, prefix=None, protection=False):
         """Plug in the interface."""
 
-        evs_vport = ('%s/%s') % (network_id, port_id)
-        dl = net_lib.Datalink(datalink_name)
+        try:
+            evsc = self.rad_connection.get_object(evsbind.EVSController())
+            vports_info = evsc.getVPortInfo("vport=%s" % (port_id))
+            if vports_info:
+                vport_info = vports_info[0]
+                # This handles the HA case: when the 1st DHCP/L3 agent is
+                # down and the second agent tries to connect its VNIC to EVS,
+                # we get a "vport in use" error. So, we need to reset the
+                # vport before we connect the VNIC to EVS.
+                if vport_info.status == evsbind.VPortStatus.USED:
+                    LOG.debug(_("Retrieving EVS: %s"), vport_info.evsuuid)
+                    pat = radcli.ADRGlobPattern({'uuid': network_id,
+                                                 'tenant': tenant_id})
+                    evs_objs = self.rad_connection.list_objects(evsbind.EVS(),
+                                                                pat)
+                    if evs_objs:
+                        evs = self.rad_connection.get_object(evs_objs[0])
+                        evs.resetVPort(port_id, "force=yes")
 
-        # This is to handle HA when the 1st DHCP/L3 agent is down and
-        # the second DHCP/L3 agent tries to connect its VNIC to EVS, we will
-        # end up in "vport in use" error. So, we need to reset the vport
-        # before we connect the VNIC to EVS.
-        cmd = ['/usr/sbin/evsadm', 'show-vport', '-f',
-               'vport=%s' % port_id, '-co', 'evs,vport,status']
-        stdout = utils.execute(cmd)
-        evsname, vportname, status = stdout.strip().split(':')
-        if status == 'used':
-            cmd = ['/usr/sbin/evsadm', 'reset-vport', '-T', tenant_id,
-                   '%s/%s' % (evsname, vportname)]
-            utils.execute(cmd)
+                if not protection:
+                    LOG.debug(_("Retrieving VPort: %s"), port_id)
+                    pat = radcli.ADRGlobPattern({'uuid': port_id,
+                                                 'tenant': tenant_id,
+                                                 'evsuuid': network_id})
+                    vport_objs = self.rad_connection.list_objects(
+                        evsbind.VPort(), pat)
+                    if vport_objs:
+                        vport = self.rad_connection.get_object(vport_objs[0])
+                        vport.setProperty("protection=none")
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        finally:
+            self.rad_connection.close()
 
-        if not protection:
-            cmd = ['/usr/sbin/evsadm', 'set-vportprop', '-T', tenant_id,
-                   '-p', 'protection=none', evs_vport]
-            utils.execute(cmd)
-
+        dl = net_lib.Datalink(datalink_name)
+        evs_vport = "%s/%s" % (network_id, port_id)
         dl.connect_vnic(evs_vport, tenant_id)
 
     def unplug(self, device_name, namespace=None, prefix=None):
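With the evsadm calls gone, plug() is the single entry point for attaching a VNIC to an EVS VPort. A hedged usage sketch mirroring the call sites in the dhcp.py and evs_l3_agent.py hunks above; it assumes the driver is instantiated with the agent's config, as both agents do via importutils, and the remaining variables come from the agent context:

    driver = SolarisVNICDriver(cfg.CONF)

    # DHCP agent (dhcp.py hunk above): plug the port, then configure L3;
    # addrconf is True when the subnet is SLAAC or DHCPv6 stateless.
    driver.plug(network.tenant_id, network.id, port.id, interface_name)
    driver.init_l3(interface_name, ip_cidrs, addrconf=addrconf)

    # L3 agent (evs_l3_agent.py hunk above): the external gateway port is now
    # plugged the same way instead of running evsadm show-vport/reset-vport.
    driver.plug(ex_gw_port['tenant_id'], ex_gw_port['network_id'],
                ex_gw_port['id'], external_dlname)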
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Fri Oct 16 15:53:02 2015 -0700
@@ -82,8 +82,7 @@
             if temp:
                 cmd.append('-t')
             self.execute_with_pfexec(cmd)
-
-        if self.ipaddr_exists(self._ifname, ipaddr):
+        elif self.ipaddr_exists(self._ifname, ipaddr):
             return
 
         # If an address is IPv6, then to create a static IPv6 address
@@ -111,6 +110,25 @@
 
         self.execute_with_pfexec(cmd)
 
+    def create_addrconf(self, temp=True):
+        if not self.ifname_exists(self._ifname):
+            # create ip interface
+            cmd = ['/usr/sbin/ipadm', 'create-ip', self._ifname]
+            if temp:
+                cmd.append('-t')
+            self.execute_with_pfexec(cmd)
+        else:
+            cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'type', self._ifname]
+            stdout = self.execute(cmd)
+            if 'addrconf' in stdout:
+                return
+
+        cmd = ['/usr/sbin/ipadm', 'create-addr', '-T', 'addrconf',
+               self._ifname]
+        if temp:
+            cmd.append('-t')
+        self.execute_with_pfexec(cmd)
+
     def delete_address(self, ipaddr):
         if not self.ipaddr_exists(self._ifname, ipaddr):
             return
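create_addrconf() follows the same shape as create_address(): create the IP interface if needed, return early if an addrconf address already exists, otherwise add one. A small sketch of what a caller sees, with the underlying ipadm invocations noted as comments (the interface name is illustrative):

    ipif = net_lib.IPInterface('l3i_example_0')
    ipif.create_addrconf()
    # roughly equivalent to (run via pfexec; -t keeps the config temporary):
    #   /usr/sbin/ipadm create-ip l3i_example_0 -t          (only if missing)
    #   /usr/sbin/ipadm create-addr -T addrconf l3i_example_0 -t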
--- a/components/openstack/neutron/files/agent/solaris/ra.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/agent/solaris/ra.py	Fri Oct 16 15:53:02 2015 -0700
@@ -39,24 +39,25 @@
 
 NDP_SMF_FMRI = 'svc:/network/routing/ndp:default'
 
-CONFIG_TEMPLATE = jinja2.Template("""if {{ interface_name }} \
-   AdvSendAdvertisements on \
-   MinRtrAdvInterval 3 \
-   MaxRtrAdvInterval 10 \
-   {% if ra_mode == constants.DHCPV6_STATELESS %}
-   AdvOtherConfigFlag on \
-   {% endif %}
-
-   {% if ra_mode == constants.DHCPV6_STATEFUL %}
-   AdvManagedFlag on
-   {% endif %}
-
-{% if ra_mode in (constants.IPV6_SLAAC, constants.DHCPV6_STATELESS) %}
-prefix {{ prefix }} {{ interface_name }} \
-        AdvOnLinkFlag on \
-        AdvAutonomousFlag on
-{% endif %}
-""")
+# The ndpd daemon's configuration file expects all of an interface's
+# 'key value' pairs to be on the same line as the interface. For example:
+#
+# if net0  AdvSendAdvertisements on MinRtrAdvInterval 3 MaxRtrAdvInterval 10
+# prefix 3234234 net0 AdvOnLinkFlag on AdvAutonomousFlag on
+CONFIG_TEMPLATE = jinja2.Template(
+    """if {{ interface_name }} """
+    """ AdvSendAdvertisements on MinRtrAdvInterval 3 MaxRtrAdvInterval 10 """
+    """ {% if ra_mode == constants.DHCPV6_STATELESS %} """
+    """ AdvOtherConfigFlag on """
+    """ {% endif %} """
+    """ {% if ra_mode == constants.DHCPV6_STATEFUL %} """
+    """ AdvManagedFlag on """
+    """ {% endif %} """
+    """ {% if ra_mode in (constants.IPV6_SLAAC, """
+    """ constants.DHCPV6_STATELESS) %} """
+    """\nprefix {{ prefix }} {{ interface_name }} """
+    """ AdvOnLinkFlag on AdvAutonomousFlag on """
+    """ {% endif %} """)
 
 
 def _generate_ndpd_conf(router_id, router_ports, dev_name_helper):
@@ -68,7 +69,7 @@
         if netaddr.IPNetwork(prefix).version == 6:
             interface_name = dev_name_helper(p['id'])
             ra_mode = p['subnet']['ipv6_ra_mode']
-            buf.write('%s' % CONFIG_TEMPLATE.render(
+            buf.write('%s\n' % CONFIG_TEMPLATE.render(
                 ra_mode=ra_mode,
                 interface_name=interface_name,
                 prefix=prefix,
@@ -82,6 +83,10 @@
     cmd = ['/usr/sbin/svccfg', '-s', NDP_SMF_FMRI, 'setprop',
            'routing/config_file', '=', ndpd_conf]
     utils.execute(cmd)
+    # this is needed to reflect the routing/config_file property
+    # in svcprop output
+    cmd = ['/usr/sbin/svccfg', '-s', NDP_SMF_FMRI, 'refresh']
+    utils.execute(cmd)
     # ndpd SMF service doesn't support refresh method, so we
     # need to restart
     cmd = ['/usr/sbin/svcadm', 'restart', NDP_SMF_FMRI]
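Rendering the rewritten template for a SLAAC port produces the single-line entries in.ndpd expects. A sketch, assuming _generate_ndpd_conf() passes constants=constants as the template requires; whitespace is approximate and the prefix value is illustrative:

    text = CONFIG_TEMPLATE.render(ra_mode=constants.IPV6_SLAAC,
                                  interface_name='l3i_example_0',
                                  prefix='2001:db8:1::/64',
                                  constants=constants)
    # yields, roughly:
    #   if l3i_example_0 AdvSendAdvertisements on MinRtrAdvInterval 3 MaxRtrAdvInterval 10
    #   prefix 2001:db8:1::/64 l3i_example_0 AdvOnLinkFlag on AdvAutonomousFlag on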
--- a/components/openstack/neutron/files/evs/plugin.py	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/evs/plugin.py	Fri Oct 16 15:53:02 2015 -0700
@@ -16,11 +16,15 @@
 #
 # @author: Girish Moodalbail, Oracle, Inc.
 
+import time
+
 import rad.client as radcli
 import rad.connect as radcon
 import rad.bindings.com.oracle.solaris.rad.evscntl_1 as evsbind
 
 from oslo.config import cfg
+from oslo.db import exception as os_db_exc
+from sqlalchemy import exc as sqla_exc
 
 from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
 from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
@@ -38,6 +42,7 @@
 from neutron.db import external_net_db
 from neutron.db import l3_gwmode_db
 from neutron.db import model_base
+from neutron.db import models_v2
 from neutron.db import quota_db
 from neutron.extensions import external_net
 from neutron.extensions import providernet
@@ -47,6 +52,8 @@
 from neutron.plugins.common import constants as svc_constants
 
 LOG = logging.getLogger(__name__)
+MAX_RETRIES = 10
+RETRY_INTERVAL = 2
 
 evs_controller_opts = [
     cfg.StrOpt('evs_controller', default='ssh://evsuser@localhost',
@@ -286,14 +293,10 @@
             self._evs_controller_addIPnet(tenantname, evsname, ipnetname,
                                           ",".join(proplist))
 
-        # notify dhcp agent of subnet creation
-        self.dhcp_agent_notifier.notify(context, {'subnet': db_subnet},
-                                        'subnet.create.end')
         return db_subnet
 
     def update_subnet(self, context, id, subnet):
         LOG.debug(_("Updating Subnet: %s with %s") % (id, subnet))
-        evs_rpccall_sync = subnet.pop('evs_rpccall_sync', False)
         if (set(subnet['subnet'].keys()) - set(('enable_dhcp',
                                                 'allocation_pools',
                                                 'dns_nameservers',
@@ -314,16 +317,6 @@
             if poolstr:
                 self._evs_controller_updateIPnet(id, "pool=%s" % poolstr)
 
-        # notify dhcp agent of subnet update
-        methodname = 'subnet.update.end'
-        payload = {'subnet': retval}
-        if not evs_rpccall_sync:
-            self.dhcp_agent_notifier.notify(context, payload, methodname)
-        else:
-            msg = self.dhcp_agent_notifier.make_msg(
-                methodname.replace(".", "_"), payload=payload)
-            self.dhcp_agent_notifier.call(context, msg,
-                                          topic=topics.DHCP_AGENT)
         return retval
 
     def get_subnet(self, context, id, fields=None):
@@ -338,49 +331,29 @@
                         page_reverse)
         return [self._fields(subnet, fields) for subnet in subnets]
 
-    def _release_subnet_dhcp_port(self, context, subnet, delete_network):
-        """Release any dhcp port associated with the subnet"""
-        filters = dict(network_id=[subnet['network_id']])
-        portlist = self.get_ports(context, filters)
-
-        if delete_network:
-            # One can delete a network if there is only one port that has a
-            # VNIC attached to it and that port happens to be a DHCP port.
-            ports_with_deviceid = [port for port in portlist
-                                   if port['device_id'] != '']
-            update_subnet = len(ports_with_deviceid) == 1
-        else:
-            # One can delete a subnet if there is only one port and that
-            # port happens to be a DHCP port.
-            update_subnet = len(portlist) == 1
-        if update_subnet:
-            # For IPv6 we need to first reset the IPv6 attributes
-            if subnet['ip_version'] == 6:
-                if (attributes.is_attr_set(subnet.get('ipv6_address_mode'))):
-                    subnet_update = {'subnet':
-                                     {'ipv6_address_mode': None,
-                                      'ipv6_ra_mode': None
-                                      },
-                                     'evs_rpccall_sync': True
-                                     }
-                    self.update_subnet(context, subnet['id'], subnet_update)
-            # the lone port is a dhcp port created by dhcp agent
-            # it must be released before we can delete the subnet
-            subnet_update = {'subnet': {'enable_dhcp': False},
-                             'evs_rpccall_sync': True}
-            self.update_subnet(context, subnet['id'], subnet_update)
-
     @lockutils.synchronized('evs-plugin', 'neutron-')
-    def _evs_controller_removeIPnet(self, tenantname, evsname, ipnetuuid):
+    def _evs_controller_removeIPnet(self, tenantname, evsname, ipnetuuid,
+                                    auto_created_ports):
         LOG.debug(_("Removing IPnet with id: %s for tenant: %s for evs: %s") %
                   (ipnetuuid, tenantname, evsname))
         pat = radcli.ADRGlobPattern({'name': evsname, 'tenant': tenantname})
         try:
             evs = self.rad_connection.get_object(evsbind.EVS(), pat)
+            if auto_created_ports:
+                LOG.debug(_("Need to remove following ports %s before "
+                            "removing the IPnet") % (auto_created_ports))
+                for port in auto_created_ports:
+                    try:
+                        evs.removeVPort(port['id'], "force=yes")
+                    except radcli.ObjectError as oe:
+                        # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
+                        if oe.get_payload().err == 43:
+                            LOG.debug(_("VPort %s could not be found") %
+                                      (port['id']))
             evs.removeIPnet(ipnetuuid)
-        except radcli.ObjectError as oe:
+        except (radcli.NotFoundError, radcli.ObjectError) as oe:
             # '42' corresponds to EVS' EVS_ENOENT_IPNET error code
-            if oe.get_payload().err == 42:
+            if oe.get_payload() is None or oe.get_payload().err == 42:
                 # EVS doesn't have that IPnet, return success to delete
                 # the IPnet from Neutron DB.
                 LOG.debug(_("IPnet could not be found in EVS."))
@@ -392,29 +365,16 @@
         if not subnet:
             return
 
-        # If the subnet is dhcp_enabled, then the dhcp agent would have
-        # created a port connected to this subnet. We need to remove
-        # that port before we can proceed with subnet delete operation.
-        # Since, there is no subnet.delete.start event, we use an another
-        # approach of updating the subnet's enable_dhcp attribute to
-        # False that in turn sends a subnet.udpate notification. This
-        # results in DHCP agent releasing the port.
-        if subnet['enable_dhcp']:
-                self._release_subnet_dhcp_port(context, subnet, False)
         with context.session.begin(subtransactions=True):
+            # get a list of ports automatically created by Neutron
+            auto_created_ports = context.session.query(models_v2.Port).\
+                filter(models_v2.Port.device_owner.
+                       in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)).all()
             # delete subnet in DB
             super(EVSNeutronPluginV2, self).delete_subnet(context, id)
             self._evs_controller_removeIPnet(subnet['tenant_id'],
-                                             subnet['network_id'], id)
-
-        # notify dhcp agent
-        payload = {
-            'subnet': {
-                'network_id': subnet['network_id'],
-                'id': id,
-            }
-        }
-        self.dhcp_agent_notifier.notify(context, payload, 'subnet.delete.end')
+                                             subnet['network_id'], id,
+                                             auto_created_ports)
 
     @lockutils.synchronized('evs-plugin', 'neutron-')
     def _evs_controller_createEVS(self, tenantname, evsname, propstr):
@@ -557,9 +517,9 @@
             self.rad_connection.\
                 get_object(evsbind.EVSController()).\
                 deleteEVS(evsuuid, tenantname)
-        except radcli.ObjectError as oe:
+        except (radcli.NotFoundError, radcli.ObjectError) as oe:
             # '41' corresponds to EVS' EVS_ENOENT_EVS error code
-            if oe.get_payload().err == 41:
+            if oe.get_payload() is None or oe.get_payload().err == 41:
                 # EVS doesn't have that EVS, return success to delete
                 # the EVS from Neutron DB.
                 LOG.debug(_("EVS could not be found in EVS backend."))
@@ -567,28 +527,35 @@
             raise EVSControllerError(oe.get_payload().errmsg)
 
     def delete_network(self, context, id):
-        # Check if it is an external network and whether addresses in that
-        # network are being used for floating ips
-        net = self.get_network(context, id)
-        if net[external_net.EXTERNAL]:
-            filters = dict(network_id=[id])
-            portlist = self.get_ports(context, filters)
-            ports_with_deviceid = [port for port in portlist
-                                   if port['device_id'] != '']
-            if ports_with_deviceid:
+        with context.session.begin(subtransactions=True):
+            network = self._get_network(context, id)
+
+            qry_network_ports = context.session.query(models_v2.Port).\
+                filter_by(network_id=id).filter(models_v2.Port.device_owner.
+                                                in_(db_base_plugin_v2.
+                                                    AUTO_DELETE_PORT_OWNERS))
+
+            auto_created_ports = qry_network_ports.all()
+            qry_network_ports.delete(synchronize_session=False)
+
+            port_in_use = context.session.query(models_v2.Port).filter_by(
+                network_id=id).first()
+
+            if port_in_use:
                 raise exceptions.NetworkInUse(net_id=id)
-        filters = dict(network_id=[id])
-        subnets = self.get_subnets(context, filters=filters)
-        dhcp_subnets = [s for s in subnets if s['enable_dhcp']]
-        for subnet in dhcp_subnets:
-            self._release_subnet_dhcp_port(context, subnet, True)
-        with context.session.begin(subtransactions=True):
-            super(EVSNeutronPluginV2, self).delete_network(context, id)
-            self._evs_controller_deleteEVS(net['tenant_id'], id)
 
-        # notify dhcp agent of network deletion
-        self.dhcp_agent_notifier.notify(context, {'network': {'id': id}},
-                                        'network.delete.end')
+            # clean up subnets
+            subnets = self._get_subnets_by_network(context, id)
+            for subnet in subnets:
+                super(EVSNeutronPluginV2, self).delete_subnet(context,
+                                                              subnet['id'])
+                self._evs_controller_removeIPnet(subnet['tenant_id'],
+                                                 subnet['network_id'],
+                                                 subnet['id'],
+                                                 auto_created_ports)
+
+            context.session.delete(network)
+            self._evs_controller_deleteEVS(network['tenant_id'], id)
 
     @lockutils.synchronized('evs-plugin', 'neutron-')
     def _evs_controller_addVPort(self, tenantname, evsname, vportname,
@@ -606,6 +573,41 @@
             raise EVSControllerError(oe.get_payload().errmsg)
         return vport
 
+    def _create_port_db(self, context, port):
+        with context.session.begin(subtransactions=True):
+            # for external gateway ports and floating ips, tenant_id
+            # is not set, but EVS does not like it.
+            tenant_id = self._get_tenant_id_for_create(context, port['port'])
+            if not tenant_id:
+                network = self._get_network(context,
+                                            port['port']['network_id'])
+                port['port']['tenant_id'] = network['tenant_id']
+            # create the port in the DB
+            db_port = super(EVSNeutronPluginV2, self).create_port(context,
+                                                                  port)
+            # Neutron allows creating a port on a network that doesn't
+            # yet have a subnet associated with it; however, EVS doesn't
+            # support this.
+            if not db_port['fixed_ips']:
+                raise EVSOpNotSupported(_("creating a port on a network that "
+                                          "does not yet have subnet "
+                                          "associated with it is not "
+                                          "supported"))
+            tenantname = db_port['tenant_id']
+            vportname = db_port['name']
+            if not vportname:
+                vportname = None
+            evs_id = db_port['network_id']
+            proplist = ['macaddr=%s' % db_port['mac_address']]
+            proplist.append('ipaddr=%s' %
+                            db_port['fixed_ips'][0].get('ip_address'))
+            proplist.append('uuid=%s' % db_port['id'])
+
+            self._evs_controller_addVPort(tenantname, evs_id, vportname,
+                                          ",".join(proplist))
+
+        return db_port
+
     def create_port(self, context, port):
         """Creates a port(VPort) for a given network(EVS).
 
@@ -621,34 +623,25 @@
             raise EVSOpNotSupported(_("setting admin_state_up=False for a "
                                       "port not supported"))
 
-        with context.session.begin(subtransactions=True):
-            # for external gateway ports and floating ips, tenant_id
-            # is not set, but EVS does not like it.
-            tenant_id = self._get_tenant_id_for_create(context, port['port'])
-            if not tenant_id:
-                network = self.get_network(context, port['port']['network_id'])
-                port['port']['tenant_id'] = network['tenant_id']
-            # create the port in the DB
-            db_port = super(EVSNeutronPluginV2, self).create_port(context,
-                                                                  port)
-
-            tenantname = db_port['tenant_id']
-            vportname = db_port['name']
-            if not vportname:
-                vportname = None
-            evs_id = db_port['network_id']
-            proplist = ['macaddr=%s' % db_port['mac_address']]
-            proplist.append('ipaddr=%s' %
-                            db_port['fixed_ips'][0].get('ip_address'))
-            proplist.append('uuid=%s' % db_port['id'])
-
-            self._evs_controller_addVPort(tenantname, evs_id, vportname,
-                                          ",".join(proplist))
-
-        # notify dhcp agent of port creation
-        self.dhcp_agent_notifier.notify(context, {'port': db_port},
-                                        'port.create.end')
-        return db_port
+        exc = None
+        for attempt in xrange(1, MAX_RETRIES):
+            try:
+                return self._create_port_db(context, port)
+            except os_db_exc.DBDeadlock as exc:
+                LOG.debug(_("Found %s. Restarting the transaction. "
+                            "Attempt: %s") % (exc, attempt))
+                time.sleep(RETRY_INTERVAL)
+            except sqla_exc.OperationalError as exc:
+                if ('timeout' in exc.message.lower() or
+                        'restart' in exc.message.lower()):
+                    LOG.debug(_("Found %s. Restarting the transaction. "
+                                "Attempt: %s") % (exc, attempt))
+                    time.sleep(RETRY_INTERVAL)
+                    continue
+                raise
+        else:
+            assert exc is not None
+            raise exc
 
     def update_port(self, context, id, port):
         # EVS does not allow updating certain attributes, so check for it
@@ -672,9 +665,6 @@
         db_port = super(EVSNeutronPluginV2, self).update_port(context,
                                                               id, port)
 
-        # notify dhcp agent of port update
-        self.dhcp_agent_notifier.notify(context, {'port': db_port},
-                                        'port.update.end')
         return db_port
 
     def get_port(self, context, id, fields=None):
@@ -689,35 +679,6 @@
                       page_reverse)
         return [self._fields(port, fields) for port in ports]
 
-    def notify_l3agent(self, context, port):
-        """ If an L3 agent is using this port, then we need to send
-        a notification to the L3 agent so that it can remove the EVS VPort
-        associated with the Neutron Port. In that case, the EVS Plugin will
-        only remove the Neutron port from the DB, so return False.
-
-        If the port is not used by the L3 agent, then the EVS plugin
-        will remove both the Neutron port and EVS VPort, so return True.
-        """
-
-        device_owner = port['device_owner']
-        if device_owner not in [constants.DEVICE_OWNER_ROUTER_INTF,
-                                constants.DEVICE_OWNER_ROUTER_GW,
-                                constants.DEVICE_OWNER_FLOATINGIP]:
-            return True
-        router_id = port['device_id']
-        port_update = {
-            'port': {
-                'device_id': '',
-                'device_owner': ''
-            }
-        }
-        self.update_port(context, port['id'], port_update)
-        if device_owner in [constants.DEVICE_OWNER_ROUTER_INTF,
-                            constants.DEVICE_OWNER_ROUTER_GW]:
-            self.l3_agent_notifier.routers_updated(context, [router_id])
-            return False
-        return True
-
     @lockutils.synchronized('evs-plugin', 'neutron-')
     def _evs_controller_removeVPort(self, tenantname, evsname, vportuuid):
         LOG.debug(_("Removing VPort with id: %s for tenant: %s for evs: %s") %
@@ -726,32 +687,15 @@
                                      'tenant': tenantname})
         try:
             evs = self.rad_connection.get_object(evsbind.EVS(), pat)
-            evs.removeVPort(vportuuid)
-        except radcli.ObjectError as oe:
-            # '7' corresponds to EVS' EVS_EBUSY_VPORT error code
-            if oe.get_payload().err == 7:
-                # It is possible that the VM is destroyed, but EVS is unaware
-                # of it. So, try to reset the vport. If it succeeds, then call
-                # removeVPort() again.
-                LOG.debug(_("EVS VPort is busy. We will need to reset "
-                            "and then remove"))
-                try:
-                    evs.resetVPort(vportuuid)
-                    evs.removeVPort(vportuuid)
-                except:
-                    # we failed one of the above operations, just return
-                    # the original exception.
-                    pass
-                else:
-                    # the reset and remove succeeded, just return.
-                    return
+            evs.removeVPort(vportuuid, "force=yes")
+        except (radcli.NotFoundError, radcli.ObjectError) as oe:
             # '43' corresponds to EVS' EVS_ENOENT_VPORT error code
-            elif oe.get_payload().err == 43:
+            if oe.get_payload() is None or oe.get_payload().err == 43:
                 # EVS doesn't have that VPort, return success to delete
                 # the VPort from Neutron DB.
                 LOG.debug(_("VPort could not be found in EVS."))
-                return
-            raise EVSControllerError(oe.get_payload().errmsg)
+            else:
+                raise EVSControllerError(oe.get_payload().errmsg)
 
     def delete_port(self, context, id, l3_port_check=True):
         if l3_port_check:
@@ -760,19 +704,8 @@
         port = self.get_port(context, id)
         if not port:
             return
-        del_vport = l3_port_check or self.notify_l3agent(context, port)
         with context.session.begin(subtransactions=True):
             super(EVSNeutronPluginV2, self).delete_port(context, id)
-            if del_vport:
-                self._evs_controller_removeVPort(port['tenant_id'],
-                                                 port['network_id'],
-                                                 port['id'])
-
-        # notify dhcp agent of port deletion
-        payload = {
-            'port': {
-                'network_id': port['network_id'],
-                'id': id,
-            }
-        }
-        self.dhcp_agent_notifier.notify(context, payload, 'port.delete.end')
+            self._evs_controller_removeVPort(port['tenant_id'],
+                                             port['network_id'],
+                                             port['id'])
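The create_port() retry added above isolates the transactional work in _create_port_db() and retries on deadlocks and lock-wait timeouts; note that xrange(1, MAX_RETRIES) makes at most MAX_RETRIES - 1 attempts, so the worst-case wait is roughly (MAX_RETRIES - 1) * RETRY_INTERVAL seconds. A condensed sketch of the same pattern as a hypothetical standalone helper (not part of this change); exception types and the message checks mirror the code above:

    import time

    from oslo.db import exception as os_db_exc
    from sqlalchemy import exc as sqla_exc

    def call_with_db_retry(func, max_retries=10, retry_interval=2):
        exc = None
        for _attempt in xrange(1, max_retries):
            try:
                return func()
            except os_db_exc.DBDeadlock as exc:
                time.sleep(retry_interval)       # deadlock: back off, retry
            except sqla_exc.OperationalError as exc:
                if ('timeout' in exc.message.lower() or
                        'restart' in exc.message.lower()):
                    time.sleep(retry_interval)   # lock wait timeout: retry
                    continue
                raise                            # not a retryable condition
        assert exc is not None
        raise exc                                # retries exhausted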
--- a/components/openstack/neutron/files/neutron.exec_attr	Tue Oct 13 09:20:24 2015 -0700
+++ b/components/openstack/neutron/files/neutron.exec_attr	Fri Oct 16 15:53:02 2015 -0700
@@ -5,7 +5,7 @@
 privs=net_icmpaccess,net_privaddr,net_rawaccess,proc_audit,sys_dl_config
 
 neutron-agent:solaris:cmd:RO::/usr/sbin/ipadm:\
-privs=net_rawaccess,sys_ip_config
+privs={all}\:/system/volatile/in.ndpd_ipadm,net_rawaccess,sys_ip_config
 
 neutron-agent:solaris:cmd:RO::/usr/sbin/route:\
 privs=sys_ip_config
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/06-dhcp-port-sched.patch	Fri Oct 16 15:53:02 2015 -0700
@@ -0,0 +1,69 @@
+This patch backports the fix for upstream bug 1431105, which only landed in Kilo.
+
+commit 05f234481474aa05f59c4af459b4343d21397afc
+Author: Kevin Benton <email address hidden>
+Date: Wed Mar 11 18:32:52 2015 -0700
+
+    Schedule net to a DHCP agt on subnet create
+
+    Change the DHCP notifier behavior to schedule a network
+    to a DHCP agent when a subnet is created rather than
+    waiting for the first port to be created.
+
+    This will reduce the possibility to get a VM port created
+    and have it send a DHCP request before the DHCP agent is
+    ready. Before, the network would be scheduled to an agent
+    as a result of the API call to create the VM port, so the
+    DHCP port wouldn't be created until after the VM port.
+    After this patch, the network will have been scheduled to
+    a DHCP agent before the first VM port is created.
+
+    There is still a possibility that the DHCP agent could be
+    responding so slowly that it doesn't create its port and
+    activate the dnsmasq instance before the VM sends traffic.
+    A proper fix, ensuring that the dnsmasq instance is
+    truly ready to serve requests for a new port, would require
+    significantly more code; the barriers needed (on the subnet
+    creation, port creation, or the nova boot process) are too
+    complex to add this late in the cycle.
+
+    This patch also eliminates the logic in the n1kv plugin that
+    was already doing the same thing.
+
+    Closes-Bug: #1431105
+    Change-Id: I1c1caed0fdda6b801375a07f9252a9127058a07e
+
+--- neutron-2014.2.2/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py	2015-02-05 07:45:33.000000000 -0800
++++ new/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py	2015-10-13 15:48:35.280824405 -0700
+@@ -63,8 +63,8 @@
+                     {'network': {'id': network['id']}}, agent['host'])
+         elif not existing_agents:
+             LOG.warn(_('Unable to schedule network %s: no agents available; '
+-                       'will retry on subsequent port creation events.'),
+-                     network['id'])
++                       'will retry on subsequent port and subnet creation '
++                       'events.'), network['id'])
+         return new_agents + existing_agents
+ 
+     def _get_enabled_agents(self, context, network, agents, method, payload):
+@@ -119,6 +119,7 @@
+ 
+             # schedule the network first, if needed
+             schedule_required = (
++                method == 'subnet_create_end' or
+                 method == 'port_create_end' and
+                 not self._is_reserved_dhcp_port(payload['port']))
+             if schedule_required:
+--- neutron-2014.2.2/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py	2015-02-05 07:45:33.000000000 -0800
++++ new/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py	2015-10-16 13:04:56.539001054 -0700
+@@ -1293,10 +1293,6 @@
+                       self).delete_subnet(context, sub['id'])
+         else:
+             LOG.debug(_("Created subnet: %s"), sub['id'])
+-            if not q_conf.CONF.network_auto_schedule:
+-                # Schedule network to a DHCP agent
+-                net = self.get_network(context, sub['network_id'])
+-                self.schedule_network(context, net)
+             return sub
+ 
+     def update_subnet(self, context, id, subnet):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/07-launchpad-1255441.patch	Fri Oct 16 15:53:02 2015 -0700
@@ -0,0 +1,57 @@
+This upstream patch addresses Launchpad bug 1255441. Although the fix is
+included in Kilo, it has not yet been released for Juno.
+
+commit a8c7db5b9d9bba44660de3c7a64295f9f318b63a
+Author: Assaf Muller <email address hidden>
+Date: Wed Apr 1 09:38:21 2015 -0400
+
+    Remove "Arguments dropped when creating context" logging
+
+    This log was previously reduced from warning to debug.
+    Cinder removed it entirely in:
+    https://bugs.launchpad.net/cinder/+bug/1329156
+
+    The root cause is this:
+    Agent heartbeats use an admin context. The context is serialized
+    with its to_dict method, which exposes 'tenant' and 'project_name'
+    (These are properties of the class that are calculated from other
+    attributes). In the controller, this dict is used to initialize a
+    ContextBase, which does not accept tenant and project_name as arguments,
+    de facto sending those values as key word arguments.
+
+    We can either handle 'tenant' and 'project_name' specially, fix
+    it any other way, or drop the logging entirely. Is this logging
+    ever useful?
+
+    Change-Id: Ifd51b62bae7b96de44f04836015d2ed939bcb650
+    Closes-Bug: #1255441
+
+--- neutron-2014.2.2/neutron/context.py	2015-02-05 07:45:33.000000000 -0800
++++ new/neutron/context.py	2015-10-15 09:34:13.121138271 -0700
+@@ -76,12 +76,6 @@
+         if overwrite or not hasattr(local.store, 'context'):
+             local.store.context = self
+ 
+-        # Log only once the context has been configured to prevent
+-        # format errors.
+-        if kwargs:
+-            LOG.debug(_('Arguments dropped when creating '
+-                        'context: %s'), kwargs)
+-
+     @property
+     def project_id(self):
+         return self.tenant
+--- neutron-2014.2.2/neutron/tests/unit/test_neutron_context.py	2015-02-05 07:45:26.000000000 -0800
++++ new/neutron/tests/unit/test_neutron_context.py	2015-10-16 12:51:55.872425513 -0700
+@@ -41,11 +41,6 @@
+         self.assertIsNone(ctx.tenant_name)
+         self.assertIsNone(ctx.auth_token)
+ 
+-    def test_neutron_context_create_logs_unknown_kwarg(self):
+-        with mock.patch.object(context.LOG, 'debug') as mock_log:
+-            context.Context('user_id', 'tenant_id', foo=None)
+-        self.assertEqual(mock_log.call_count, 1)
+-
+     def test_neutron_context_create_with_name(self):
+         ctx = context.Context('user_id', 'tenant_id',
+                               tenant_name='tenant_name', user_name='user_name')