PSARC/2014/207 OpenStack Glance Update to Havana
author: Drew Fisher <drew.fisher@oracle.com>
Wed, 11 Jun 2014 17:13:12 -0700
changeset 1944 56ac2df1785b
parent 1943 1a27f000029f
child 1945 3dc1935a2189
PSARC/2014/207 OpenStack Glance Update to Havana PSARC/2014/208 OpenStack Cinder Update to Havana PSARC/2014/209 OpenStack Keystone Update to Havana PSARC/2014/210 OpenStack Nova Update to Havana 18416146 Neutron agents (L3 and DHCP) should cleanup resources when they are disabled 18562372 Failed to create a new project under Horizon 18645763 ZFSSA Cinder Driver support 18686327 evs agent silently ignores user-specified pool allocation ranges 18702697 fibre channel volumes should be supported in the cinder volume driver 18734289 nova won't terminate failed kz deployments 18738371 cinder-volume:setup should account for commented-out zfs_volume_base 18738374 cinder-volume:setup should check for existence of configuration file 18826190 nova-compute fails due to nova.utils.to_bytes 18855698 Update OpenStack to Havana 2013.2.3 18855710 Update python-cinderclient to 1.0.9 18855743 Update python-keystoneclient to 0.8.0 18855754 Update python-neutronclient to 2.3.4 18855764 Update python-novaclient to 2.17.0 18855793 Update python-swiftclient to 2.1.0 18856992 External networks can be deleted even when floating IP addresses are in use 18857784 bake in some more openstack configuration 18884923 Incorrect locale facets in python modules for openstack 18913890 the error in _get_view_and_lun may cause the failure of deleting volumes 18943044 Disable 'Security Groups' tab in Horizon dashboard
components/openstack/cinder/Makefile
components/openstack/cinder/cinder.p5m
components/openstack/cinder/files/api-paste.ini
components/openstack/cinder/files/cinder-api.xml
components/openstack/cinder/files/cinder-backup.xml
components/openstack/cinder/files/cinder-db.xml
components/openstack/cinder/files/cinder-scheduler.xml
components/openstack/cinder/files/cinder-volume-setup
components/openstack/cinder/files/cinder-volume.xml
components/openstack/cinder/files/cinder.conf
components/openstack/cinder/files/cinder.exec_attr
components/openstack/cinder/files/cinder.prof_attr
components/openstack/cinder/files/solaris/zfs.py
components/openstack/cinder/files/zfssa/__init__.py
components/openstack/cinder/files/zfssa/cinder.akwf
components/openstack/cinder/files/zfssa/restclient.py
components/openstack/cinder/files/zfssa/zfssaiscsi.py
components/openstack/cinder/files/zfssa/zfssarest.py
components/openstack/cinder/patches/01-noamqplib.patch
components/openstack/cinder/patches/01-requirements.patch
components/openstack/cinder/patches/02-noparamiko.patch
components/openstack/cinder/patches/03-emc_smis_iscsi.patch
components/openstack/cinder/patches/04-launchpad-1236459.patch
components/openstack/cinder/patches/05-launchpad-1252512.patch
components/openstack/cinder/patches/06-launchpad-1233763.patch
components/openstack/cinder/resolve.deps
components/openstack/common/Makefile
components/openstack/common/openstack.p5m
components/openstack/glance/Makefile
components/openstack/glance/files/glance-api.conf
components/openstack/glance/files/glance-api.xml
components/openstack/glance/files/glance-db.xml
components/openstack/glance/files/glance-registry.conf
components/openstack/glance/files/glance-registry.xml
components/openstack/glance/files/glance-scrubber.conf
components/openstack/glance/files/glance-scrubber.xml
components/openstack/glance/files/glance.prof_attr
components/openstack/glance/glance.p5m
components/openstack/glance/patches/01-nopycrypto.patch
components/openstack/glance/patches/02-zfs-uar-formats.patch
components/openstack/glance/patches/03-CVE-2014-0162.patch
components/openstack/glance/patches/04-requirements.patch
components/openstack/glance/patches/05-launchpad-1255556.patch
components/openstack/horizon/Makefile
components/openstack/horizon/files/branding/css/solaris.css
components/openstack/horizon/files/local_settings.py
components/openstack/horizon/files/overrides.py
components/openstack/horizon/horizon.p5m
components/openstack/horizon/patches/01-CVE-2014-0157.patch
components/openstack/horizon/patches/01-remove-nodejs.patch
components/openstack/horizon/patches/02-launchpad-1264228.patch
components/openstack/horizon/patches/02-update-flavor-form-length.patch
components/openstack/horizon/patches/03-CVE-2013-6858.patch
components/openstack/horizon/patches/03-launchpad-1254694.patch
components/openstack/horizon/patches/04-blue-piechart.patch
components/openstack/horizon/patches/04-lauchpad-1187129.patch
components/openstack/horizon/resolve.deps
components/openstack/keystone/Makefile
components/openstack/keystone/files/keystone.conf
components/openstack/keystone/files/keystone.prof_attr
components/openstack/keystone/files/keystone.xml
components/openstack/keystone/keystone.p5m
components/openstack/keystone/patches/01-ec2_token-import-only.patch
components/openstack/keystone/patches/01-launchpad-1244304.patch
components/openstack/keystone/patches/02-launchpad-1178740.patch
components/openstack/keystone/patches/02-remove-nova-depend.patch
components/openstack/keystone/patches/03-CVE-2013-6391.patch
components/openstack/keystone/patches/03-sample-data-sh.patch
components/openstack/keystone/patches/04-CVE-2013-4477.patch
components/openstack/keystone/patches/04-CVE-2014-2828.patch
components/openstack/keystone/patches/05-CVE-2014-2237.patch
components/openstack/keystone/patches/05-requirements.patch
components/openstack/keystone/patches/06-sample-data-sh.patch
components/openstack/keystone/patches/07-CVE-2014-2828.patch
components/openstack/keystone/resolve.deps
components/openstack/neutron/Makefile
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/linux/device.py
components/openstack/neutron/files/agent/solaris/device.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/files/agent/solaris/interface.py
components/openstack/neutron/files/agent/solaris/ipfilters_manager.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/dhcp_agent.ini
components/openstack/neutron/files/evs/db/api.py
components/openstack/neutron/files/evs/db/l3nat.py
components/openstack/neutron/files/evs/db/quotas_db.py
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/evs_plugin.ini
components/openstack/neutron/files/l3_agent.ini
components/openstack/neutron/files/neutron-dhcp-agent
components/openstack/neutron/files/neutron-dhcp-agent.xml
components/openstack/neutron/files/neutron-l3-agent
components/openstack/neutron/files/neutron-l3-agent.xml
components/openstack/neutron/files/neutron-server.xml
components/openstack/neutron/files/neutron.conf
components/openstack/neutron/files/neutron.prof_attr
components/openstack/neutron/files/quantum.conf
components/openstack/neutron/neutron.p5m
components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch
components/openstack/neutron/patches/01-neutron-no-pyudev.patch
components/openstack/neutron/patches/02-dhcp-agent-add-solaris.patch
components/openstack/neutron/patches/02-l3-agent-add-solaris.patch
components/openstack/neutron/patches/03-CVE-2014-0187.patch
components/openstack/neutron/patches/03-l3-agent-add-solaris.patch
components/openstack/neutron/patches/04-CVE-2013-6419.patch
components/openstack/neutron/patches/04-requirements.patch
components/openstack/neutron/patches/05-launchpad-1210121.patch
components/openstack/neutron/patches/06-launchpad-1255441.patch
components/openstack/neutron/resolve.deps
components/openstack/nova/Makefile
components/openstack/nova/files/api-paste.ini
components/openstack/nova/files/nova-api-ec2.xml
components/openstack/nova/files/nova-api-metadata.xml
components/openstack/nova/files/nova-api-osapi-compute.xml
components/openstack/nova/files/nova-cert.xml
components/openstack/nova/files/nova-compute.xml
components/openstack/nova/files/nova-conductor.xml
components/openstack/nova/files/nova-consoleauth.xml
components/openstack/nova/files/nova-novncproxy.xml
components/openstack/nova/files/nova-objectstore.xml
components/openstack/nova/files/nova-scheduler.xml
components/openstack/nova/files/nova.conf
components/openstack/nova/files/nova.exec_attr
components/openstack/nova/files/nova.prof_attr
components/openstack/nova/files/release
components/openstack/nova/files/solariszones/driver.py
components/openstack/nova/nova.p5m
components/openstack/nova/patches/02-noamqplib.patch
components/openstack/nova/patches/02-requirements.patch
components/openstack/nova/patches/03-Solaris-flavors.patch
components/openstack/nova/patches/04-CVE-2013-4497.patch
components/openstack/nova/patches/04-CVE-2014-0134-partial.patch
components/openstack/nova/patches/05-CVE-2013-4463.patch
components/openstack/nova/patches/05-CVE-2014-0167.patch
components/openstack/nova/patches/06-CVE-2013-6419.patch
components/openstack/nova/patches/06-CVE-2014-2573.patch
components/openstack/nova/patches/07-CVE-2013-7048.patch
components/openstack/nova/patches/08-CVE-2013-7130.patch
components/openstack/nova/resolve.deps
components/openstack/swift/Makefile
components/openstack/swift/files/proxy-server.conf
components/openstack/swift/files/swift-account-auditor.xml
components/openstack/swift/files/swift-account-reaper.xml
components/openstack/swift/files/swift-account-replicator.xml
components/openstack/swift/files/swift-account-server.xml
components/openstack/swift/files/swift-container-auditor.xml
components/openstack/swift/files/swift-container-replicator.xml
components/openstack/swift/files/swift-container-server.xml
components/openstack/swift/files/swift-container-sync.xml
components/openstack/swift/files/swift-container-updater.xml
components/openstack/swift/files/swift-object-auditor.xml
components/openstack/swift/files/swift-object-expirer.xml
components/openstack/swift/files/swift-object-replicator.xml
components/openstack/swift/files/swift-object-server.xml
components/openstack/swift/files/swift-object-updater.xml
components/openstack/swift/files/swift-proxy-server.xml
components/openstack/swift/files/swift-replicator-rsync
components/openstack/swift/files/swift-replicator-rsync.xml
components/openstack/swift/files/swift-smf-method
components/openstack/swift/files/swift.prof_attr
components/openstack/swift/patches/01-CVE-2013-4155.patch
components/openstack/swift/patches/01-CVE-2014-0006.patch
components/openstack/swift/patches/02-CVE-2014-0006.patch
components/openstack/swift/patches/02-requirements.patch
components/openstack/swift/patches/03-CVE-2013-2161.patch
components/openstack/swift/patches/manager.patch
components/openstack/swift/patches/manpages.patch
components/openstack/swift/patches/orphans.patch
components/openstack/swift/patches/recon.patch
components/openstack/swift/patches/test.patch
components/openstack/swift/resolve.deps
components/openstack/swift/swift.p5m
components/python/cinderclient/Makefile
components/python/cinderclient/cinderclient-PYVER.p5m
components/python/cinderclient/resolve.deps
components/python/keystoneclient/Makefile
components/python/keystoneclient/keystoneclient-PYVER.p5m
components/python/neutronclient/Makefile
components/python/neutronclient/neutronclient-PYVER.p5m
components/python/novaclient/Makefile
components/python/novaclient/novaclient-PYVER.p5m
components/python/swiftclient/Makefile
components/python/swiftclient/resolve.deps
components/python/swiftclient/swiftclient-PYVER.p5m
--- a/components/openstack/cinder/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		cinder
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:b3466dbc2ea88eef67f73db2b12ec2e8b66047b2bfa0eea31392e155657bee00
+    sha256:a2740f0a0481139ae21cdb0868bebcce01b9f19832439b7f3056435e75791194
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/cinder
@@ -60,7 +60,13 @@
          $(CP) files/cinder-volume.xml $(PROTO_DIR)/lib/svc/manifest/application/openstack/; \
          $(MKDIR) $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/solaris; \
 	 $(TOUCH) $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/solaris/__init__.py; \
-	 $(CP) files/solaris/zfs.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/solaris); \
+	 $(CP) files/solaris/zfs.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/solaris; \
+         $(MKDIR) $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa; \
+         $(CP) files/zfssa/__init__.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa; \
+         $(CP) files/zfssa/cinder.akwf $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa; \
+         $(CP) files/zfssa/restclient.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa; \
+         $(CP) files/zfssa/zfssaiscsi.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa; \
+         $(CP) files/zfssa/zfssarest.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/cinder/volume/drivers/zfssa); \
 	 $(PYTHON) -m compileall $(PROTO_DIR)/$(PYTHON_VENDOR_PACKAGES)
 
 # common targets
--- a/components/openstack/cinder/cinder.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/cinder.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,10 +28,10 @@
 set name=pkg.summary value="OpenStack Cinder"
 set name=pkg.description \
     value="OpenStack Cinder provides an infrastructure for managing block storage volumes in OpenStack. It allows block devices to be exposed and connected to compute instances for expanded storage, better performance and integration with enterprise storage platforms."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=com.oracle.info.description \
     value="Cinder, the OpenStack block storage service"
-set name=com.oracle.info.tpno value=16268
+set name=com.oracle.info.tpno value=17714
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -40,20 +40,18 @@
 set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
 set name=info.upstream value="OpenStack <openstack-dev@lists.openstack.org>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
-set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/054
+set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/054 \
+    value=PSARC/2014/208
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 dir  path=etc/cinder owner=cinder group=cinder mode=0700
-file path=etc/cinder/api-paste.ini owner=cinder group=cinder mode=0644 \
-    overlay=allow preserve=true
+file files/api-paste.ini path=etc/cinder/api-paste.ini owner=cinder \
+    group=cinder mode=0644 overlay=allow preserve=renamenew
 file files/cinder.conf path=etc/cinder/cinder.conf owner=cinder group=cinder \
-    mode=0644 overlay=allow preserve=true
-file cinder/volume/drivers/emc/cinder_emc_config.xml.sample \
-    path=etc/cinder/cinder_emc_config.xml owner=cinder group=cinder mode=0644 \
-    overlay=allow preserve=true
+    mode=0644 overlay=allow preserve=renamenew
 file etc/cinder/logging_sample.conf path=etc/cinder/logging.conf owner=cinder \
-    group=cinder mode=0644 overlay=allow preserve=true
+    group=cinder mode=0644 overlay=allow preserve=renamenew
 file path=etc/cinder/policy.json owner=cinder group=cinder mode=0644 \
-    overlay=allow preserve=true
+    overlay=allow preserve=renamenew
 file files/cinder.auth_attr \
     path=etc/security/auth_attr.d/cloud:openstack:cinder group=sys
 file files/cinder.exec_attr \
@@ -84,6 +82,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/__init__.py
@@ -92,19 +91,27 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/admin_actions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/availability_zones.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/backups.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/extended_snapshot_attributes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/hosts.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/image_create.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/qos_specs_manage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/quota_classes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/quotas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/scheduler_hints.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/services.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/snapshot_actions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/types_extra_specs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/types_manage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_actions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_encryption_metadata.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_host_attribute.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_image_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_mig_status_attribute.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_tenant_attribute.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_transfer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_type_encryption.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/extensions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/middleware/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/middleware/auth.py
@@ -138,24 +145,50 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v2/types.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v2/views/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v2/views/volumes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v2/volume_metadata.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v2/volumes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/versions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/availability_zones.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/backups.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/limits.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/qos_specs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/transfers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/types.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/views/versions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/xmlutil.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/ceph.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/swift.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/tsm.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/rpcapi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/services/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/services/swift.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/README.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/executor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/connector.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/host_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/linuxfc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/linuxscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iscsi/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iscsi/iscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iser/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iser/iser.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/local_dev/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/local_dev/lvm.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/remotefs/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/remotefs/remotefs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/sqlalchemyutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/aggregate_states.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/nova.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/context.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/api.py
@@ -178,40 +211,95 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/008_add_backup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/009_add_snapshot_metadata_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/010_add_transfers_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/011_add_bootable_column.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/012_add_attach_host_column.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/013_add_provider_geometry_column.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/014_add_name_id.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/014_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/015_drop_migrations_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/016_drop_sm_tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/017_add_encryption_information.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/018_add_qos_specs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/019_add_migration_status.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/020_add_volume_admin_metadata_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/models.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/session.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/exception.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/flags.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/image/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/image/glance.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/image/image_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/conf_key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/key.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/not_implemented_key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ar/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/bg_BG/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/bs/LC_MESSAGES/cinder.po
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/cinder.pot
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ca/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/cs/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/da/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/de/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/en_AU/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/en_GB/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/en_US/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/es/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/es_MX/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/fi_FI/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/fil/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/fr/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/hi/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/hr/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/hu/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/id/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/it/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/it_IT/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ja/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ka_GE/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/kn/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ko/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ko_KR/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ms/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/nb/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ne/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/nl_NL/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/pl_PL/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/pt/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/pt_BR/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ro/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ru/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/ru_RU/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/sk/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/sl_SI/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/sw_KE/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/tl/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/tl_PH/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/tr/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/tr_TR/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/uk/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/vi_VN/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/zh_CN/LC_MESSAGES/cinder.po
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/zh_HK/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/locale/zh_TW/LC_MESSAGES/cinder.po
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/README
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/context.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/sqlalchemy/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/sqlalchemy/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/sqlalchemy/session.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/db/sqlalchemy/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/eventlet_backdoor.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/excutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/fileutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/gettextutils.py
@@ -220,6 +308,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/local.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/lockutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/loopingcall.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/network_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/notifier/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/notifier/api.py
@@ -229,8 +318,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/notifier/rpc_notifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/notifier/rpc_notifier2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/notifier/test_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/periodic_task.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/processutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rootwrap/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rootwrap/cmd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rootwrap/filters.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rootwrap/wrapper.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rpc/__init__.py
@@ -245,6 +337,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rpc/matchmaker_redis.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rpc/proxy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rpc/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/rpc/zmq_receiver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/filters/__init__.py
@@ -254,11 +347,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/filters/json_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/weight.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/weights/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/setup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/threadgroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/timeutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/uuidutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/version.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/policy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/quota.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/__init__.py
@@ -276,7 +369,19 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/capacity.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/decorators.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/patterns/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/patterns/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/patterns/linear_flow.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/states.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/task.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/taskflow/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/test.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/transfer/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/transfer/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/units.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/version.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/__init__.py
@@ -284,20 +389,36 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/configuration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/block_device.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_smis_common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_smis_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/iscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/options.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/ssc_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/jsonrpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/options.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/volume.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solaris/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solaris/zfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zadara.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/iscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/cinder.akwf
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/restclient.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/zfssaiscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/zfssarest.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/create_volume/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/qos_specs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/rpcapi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/volume_types.py
@@ -312,6 +433,9 @@
 # flush this out.
 depend type=group fmri=library/python/anyjson-26
 
+# force a dependency on package delivering fcinfo(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/fcinfo
+
 # force a dependency on package delivering itadm(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/itadm
 
@@ -325,6 +449,9 @@
 # (dependency is for EMC volume driver)
 depend type=require fmri=library/python-2/pywbem
 
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-26
+
 # force a dependency on glanceclient; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/glanceclient-26
@@ -335,12 +462,18 @@
 # force a dependency on iso8601; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/iso8601-26
 
+# force a dependency on keystoneclient; used via a paste.deploy filter
+depend type=require fmri=library/python/keystoneclient-26
+
 # force a dependency on kombu; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/kombu-26
 
 # force a dependency on lxml; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/lxml-26
 
+# force a dependency on novaclient; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/novaclient-26
+
 # force a dependency on paste; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/paste-26
 
@@ -348,19 +481,24 @@
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-26
+
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-26
 
 # force a dependency on setuptools; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/setuptools-26
 
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-26
+
+# force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/sqlalchemy-26
+
 # force a dependency on stevedore; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/stevedore-26
 
-# force a dependency on suds; pkgdepend work is needed to flush this out.
-# (dependency is for NetApp volume driver)
-depend type=require fmri=library/python/suds-26
-
 # force a dependency on swiftclient; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/swiftclient-26
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/api-paste.ini	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,61 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = faultwrap sizelimit noauth apiv1
+keystone = faultwrap sizelimit authtoken keystonecontext apiv1
+keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = faultwrap sizelimit noauth apiv2
+keystone = faultwrap sizelimit authtoken keystonecontext apiv2
+keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+# signing_dir is configurable, but the default behavior of the authtoken
+# middleware should be sufficient.  It will create a temporary directory
+# in the home directory for the user the cinder process is running as.
+#signing_dir = /var/lib/cinder/keystone-signing
--- a/components/openstack/cinder/files/cinder-api.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-api.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -37,6 +37,13 @@
       <service_fmri value='svc:/application/openstack/cinder/cinder-db'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/cinder-api %m">
       <method_context>
--- a/components/openstack/cinder/files/cinder-backup.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-backup.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -37,6 +37,13 @@
       <service_fmri value='svc:/application/openstack/cinder/cinder-db'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/cinder-backup %m">
       <method_context>
--- a/components/openstack/cinder/files/cinder-db.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-db.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/usr/bin/cinder-manage db sync">
       <method_context>
--- a/components/openstack/cinder/files/cinder-scheduler.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-scheduler.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -37,6 +37,13 @@
       <service_fmri value='svc:/application/openstack/cinder/cinder-db'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/cinder-scheduler %m">
       <method_context>
--- a/components/openstack/cinder/files/cinder-volume-setup	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-volume-setup	Wed Jun 11 17:13:12 2014 -0700
@@ -28,11 +28,19 @@
     order to set it up properly for Cinder to use.
 
     """
-    parser = ConfigParser.ConfigParser()
-    parser.read("/etc/cinder/cinder.conf")
+    cinder_conf = "/etc/cinder/cinder.conf"
+    if not os.path.exists(cinder_conf):
+        print "%s doesn't exist" % cinder_conf
+        return smf_include.SMF_EXIT_ERR_CONFIG
 
-    # set up the top-level dataset with the proper permissions for cinder
-    top_ds = parser.get("DEFAULT", "zfs_volume_base")
+    parser = ConfigParser.ConfigParser()
+    parser.read(cinder_conf)
+
+    # retrieve the top-level dataset or just get the default (rpool/cinder)
+    try:
+        top_ds = parser.get("DEFAULT", "zfs_volume_base")
+    except ConfigParser.NoOptionError:
+        top_ds = "rpool/cinder"
 
     # look to see if the dataset exists
     cmd = ["/usr/sbin/zfs", "list", top_ds]
--- a/components/openstack/cinder/files/cinder-volume.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder-volume.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -42,6 +42,8 @@
       <service_fmri value='svc:/application/openstack/cinder/cinder-db'/>
     </dependency>
 
+    <logfile_attributes permissions='600'/>
+
     <instance name='setup' enabled='false'>
 
       <exec_method timeout_seconds="60" type="method" name="start"
@@ -62,6 +64,11 @@
         <service_fmri value='svc:/application/openstack/cinder/cinder-volume:setup' />
       </dependency>
 
+      <dependency name='ntp' grouping='optional_all' restart_on='none'
+        type='service'>
+        <service_fmri value='svc:/network/ntp'/>
+      </dependency>
+
       <exec_method timeout_seconds="60" type="method" name="start"
         exec="/lib/svc/method/cinder-volume %m">
         <method_context>
--- a/components/openstack/cinder/files/cinder.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -5,49 +5,6 @@
 [DEFAULT]
 
 #
-# Options defined in cinder.openstack.common.cfg:CommonConfigOpts
-#
-
-# Print debugging output (boolean value)
-#debug=false
-
-# Print more verbose output (boolean value)
-#verbose=false
-
-# If this option is specified, the logging configuration file
-# specified is used and overrides any other logging options
-# specified. Please see the Python logging module
-# documentation for details on logging configuration files.
-# (string value)
-#log_config=<None>
-
-# A logging.Formatter log message format string which may use
-# any of the available logging.LogRecord attributes. Default:
-# %(default)s (string value)
-#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
-
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If not set,
-# logging will go to stdout. (string value)
-#log_file=<None>
-
-# (Optional) The directory to keep log files in (will be
-# prepended to --log-file) (string value)
-#log_dir=<None>
-
-# Use syslog for logging. (boolean value)
-#use_syslog=false
-
-# syslog facility to receive log lines (string value)
-#syslog_log_facility=LOG_USER
-
-# Do not count snapshots against gigabytes quota (bool value)
-#no_snapshot_gb_quota=False
-
-#
 # Options defined in cinder.exception
 #
 
@@ -56,161 +13,6 @@
 
 
 #
-# Options defined in cinder.flags
-#
-
-# Virtualization api connection type : libvirt, xenapi, or
-# fake (string value)
-#connection_type=<None>
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-#sql_connection=sqlite:///$state_path/$sqlite_db
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-#sql_connection_debug=0
-
-# File name for the paste.deploy config for cinder-api (string
-# value)
-#api_paste_config=api-paste.ini
-
-# Directory where the cinder python module is installed
-# (string value)
-#pybasedir=/usr/lib/python2.6/vendor-packages
-
-# Directory where cinder binaries are installed (string value)
-bindir=/usr/bin
-
-# Top-level directory for maintaining cinder's state (string
-# value)
-#state_path=$pybasedir
-
-# ip address of this host (string value)
-#my_ip=10.0.0.1
-
-# default glance hostname or ip (string value)
-#glance_host=$my_ip
-
-# default glance port (integer value)
-#glance_port=9292
-
-# A list of the glance api servers available to cinder
-# ([hostname|ip]:port) (list value)
-#glance_api_servers=$glance_host:$glance_port
-
-# default version of the glance api to use
-#glance_api_version=1
-
-# Number retries when downloading an image from glance
-# (integer value)
-#glance_num_retries=0
-
-# Allow to perform insecure SSL (https) requests to glance
-# (boolean value)
-#glance_api_insecure=false
-
-# the topic scheduler nodes listen on (string value)
-#scheduler_topic=cinder-scheduler
-
-# the topic volume nodes listen on (string value)
-#volume_topic=cinder-volume
-
-# Deploy v1 of the Cinder API.  (boolean value)
-#enable_v1_api=true
-
-# Deploy v2 of the Cinder API.  (boolean value)
-#enable_v2_api=true
-
-# whether to rate limit the api (boolean value)
-#api_rate_limit=true
-
-# Specify list of extensions to load when using
-# osapi_volume_extension option with
-# cinder.api.contrib.select_extensions (list value)
-#osapi_volume_ext_list=
-
-# osapi volume extension to load (multi valued)
-#osapi_volume_extension=cinder.api.contrib.standard_extensions
-
-# Base URL that will be presented to users in links to the
-# OpenStack Volume API (string value)
-#osapi_volume_base_URL=<None>
-
-# the maximum number of items returned in a single response
-# from a collection resource (integer value)
-#osapi_max_limit=1000
-
-# the filename to use with sqlite (string value)
-#sqlite_db=cinder.sqlite
-
-# If passed, use synchronous mode for sqlite (boolean value)
-#sqlite_synchronous=true
-
-# timeout before idle sql connections are reaped (integer
-# value)
-#sql_idle_timeout=3600
-
-# maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-#sql_max_retries=10
-
-# interval between retries of opening a sql connection
-# (integer value)
-#sql_retry_interval=10
-
-# full class name for the Manager for volume (string value)
-#volume_manager=cinder.volume.manager.VolumeManager
-
-# full class name for the Manager for scheduler (string value)
-#scheduler_manager=cinder.scheduler.manager.SchedulerManager
-
-# Name of this node.  This can be an opaque identifier.  It is
-# not necessarily a hostname, FQDN, or IP address. (string
-# value)
-#host=cinder
-
-# availability zone of this node (string value)
-#storage_availability_zone=nova
-
-# Memcached servers or None for in process cache. (list value)
-#memcached_servers=<None>
-
-# default volume type to use (string value)
-#default_volume_type=<None>
-
-# time period to generate volume usages for.  Time period must
-# be hour, day, month or year (string value)
-#volume_usage_audit_period=month
-
-# Path to the rootwrap configuration file to use for running
-# commands as root (string value)
-#rootwrap_config=/etc/cinder/rootwrap.conf
-
-# Whether to log monkey patching (boolean value)
-#monkey_patch=false
-
-# List of modules/decorators to monkey patch (list value)
-#monkey_patch_modules=
-
-# maximum time since last check-in for up service (integer
-# value)
-#service_down_time=60
-
-# The full class name of the volume API class to use (string
-# value)
-#volume_api_class=cinder.volume.api.API
-
-# The strategy to use for auth. Supports noauth, keystone, and
-# deprecated. (string value)
-#auth_strategy=noauth
-
-# AMQP exchange to connect to if using RabbitMQ or Qpid
-# (string value)
-#control_exchange=cinder
-
-
-#
 # Options defined in cinder.policy
 #
 
@@ -228,11 +30,12 @@
 # number of volumes allowed per project (integer value)
 #quota_volumes=10
 
-# number of volume snapshots allowed per project (integer value)
+# number of volume snapshots allowed per project (integer
+# value)
 #quota_snapshots=10
 
-# number of volume and snapshot gigabytes allowed per project (integer
-# value)
+# number of volume gigabytes (snapshots are also included)
+# allowed per project (integer value)
 #quota_gigabytes=1000
 
 # number of seconds until a reservation expires (integer
@@ -250,6 +53,10 @@
 # default driver to use for quota checks (string value)
 #quota_driver=cinder.quota.DbQuotaDriver
 
+# whether to use default quota class for default quota
+# (boolean value)
+#use_default_quota_class=true
+
 
 #
 # Options defined in cinder.service
@@ -311,6 +118,19 @@
 
 
 #
+# Options defined in cinder.api.common
+#
+
+# the maximum number of items returned in a single response
+# from a collection resource (integer value)
+#osapi_max_limit=1000
+
+# Base URL that will be presented to users in links to the
+# OpenStack Volume API (string value)
+#osapi_volume_base_URL=<None>
+
+
+#
 # Options defined in cinder.api.middleware.auth
 #
 
@@ -328,11 +148,297 @@
 
 
 #
-# Options defined in cinder.common.deprecated
+# Options defined in cinder.backup.drivers.ceph
+#
+
+# Ceph config file to use. (string value)
+#backup_ceph_conf=/etc/ceph/ceph.conf
+
+# the Ceph user to connect with (string value)
+#backup_ceph_user=cinder
+
+# the chunk size in bytes that a backup will be broken into
+# before transfer to backup store (integer value)
+#backup_ceph_chunk_size=134217728
+
+# the Ceph pool to backup to (string value)
+#backup_ceph_pool=backups
+
+# RBD stripe unit to use when creating a backup image (integer
+# value)
+#backup_ceph_stripe_unit=0
+
+# RBD stripe count to use when creating a backup image
+# (integer value)
+#backup_ceph_stripe_count=0
+
+# If True, always discard excess bytes when restoring volumes.
+# (boolean value)
+#restore_discard_excess_bytes=true
+
+
+#
+# Options defined in cinder.backup.drivers.swift
+#
+
+# The URL of the Swift endpoint (string value)
+#backup_swift_url=http://localhost:8080/v1/AUTH_
+
+# Swift authentication mechanism (string value)
+#backup_swift_auth=per_user
+
+# Swift user name (string value)
+#backup_swift_user=<None>
+
+# Swift key for authentication (string value)
+#backup_swift_key=<None>
+
+# The default Swift container to use (string value)
+#backup_swift_container=volumebackups
+
+# The size in bytes of Swift backup objects (integer value)
+#backup_swift_object_size=52428800
+
+# The number of retries to make for Swift operations (integer
+# value)
+#backup_swift_retry_attempts=3
+
+# The backoff time in seconds between Swift retries (integer
+# value)
+#backup_swift_retry_backoff=2
+
+# Compression algorithm (None to disable) (string value)
+#backup_compression_algorithm=zlib
+
+
+#
+# Options defined in cinder.backup.drivers.tsm
+#
+
+# Volume prefix for the backup id when backing up to TSM
+# (string value)
+#backup_tsm_volume_prefix=backup
+
+# TSM password for the running username (string value)
+#backup_tsm_password=password
+
+# Enable or Disable compression for backups (boolean value)
+#backup_tsm_compression=true
+
+
+#
+# Options defined in cinder.backup.manager
+#
+
+# Driver to use for backups. (string value)
+#backup_driver=cinder.backup.drivers.swift
+
+
+#
+# Options defined in cinder.common.config
 #
 
-# make deprecations fatal (boolean value)
-#fatal_deprecations=false
+# Virtualization api connection type : libvirt, xenapi, or
+# fake (string value)
+#connection_type=<None>
+
+# File name for the paste.deploy config for cinder-api (string
+# value)
+#api_paste_config=api-paste.ini
+
+# Directory where the cinder python module is installed
+# (string value)
+#pybasedir=/usr/lib/python2.6/vendor-packages
+
+# Directory where cinder binaries are installed (string value)
+bindir=/usr/bin
+
+# Top-level directory for maintaining cinder's state (string
+# value)
+state_path=/var/lib/cinder
+
+# ip address of this host (string value)
+#my_ip=10.0.0.1
+
+# default glance hostname or ip (string value)
+#glance_host=$my_ip
+
+# default glance port (integer value)
+#glance_port=9292
+
+# A list of the glance api servers available to cinder
+# ([hostname|ip]:port) (list value)
+#glance_api_servers=$glance_host:$glance_port
+
+# Version of the glance api to use (integer value)
+#glance_api_version=1
+
+# Number of retries when downloading an image from glance
+# (integer value)
+#glance_num_retries=0
+
+# Allow to perform insecure SSL (https) requests to glance
+# (boolean value)
+#glance_api_insecure=false
+
+# Whether to attempt to negotiate SSL layer compression when
+# using SSL (https) requests. Set to False to disable SSL
+# layer compression. In some cases disabling this may improve
+# data throughput, eg when high network bandwidth is available
+# and you are using already compressed image formats such as
+# qcow2 . (boolean value)
+#glance_api_ssl_compression=false
+
+# http/https timeout value for glance operations. If no value
+# (None) is supplied here, the glanceclient default value is
+# used. (integer value)
+#glance_request_timeout=<None>
+
+# the topic scheduler nodes listen on (string value)
+#scheduler_topic=cinder-scheduler
+
+# the topic volume nodes listen on (string value)
+#volume_topic=cinder-volume
+
+# the topic volume backup nodes listen on (string value)
+#backup_topic=cinder-backup
+
+# Deploy v1 of the Cinder API.  (boolean value)
+#enable_v1_api=true
+
+# Deploy v2 of the Cinder API.  (boolean value)
+#enable_v2_api=true
+
+# whether to rate limit the api (boolean value)
+#api_rate_limit=true
+
+# Specify list of extensions to load when using
+# osapi_volume_extension option with
+# cinder.api.contrib.select_extensions (list value)
+#osapi_volume_ext_list=
+
+# osapi volume extension to load (multi valued)
+#osapi_volume_extension=cinder.api.contrib.standard_extensions
+
+# full class name for the Manager for volume (string value)
+#volume_manager=cinder.volume.manager.VolumeManager
+
+# full class name for the Manager for volume backup (string
+# value)
+#backup_manager=cinder.backup.manager.BackupManager
+
+# full class name for the Manager for scheduler (string value)
+#scheduler_manager=cinder.scheduler.manager.SchedulerManager
+
+# Name of this node.  This can be an opaque identifier.  It is
+# not necessarily a hostname, FQDN, or IP address. (string
+# value)
+#host=cinder
+
+# availability zone of this node (string value)
+#storage_availability_zone=nova
+
+# default availability zone to use when creating a new volume.
+# If this is not set then we use the value from the
+# storage_availability_zone option as the default
+# availability_zone for new volumes. (string value)
+#default_availability_zone=<None>
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+# default volume type to use (string value)
+#default_volume_type=<None>
+
+# time period to generate volume usages for.  Time period must
+# be hour, day, month or year (string value)
+#volume_usage_audit_period=month
+
+# Deprecated: command to use for running commands as root
+# (string value)
+#root_helper=sudo
+
+# Path to the rootwrap configuration file to use for running
+# commands as root (string value)
+#rootwrap_config=/etc/cinder/rootwrap.conf
+
+# Enable monkey patching (boolean value)
+#monkey_patch=false
+
+# List of modules/decorators to monkey patch (list value)
+#monkey_patch_modules=
+
+# maximum time since last check-in for up service (integer
+# value)
+#service_down_time=60
+
+# The full class name of the volume API class to use (string
+# value)
+#volume_api_class=cinder.volume.api.API
+
+# The full class name of the volume backup API class (string
+# value)
+#backup_api_class=cinder.backup.api.API
+
+# The strategy to use for auth. Supports noauth, keystone, and
+# deprecated. (string value)
+auth_strategy=keystone
+
+# A list of backend names to use. These backend names should
+# be backed by a unique [CONFIG] group with its options (list
+# value)
+#enabled_backends=<None>
+
+# Whether snapshots count against GigaByte quota (boolean
+# value)
+#no_snapshot_gb_quota=false
+
+# The full class name of the volume transfer API class (string
+# value)
+#transfer_api_class=cinder.transfer.api.API
+
+
+#
+# Options defined in cinder.compute
+#
+
+# The full class name of the compute API class to use (string
+# value)
+#compute_api_class=cinder.compute.nova.API
+
+
+#
+# Options defined in cinder.compute.nova
+#
+
+# Info to match when looking for nova in the service catalog.
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#nova_catalog_info=compute:nova:publicURL
+
+# Same as nova_catalog_info, but for admin endpoint. (string
+# value)
+#nova_catalog_admin_info=compute:nova:adminURL
+
+# Override service catalog lookup with template for nova
+# endpoint e.g. http://localhost:8774/v2/%(tenant_id)s (string
+# value)
+#nova_endpoint_template=<None>
+
+# Same as nova_endpoint_template, but for admin endpoint.
+# (string value)
+#nova_endpoint_admin_template=<None>
+
+# region name of this node (string value)
+#os_region_name=<None>
+
+# Location of ca certificates file to use for nova client
+# requests. (string value)
+#nova_ca_certificates_file=<None>
+
+# Allow to perform insecure SSL requests to nova (boolean
+# value)
+#nova_api_insecure=false
 
 
 #
@@ -358,6 +464,7 @@
 # value)
 #backup_name_template=backup-%s
 
+
 #
 # Options defined in cinder.db.base
 #
@@ -367,12 +474,107 @@
 
 
 #
+# Options defined in cinder.image.glance
+#
+
+# A list of url schemes that can be downloaded directly via
+# the direct_url.  Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+
+#
 # Options defined in cinder.image.image_utils
 #
 
-# parent dir for tempdir used for image conversion (string
+# Directory used for temporary storage during image conversion
+# (string value)
+#image_conversion_dir=$state_path/conversion
+
+
+#
+# Options defined in cinder.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in cinder.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
 # value)
-#image_conversion_dir=/tmp
+#fixed_key=<None>
+
+
+#
+# Options defined in cinder.openstack.common.db.api
+#
+
+# The backend to use for db (string value)
+#backend=sqlalchemy
+
+# Enable the experimental use of thread pooling for all DB API
+# calls (boolean value)
+#use_tpool=false
+
+
+#
+# Options defined in cinder.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+connection=sqlite:///$state_path/$sqlite_db
+
+# timeout before idle sql connections are reaped (integer
+# value)
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#max_pool_size=5
+
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#max_retries=10
+
+# interval between retries of opening a sql connection
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#connection_trace=false
+
+# the filename to use with sqlite (string value)
+#sqlite_db=cinder.sqlite
+
+# If true, use synchronous mode for sqlite (boolean value)
+#sqlite_synchronous=true
+
+
+#
+# Options defined in cinder.openstack.common.eventlet_backdoor
+#
+
+# port for eventlet backdoor to listen (integer value)
+#backdoor_port=<None>
 
 
 #
@@ -382,7 +584,8 @@
 # Whether to disable inter-process locks (boolean value)
 #disable_process_locking=false
 
-# Directory to use for lock files (string value)
+# Directory to use for lock files. Default to a temp directory
+# (string value)
 #lock_path=<None>
 
 
@@ -390,27 +593,24 @@
 # Options defined in cinder.openstack.common.log
 #
 
-# Log output to a per-service log file in named directory
-# (string value)
-#logdir=<None>
-
-# Log output to a named file (string value)
-#logfile=<None>
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
 
 # Log output to standard error (boolean value)
 #use_stderr=true
 
-# Default file mode used when creating log files (string
-# value)
-#logfile_mode=0644
-
 # format string to use for log messages with context (string
 # value)
-#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
 
 # format string to use for log messages without context
 # (string value)
-#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
 
 # data to append to log format when level is DEBUG (string
 # value)
@@ -418,7 +618,7 @@
 
 # prefix each line of exception output with this format
 # (string value)
-#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
 
 # list of logger=LEVEL pairs (list value)
 #default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
@@ -426,6 +626,9 @@
 # publish error events (boolean value)
 #publish_errors=false
 
+# make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
 # If an instance is passed with the log message, format it
 # like this (string value)
 #instance_format="[instance: %(uuid)s] "
@@ -434,6 +637,38 @@
 # it like this (string value)
 #instance_uuid_format="[instance: %(uuid)s] "
 
+# If this option is specified, the logging configuration file
+# specified is used and overrides any other logging options
+# specified. Please see the Python logging module
+# documentation for details on logging configuration files.
+# (string value)
+#log_config=<None>
+
+# A logging.Formatter log message format string which may use
+# any of the available logging.LogRecord attributes. This
+# option is deprecated.  Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+#log_dir=<None>
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+# syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
 
 #
 # Options defined in cinder.openstack.common.notifier.api
@@ -448,15 +683,32 @@
 
 # Default publisher_id for outgoing notifications (string
 # value)
-#default_publisher_id=$host
+#default_publisher_id=<None>
+
+
+#
+# Options defined in cinder.openstack.common.notifier.rpc_notifier
+#
+
+# AMQP topic used for OpenStack notifications (list value)
+#notification_topics=notifications
 
 
 #
-# Options defined in cinder.openstack.common.notifier.rabbit_notifier
+# Options defined in cinder.openstack.common.notifier.rpc_notifier2
 #
 
-# AMQP topic used for openstack notifications (list value)
-#notification_topics=notifications
+# AMQP topic(s) used for OpenStack notifications (list value)
+#topics=notifications
+
+
+#
+# Options defined in cinder.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
 
 
 #
@@ -483,11 +735,30 @@
 
 # Modules of exceptions that are permitted to be recreatedupon
 # receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=cinder.openstack.common.exception,nova.exception,cinder.exception
+#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
 
 # If passed, use a fake RabbitMQ provider (boolean value)
 #fake_rabbit=false
 
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in cinder.openstack.common.rpc.amqp
+#
+
+# Enable a fast single reply queue if using AMQP based RPC
+# like RabbitMQ or Qpid. (boolean value)
+#amqp_rpc_single_reply_queue=false
+
+# Use durable queues in amqp. (boolean value)
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
 
 #
 # Options defined in cinder.openstack.common.rpc.impl_kombu
@@ -543,9 +814,6 @@
 # value)
 #rabbit_max_retries=0
 
-# use durable queues in RabbitMQ (boolean value)
-#rabbit_durable_queues=false
-
 # use H/A queues in RabbitMQ (x-ha-policy: all).You need to
 # wipe RabbitMQ database when changing this option. (boolean
 # value)
@@ -559,9 +827,12 @@
 # Qpid broker hostname (string value)
 #qpid_hostname=localhost
 
-# Qpid broker port (string value)
+# Qpid broker port (integer value)
 #qpid_port=5672
 
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
 # Username for qpid connection (string value)
 #qpid_username=
 
@@ -572,27 +843,6 @@
 # (string value)
 #qpid_sasl_mechanisms=
 
-# Automatically reconnect (boolean value)
-#qpid_reconnect=true
-
-# Reconnection timeout in seconds (integer value)
-#qpid_reconnect_timeout=0
-
-# Max reconnections before giving up (integer value)
-#qpid_reconnect_limit=0
-
-# Minimum seconds between reconnection attempts (integer
-# value)
-#qpid_reconnect_interval_min=0
-
-# Maximum seconds between reconnection attempts (integer
-# value)
-#qpid_reconnect_interval_max=0
-
-# Equivalent to setting max and min to the same value (integer
-# value)
-#qpid_reconnect_interval=0
-
 # Seconds between connection keepalive heartbeats (integer
 # value)
 #qpid_heartbeat=60
@@ -603,6 +853,14 @@
 # Disable Nagle algorithm (boolean value)
 #qpid_tcp_nodelay=true
 
+# The qpid topology version to use.  Version 1 is what was
+# originally used by impl_qpid.  Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
 
 #
 # Options defined in cinder.openstack.common.rpc.impl_zmq
@@ -622,6 +880,10 @@
 # Number of ZeroMQ contexts, defaults to 1 (integer value)
 #rpc_zmq_contexts=1
 
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
 # Directory for holding IPC sockets (string value)
 #rpc_zmq_ipc_dir=/var/run/openstack
 
@@ -638,6 +900,26 @@
 # Matchmaker ring file (JSON) (string value)
 #matchmaker_ringfile=/etc/nova/matchmaker_ring.json
 
+# Heartbeat frequency (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+
+#
+# Options defined in cinder.openstack.common.rpc.matchmaker_redis
+#
+
+# Host to locate redis (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server. (optional) (string value)
+#password=<None>
+
 
 #
 # Options defined in cinder.scheduler.driver
@@ -646,6 +928,10 @@
 # The scheduler host manager class to use (string value)
 #scheduler_host_manager=cinder.scheduler.host_manager.HostManager
 
+# Maximum number of attempts to schedule an volume (integer
+# value)
+#scheduler_max_attempts=3
+
 
 #
 # Options defined in cinder.scheduler.host_manager
@@ -665,7 +951,7 @@
 #
 
 # Default scheduler driver to use (string value)
-#scheduler_driver=cinder.scheduler.simple.SimpleScheduler
+#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler
 
 
 #
@@ -696,6 +982,18 @@
 
 
 #
+# Options defined in cinder.transfer.api
+#
+
+# The number of characters in the salt. (integer value)
+#volume_transfer_salt_length=8
+
+# The number of characters in the autogenerated auth key.
+# (integer value)
+#volume_transfer_key_length=16
+
+
+#
 # Options defined in cinder.volume.api
 #
 
@@ -703,6 +1001,10 @@
 # resides (boolean value)
 #snapshot_same_host=true
 
+# Ensure that the new volumes are the same AZ as snapshot or
+# source volume (boolean value)
+#cloned_volume_same_az=true
+
 
 #
 # Options defined in cinder.volume.driver
@@ -716,17 +1018,14 @@
 # value)
 #reserved_percentage=0
 
-# number of times to rescan iSCSI target to find volume
-# (integer value)
-#num_iscsi_scan_tries=3
-
-# Number of iscsi target ids per host (integer value)
+# The maximum number of iscsi target ids per host (integer
+# value)
 #iscsi_num_targets=100
 
 # prefix for iscsi volumes (string value)
 #iscsi_target_prefix=iqn.2010-10.org.openstack:
 
-# The port that the iSCSI daemon is listening on (string
+# The IP address that the iSCSI daemon is listening on (string
 # value)
 #iscsi_ip_address=$my_ip
 
@@ -734,9 +1033,124 @@
 # value)
 #iscsi_port=3260
 
-# Optional override to the capacity based volume backend name
+# The maximum number of times to rescan targets to find volume
+# (integer value)
+#num_volume_device_scan_tries=3
+
+# The maximum number of times to rescan iSER target to find
+# volume (integer value)
+#num_iser_scan_tries=3
+
+# The maximum number of iser target ids per host (integer
+# value)
+#iser_num_targets=100
+
+# prefix for iser volumes (string value)
+#iser_target_prefix=iqn.2010-10.org.iser.openstack:
+
+# The IP address that the iSER daemon is listening on (string
+# value)
+#iser_ip_address=$my_ip
+
+# The port that the iSER daemon is listening on (integer
+# value)
+#iser_port=3260
+
+# iser target user-land tool to use (string value)
+#iser_helper=tgtadm
+
+# The backend name for a given driver implementation (string
+# value)
+#volume_backend_name=<None>
+
+# Do we attach/detach volumes in cinder using multipath for
+# volume to image and image to volume transfers? (boolean
+# value)
+#use_multipath_for_image_xfer=false
+
+# Method used to wipe old volumes (valid options are: none,
+# zero, shred) (string value)
+#volume_clear=zero
+
+# Size in MiB to wipe at start of old volumes. 0 => all
+# (integer value)
+#volume_clear_size=0
+
+# iscsi target user-land tool to use (string value)
+#iscsi_helper=tgtadm
+
+# Volume configuration file storage directory (string value)
+#volumes_dir=$state_path/volumes
+
+# IET configuration file (string value)
+#iet_conf=/etc/iet/ietd.conf
+
+# Comma-separated list of initiator IQNs allowed to connect to
+# the iSCSI target. (From Nova compute nodes.) (string value)
+#lio_initiator_iqns=
+
+# Sets the behavior of the iSCSI target to either perform
+# blockio or fileio optionally, auto can be set and Cinder
+# will autodetect type of backing device (string value)
+#iscsi_iotype=fileio
+
+
 #
-#volume_backend_name=LVM_iSCSI_unique1
+# Options defined in cinder.volume.drivers.block_device
+#
+
+# List of all available devices (list value)
+#available_devices=
+
+
+#
+# Options defined in cinder.volume.drivers.coraid
+#
+
+# IP address of Coraid ESM (string value)
+#coraid_esm_address=
+
+# User name to connect to Coraid ESM (string value)
+#coraid_user=admin
+
+# Name of group on Coraid ESM to which coraid_user belongs
+# (must have admin privilege) (string value)
+#coraid_group=admin
+
+# Password to connect to Coraid ESM (string value)
+#coraid_password=password
+
+# Volume Type key name to store ESM Repository Name (string
+# value)
+#coraid_repository_key=coraid_repository
+
+
+#
+# Options defined in cinder.volume.drivers.eqlx
+#
+
+# Group name to use for creating volumes (string value)
+#eqlx_group_name=group-0
+
+# Timeout for the Group Manager cli command execution (integer
+# value)
+#eqlx_cli_timeout=30
+
+# Maximum retry count for reconnection (integer value)
+#eqlx_cli_max_retries=5
+
+# Use CHAP authentication for targets? (boolean value)
+#eqlx_use_chap=false
+
+# Existing CHAP account name (string value)
+#eqlx_chap_login=admin
+
+# Password for specified CHAP account name (string value)
+#eqlx_chap_password=password
+
+# Pool in which volumes will be created (string value)
+#eqlx_pool=default
+
 
 #
 # Options defined in cinder.volume.drivers.glusterfs
@@ -744,10 +1158,7 @@
 
 # File with the list of available gluster shares (string
 # value)
-#glusterfs_shares_config=<None>
-
-# Base dir where gluster expected to be mounted (string value)
-#glusterfs_mount_point_base=$state_path/mnt
+#glusterfs_shares_config=/etc/cinder/glusterfs_shares
 
 # Use du or df for free space calculation (string value)
 #glusterfs_disk_util=df
@@ -757,6 +1168,69 @@
 # volume creation takes a lot of time. (boolean value)
 #glusterfs_sparsed_volumes=true
 
+# Create volumes as QCOW2 files rather than raw files.
+# (boolean value)
+#glusterfs_qcow2_volumes=false
+
+# Base dir containing mount points for gluster shares. (string
+# value)
+#glusterfs_mount_point_base=$state_path/mnt
+
+
+#
+# Options defined in cinder.volume.drivers.gpfs
+#
+
+# Specifies the path of the GPFS directory where Block Storage
+# volume and snapshot files are stored. (string value)
+#gpfs_mount_point_base=<None>
+
+# Specifies the path of the Image service repository in GPFS.
+# Leave undefined if not storing images in GPFS. (string
+# value)
+#gpfs_images_dir=<None>
+
+# Specifies the type of image copy to be used.  Set this when
+# the Image service repository also uses GPFS so that image
+# files can be transferred efficiently from the Image service
+# to the Block Storage service. There are two valid values:
+# "copy" specifies that a full copy of the image is made;
+# "copy_on_write" specifies that copy-on-write optimization
+# strategy is used and unmodified blocks of the image file are
+# shared efficiently. (string value)
+#gpfs_images_share_mode=<None>
+
+# Specifies an upper limit on the number of indirections
+# required to reach a specific block due to snapshots or
+# clones.  A lengthy chain of copy-on-write snapshots or
+# clones can have a negative impact on performance, but
+# improves space utilization.  0 indicates unlimited clone
+# depth. (integer value)
+#gpfs_max_clone_depth=0
+
+# Specifies that volumes are created as sparse files which
+# initially consume no space. If set to False, the volume is
+# created as a fully allocated file, in which case, creation
+# may take a significantly longer time. (boolean value)
+#gpfs_sparse_volumes=true
+
+
+#
+# Options defined in cinder.volume.drivers.hds.hds
+#
+
+# configuration file for HDS cinder plugin for HUS (string
+# value)
+#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml
+
+
+#
+# Options defined in cinder.volume.drivers.huawei
+#
+
+# config data for cinder huawei plugin (string value)
+#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml
+
 
 #
 # Options defined in cinder.volume.drivers.lvm
@@ -766,14 +1240,6 @@
 # value)
 #volume_group=cinder-volumes
 
-# Method used to wipe old volumes (valid options are: none,
-# zero, shred) (string value)
-#volume_clear=zero
-
-# Size in MiB to wipe at start of old volumes. 0 => all
-# (integer value)
-#volume_clear_size=0
-
 # Size of thin provisioning pool (None uses entire cinder VG)
 # (string value)
 #pool_size=<None>
@@ -783,75 +1249,65 @@
 # value)
 #lvm_mirrors=0
 
-
-#
-# Options defined in cinder.volume.drivers.netapp
-#
-
-# URL of the WSDL file for the DFM server (string value)
-#netapp_wsdl_url=<None>
-
-# User name for the DFM server (string value)
-#netapp_login=<None>
-
-# Password for the DFM server (string value)
-#netapp_password=<None>
-
-# Hostname for the DFM server (string value)
-#netapp_server_hostname=<None>
-
-# Port number for the DFM server (integer value)
-#netapp_server_port=8088
-
-# Storage service to use for provisioning (when
-# volume_type=None) (string value)
-#netapp_storage_service=<None>
-
-# Prefix of storage service name to use for provisioning
-# (volume_type name will be appended) (string value)
-#netapp_storage_service_prefix=<None>
-
-# Vfiler to use for provisioning (string value)
-#netapp_vfiler=<None>
+# Type of LVM volumes to deploy; (default or thin) (string
+# value)
+#lvm_type=default
 
 
 #
-# Options defined in cinder.volume.drivers.netapp_nfs
+# Options defined in cinder.volume.drivers.netapp.options
 #
 
-# Does snapshot creation call returns immediately (integer
-# value)
-#synchronous_snapshot_create=0
-
-# URL of the WSDL file for the DFM server (string value)
-#netapp_wsdl_url=<None>
-
-# User name for the DFM server (string value)
-#netapp_login=<None>
-
-# Password for the DFM server (string value)
-#netapp_password=<None>
-
-# Hostname for the DFM server (string value)
-#netapp_server_hostname=<None>
-
-# Port number for the DFM server (integer value)
-#netapp_server_port=8088
-
-# Storage service to use for provisioning (when
-# volume_type=None) (string value)
-#netapp_storage_service=<None>
-
-# Prefix of storage service name to use for provisioning
-# (volume_type name will be appended) (string value)
-#netapp_storage_service_prefix=<None>
-
 # Vfiler to use for provisioning (string value)
 #netapp_vfiler=<None>
 
+# User name for the storage controller (string value)
+#netapp_login=<None>
+
+# Password for the storage controller (string value)
+#netapp_password=<None>
+
+# Cluster vserver to use for provisioning (string value)
+#netapp_vserver=<None>
+
+# Host name for the storage controller (string value)
+#netapp_server_hostname=<None>
+
+# Port number for the storage controller (integer value)
+#netapp_server_port=80
+
+# Threshold available percent to start cache cleaning.
+# (integer value)
+#thres_avl_size_perc_start=20
+
+# Threshold available percent to stop cache cleaning. (integer
+# value)
+#thres_avl_size_perc_stop=60
+
+# Threshold minutes after which cache file can be cleaned.
+# (integer value)
+#expiry_thres_minutes=720
+
+# Volume size multiplier to ensure while creation (floating
+# point value)
+#netapp_size_multiplier=1.2
+
+# Comma separated volumes to be used for provisioning (string
+# value)
+#netapp_volume_list=<None>
+
+# Storage family type. (string value)
+#netapp_storage_family=ontap_cluster
+
+# Storage protocol type. (string value)
+#netapp_storage_protocol=<None>
+
+# Transport type protocol (string value)
+#netapp_transport_type=http
+
 
 #
-# Options defined in cinder.volume.drivers.nexenta.volume
+# Options defined in cinder.volume.drivers.nexenta.options
 #
 
 # IP address of Nexenta SA (string value)
@@ -883,6 +1339,36 @@
 # prefix for iSCSI target groups on SA (string value)
 #nexenta_target_group_prefix=cinder/
 
+# File with the list of available nfs shares (string value)
+#nexenta_shares_config=/etc/cinder/nfs_shares
+
+# Base dir containing mount points for nfs shares (string
+# value)
+#nexenta_mount_point_base=$state_path/mnt
+
+# Create volumes as sparsed files which take no space. If set
+# to False volume is created as regular file. In such case
+# volume creation takes a lot of time. (boolean value)
+#nexenta_sparsed_volumes=true
+
+# Default compression value for new ZFS folders. (string
+# value)
+#nexenta_volume_compression=on
+
+# Mount options passed to the nfs client. See section of the
+# nfs man page for details (string value)
+#nexenta_mount_options=<None>
+
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#nexenta_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination.  If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#nexenta_oversub_ratio=1.0
+
 # block size for volumes (blank=default,8KB) (string value)
 #nexenta_blocksize=
 
@@ -895,23 +1381,30 @@
 #
 
 # File with the list of available nfs shares (string value)
-#nfs_shares_config=<None>
-
-# Base dir where nfs expected to be mounted (string value)
-#nfs_mount_point_base=$state_path/mnt
-
-# Use du or df for free space calculation (string value)
-#nfs_disk_util=df
+#nfs_shares_config=/etc/cinder/nfs_shares
 
 # Create volumes as sparsed files which take no space.If set
 # to False volume is created as regular file.In such case
 # volume creation takes a lot of time. (boolean value)
 #nfs_sparsed_volumes=true
 
-# Mount options passed to the nfs client (string value)
-# The value set here is passed directly to the -o flag
-# of the mount command. See the nfs man page for details.
-#nfs_mount_options=None
+# Percent of ACTUAL usage of the underlying volume before no
+# new volumes can be allocated to the volume destination.
+# (floating point value)
+#nfs_used_ratio=0.95
+
+# This will compare the allocated to available space on the
+# volume destination.  If the ratio exceeds this number, the
+# destination will no longer be valid. (floating point value)
+#nfs_oversub_ratio=1.0
+
+# Base dir containing mount points for nfs shares. (string
+# value)
+#nfs_mount_point_base=$state_path/mnt
+
+# Mount options passed to the nfs client. See section of the
+# nfs man page for details. (string value)
+#nfs_mount_options=<None>
 
 
 #
@@ -922,10 +1415,17 @@
 # value)
 #rbd_pool=rbd
 
-# the RADOS client name for accessing rbd volumes (string
-# value)
+# the RADOS client name for accessing rbd volumes - only set
+# when using cephx authentication (string value)
 #rbd_user=<None>
 
+# path to the ceph configuration file to use (string value)
+#rbd_ceph_conf=
+
+# flatten volumes created from snapshots to remove dependency
+# (boolean value)
+#rbd_flatten_volume_from_snapshot=false
+
 # the libvirt uuid of the secret for the rbd_uservolumes
 # (string value)
 #rbd_secret_uuid=<None>
@@ -934,6 +1434,51 @@
 # does not write them directly to the volume (string value)
 #volume_tmp_dir=<None>
 
+# maximum number of nested clones that can be taken of a
+# volume before enforcing a flatten prior to next clone. A
+# value of zero disables cloning (integer value)
+#rbd_max_clone_depth=5
+
+
+#
+# Options defined in cinder.volume.drivers.san.hp.hp_3par_common
+#
+
+# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1
+# (string value)
+#hp3par_api_url=
+
+# 3PAR Super user username (string value)
+#hp3par_username=
+
+# 3PAR Super user password (string value)
+#hp3par_password=
+
+# This option is DEPRECATED and no longer used. The 3par
+# domain name to use. (string value)
+#hp3par_domain=<None>
+
+# The CPG to use for volume creation (string value)
+#hp3par_cpg=OpenStack
+
+# The CPG to use for Snapshots for volumes. If empty
+# hp3par_cpg will be used (string value)
+#hp3par_cpg_snap=
+
+# The time in hours to retain a snapshot.  You can't delete it
+# before this expires. (string value)
+#hp3par_snapshot_retention=
+
+# The time in hours when a snapshot expires and is deleted.
+# This must be larger than expiration (string value)
+#hp3par_snapshot_expiration=
+
+# Enable HTTP debugging to 3PAR (boolean value)
+#hp3par_debug=false
+
+# List of target iSCSI addresses to use. (list value)
+#hp3par_iscsi_ips=
+
 
 #
 # Options defined in cinder.volume.drivers.san.san
@@ -1000,6 +1545,14 @@
 
 
 #
+# Options defined in cinder.volume.drivers.solaris.zfs
+#
+
+# The base dataset for ZFS cinder volumes.
+#zfs_volume_base=rpool/cinder
+
+
+#
 # Options defined in cinder.volume.drivers.solidfire
 #
 
@@ -1009,6 +1562,13 @@
 # Allow tenants to specify QOS on create (boolean value)
 #sf_allow_tenant_qos=false
 
+# Create SolidFire accounts with this prefix (string value)
+#sf_account_prefix=cinder
+
+# SolidFire API port. Useful if the device api is behind a
+# proxy on a different port. (integer value)
+#sf_api_port=443
+
 
 #
 # Options defined in cinder.volume.drivers.storwize_svc
@@ -1018,11 +1578,11 @@
 #storwize_svc_volpool_name=volpool
 
 # Storage system space-efficiency parameter for volumes
-# (string value)
-#storwize_svc_vol_rsize=2%
+# (percentage) (integer value)
+#storwize_svc_vol_rsize=2
 
 # Storage system threshold for volume capacity warnings
-# (string value)
+# (percentage) (integer value)
 #storwize_svc_vol_warning=0
 
 # Storage system autoexpand parameter for volumes (True/False)
@@ -1030,7 +1590,7 @@
 #storwize_svc_vol_autoexpand=true
 
 # Storage system grain size parameter for volumes
-# (32/64/128/256) (string value)
+# (32/64/128/256) (integer value)
 #storwize_svc_vol_grainsize=256
 
 # Storage system compression option for volumes (boolean
@@ -1040,14 +1600,75 @@
 # Enable Easy Tier for volumes (boolean value)
 #storwize_svc_vol_easytier=true
 
+# The I/O group in which to allocate volumes (integer value)
+#storwize_svc_vol_iogrp=0
+
 # Maximum number of seconds to wait for FlashCopy to be
-# prepared. Maximum value is 600 seconds (10 minutes). (string
+# prepared. Maximum value is 600 seconds (10 minutes) (integer
 # value)
 #storwize_svc_flashcopy_timeout=120
 
+# Connection protocol (iSCSI/FC) (string value)
+#storwize_svc_connection_protocol=iSCSI
+
+# Configure CHAP authentication for iSCSI connections
+# (Default: Enabled) (boolean value)
+#storwize_svc_iscsi_chap_enabled=true
+
+# Connect with multipath (FC only; iSCSI multipath is
+# controlled by Nova) (boolean value)
+#storwize_svc_multipath_enabled=false
+
+# Allows vdisk to multi host mapping (boolean value)
+#storwize_svc_multihostmap_enabled=true
+
 
 #
-# Options defined in cinder.volume.drivers.windows
+# Options defined in cinder.volume.drivers.vmware.vmdk
+#
+
+# IP address for connecting to VMware ESX/VC server. (string
+# value)
+#vmware_host_ip=<None>
+
+# Username for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_username=<None>
+
+# Password for authenticating with VMware ESX/VC server.
+# (string value)
+#vmware_host_password=<None>
+
+# Optional VIM service WSDL Location e.g
+# http://<server>/vimService.wsdl. Optional over-ride to
+# default location for bug work-arounds. (string value)
+#vmware_wsdl_location=<None>
+
+# Number of times VMware ESX/VC server API must be retried
+# upon connection related issues. (integer value)
+#vmware_api_retry_count=10
+
+# The interval used for polling remote tasks invoked on VMware
+# ESX/VC server. (integer value)
+#vmware_task_poll_interval=5
+
+# Name for the folder in the VC datacenter that will contain
+# cinder volumes. (string value)
+#vmware_volume_folder=cinder-volumes
+
+# Timeout in seconds for VMDK volume transfer between Cinder
+# and Glance. (integer value)
+#vmware_image_transfer_timeout_secs=7200
+
+# Max number of objects to be retrieved per batch. Query
+# results will be obtained in batches from the server and not
+# in one shot. Server may still limit the count to something
+# less than the configured value. (integer value)
+#vmware_max_objects_retrieval=100
+
+
+#
+# Options defined in cinder.volume.drivers.windows.windows
 #
 
 # Path to store VHD backed volumes (string value)
@@ -1073,13 +1694,21 @@
 # Password for XenAPI connection (string value)
 #xenapi_connection_password=<None>
 
+# Base path to the storage repository (string value)
+#xenapi_sr_base_path=/var/run/sr-mount
+
 
 #
-# Options defined in cinder.volume.drivers.xiv
+# Options defined in cinder.volume.drivers.xiv_ds8k
 #
 
-# Proxy driver (string value)
-#xiv_proxy=xiv_openstack.nova_proxy.XIVNovaProxy
+# Proxy driver that connects to the IBM Storage Array (string
+# value)
+#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
+
+# Connection type to the IBM Storage Array
+# (fibre_channel|iscsi) (string value)
+#xiv_ds8k_connection_type=iscsi
 
 
 #
@@ -1104,11 +1733,11 @@
 # Name of VPSA storage pool for volumes (string value)
 #zadara_vpsa_poolname=<None>
 
-# Default cache policy for volumes (string value)
-#zadara_default_cache_policy=write-through
-
-# Default encryption policy for volumes (string value)
-#zadara_default_encryption=NO
+# Default thin provisioning policy for volumes (boolean value)
+#zadara_vol_thin=true
+
+# Default encryption policy for volumes (boolean value)
+#zadara_vol_encrypt=false
 
 # Default striping mode for volumes (string value)
 #zadara_default_striping_mode=simple
@@ -1129,21 +1758,77 @@
 
 
 #
-# Options defined in cinder.volume.iscsi
+# Options defined in cinder.volume.drivers.zfssa.zfssaiscsi
 #
 
-# iscsi target user-land tool to use (string value)
-#iscsi_helper=tgtadm
-
-# Volume configuration file storage directory (string value)
-#volumes_dir=$state_path/volumes
-
-# IET configuration file (string value)
-#iet_conf=/etc/iet/ietd.conf
-
-# Comma-separatd list of initiator IQNs allowed to connect to
-# the iSCSI target. (From Nova compute nodes.) (string value)
-#lio_initiator_iqns=
+# ZFSSA management hostname/IP
+#zfssa_host=<appliance ip>
+
+# ZFSSA management user login
+#zfssa_auth_user=<user>
+
+# ZFSSA management user password
+#zfssa_auth_password=<password>
+
+# ZFSSA pool name
+#zfssa_pool=<pool>
+
+# ZFSSA project name
+#zfssa_project=<project>
+
+# ZFSSA volume block size
+# Must be one of 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
+# This property is optional. If not provided, default is 8k.
+#zfssa_lun_volblocksize=
+
+# ZFSSA flag to create sparse (thin-provisioned) volume
+#zfssa_lun_sparse=False
+
+# ZFSSA flag to turn on compression on the volume
+# Must be one of off, lzjb, gzip-2, gzip, gzip-9.
+# This property is optional. If not provided, default is inherited
+# from the project.
+#zfssa_lun_compression=
+
+# ZFSSA flag to set write bias to latency or throughput
+# This property is optional. If not provided, default is inherited
+# from the project.
+#zfssa_lun_logbias=
+
+# ZFSSA iSCSI initiator group name
+#zfssa_initiator_group=
+
+# Cinder host initiator IQNs. Separate multiple entries with commas.
+#zfssa_initiator=
+
+# Cinder host initiator CHAP user.
+# This property is optional. Comment out the line if CHAP authentication is
+# not used.
+#zfssa_initiator_user=
+
+# Cinder host initiator CHAP password.
+# This property is optional. Comment out the line if CHAP authentication is
+# not used.
+#zfssa_initiator_password=
+
+# ZFSSA iSCSI target group name
+#zfssa_target_group=
+
+# ZFSSA iSCSI target CHAP user.
+# This property is optional. Comment out the line if CHAP authentication is
+# not used.
+#zfssa_target_user=
+
+# ZFSSA iSCSI target CHAP password.
+# This property is optional. Comment out the line if CHAP authentication is
+# not used.
+#zfssa_target_password=
+
+# ZFSSA iSCSI target portal (data-ip:port)
+#zfssa_target_portal=<data ip address>:3260
+
+# ZFSSA iSCSI target network interfaces (separate multiple entries with comma)
+#zfssa_target_interfaces=<device>
 
 
 #
@@ -1152,35 +1837,30 @@
 
 # Driver to use for volume creation (string value)
 # The local ZFS driver provides direct access to ZFS volumes that it
-# creates. The iSCSI ZFS driver provides the access to local ZFS volumes
-# using iSCSI over loopback and may be more suitable for future use of
-# features such as live migration of Nova instances.
+# creates. The other listed drivers provide access to ZFS volumes via
+# iSCSI or Fibre Channel and are suitable for cases where block storage
+# for Nova compute instances is shared.
 volume_driver=cinder.volume.drivers.solaris.zfs.ZFSVolumeDriver
 #volume_driver=cinder.volume.drivers.solaris.zfs.ZFSISCSIDriver
-
-
-#
-# Options defined in cinder.volume.drivers.solaris.zfs
-#
-
-# The base dataset for ZFS cinder volumes.
-#zfs_volume_base=rpool/cinder
+#volume_driver=cinder.volume.drivers.solaris.zfs.ZFSFCDriver
+#volume_driver=cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
+
+# Timeout for creating the volume to migrate to when
+# performing volume migration (seconds) (integer value)
+#migration_create_volume_timeout_secs=300
+
+# Offload pending volume delete during volume service startup
+# (boolean value)
+#volume_service_inithost_offload=false
 
 
 #
-# Multi backend options
+# Options defined in cinder.volume.utils
 #
 
-# Define the names of the groups for multiple volume backends
-#enabled_backends=fakedriver,lvmdriver
-
-# Define the groups as above
-#[lvmdriver]
-#volume_group=lvm-group-1
-#volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
-#volume_backend_name=LVM_iSCSI_unique1
-#[fakedriver]
-#volume_driver=cinder.volume.driver.FakeISCSIDriver
-
-
-# Total option count: 256
+# The default block size used when copying/clearing volumes
+# (string value)
+#volume_dd_blocksize=1M
+
+
+# Total option count: 401
--- a/components/openstack/cinder/files/cinder.exec_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder.exec_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -1,9 +1,13 @@
 OpenStack Block Storage Management:solaris:cmd:RO::\
 /usr/bin/cinder-clear-rabbit-queues:uid=cinder;gid=cinder
 
-OpenStack Block Storage Management:solaris:cmd:RO::\
-/usr/bin/cinder-manage:uid=cinder;gid=cinder
+OpenStack Block Storage Management:solaris:cmd:RO::/usr/bin/cinder-manage:\
+uid=cinder;gid=cinder
+
+cinder-volume:solaris:cmd:RO::/usr/sbin/fcinfo:privs=file_dac_read,sys_devices
 
 cinder-volume:solaris:cmd:RO::/usr/sbin/itadm:uid=0
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/stmfadm:euid=0
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/zfs:privs=sys_config,sys_mount
--- a/components/openstack/cinder/files/cinder.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/cinder.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -6,7 +6,8 @@
 solaris.admin.edit/etc/cinder/logging.conf,\
 solaris.admin.edit/etc/cinder/policy.json,\
 solaris.smf.manage.cinder,\
-solaris.smf.value.cinder
+solaris.smf.value.cinder;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Block Storage Management
 
--- a/components/openstack/cinder/files/solaris/zfs.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/files/solaris/zfs.py	Wed Jun 11 17:13:12 2014 -0700
@@ -25,14 +25,14 @@
 from oslo.config import cfg
 
 from cinder import exception
-from cinder import flags
 from cinder.image import image_utils
 from cinder.openstack.common import log as logging
+from cinder.openstack.common import processutils
 from cinder.volume import driver
 
 from solaris_install.target.size import Size
 
-FLAGS = flags.FLAGS
+FLAGS = cfg.CONF
 LOG = logging.getLogger(__name__)
 
 solaris_zfs_opts = [
@@ -73,7 +73,7 @@
                                    "from that of the snapshot, '%s'.")
                                  % (volume['name'], volume['size'],
                                     snapshot['volume_size']))
-            raise exception.VolumeBackendAPIException(data=exception_message)
+            raise exception.InvalidInput(reason=exception_message)
 
         # Create a ZFS clone
         zfs_snapshot = self._get_zfs_snap_name(snapshot)
@@ -172,18 +172,20 @@
                                         volume['id'])
         return {
             'driver_volume_type': 'local',
-            'volume_path': volume_path
+            'volume_path': volume_path,
+            'data': {}
         }
 
     def terminate_connection(self, volume, connector, **kwargs):
         """Disconnection from the connector."""
         pass
 
-    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
-        """ Callback for volume attached to instance."""
+    def attach_volume(self, context, volume, instance_uuid, host_name,
+                      mountpoint):
+        """Callback for volume attached to instance or host."""
         pass
 
-    def detach_volume(self, context, volume_id):
+    def detach_volume(self, context, volume):
         """ Callback for volume detached."""
         pass
 
@@ -251,6 +253,17 @@
 
         self._stats = stats
 
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume's size."""
+        volsize_str = 'volsize=%sg' % new_size
+        zfs_volume = self._get_zfs_volume_name(volume)
+        try:
+            self._execute('/usr/sbin/zfs', 'set', volsize_str, zfs_volume)
+        except Exception:
+            msg = (_("Failed to extend volume size to %(new_size)s GB.")
+                   % {'new_size': new_size})
+            raise exception.VolumeBackendAPIException(data=msg)
+
 
 class STMFDriver(ZFSVolumeDriver):
     """Abstract base class for common COMSTAR operations."""
@@ -327,11 +340,11 @@
     def _get_view_and_lun(self, lu):
         """Check the view entry of the LU and then get the lun and view."""
         view_and_lun = {}
-        view_and_lun['valid_value'] = False
+        view_and_lun['view'] = view_and_lun['lun'] = None
         try:
             (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-view',
-                                        '-l', lu)
-        except exception.ProcessExecutionError as error:
+                                        '-l', lu, '-v')
+        except processutils.ProcessExecutionError as error:
             if 'no views found' in error.stderr:
                 LOG.debug(_("No view is found for LU '%s'") % lu)
                 return view_and_lun
@@ -341,15 +354,19 @@
         for line in [l.strip() for l in out.splitlines()]:
             if line.startswith("View Entry:"):
                 view_and_lun['view'] = line.split()[-1]
-                view_and_lun['valid_value'] = True
-            if line.startswith("LUN"):
-                view_and_lun['lun'] = line.split()[-1]
+            if line.startswith("LUN") and 'Auto' not in line.split()[-1]:
+                view_and_lun['lun'] = int(line.split()[-1])
+                break
+            if line.startswith("Lun"):
+                view_and_lun['lun'] = int(line.split()[2])
 
-        if view_and_lun['lun'] == 'Auto':
-            view_and_lun['lun'] = 0
-
-        LOG.debug(_("The view_entry and LUN of LU '%s' are '%s' and '%s'.")
-                  % (lu, view_and_lun['view'], view_and_lun['lun']))
+        if view_and_lun['view'] is None or view_and_lun['lun'] is None:
+            LOG.error(_("Failed to get the view_entry or LUN of the LU '%s'.")
+                      % lu)
+            raise
+        else:
+            LOG.debug(_("The view_entry and LUN of LU '%s' are '%s' and '%d'.")
+                      % (lu, view_and_lun['view'], view_and_lun['lun']))
 
         return view_and_lun
 
@@ -385,9 +402,9 @@
         self._execute('/usr/sbin/itadm', 'create-target', '-n', target_name)
         assert self._check_target(target_name, 'iSCSI')
 
-        # Add a logical unit view entry
+        # Add a view entry to the logical unit with the specified LUN, 8776
         if luid is not None:
-            self._execute('/usr/sbin/stmfadm', 'add-view', '-t',
+            self._execute('/usr/sbin/stmfadm', 'add-view', '-n', 8776, '-t',
                           target_group, luid)
 
     def remove_export(self, context, volume):
@@ -404,7 +421,7 @@
         # Remove the view entry
         if luid is not None:
             view_lun = self._get_view_and_lun(luid)
-            if view_lun['valid_value']:
+            if view_lun['view']:
                 self._execute('/usr/sbin/stmfadm', 'remove-view', '-l',
                               luid, view_lun['view'])
 
@@ -452,9 +469,9 @@
         properties['target_iqn'] = target_name
         properties['target_portal'] = ('%s:%d' %
                                        (self.configuration.iscsi_ip_address,
-                                       self.configuration.iscsi_port))
+                                        self.configuration.iscsi_port))
         view_lun = self._get_view_and_lun(luid)
-        if view_lun['valid_value']:
+        if view_lun['lun']:
             properties['target_lun'] = view_lun['lun']
         properties['volume_id'] = volume['id']
 
@@ -508,3 +525,210 @@
                     'for volume %(volume_name)s')
                   % {'initiator_name': initiator_name,
                      'volume_name': volume_name})
+
+
+class ZFSFCDriver(STMFDriver, driver.FibreChannelDriver):
+    """ZFS volume operations in FC mode."""
+    protocol = 'FC'
+
+    def __init__(self, *args, **kwargs):
+        super(ZFSFCDriver, self).__init__(*args, **kwargs)
+
+    def check_for_setup_error(self):
+        """Check the setup error."""
+        wwns = self._get_wwns()
+        if not wwns:
+            msg = (_("Could not determine fibre channel world wide "
+                     "node names."))
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _get_wwns(self):
+        """Get the FC port WWNs of the host."""
+        (out, _err) = self._execute('/usr/sbin/fcinfo', 'hba-port', '-t')
+
+        wwns = []
+        for line in [l.strip() for l in out.splitlines()]:
+            if line.startswith("HBA Port WWN:"):
+                wwn = line.split()[-1]
+                LOG.debug(_("Got the FC port WWN '%s'") % wwn)
+                wwns.append(wwn)
+
+        return wwns
+
+    def _check_wwn_tg(self, wwn):
+        """Check if the target group 'tg-wwn-xxx' exists."""
+        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
+
+        for line in [l.strip() for l in out.splitlines()]:
+            if line.startswith("Target Group:") and wwn in line:
+                tg = line.split()[-1]
+                break
+        else:
+            LOG.debug(_("The target group 'tg-wwn-%s' doesn't exist.") % wwn)
+            tg = None
+
+        return tg
+
+    def _only_lu(self, lu):
+        """Check if the LU is the only one."""
+        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-lu', '-v')
+        linecount = 0
+
+        for line in [l.strip() for l in out.splitlines()]:
+            if line.startswith("LU Name:"):
+                luid = line.split()[-1]
+                linecount += 1
+
+        if linecount == 1 and luid == lu:
+            LOG.debug(_("The LU '%s' is the only one.") % lu)
+            return True
+        else:
+            return False
+
+    def _target_in_tg(self, wwn, tg):
+        """Check if the target has been added into a target group."""
+        target = 'wwn.%s' % wwn.upper()
+
+        if tg is not None:
+            (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg',
+                                        '-v', tg)
+        else:
+            (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg', '-v')
+
+        for line in [l.strip() for l in out.splitlines()]:
+            if line.startswith("Member:") and target in line:
+                return True
+        LOG.debug(_("The target '%s' is not in any target group.") % target)
+        return False
+
+    def create_export(self, context, volume):
+        """Export the volume."""
+        zvol = self._get_zvol_path(volume)
+
+        # Create a Logical Unit (LU)
+        self._execute('/usr/sbin/stmfadm', 'create-lu', zvol)
+        luid = self._get_luid(volume)
+        if not luid:
+            msg = (_("Failed to create logic unit for volume '%s'")
+                   % volume['name'])
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        wwns = self._get_wwns()
+        wwn = wwns[0]
+        target_group = self._check_wwn_tg(wwn)
+        if target_group is None:
+            target_group = 'tg-wwn-%s' % wwn
+            if self._target_in_tg(wwn, None):
+                msg = (_("Target WWN '%s' has been found in another"
+                         "target group, so it will not be added "
+                         "into the expected target group '%s'.") %
+                       (wwn, target_group))
+                raise exception.VolumeBackendAPIException(data=msg)
+
+            # Create a target group for the wwn
+            self._execute('/usr/sbin/stmfadm', 'create-tg', target_group)
+
+            # Enable the target and add it to the 'tg-wwn-xxx' group
+            self._execute('/usr/sbin/stmfadm', 'offline-target',
+                          'wwn.%s' % wwn)
+            self._execute('/usr/sbin/stmfadm', 'add-tg-member', '-g',
+                          target_group, 'wwn.%s' % wwn)
+            self._execute('/usr/sbin/stmfadm', 'online-target', 'wwn.%s' % wwn)
+        assert self._target_in_tg(wwn, target_group)
+
+        # Add a logical unit view entry
+        # TODO(Strony): replace the auto assigned LUN with '-n' option
+        if luid is not None:
+            self._execute('/usr/sbin/stmfadm', 'add-view', '-t',
+                          target_group, luid)
+
+    def remove_export(self, context, volume):
+        """Remove an export for a volume."""
+        luid = self._get_luid(volume)
+
+        if luid is not None:
+            wwns = self._get_wwns()
+            wwn = wwns[0]
+            target_wwn = 'wwn.%s' % wwn
+            target_group = 'tg-wwn-%s' % wwn
+            view_lun = self._get_view_and_lun(luid)
+            if view_lun['view']:
+                self._execute('/usr/sbin/stmfadm', 'remove-view', '-l',
+                              luid, view_lun['view'])
+
+            # Remove the target group when only one LU exists.
+            if self._only_lu(luid):
+                if self._check_target(target_wwn, 'Channel'):
+                    self._execute('/usr/sbin/stmfadm', 'offline-target',
+                                  target_wwn)
+                if self._check_tg(target_group):
+                    self._execute('/usr/sbin/stmfadm', 'delete-tg',
+                                  target_group)
+
+            # Remove the LU
+            self._execute('/usr/sbin/stmfadm', 'delete-lu', luid)
+
+    def _get_fc_properties(self, volume):
+        """Get Fibre Channel configuration.
+
+        :target_discovered:    boolean indicating whether discovery was used
+        :target_wwn:           the world wide name of the FC port target
+        :target_lun:           the lun assigned to the LU for the view entry
+
+        """
+        wwns = self._get_wwns()
+        if not wwns:
+            msg = (_("Could not determine fibre channel world wide "
+                     "node names."))
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        luid = self._get_luid(volume)
+        if not luid:
+            msg = (_("Failed to get logic unit for volume '%s'")
+                   % volume['name'])
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        properties = {}
+
+        properties['target_discovered'] = True
+        properties['target_wwn'] = wwns
+        view_lun = self._get_view_and_lun(luid)
+        if view_lun['lun']:
+            properties['target_lun'] = view_lun['lun']
+        return properties
+
+    def initialize_connection(self, volume, connector):
+        """Initializes the connection and returns connection info.
+
+        The driver returns a driver_volume_type of 'fibre_channel'.
+        The target_wwn can be a single entry or a list of wwns that
+        correspond to the list of remote wwn(s) that will export the volume.
+        Example return values:
+
+            {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': '1234567890123',
+                }
+            }
+
+            or
+
+             {
+                'driver_volume_type': 'fibre_channel'
+                'data': {
+                    'target_discovered': True,
+                    'target_lun': 1,
+                    'target_wwn': ['1234567890123', '0987654321321'],
+                }
+            }
+
+        """
+        fc_properties = self._get_fc_properties(volume)
+
+        return {
+            'driver_volume_type': 'fibre_channel',
+            'data': fc_properties
+        }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/__init__.py	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,15 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Empty for this release
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/cinder.akwf	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ *    not use this file except in compliance with the License. You may obtain
+ *    a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ *    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ *    License for the specific language governing permissions and limitations
+ *    under the License.
+ */
+
+var workflow = {
+	name: 'Configuration for OpenStack Cinder Driver',
+	origin: 'Oracle Corporation',
+	description: 'Setup environment for OpenStack Cinder Driver',
+	version: '1.0.0',
+	parameters: {
+		name: {
+			label: 'Cinder\'s User\'s Name',
+			type: 'String'
+		},
+		password: {
+			label: 'Password',
+			type: 'Password'
+		}
+	},
+	execute: function (params) {
+		/*
+		 * Check for REST service to be enabled
+		 */
+		try {
+		    run('configuration services rest');
+		    if (get('<status>') != 'online')
+			run('enable');
+		} catch (err) {
+		    return ('The REST API is not available on this version of \
+			    appliance software and is required to run with the \
+			    ZFSSA cinder driver.  Please upgrade the appliance \
+			    software.');
+		}
+
+		/*
+		 * Cinder role
+		 */
+		var osrole = 'OpenStackRole';
+		run('cd /');
+		run('configuration roles');
+		try {
+		    run('select ' + osrole);
+		} catch(err) {
+		    run('role ' + osrole);
+		    run('set description="OpenStack Cinder Driver"');
+		    run('commit');
+		    run('select ' + osrole);
+		}
+		run('authorizations');
+		run('create');
+		run('set scope=stmf');
+		run('set allow_configure=true');
+		run('commit');
+		run('create');
+		run('set scope=nas');
+		run('set allow_clone=true');
+		run('set allow_createProject=true');
+		run('set allow_createShare=true');
+		run('set allow_changeSpaceProps=true');
+		run('set allow_changeGeneralProps=true');
+		run('set allow_destroy=true');
+		run('set allow_rollback=true');
+		run('set allow_takeSnap=true');
+		run('commit');
+
+		/*
+		 * Set user with Cinder role
+		 */
+		 var msg = 'User ' + params.name;
+		 run('cd /')
+		 run('configuration users');
+		 try {
+		    run('select ' + params.name);
+		    msg += ' updated.';
+		 } catch (err) {
+		    run('user ' + params.name);
+		    run('set initial_password=' + params.password);
+		    run('set fullname="OpenStack Cinder Driver"');
+		    run('commit');
+		    run('select ' + params.name);
+		    msg += ' created.';
+		}
+		run('set roles=' + osrole);
+		run('commit');
+		return (msg);
+	}
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/restclient.py	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,353 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+ZFS Storage Appliance REST API Client Programmatic Interface
+"""
+
+import httplib
+import json
+import time
+import urllib2
+import StringIO
+
+from cinder.openstack.common import log
+
+LOG = log.getLogger(__name__)
+
+
+class Status:
+    """Result HTTP Status"""
+
+    def __init__(self):
+        pass
+
+    #: Request return OK
+    OK = httplib.OK
+
+    #: New resource created successfully
+    CREATED = httplib.CREATED
+
+    #: Command accepted
+    ACCEPTED = httplib.ACCEPTED
+
+    #: Command returned OK but no data will be returned
+    NO_CONTENT = httplib.NO_CONTENT
+
+    #: Bad Request
+    BAD_REQUEST = httplib.BAD_REQUEST
+
+    #: User is not authorized
+    UNAUTHORIZED = httplib.UNAUTHORIZED
+
+    #: The request is not allowed
+    FORBIDDEN = httplib.FORBIDDEN
+
+    #: The requested resource was not found
+    NOT_FOUND = httplib.NOT_FOUND
+
+    #: The request is not allowed
+    NOT_ALLOWED = httplib.METHOD_NOT_ALLOWED
+
+    #: Request timed out
+    TIMEOUT = httplib.REQUEST_TIMEOUT
+
+    #: Invalid request
+    CONFLICT = httplib.CONFLICT
+
+    #: Service Unavailable
+    BUSY = httplib.SERVICE_UNAVAILABLE
+
+
+class RestResult(object):
+    """Result from a REST API operation"""
+    def __init__(self, response=None, err=None):
+        """Initialize a RestResult containing the results from a REST call
+        :param response: HTTP response
+        """
+        self.response = response
+        self.error = err
+        self.data = ""
+        self.status = 0
+        if self.response is not None:
+            self.status = self.response.getcode()
+            result = self.response.read()
+            while result:
+                self.data += result
+                result = self.response.read()
+
+        if self.error is not None:
+            self.status = self.error.code
+            self.data = httplib.responses[self.status]
+
+        LOG.debug('response code: %s' % self.status)
+        LOG.debug('response data: %s' % self.data)
+
+    def get_header(self, name):
+        """Get an HTTP header with the given name from the results
+
+        :param name: HTTP header name
+        :return: The header value or None if no value is found
+        """
+        if self.response is None:
+            return None
+        info = self.response.info()
+        return info.getheader(name)
+
+
+class RestClientError(Exception):
+    """Exception for ZFS REST API client errors"""
+    def __init__(self, status, name="ERR_INTERNAL", message=None):
+
+        """Create a REST Response exception
+
+        :param status: HTTP response status
+        :param name: The name of the REST API error type
+        :param message: Descriptive error message returned from REST call
+        """
+        Exception.__init__(self, message)
+        self.code = status
+        self.name = name
+        self.msg = message
+        if status in httplib.responses:
+            self.msg = httplib.responses[status]
+
+    def __str__(self):
+        return "%d %s %s" % (self.code, self.name, self.msg)
+
+
+class RestClientURL(object):
+    """ZFSSA urllib2 client"""
+    def __init__(self, url, **kwargs):
+        """
+        Initialize a REST client.
+
+        :param url: The ZFSSA REST API URL
+        :key session: HTTP Cookie value of x-auth-session obtained from a
+                      normal BUI login.
+        :key timeout: Time in seconds to wait for command to complete.
+                      (Default is 60 seconds)
+        """
+        self.url = url
+        self.local = kwargs.get("local", False)
+        self.base_path = kwargs.get("base_path", "/api")
+        self.timeout = kwargs.get("timeout", 60)
+        self.headers = None
+        if kwargs.get('session'):
+            self.headers['x-auth-session'] = kwargs.get('session')
+
+        self.headers = {"content-type": "application/json"}
+        self.do_logout = False
+        self.auth_str = None
+
+    def _path(self, path, base_path=None):
+        """build rest url path"""
+        if path.startswith("http://") or path.startswith("https://"):
+            return path
+        if base_path is None:
+            base_path = self.base_path
+        if not path.startswith(base_path) and not (
+                self.local and ("/api" + path).startswith(base_path)):
+            path = "%s%s" % (base_path, path)
+        if self.local and path.startswith("/api"):
+            path = path[4:]
+        return self.url + path
+
+    def authorize(self):
+        """Performs authorization setting x-auth-session"""
+        self.headers['authorization'] = 'Basic %s' % self.auth_str
+        if 'x-auth-session' in self.headers:
+            del self.headers['x-auth-session']
+
+        try:
+            result = self.post("/access/v1")
+            del self.headers['authorization']
+            if result.status == httplib.CREATED:
+                self.headers['x-auth-session'] = \
+                    result.get_header('x-auth-session')
+                self.do_logout = True
+                LOG.info('ZFSSA version: %s' %
+                         result.get_header('x-zfssa-version'))
+
+            elif result.status == httplib.NOT_FOUND:
+                raise RestClientError(result.status, name="ERR_RESTError",
+                                      message="REST Not Available: \
+                                      Please Upgrade")
+
+        except RestClientError as err:
+            del self.headers['authorization']
+            raise err
+
+    def login(self, auth_str):
+        """
+        Login to an appliance using a user name and password and start
+        a session like what is done logging into the BUI.  This is not a
+        requirement to run REST commands, since the protocol is stateless.
+        What it does is set up a cookie session so that some server side
+        caching can be done.  If login is used remember to call logout when
+        finished.
+
+        :param auth_str: Authorization string (base64)
+        """
+        self.auth_str = auth_str
+        self.authorize()
+
+    def logout(self):
+        """Logout of an appliance"""
+        result = None
+        try:
+            result = self.delete("/access/v1", base_path="/api")
+        except RestClientError:
+            pass
+
+        self.headers.clear()
+        self.do_logout = False
+        return result
+
+    def islogin(self):
+        """Return whether the client is currently logged in"""
+        return self.do_logout
+
+    @staticmethod
+    def mkpath(*args, **kwargs):
+        """Make a path?query string for making a REST request
+
+        :cmd_params args: The path part
+        :cmd_params kwargs: The query part
+        """
+        buf = StringIO()
+        query = "?"
+        for arg in args:
+            buf.write("/")
+            buf.write(arg)
+        for k in kwargs:
+            buf.write(query)
+            if query == "?":
+                query = "&"
+            buf.write(k)
+            buf.write("=")
+            buf.write(kwargs[k])
+        return buf.getvalue()
+
+    def request(self, path, request, body=None, **kwargs):
+        """Make an HTTP request and return the results
+
+        :param path: Path used with the initialized URL to make a request
+        :param request: HTTP request type (GET, POST, PUT, DELETE)
+        :param body: HTTP body of request
+        :key accept: Set HTTP 'Accept' header with this value
+        :key base_path: Override the base_path for this request
+        :key content: Set HTTP 'Content-Type' header with this value
+        """
+        out_hdrs = dict.copy(self.headers)
+        if kwargs.get("accept"):
+            out_hdrs['accept'] = kwargs.get("accept")
+
+        if body is not None:
+            if isinstance(body, dict):
+                body = str(json.dumps(body))
+
+        if body and len(body):
+            out_hdrs['content-length'] = len(body)
+
+        zfssaurl = self._path(path, kwargs.get("base_path"))
+        req = urllib2.Request(zfssaurl, body, out_hdrs)
+        req.get_method = lambda: request
+        maxreqretries = kwargs.get("maxreqretries", 10)
+        retry = 0
+        response = None
+
+        LOG.debug('request: %s %s' % (request, zfssaurl))
+        LOG.debug('out headers: %s' % out_hdrs)
+        if body is not None and body != '':
+            LOG.debug('body: %s' % body)
+
+        while retry < maxreqretries:
+            try:
+                response = urllib2.urlopen(req, timeout=self.timeout)
+            except urllib2.HTTPError as err:
+                LOG.error('REST Not Available: %s' % err.code)
+                if err.code == httplib.SERVICE_UNAVAILABLE and \
+                   retry < maxreqretries:
+                    retry += 1
+                    time.sleep(1)
+                    LOG.error('Server Busy retry request: %s' % retry)
+                    continue
+                if (err.code == httplib.UNAUTHORIZED or
+                    err.code == httplib.INTERNAL_SERVER_ERROR) and \
+                   '/access/v1' not in zfssaurl:
+                    try:
+                        LOG.error('Authorizing request retry: %s, %s' %
+                                  (zfssaurl, retry))
+                        self.authorize()
+                        req.add_header('x-auth-session',
+                                       self.headers['x-auth-session'])
+                    except RestClientError:
+                        pass
+                    retry += 1
+                    time.sleep(1)
+                    continue
+
+                return RestResult(err=err)
+
+            except urllib2.URLError as err:
+                LOG.error('URLError: %s' % err.reason)
+                raise RestClientError(-1, name="ERR_URLError",
+                                      message=err.reason)
+
+            break
+
+        if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
+           retry >= maxreqretries:
+            raise RestClientError(response.getcode(), name="ERR_HTTPError",
+                                  message="REST Not Available: Disabled")
+
+        return RestResult(response=response)
+
+    def get(self, path, **kwargs):
+        """
+        Make an HTTP GET request
+
+        :param path: Path to resource.
+        """
+        return self.request(path, "GET", **kwargs)
+
+    def post(self, path, body="", **kwargs):
+        """Make an HTTP POST request
+
+        :param path: Path to resource.
+        :param body: Post data content
+        """
+        return self.request(path, "POST", body, **kwargs)
+
+    def put(self, path, body="", **kwargs):
+        """Make an HTTP PUT request
+
+        :param path: Path to resource.
+        :param body: Put data content
+        """
+        return self.request(path, "PUT", body, **kwargs)
+
+    def delete(self, path, **kwargs):
+        """Make an HTTP DELETE request
+
+        :param path: Path to resource that will be deleted.
+        """
+        return self.request(path, "DELETE", **kwargs)
+
+    def head(self, path, **kwargs):
+        """Make an HTTP HEAD request
+
+        :param path: Path to resource.
+        """
+        return self.request(path, "HEAD", **kwargs)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/zfssaiscsi.py	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,389 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+ZFS Storage Appliance Cinder Volume Driver
+"""
+import base64
+
+from cinder import exception
+from cinder.openstack.common import log
+from cinder.volume import driver
+from oslo.config import cfg
+
+from cinder.volume.drivers.zfssa import zfssarest
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+ZFSSA_OPTS = [
+    cfg.StrOpt('zfssa_host', required=True,
+               help='ZFSSA management IP address'),
+    cfg.StrOpt('zfssa_auth_user', required=True, secret=True,
+               help='ZFSSA management authorized user\'s name'),
+    cfg.StrOpt('zfssa_auth_password', required=True, secret=True,
+               help='ZFSSA management authorized user\'s password'),
+    cfg.StrOpt('zfssa_pool', required=True,
+               help='ZFSSA storage pool name'),
+    cfg.StrOpt('zfssa_project', required=True,
+               help='ZFSSA project name'),
+    cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
+               help='Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k'),
+    cfg.BoolOpt('zfssa_lun_sparse', default=False,
+                help='Flag to enable sparse (thin-provisioned): True, False'),
+    cfg.StrOpt('zfssa_lun_compression', default='',
+               help='Data compression-off, lzjb, gzip-2, gzip, gzip-9'),
+    cfg.StrOpt('zfssa_lun_logbias', default='',
+               help='Synchronous write bias-latency, throughput'),
+    cfg.StrOpt('zfssa_initiator_group', default='',
+               help='iSCSI initiator group'),
+    cfg.StrOpt('zfssa_initiator', default='',
+               help='iSCSI initiator IQNs (comma separated)'),
+    cfg.StrOpt('zfssa_initiator_user', default='',
+               help='iSCSI initiator CHAP user'),
+    cfg.StrOpt('zfssa_initiator_password', default='',
+               help='iSCSI initiator CHAP password'),
+    cfg.StrOpt('zfssa_target_group', default='tgt-grp',
+               help='iSCSI target group name'),
+    cfg.StrOpt('zfssa_target_user', default='',
+               help='iSCSI target CHAP user'),
+    cfg.StrOpt('zfssa_target_password', default='',
+               help='iSCSI target CHAP password'),
+    cfg.StrOpt('zfssa_target_portal', required=True,
+               help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260)'),
+    cfg.StrOpt('zfssa_target_interfaces', required=True,
+               help='Network interfaces of iSCSI targets (comma separated)')
+]
+
+CONF.register_opts(ZFSSA_OPTS)
+
+SIZE_GB = 1073741824
+
+
+#pylint: disable=R0904
+class ZFSSAISCSIDriver(driver.ISCSIDriver):
+    """ZFSSA Cinder volume driver"""
+
+    VERSION = '1.0.0'
+    protocol = 'iSCSI'
+
+    def __init__(self, *args, **kwargs):
+        """Register the ZFSSA-specific options with the driver config."""
+        super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(ZFSSA_OPTS)
+        # REST proxy to the appliance; created in do_setup().
+        self.zfssa = None
+        # Cached stats dict, populated by _update_volume_status().
+        self._stats = None
+
+    def _get_target_alias(self):
+        """Return the iSCSI target alias.
+
+           The configured target group name doubles as the target alias.
+        """
+        return self.configuration.zfssa_target_group
+
+    def do_setup(self, context):
+        """Setup - create project, initiators, initiatorgroup, target,
+                   targetgroup
+        """
+        self.configuration._check_required_opts()
+        lcfg = self.configuration
+
+        LOG.info('Connecting to host: %s' % lcfg.zfssa_host)
+        self.zfssa = zfssarest.ZFSSAApi(lcfg.zfssa_host)
+        auth_str = base64.encodestring('%s:%s' %
+                                       (lcfg.zfssa_auth_user,
+                                        lcfg.zfssa_auth_password))[:-1]
+        self.zfssa.login(auth_str)
+        self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
+                                  compression=lcfg.zfssa_lun_compression,
+                                  logbias=lcfg.zfssa_lun_logbias)
+
+        if (lcfg.zfssa_initiator != '' and
+            (lcfg.zfssa_initiator_group == '' or
+             lcfg.zfssa_initiator_group == 'default')):
+            LOG.warning('zfssa_initiator= %s wont be used on \
+                        zfssa_initiator_group= %s' %
+                        (lcfg.zfssa_initiator,
+                         lcfg.zfssa_initiator_group))
+
+        # Setup initiator and initiator group
+        if lcfg.zfssa_initiator != '' and \
+           lcfg.zfssa_initiator_group != '' and \
+           lcfg.zfssa_initiator_group != 'default':
+            for initiator in lcfg.zfssa_initiator.split(','):
+                self.zfssa.create_initiator(initiator,
+                                            lcfg.zfssa_initiator_group + '-' +
+                                            initiator,
+                                            chapuser=
+                                            lcfg.zfssa_initiator_user,
+                                            chapsecret=
+                                            lcfg.zfssa_initiator_password)
+                self.zfssa.add_to_initiatorgroup(initiator,
+                                                 lcfg.zfssa_initiator_group)
+        # Parse interfaces
+        interfaces = []
+        for interface in lcfg.zfssa_target_interfaces.split(','):
+            if interface == '':
+                continue
+            interfaces.append(interface)
+
+        # Setup target and target group
+        iqn = self.zfssa.create_target(
+            self._get_target_alias(),
+            interfaces,
+            tchapuser=lcfg.zfssa_target_user,
+            tchapsecret=lcfg.zfssa_target_password)
+
+        self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
+
+    def check_for_setup_error(self):
+        """Check that driver can login and pool, project, initiators,
+           initiatorgroup, target, targetgroup exist
+        """
+        lcfg = self.configuration
+
+        self.zfssa.verify_pool(lcfg.zfssa_pool)
+        self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
+
+        if lcfg.zfssa_initiator != '' and \
+           lcfg.zfssa_initiator_group != '' and \
+           lcfg.zfssa_initiator_group != 'default':
+            for initiator in lcfg.zfssa_initiator.split(','):
+                self.zfssa.verify_initiator(initiator)
+
+            self.zfssa.verify_target(self._get_target_alias())
+
+    def _get_provider_info(self, volume):
+        """Build provider information for a volume.
+
+           provider_location is '<portal> <target-iqn> <lun-number>';
+           provider_auth is added only when target CHAP is configured.
+        """
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project, volume['name'])
+        iqn = self.zfssa.get_target(self._get_target_alias())
+        loc = "%s %s %s" % (lcfg.zfssa_target_portal, iqn, lun['number'])
+        # NOTE(review): debug tag says '_export_volume' although this is
+        # _get_provider_info (also called from create_volume).
+        LOG.debug('_export_volume: provider_location: %s' % loc)
+        provider = {'provider_location': loc}
+        if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
+            provider['provider_auth'] = 'CHAP %s %s' % \
+                                        (lcfg.zfssa_target_user,
+                                         lcfg.zfssa_target_password)
+        return provider
+
+    def create_volume(self, volume):
+        """Create a volume on ZFSSA
+
+           :returns: provider info dict from _get_provider_info().
+        """
+        LOG.debug('zfssa.create_volume: volume=' + volume['name'])
+        lcfg = self.configuration
+        # volume['size'] is in GB; the appliance accepts a 'g' suffix.
+        volsize = str(volume['size']) + 'g'
+        self.zfssa.create_lun(lcfg.zfssa_pool,
+                              lcfg.zfssa_project,
+                              volume['name'],
+                              volsize,
+                              targetgroup=lcfg.zfssa_target_group,
+                              volblocksize=lcfg.zfssa_lun_volblocksize,
+                              sparse=lcfg.zfssa_lun_sparse,
+                              compression=lcfg.zfssa_lun_compression,
+                              logbias=lcfg.zfssa_lun_logbias)
+
+        return self._get_provider_info(volume)
+
+    def delete_volume(self, volume):
+        """Deletes a volume with the given volume['name']."""
+        LOG.debug('zfssa.delete_volume: name=' + volume['name'])
+        lcfg = self.configuration
+        lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                     lcfg.zfssa_project,
+                                     volume['name'])
+        """Delete clone's temp snapshot. see create_cloned_volume()"""
+        """clone is deleted as part of the snapshot delete."""
+        tmpsnap = 'tmp-snapshot-%s' % volume['id']
+        if 'origin' in lun2del and lun2del['origin']['snapshot'] == tmpsnap:
+            self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                       lcfg.zfssa_project,
+                                       lun2del['origin']['share'],
+                                       lun2del['origin']['snapshot'])
+            return
+
+        self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
+                              project=lcfg.zfssa_project,
+                              lun=volume['name'])
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot with the given snapshot['name'] of the
+           snapshot['volume_name']
+        """
+        LOG.debug('zfssa.create_snapshot: snapshot=' + snapshot['name'])
+        lcfg = self.configuration
+        self.zfssa.create_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot.
+
+           :raises SnapshotIsBusy: when the snapshot still has clones.
+        """
+        LOG.debug('zfssa.delete_snapshot: snapshot=' + snapshot['name'])
+        lcfg = self.configuration
+        has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
+                                           lcfg.zfssa_project,
+                                           snapshot['volume_name'],
+                                           snapshot['name'])
+        if has_clones:
+            LOG.error('snapshot %s: has clones' % snapshot['name'])
+            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
+
+        self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot - clone a snapshot"""
+        LOG.debug('zfssa.create_volume_from_snapshot: volume=' +
+                  volume['name'])
+        LOG.debug('zfssa.create_volume_from_snapshot: snapshot=' +
+                  snapshot['name'])
+        if not self._verify_clone_size(snapshot, volume['size'] * SIZE_GB):
+            exception_msg = (_('Error verifying clone size on '
+                               'Volume clone: %(clone)s '
+                               'Size: %(size)d on'
+                               'Snapshot: %(snapshot)s')
+                             % {'clone': volume['name'],
+                                'size': volume['size'],
+                                'snapshot': snapshot['name']})
+            LOG.error(exception_msg)
+            raise exception.InvalidInput(reason=exception_msg)
+
+        lcfg = self.configuration
+        self.zfssa.clone_snapshot(lcfg.zfssa_pool,
+                                  lcfg.zfssa_project,
+                                  snapshot['volume_name'],
+                                  snapshot['name'],
+                                  volume['name'])
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group.
+
+           Leaves self._stats as None when pool usage is unavailable.
+        """
+        LOG.debug("Updating volume status")
+        self._stats = None
+        data = {}
+        data["volume_backend_name"] = self.__class__.__name__
+        data["vendor_name"] = 'Oracle'
+        data["driver_version"] = self.VERSION
+        data["storage_protocol"] = self.protocol
+
+        lcfg = self.configuration
+        (avail, used) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
+        if avail is None or used is None:
+            return
+        total = int(avail) + int(used)
+
+        if lcfg.zfssa_lun_sparse:
+            # Thin provisioning: capacity is not a hard limit.
+            data['total_capacity_gb'] = 'infinite'
+        else:
+            # Integer division of byte counts by SIZE_GB (2**30).
+            data['total_capacity_gb'] = total / SIZE_GB
+        data['free_capacity_gb'] = int(avail) / SIZE_GB
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        self._stats = data
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+           If 'refresh' is True, run update the stats first.
+
+           :returns: cached stats dict, or None if never refreshed
+               successfully.
+        """
+        if refresh:
+            self._update_volume_status()
+        return self._stats
+
+    def _export_volume(self, volume):
+        """Export the volume - set the initiatorgroup property.
+
+           Assigning the configured initiator group makes the LUN visible
+           to those initiators.
+        """
+        LOG.debug('_export_volume: volume name: %s' % volume['name'])
+        lcfg = self.configuration
+
+        self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                          lcfg.zfssa_project,
+                                          volume['name'],
+                                          lcfg.zfssa_initiator_group)
+        return self._get_provider_info(volume)
+
+    def create_export(self, context, volume):
+        """Driver entry point to get the  export info for a new volume.
+
+           :returns: provider info dict from _export_volume().
+        """
+        LOG.debug('create_export: volume name: %s' % volume['name'])
+        return self._export_volume(volume)
+
+    def remove_export(self, context, volume):
+        """Driver entry point to remove an export for a volume.
+
+           Clearing the initiatorgroup property ('') unexports the LUN.
+        """
+        LOG.debug('remove_export: volume name: %s' % volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                          lcfg.zfssa_project,
+                                          volume['name'],
+                                          '')
+
+    def ensure_export(self, context, volume):
+        """Driver entry point to get the export info for an existing volume.
+
+           :returns: provider info dict from _export_volume().
+        """
+        LOG.debug('ensure_export: volume name: %s' % volume['name'])
+        return self._export_volume(volume)
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        """Ensure the volume is exported, then delegate the image copy to
+           the base ISCSIDriver implementation.
+        """
+        self.ensure_export(context, volume)
+        super(ZFSSAISCSIDriver, self).copy_image_to_volume(
+            context, volume, image_service, image_id)
+
+    def extend_volume(self, volume, new_size):
+        """Driver entry point to extent volume size.
+
+           :param new_size: new size in GB; converted to bytes for the
+               appliance via SIZE_GB.
+        """
+        LOG.debug('extend_volume: volume name: %s' % volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_size(lcfg.zfssa_pool,
+                                lcfg.zfssa_project,
+                                volume['name'],
+                                new_size * SIZE_GB)
+
+    def _get_iscsi_properties(self, volume):
+        """Build iSCSI connection properties for a volume."""
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 volume['name'])
+        iqn = self.zfssa.get_target(self._get_target_alias())
+
+        # NOTE(review): 'volume_id' is set to the appliance LUN number,
+        # not the Cinder volume UUID, and no 'target_lun' key is returned
+        # -- confirm callers expect this shape.
+        return {'target_discovered': True,
+                'target_iqn': iqn,
+                'target_portal': lcfg.zfssa_target_portal,
+                'volume_id': lun['number'],
+                'access_mode': 'rw'}
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of the specified volume."""
+        zfssa_snapshot = {'volume_name': src_vref['name'],
+                          'name': 'tmp-snapshot-%s' % volume['id']}
+        self.create_snapshot(zfssa_snapshot)
+        try:
+            self.create_volume_from_snapshot(volume, zfssa_snapshot)
+        except exception.VolumeBackendAPIException:
+            LOG.error("Clone Volume '%s' failed from source volume '%s'"
+                      % (volume['name'], src_vref['name']))
+            # Cleanup snapshot
+            self.delete_snapshot(zfssa_snapshot)
+
+    def local_path(self, volume):
+        """Not implemented - volumes are remote iSCSI LUNs with no local
+           path."""
+        pass
+
+    def backup_volume(self, context, backup, backup_service):
+        """Not implemented - backup is not supported by this driver."""
+        pass
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Not implemented - restore is not supported by this driver."""
+        pass
+
+    def _verify_clone_size(self, snapshot, size):
+        """Check whether the clone size is the same as the parent volume
+
+           :param size: requested clone size in bytes; compared for exact
+               equality with the parent LUN's size.
+        """
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 snapshot['volume_name'])
+        return (lun['size'] == size)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/zfssarest.py	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,614 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+ZFS Storage Appliance Proxy
+"""
+import json
+import socket
+
+from cinder import exception
+from cinder.openstack.common import log
+
+from cinder.volume.drivers.zfssa import restclient
+
+LOG = log.getLogger(__name__)
+
+
+#pylint: disable=R0913
+#pylint: disable=R0904
+class ZFSSAApi(object):
+    """ZFSSA API proxy class"""
+    def __init__(self, host):
+        """Create a REST proxy for the appliance management interface
+           (HTTPS, port 215)."""
+        self.host = host
+        self.url = "https://" + self.host + ":215"
+        self.rclient = restclient.RestClientURL(self.url)
+
+    def __del__(self):
+        # Best-effort logout of the REST session on garbage collection.
+        if self.rclient and self.rclient.islogin():
+            self.rclient.logout()
+
+    def _is_pool_owned(self, pdata):
+        """returns True if the pool's owner is the
+           same as the host.
+
+           Compares the appliance's ASN and nodename (from the version
+           endpoint) against the pool's 'asn' and 'owner' fields, so that
+           a clustered peer's pool is not reported as local.
+        """
+        svc = '/api/system/v1/version'
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error getting version: '
+                               'svc: %(svc)s.'
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'svc': svc,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        vdata = json.loads(ret.data)
+        return vdata['version']['asn'] == pdata['pool']['asn'] and \
+            vdata['version']['nodename'] == pdata['pool']['owner']
+
+    def login(self, auth_str):
+        """Login to the appliance
+
+        :param auth_str: base64-encoded 'user:password' credentials.
+        """
+        if self.rclient:
+            self.rclient.login(auth_str)
+
+    def get_pool_stats(self, pool):
+        """Get space_available and used properties of a pool
+           returns (avail, used)
+
+           :raises InvalidVolume: when the pool lookup fails.
+           :raises InstanceNotFound: when the pool is not owned by this
+               host.  NOTE(review): an odd exception type for a pool
+               ownership failure - confirm intended.
+        """
+        svc = '/api/storage/v1/pools/' + pool
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Getting Pool Stats: '
+                               'Pool: %(pool)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'pool': pool,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.InvalidVolume(reason=exception_msg)
+
+        val = json.loads(ret.data)
+
+        if not self._is_pool_owned(val):
+            exception_msg = (_('Error Pool ownership: '
+                               'Pool %(pool)s is not owned '
+                               'by %(host)s.')
+                             % {'pool': pool,
+                                'host': self.host})
+            LOG.error(exception_msg)
+            raise exception.InstanceNotFound(instance_id=pool)
+
+        avail = val['pool']['usage']['available']
+        used = val['pool']['usage']['used']
+
+        return (avail, used)
+
+    def create_project(self, pool, project, compression=None, logbias=None):
+        """Create a project on a pool
+           Check first whether the pool exists.
+
+           Idempotent: an existing project (GET returns OK) is left
+           untouched.
+        """
+        self.verify_pool(pool)
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            # Project does not exist yet - create it.
+            svc = '/api/storage/v1/pools/' + pool + '/projects'
+            arg = {
+                'name': project
+            }
+            if compression and compression != '':
+                arg.update({'compression': compression})
+            if logbias and logbias != '':
+                arg.update({'logbias': logbias})
+
+            ret = self.rclient.post(svc, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating Project: '
+                                   '%(project)s on '
+                                   'Pool: %(pool)s '
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'project': project,
+                                    'pool': pool,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def create_initiator(self, initiator, alias, chapuser=None,
+                         chapsecret=None):
+        """Create an iSCSI initiator"""
+
+        svc = '/api/san/v1/iscsi/initiators/alias=' + alias
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            svc = '/api/san/v1/iscsi/initiators'
+            arg = {
+                'initiator': initiator,
+                'alias': alias
+            }
+            if chapuser and chapuser != '' and chapsecret and chapsecret != '':
+                arg.update({'chapuser': chapuser,
+                            'chapsecret': chapsecret})
+
+            ret = self.rclient.post(svc, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating Initator: '
+                                   '%(initiator)s on '
+                                   'Alias: %(alias)s '
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'initiator': initiator,
+                                    'alias': alias,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def add_to_initiatorgroup(self, initiator, initiatorgroup):
+        """Add an iSCSI initiator to initiatorgroup"""
+        svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            svc = '/api/san/v1/iscsi/initiator-groups'
+            arg = {
+                'name': initiatorgroup,
+                'initiators': [initiator]
+            }
+            ret = self.rclient.post(svc, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Adding Initator: '
+                                   '%(initiator)s on group'
+                                   'InitiatorGroup: %(initiatorgroup)s '
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'initiator': initiator,
+                                    'initiatorgroup': initiatorgroup,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+        else:
+            svc = '/api/san/v1/iscsi/initiator-groups/' + initiatorgroup
+            arg = {
+                'initiators': [initiator]
+            }
+            ret = self.rclient.put(svc, arg)
+            if ret.status != restclient.Status.ACCEPTED:
+                exception_msg = (_('Error Adding Initator: '
+                                   '%(initiator)s on group'
+                                   'InitiatorGroup: %(initiatorgroup)s '
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'initiator': initiator,
+                                    'initiatorgroup': initiatorgroup,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def create_target(self, alias, interfaces=None, tchapuser=None,
+                      tchapsecret=None):
+        """Create an iSCSI target
+           interfaces: an array with network interfaces
+           tchapuser, tchapsecret: target's chapuser and chapsecret
+           returns target iqn
+
+           Idempotent: if a target with this alias exists, its IQN is
+           returned from the GET response instead of creating a new one.
+        """
+        svc = '/api/san/v1/iscsi/targets/alias=' + alias
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            svc = '/api/san/v1/iscsi/targets'
+            arg = {
+                'alias': alias
+            }
+
+            if tchapuser and tchapuser != '' and tchapsecret and \
+               tchapsecret != '':
+                arg.update({'targetchapuser': tchapuser,
+                            'targetchapsecret': tchapsecret,
+                            'auth': 'chap'})
+
+            if interfaces is not None and len(interfaces) > 0:
+                arg.update({'interfaces': interfaces})
+
+            ret = self.rclient.post(svc, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating Target: '
+                                   '%(alias)s'
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'alias': alias,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        # 'ret' is either the GET (existing) or POST (created) response;
+        # both carry the target object.
+        val = json.loads(ret.data)
+        return val['target']['iqn']
+
+    def get_target(self, alias):
+        """Get an iSCSI target iqn
+
+           :raises VolumeBackendAPIException: when the alias lookup fails.
+        """
+        svc = '/api/san/v1/iscsi/targets/alias=' + alias
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Getting Target: '
+                               '%(alias)s'
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s .')
+                             % {'alias': alias,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        val = json.loads(ret.data)
+        return val['target']['iqn']
+
+    def add_to_targetgroup(self, iqn, targetgroup):
+        """Add an iSCSI target to targetgroup
+
+           Creates the group containing the target when it does not
+           exist; otherwise appends the target via PUT.
+        """
+        svc = '/api/san/v1/iscsi/target-groups/' + targetgroup
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            # Group does not exist yet - create it with the target.
+            svccrt = '/api/san/v1/iscsi/target-groups'
+            arg = {
+                'name': targetgroup,
+                'targets': [iqn]
+            }
+
+            ret = self.rclient.post(svccrt, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating TargetGroup: '
+                                   '%(targetgroup)s with'
+                                   'IQN: %(iqn)s'
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s .')
+                                 % {'targetgroup': targetgroup,
+                                    'iqn': iqn,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+            return
+
+        # Group exists - append the target to it.
+        arg = {
+            'targets': [iqn]
+        }
+
+        ret = self.rclient.put(svc, arg)
+        if ret.status != restclient.Status.ACCEPTED:
+            exception_msg = (_('Error Adding to TargetGroup: '
+                               '%(targetgroup)s with'
+                               'IQN: %(iqn)s'
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'targetgroup': targetgroup,
+                                'iqn': iqn,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def verify_pool(self, pool):
+        """Checks whether pool exists
+
+           :raises VolumeBackendAPIException: when the pool lookup fails.
+        """
+        svc = '/api/storage/v1/pools/' + pool
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Verifying Pool: '
+                               '%(pool)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'pool': pool,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def verify_project(self, pool, project):
+        """Checks whether project exists
+
+           :raises VolumeBackendAPIException: when the project lookup
+               fails.
+        """
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Verifying '
+                               'Project: %(project)s on '
+                               'Pool: %(pool)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'project': project,
+                                'pool': pool,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def verify_initiator(self, iqn):
+        """Check whether initiator iqn exists"""
+        svc = '/api/san/v1/iscsi/initiators/' + iqn
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Verifying '
+                               'Initiator: %(iqn)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'initiator': iqn,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def verify_target(self, alias):
+        """Check whether target alias exists."""
+        svc = '/api/san/v1/iscsi/targets/alias=' + alias
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Verifying '
+                               'Target: %(alias)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'alias': alias,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def create_lun(self, pool, project, lun, volsize, targetgroup,
+                   volblocksize='8k', sparse=False, compression=None,
+                   logbias=None):
+        """Create a LUN
+           required - pool, project, lun, volsize, targetgroup.
+           optional - volblocksize, sparse, compression, logbias
+        """
+
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + project + \
+              '/luns/' + lun
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+                  project + '/luns'
+            arg = {
+                'name': lun,
+                'volsize': volsize,
+                'targetgroup':  targetgroup,
+                'initiatorgroup': 'com.sun.ms.vss.hg.maskAll',
+                'volblocksize': volblocksize,
+                'sparse': sparse
+            }
+            if compression and compression != '':
+                arg.update({'compression': compression})
+            if logbias and logbias != '':
+                arg.update({'logbias': logbias})
+
+            ret = self.rclient.post(svc, arg)
+            if ret.status != restclient.Status.CREATED:
+                exception_msg = (_('Error Creating '
+                                   'Volume: %(lun)s '
+                                   'Size: %(size)s '
+                                   'Return code: %(ret.status)d '
+                                   'Message: %(ret.data)s.')
+                                 % {'lun': lun,
+                                    'size': volsize,
+                                    'ret.status': ret.status,
+                                    'ret.data': ret.data})
+                LOG.error(exception_msg)
+                raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def get_lun(self, pool, project, lun):
+        """return iscsi lun properties"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + "/luns/" + lun
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Getting '
+                               'Volume: %(lun)s on '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        val = json.loads(ret.data)
+        ret = {
+            'guid': val['lun']['lunguid'],
+            'number': val['lun']['assignednumber'],
+            'initiatorgroup': val['lun']['initiatorgroup'],
+            'size': val['lun']['volsize']
+        }
+        if 'origin' in val['lun']:
+            ret.update({'origin': val['lun']['origin']})
+
+        return ret
+
+    def set_lun_initiatorgroup(self, pool, project, lun, initiatorgroup):
+        """Set the initiatorgroup property of a LUN"""
+        if initiatorgroup == '':
+            initiatorgroup = 'com.sun.ms.vss.hg.maskAll'
+
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun
+        arg = {
+            'initiatorgroup': initiatorgroup
+        }
+
+        ret = self.rclient.put(svc, arg)
+        if ret.status != restclient.Status.ACCEPTED:
+            exception_msg = (_('Error Setting '
+                               'Volume: %(lun)s to '
+                               'InitiatorGroup: %(initiatorgroup)s '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'lun': lun,
+                                'initiatorgroup': initiatorgroup,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+
+    def delete_lun(self, pool, project, lun):
+        """delete iscsi lun"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun
+
+        ret = self.rclient.delete(svc)
+        if ret.status != restclient.Status.NO_CONTENT:
+            exception_msg = (_('Error Deleting '
+                               'Volume: %(lun)s to '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+
+    def create_snapshot(self, pool, project, lun, snapshot):
+        """create snapshot"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun + '/snapshots'
+        arg = {
+            'name': snapshot
+        }
+
+        ret = self.rclient.post(svc, arg)
+        if ret.status != restclient.Status.CREATED:
+            exception_msg = (_('Error Creating '
+                               'Snapshot: %(snapshot)s on'
+                               'Volume: %(lun)s to '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'snapshot': snapshot,
+                                'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def delete_snapshot(self, pool, project, lun, snapshot):
+        """delete snapshot"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+              project + '/luns/' + lun + '/snapshots/' + snapshot
+
+        ret = self.rclient.delete(svc)
+        if ret.status != restclient.Status.NO_CONTENT:
+            exception_msg = (_('Error Deleting '
+                               'Snapshot: %(snapshot)s on '
+                               'Volume: %(lun)s to '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'snapshot': snapshot,
+                                'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def clone_snapshot(self, pool, project, lun, snapshot, clone):
+        """clone snapshot"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun + '/snapshots/' + snapshot + '/clone'
+        arg = {
+            'project': project,
+            'share': clone
+        }
+
+        ret = self.rclient.put(svc, arg)
+        if ret.status != restclient.Status.CREATED:
+            exception_msg = (_('Error Cloning '
+                               'Snapshot: %(snapshot)s on '
+                               'Volume: %(lun)s of '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'snapshot': snapshot,
+                                'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def set_lun_size(self, pool, project, lun, size):
+        """increase lun size capacity"""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun
+        arg = {
+            'volsize': size
+        }
+
+        ret = self.rclient.put(svc, arg)
+        if ret.status != restclient.Status.ACCEPTED:
+            exception_msg = (_('Error Setting size on '
+                               'Size: %(size)s on '
+                               'Volume: %(lun)s of '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'size': size,
+                                'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+    def has_clones(self, pool, project, lun, snapshot):
+        """Checks whether snapshot has clones or not."""
+        svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
+            project + '/luns/' + lun + '/snapshots/' + snapshot
+
+        ret = self.rclient.get(svc)
+        if ret.status != restclient.Status.OK:
+            exception_msg = (_('Error Getting '
+                               'Snapshot: %(snapshot)s on '
+                               'Volume: %(lun)s to '
+                               'Pool: %(pool)s '
+                               'Project: %(project)s  '
+                               'Return code: %(ret.status)d '
+                               'Message: %(ret.data)s.')
+                             % {'snapshot': snapshot,
+                                'lun': lun,
+                                'pool': pool,
+                                'project': project,
+                                'ret.status': ret.status,
+                                'ret.data': ret.data})
+            LOG.error(exception_msg)
+            raise exception.VolumeBackendAPIException(data=exception_msg)
+
+        val = json.loads(ret.data)
+        return (val['snapshot']['numclones'] != 0)
--- a/components/openstack/cinder/patches/01-noamqplib.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-In-house patch to remove amqplib from Cinder's requirements files as
-an alternate implementation is used on Solaris.
-
---- cinder-2013.1.4/cinder.egg-info/requires.txt.orig 2013-11-22 07:46:37.051978903 -0700
-+++ cinder-2013.1.4/cinder.egg-info/requires.txt        2013-11-22 07:46:45.304481983 -0700
-@@ -1,5 +1,4 @@
- SQLAlchemy>=0.7.3,<=0.7.9
--amqplib>=0.6.1
- anyjson>=0.2.4
- eventlet>=0.9.17
- kombu>=1.0.4
-
---- cinder-2013.1.4/tools/pip-requires.orig       2013-11-22 07:48:29.381781247 -0700
-+++ cinder-2013.1.4/tools/pip-requires        2013-11-22 07:48:34.137850426 -0700
-@@ -1,5 +1,4 @@
- SQLAlchemy>=0.7.3,<=0.7.9
--amqplib>=0.6.1
- anyjson>=0.2.4
- argparse
- eventlet>=0.9.17
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/01-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,73 @@
+In-house patch to remove unnecessary dependencies from Cinder's
+requirements files. The specific reasons are as follows:
+
+amqplib		No longer applicable
+		(upstream commit e609183a39fef781aec45638c3bea4a7a6981412)
+
+argparse	No longer applicable
+
+lockfile	No longer applicable
+		(upstream commit a497b4a214b89cb2b387f227c208cfdae62ad6b1)
+
+netaddr		Not applicable to Solaris (VMware specific)
+
+paramiko	Not applicable to Solaris (various drivers specific)
+
+suds		Not applicable to Solaris (VMware specific)
+
+wsgiref		No longer applicable
+
+--- cinder-2013.2.3/cinder.egg-info/requires.txt.orig	2014-04-03 11:45:40.000000000 -0700
++++ cinder-2013.2.3/cinder.egg-info/requires.txt	2014-05-24 21:38:09.097874174 -0700
+@@ -1,16 +1,12 @@
+ pbr>=0.5.21,<1.0
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+ Babel>=1.3
+ eventlet>=0.13.0
+ greenlet>=0.3.2
+ iso8601>=0.1.8
+ kombu>=2.4.8
+-lockfile>=0.8
+ lxml>=2.3
+-netaddr
+ oslo.config>=1.2.0
+-paramiko>=1.8.0
+ Paste
+ PasteDeploy>=1.5.0
+ python-glanceclient>=0.9.0
+@@ -22,6 +18,4 @@
+ SQLAlchemy>=0.7.8,<=0.7.99
+ sqlalchemy-migrate>=0.7.2
+ stevedore>=0.10
+-suds>=0.4
+ WebOb>=1.2.3,<1.3
+-wsgiref>=0.1.2
+\ No newline at end of file
+--- cinder-2013.2.3/requirements.txt.orig	2014-04-03 11:42:36.000000000 -0700
++++ cinder-2013.2.3/requirements.txt	2014-05-24 21:38:36.989936528 -0700
+@@ -1,17 +1,12 @@
+ pbr>=0.5.21,<1.0
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+-argparse
+ Babel>=1.3
+ eventlet>=0.13.0
+ greenlet>=0.3.2
+ iso8601>=0.1.8
+ kombu>=2.4.8
+-lockfile>=0.8
+ lxml>=2.3
+-netaddr
+ oslo.config>=1.2.0
+-paramiko>=1.8.0
+ Paste
+ PasteDeploy>=1.5.0
+ python-glanceclient>=0.9.0
+@@ -23,6 +18,4 @@
+ SQLAlchemy>=0.7.8,<=0.7.99
+ sqlalchemy-migrate>=0.7.2
+ stevedore>=0.10
+-suds>=0.4
+ WebOb>=1.2.3,<1.3
+-wsgiref>=0.1.2
--- a/components/openstack/cinder/patches/02-noparamiko.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/patches/02-noparamiko.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -1,56 +1,53 @@
 In-house patch for the temporary removal of Paramiko dependency in
 Cinder.  This patch is Solaris-specific and not suitable for upstream
 
---- cinder-2013.1.4/cinder/utils.py.orig	2013-10-17 11:21:37.000000000 -0700
-+++ cinder-2013.1.4/cinder/utils.py	2014-03-08 11:55:09.044072461 -0800
-@@ -27,7 +27,6 @@
- import inspect
- import itertools
- import os
+--- cinder-2013.2.3/cinder/utils.py.orig	2014-04-03 11:42:36.000000000 -0700
++++ cinder-2013.2.3/cinder/utils.py	2014-04-09 00:14:56.141352333 -0700
+@@ -43,7 +43,6 @@
+ from eventlet import greenthread
+ from eventlet import pools
+ from oslo.config import cfg
 -import paramiko
- import pyclbr
- import random
- import re
-@@ -233,135 +232,6 @@
-     return out, err
+ 
+ from cinder.brick.initiator import connector
+ from cinder import exception
+@@ -142,125 +141,6 @@
+     return processutils.execute(*cmd, **kwargs)
  
  
--def ssh_execute(ssh, cmd, process_input=None,
--                addl_env=None, check_exit_code=True):
--    LOG.debug(_('Running cmd (SSH): %s'), cmd)
--    if addl_env:
--        raise exception.Error(_('Environment not supported over SSH'))
+-def check_ssh_injection(cmd_list):
+-    ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
+-                             '<']
 -
--    if process_input:
--        # This is (probably) fixable if we need it...
--        raise exception.Error(_('process_input not supported over SSH'))
--
--    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
--    channel = stdout_stream.channel
--
--    #stdin.write('process_input would go here')
--    #stdin.flush()
+-    # Check whether injection attacks exist
+-    for arg in cmd_list:
+-        arg = arg.strip()
 -
--    # NOTE(justinsb): This seems suspicious...
--    # ...other SSH clients have buffering issues with this approach
--    stdout = stdout_stream.read()
--    stderr = stderr_stream.read()
--    stdin_stream.close()
--    stdout_stream.close()
--    stderr_stream.close()
--
--    exit_status = channel.recv_exit_status()
+-        # Check for matching quotes on the ends
+-        is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
+-        if is_quoted:
+-            # Check for unescaped quotes within the quoted argument
+-            quoted = is_quoted.group('quoted')
+-            if quoted:
+-                if (re.match('[\'"]', quoted) or
+-                        re.search('[^\\\\][\'"]', quoted)):
+-                    raise exception.SSHInjectionThreat(command=str(cmd_list))
+-        else:
+-            # We only allow spaces within quoted arguments, and that
+-            # is the only special character allowed within quotes
+-            if len(arg.split()) > 1:
+-                raise exception.SSHInjectionThreat(command=str(cmd_list))
 -
--    # exit_status == -1 if no exit code was returned
--    if exit_status != -1:
--        LOG.debug(_('Result was %s') % exit_status)
--        if check_exit_code and exit_status != 0:
--            raise exception.ProcessExecutionError(exit_code=exit_status,
--                                                  stdout=stdout,
--                                                  stderr=stderr,
--                                                  cmd=cmd)
--    channel.close()
--    return (stdout, stderr)
+-        # Second, check whether danger character in command. So the shell
+-        # special operator must be a single argument.
+-        for c in ssh_injection_pattern:
+-            if arg == c:
+-                continue
+-
+-            result = arg.find(c)
+-            if not result == -1:
+-                if result == 0 or not arg[result - 1] == '\\':
+-                    raise exception.SSHInjectionThreat(command=cmd_list)
 -
 -
 -def create_channel(client, width, height):
@@ -119,27 +116,20 @@
 -        before returning it. For dead connections create and return a new
 -        connection.
 -        """
--        if self.free_items:
--            conn = self.free_items.popleft()
--            if conn:
--                if conn.get_transport().is_active():
--                    return conn
--                else:
--                    conn.close()
--            return self.create()
--        if self.current_size < self.max_size:
--            created = self.create()
--            self.current_size += 1
--            return created
--        return self.channel.get()
+-        conn = super(SSHPool, self).get()
+-        if conn:
+-            if conn.get_transport().is_active():
+-                return conn
+-            else:
+-                conn.close()
+-        return self.create()
 -
 -    def remove(self, ssh):
--        """Close an ssh client and remove it if in free_items."""
+-        """Close an ssh client and remove it from free_items."""
 -        ssh.close()
+-        ssh = None
 -        if ssh in self.free_items:
 -            self.free_items.pop(ssh)
--        ssh = None
--
 -        if self.current_size > 0:
 -            self.current_size -= 1
 -
@@ -147,23 +137,4 @@
  def cinderdir():
      import cinder
      return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
---- cinder-2013.1.4/cinder.egg-info/requires.txt.orig	Wed Feb 26 10:56:39 2014
-+++ cinder-2013.1.4/cinder.egg-info/requires.txt	Wed Feb 26 10:58:01 2014
-@@ -14,7 +14,6 @@
- sqlalchemy-migrate>=0.7.2
- stevedore>=0.8.0
- suds>=0.4
--paramiko
- Babel>=0.9.6
- iso8601>=0.1.4
- setuptools_git>=0.4
---- cinder-2013.1.4/tools/pip-requires.orig	Wed Feb 26 10:56:38 2014
-+++ cinder-2013.1.4/tools/pip-requires	Wed Feb 26 10:58:25 2014
-@@ -14,7 +14,6 @@
- sqlalchemy-migrate>=0.7.2
- stevedore>=0.8.0
- suds>=0.4
--paramiko
- Babel>=0.9.6
- iso8601>=0.1.4
- setuptools_git>=0.4
+
--- a/components/openstack/cinder/patches/03-emc_smis_iscsi.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/patches/03-emc_smis_iscsi.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -1,17 +1,18 @@
 In-house patch to adopt EMC driver to use Solaris' iscsiadm(1M) rather
 than that from Linux.  Patch has not yet been submitted upstream.
 
---- cinder-2013.1.4/cinder/volume/drivers/emc/emc_smis_iscsi.py.~1~	2013-10-17 11:21:37.000000000 -0700
-+++ cinder-2013.1.4/cinder/volume/drivers/emc/emc_smis_iscsi.py	2014-03-12 17:07:06.500560732 -0700
-@@ -21,6 +21,7 @@
+--- cinder-2013.2.3/cinder/volume/drivers/emc/emc_smis_iscsi.py.~1~	2014-04-03 11:42:36.000000000 -0700
++++ cinder-2013.2.3/cinder/volume/drivers/emc/emc_smis_iscsi.py	2014-04-09 01:30:22.894010750 -0700
+@@ -21,6 +21,8 @@
  """
  
- import os
+ 
 +import sys
- import time
- 
++
  from cinder import exception
-@@ -118,13 +119,41 @@
+ from cinder.openstack.common import log as logging
+ from cinder.volume import driver
+@@ -114,13 +116,41 @@
  
          LOG.warn(_("ISCSI provider_location not stored, using discovery"))
  
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/04-launchpad-1236459.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,36 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+still has not yet been released for Havana.  It has been modified to
+apply cleanly into our current Havana implementation
+
+commit 5c321d758c9718d7dde555316ac4fbd2f7acf424
+Author: Dan Prince <[email protected]>
+Date:   Mon Oct 7 12:41:28 2013 -0400
+
+    Drop conf_key_mgr warning message!
+    
+    By default ConfKeyManager logs tons of WARNING message stating
+    that it isn't production ready...
+    
+    Given that it is currently the only Cinder key manager option
+    which can be used/selected I don't think repeatedly logging
+    warnings is helpful. Lets just drop the warning message
+    for now and when a good "production ready" cinder key manager
+    implementation is implemented perhaps we can re-add a warning to
+    this class (hopefully making the production ready impl the default).
+    
+    Change-Id: Id1fdddc20a963f9fa4749ad57f355cd83d0e14e3
+    Closes-Bug: #1236459
+
+diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py
+index 7b53e0c..1c9ff2d 100644
+--- a/cinder/keymgr/conf_key_mgr.py
++++ b/cinder/keymgr/conf_key_mgr.py
+@@ -64,8 +64,6 @@ class ConfKeyManager(key_mgr.KeyManager):
+     """
+ 
+     def __init__(self):
+-        LOG.warn(_('This key manager is insecure and is not recommended for '
+-                   'production deployments'))
+         super(ConfKeyManager, self).__init__()
+ 
+         self.key_id = '00000000-0000-0000-0000-000000000000'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/05-launchpad-1252512.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,45 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+still has not yet been released for Havana.  It has been modified to
+apply cleanly into our current Havana implementation
+
+commit 3badd1ffbe8796d27b924f882ed05b8d3f4f0f11
+Author: Dan Prince <[email protected]>
+Date:   Tue Dec 10 14:51:32 2013 -0500
+
+    Lazy log the fixed_key warnings
+    
+    Cinder currently spews quite a bit of WARNINGs like this with
+    the default settings:
+    
+    WARNING cinder.keymgr.conf_key_mgr [-] config option keymgr.fixed_key
+    has not been defined: some operations may fail unexpectedly
+    
+    Many users may not be using features that require key manager...
+    so logging the WARNING message a bit more lazily seems reasonable.
+    
+    Change-Id: I5ab72285c7d4bc2ec3196dd94fabf977b3a4ebaf
+    Closes-bug: 1252512
+
+diff --git a/cinder/keymgr/conf_key_mgr.py b/cinder/keymgr/conf_key_mgr.py
+index f000c44..ee4fbc3 100644
+--- cinder-2013.2.3/cinder/keymgr/conf_key_mgr.py.~2~	2014-05-29 11:02:58.846311952 -0700
++++ cinder-2013.2.3/cinder/keymgr/conf_key_mgr.py	2014-05-29 11:02:58.855843284 -0700
+@@ -67,9 +67,6 @@
+         super(ConfKeyManager, self).__init__()
+ 
+         self.key_id = '00000000-0000-0000-0000-000000000000'
+-        if CONF.keymgr.fixed_key is None:
+-            LOG.warn(_('config option keymgr.fixed_key has not been defined: '
+-                       'some operations may fail unexpectedly'))
+ 
+     def _generate_key(self, **kwargs):
+         _hex = self._generate_hex_key(**kwargs)
+@@ -78,6 +75,8 @@
+ 
+     def _generate_hex_key(self, **kwargs):
+         if CONF.keymgr.fixed_key is None:
++            LOG.warn(_('config option keymgr.fixed_key has not been defined: '
++                       'some operations may fail unexpectedly'))
+             raise ValueError(_('keymgr.fixed_key not defined'))
+         return CONF.keymgr.fixed_key
+ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/06-launchpad-1233763.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,181 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+still has not yet been released for Havana.
+
+From c1fca7affc22dc756f07f604f03c2343eeac9d15 Mon Sep 17 00:00:00 2001
+From: "Jay S. Bryant" <[email protected]>
+Date: Fri, 15 Nov 2013 19:01:58 -0600
+Subject: [PATCH] Add default quota class into DB during migration
+
+For some time now use_default_quota_class has been the
+default setting for Cinder.  Cinder, however, has not been putting
+any defaults for the default quota class into the database.  This
+resulted in any command that queried for the default quotas to cause
+the message "Deprecated: Default quota for resource: <resource> is set
+by the default quota flag: <quota flag>, it is now deprecated.  Please use
+the default quota class for default quota."
+
+This commit resolves this issue by setting the default value for volumes,
+snapshots and gigabytes in the quota_class table at migration time if there
+is not already a class_name of 'default' in the quota_classes table.
+
+Unit tests are included with this commit.
+
+Closes-bug 1233763
+Change-Id: I457ed8a9b78492eda22e31dfc198b2ee051d3ece
+(cherry picked from commit 7d2641688454d9064b691e4aab4b5d8b14d75305)
+---
+ .../versions/021_add_default_quota_class.py        |   85 ++++++++++++++++++++
+ cinder/tests/test_migrations.py                    |   31 +++++++
+ cinder/tests/test_quota.py                         |    5 ++
+ 3 files changed, 121 insertions(+)
+ create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
+
+diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
+new file mode 100644
+index 0000000..5c06e9c
+--- /dev/null
++++ b/cinder/db/sqlalchemy/migrate_repo/versions/021_add_default_quota_class.py
+@@ -0,0 +1,85 @@
++#    Copyright 2013 IBM Corp.
++#
++#    Licensed under the Apache License, Version 2.0 (the "License"); you may
++#    not use this file except in compliance with the License. You may obtain
++#    a copy of the License at
++#
++#         http://www.apache.org/licenses/LICENSE-2.0
++#
++#    Unless required by applicable law or agreed to in writing, software
++#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++#    License for the specific language governing permissions and limitations
++#    under the License.
++
++import datetime
++
++from cinder.openstack.common import log as logging
++from oslo.config import cfg
++from sqlalchemy import MetaData, Table
++
++# Get default values via config.  The defaults will either
++# come from the default values set in the quota option
++# configuration or via cinder.conf if the user has configured
++# default values for quotas there.
++CONF = cfg.CONF
++CONF.import_opt('quota_volumes', 'cinder.quota')
++CONF.import_opt('quota_snapshots', 'cinder.quota')
++CONF.import_opt('quota_gigabytes', 'cinder.quota')
++LOG = logging.getLogger(__name__)
++
++CLASS_NAME = 'default'
++CREATED_AT = datetime.datetime.now()
++
++
++def upgrade(migrate_engine):
++    """Add default quota class data into DB."""
++    meta = MetaData()
++    meta.bind = migrate_engine
++
++    quota_classes = Table('quota_classes', meta, autoload=True)
++
++    rows = quota_classes.count().\
++        where(quota_classes.c.class_name == 'default').execute().scalar()
++
++    # Do not add entries if there are already 'default' entries.  We don't
++    # want to write over something the user added.
++    if rows:
++        LOG.info(_("Found existing 'default' entries in the quota_classes "
++                   "table.  Skipping insertion of default values."))
++        return
++
++    try:
++        #Set default volumes
++        qci = quota_classes.insert()
++        qci.execute({'created_at': CREATED_AT,
++                     'class_name': CLASS_NAME,
++                     'resource': 'volumes',
++                     'hard_limit': CONF.quota_volumes,
++                     'deleted': False, })
++        #Set default snapshots
++        qci.execute({'created_at': CREATED_AT,
++                     'class_name': CLASS_NAME,
++                     'resource': 'snapshots',
++                     'hard_limit': CONF.quota_snapshots,
++                     'deleted': False, })
++        #Set default gigabytes
++        qci.execute({'created_at': CREATED_AT,
++                     'class_name': CLASS_NAME,
++                     'resource': 'gigabytes',
++                     'hard_limit': CONF.quota_gigabytes,
++                     'deleted': False, })
++        LOG.info(_("Added default quota class data into the DB."))
++    except Exception:
++        LOG.error(_("Default quota class data not inserted into the DB."))
++        raise
++
++
++def downgrade(migrate_engine):
++    """Don't delete the 'default' entries at downgrade time.
++
++    We don't know if the user had default entries when we started.
++    If they did, we wouldn't want to remove them.  So, the safest
++    thing to do is just leave the 'default' entries at downgrade time.
++    """
++    pass
+diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py
+index 257c3e8..2ef5bff 100644
+--- a/cinder/tests/test_migrations.py
++++ b/cinder/tests/test_migrations.py
+@@ -1002,3 +1002,34 @@ class TestMigrations(test.TestCase):
+ 
+             self.assertFalse(engine.dialect.has_table(engine.connect(),
+                                                       "volume_admin_metadata"))
++
++    def test_migration_021(self):
++        """Test adding default data for quota classes works correctly."""
++        for (key, engine) in self.engines.items():
++            migration_api.version_control(engine,
++                                          TestMigrations.REPOSITORY,
++                                          migration.INIT_VERSION)
++            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)
++            metadata = sqlalchemy.schema.MetaData()
++            metadata.bind = engine
++
++            migration_api.upgrade(engine, TestMigrations.REPOSITORY, 21)
++
++            quota_class_metadata = sqlalchemy.Table('quota_classes',
++                                                    metadata,
++                                                    autoload=True)
++
++            num_defaults = quota_class_metadata.count().\
++                where(quota_class_metadata.c.class_name == 'default').\
++                execute().scalar()
++
++            self.assertEqual(3, num_defaults)
++
++            migration_api.downgrade(engine, TestMigrations.REPOSITORY, 20)
++
++            # Defaults should not be deleted during downgrade
++            num_defaults = quota_class_metadata.count().\
++                where(quota_class_metadata.c.class_name == 'default').\
++                execute().scalar()
++
++            self.assertEqual(3, num_defaults)
+diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py
+index 99b2ed2..ae79b39 100644
+--- a/cinder/tests/test_quota.py
++++ b/cinder/tests/test_quota.py
+@@ -62,6 +62,11 @@ class QuotaIntegrationTestCase(test.TestCase):
+ 
+         self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+ 
++        # Destroy the 'default' quota_class in the database to avoid
++        # conflicts with the test cases here that are setting up their own
++        # defaults.
++        db.quota_class_destroy_all_by_name(self.context, 'default')
++
+     def tearDown(self):
+         db.volume_type_destroy(context.get_admin_context(),
+                                self.volume_type['id'])
+-- 
+1.7.9.2
+
--- a/components/openstack/cinder/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/cinder/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,10 +1,10 @@
 library/python/eventlet-26
 library/python/ipython-26
 library/python/oslo.config-26
-library/python/sqlalchemy-26
 library/python/sqlalchemy-migrate-26
 runtime/python-26
 system/core-os
 system/file-system/zfs
+system/storage/fc-utilities
 system/storage/iscsi/iscsi-target
 system/storage/scsi-target-mode-framework
--- a/components/openstack/common/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/common/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -20,12 +20,12 @@
 #
 
 #
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		openstack
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 IPS_COMPONENT_VERSION=  0.$(COMPONENT_VERSION)
 
--- a/components/openstack/common/openstack.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/common/openstack.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,7 +28,7 @@
 set name=pkg.summary value=OpenStack
 set name=pkg.description \
     value="OpenStack is a cloud operating system that controls large pools of compute, storage, and networking resources throughout a data center, all managed through a dashboard that gives administrators control while empowering their users to provision resources through a web interface."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=info.classification \
     value="org.opensolaris.category.2008:Meta Packages/Group Packages" \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -51,5 +51,4 @@
 depend type=group fmri=library/python/keystoneclient
 depend type=group fmri=library/python/neutronclient
 depend type=group fmri=library/python/novaclient
-depend type=group fmri=library/python/quantumclient
 depend type=group fmri=library/python/swiftclient
--- a/components/openstack/glance/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		glance
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:dfb8070a12bbf5761db1c55d21db4772fe81ed903d57cf991618e5224cbdcd67
+    sha256:8766f8d198ec513c46519f1c44f99a4845ba3c04e7b7c41893cb3d5a7c2a9a28
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/glance
--- a/components/openstack/glance/files/glance-api.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-api.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -13,11 +13,14 @@
 
 # List of which store classes and store class locations are
 # currently known to glance at startup.
+# Existing but disabled stores:
+#      glance.store.rbd.Store,
+#      glance.store.s3.Store,
+#      glance.store.swift.Store,
+#      glance.store.sheepdog.Store,
+#      glance.store.cinder.Store,
 #known_stores = glance.store.filesystem.Store,
-#               glance.store.http.Store,
-#               glance.store.rbd.Store,
-#               glance.store.s3.Store,
-#               glance.store.swift.Store,
+#               glance.store.http.Store
 
 
 # Maximum image size (in bytes) that may be uploaded through the
@@ -43,6 +46,10 @@
 # Not supported on OS X.
 #tcp_keepidle = 600
 
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
 # SQLAlchemy connection string for the reference implementation
 # registry server. Any valid SQLAlchemy connection string is fine.
 # See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
@@ -84,6 +91,38 @@
 # The default value is false.
 #show_image_direct_url = False
 
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,uar
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,zfs
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+#
+# Property Protections config file
+# This file contains the rules for property protections and the roles
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then an
+# HTTPInternalServerError will be thrown.
+#property_protection_file =
+
+# Set a system wide quota for every user.  This value is the total number
+# of bytes that a user can use across all storage systems.  A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
 # ================= Syslog Options ============================
 
 # Send logs to syslog (/dev/log) instead of to file specified
@@ -153,6 +192,11 @@
 # Default: False
 #db_auto_create = False
 
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
 # ============ Notification System Options =====================
 
 # Notifications can be sent when images are create, updated or deleted.
@@ -177,7 +221,7 @@
 # the defaults)
 qpid_notification_exchange = glance
 qpid_notification_topic = notifications
-qpid_host = localhost
+qpid_hostname = localhost
 qpid_port = 5672
 qpid_username =
 qpid_password =
@@ -198,6 +242,12 @@
 # writes image data to
 filesystem_store_datadir = /var/lib/glance/images/
 
+# A path to a JSON file that contains metadata describing the storage
+# system.  When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
 # ============ Swift Store Options =============================
 
 # Version of the authentication service to use
@@ -266,6 +316,12 @@
 # is only necessary if the tenant has multiple swift endpoints.
 #swift_store_region =
 
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
 # ============ S3 Store Options =============================
 
 # Address where the S3 authentication service lives
@@ -320,6 +376,40 @@
 # For best performance, this should be a power of two
 rbd_store_chunk_size = 8
 
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
 # ============ Delayed Delete Options =============================
 
 # Turn on/off delayed delete
@@ -338,12 +428,12 @@
 image_cache_dir = /var/lib/glance/image-cache/
 
 [keystone_authtoken]
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
 admin_tenant_name = %SERVICE_TENANT_NAME%
 admin_user = %SERVICE_USER%
 admin_password = %SERVICE_PASSWORD%
+signing_dir = /var/lib/glance/keystone-signing
 
 [paste_deploy]
 # Name of the paste configuration file that defines the available pipelines
@@ -353,4 +443,4 @@
 # service name removed. For example, if your paste section name is
 # [pipeline:glance-api-keystone], you would configure the flavor below
 # as 'keystone'.
-#flavor=
+flavor = keystone
--- a/components/openstack/glance/files/glance-api.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-api.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -29,6 +29,7 @@
       type='service'>
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
+
     <!-- create a dependency on the glance_db service so the glance-api and
          glance-registry services do not collide when creating the database -->
     <dependency name='glance_db' grouping='optional_all' restart_on='error'
@@ -36,6 +37,13 @@
       <service_fmri value='svc:/application/openstack/glance/glance-db'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/glance-api %m">
       <method_context>
--- a/components/openstack/glance/files/glance-db.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-db.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/usr/bin/glance-manage db_sync">
       <method_context>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/glance/files/glance-registry.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,99 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = 0.0.0.0
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log
+# file for both the API and registry servers!
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+# data_api = glance.db.sqlalchemy.api
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
+sql_connection = sqlite:////var/lib/glance/glance.sqlite
+
+# Period in seconds after which SQLAlchemy should reestablish its connection
+# to the database.
+#
+# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
+# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
+# notice this, you can lower this value to ensure that SQLAlchemy reconnects
+# before MySQL can drop the connection.
+sql_idle_timeout = 3600
+
+# Limit the api to return `api_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+[keystone_authtoken]
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+signing_dir = /var/lib/glance/keystone-signing
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor = keystone
--- a/components/openstack/glance/files/glance-registry.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-registry.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -29,6 +29,7 @@
       type='service'>
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
+
     <!-- create a dependency on the glance_db service so the glance-api and
          glance-registry services do not collide when creating the database -->
     <dependency name='glance_db' grouping='optional_all' restart_on='error'
@@ -36,6 +37,13 @@
       <service_fmri value='svc:/application/openstack/glance/glance-db'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/glance-registry %m">
       <method_context>
--- a/components/openstack/glance/files/glance-scrubber.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-scrubber.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -34,7 +34,27 @@
 # Port the registry server is listening on
 registry_port = 9191
 
+# Auth settings if using Keystone
+# auth_url = http://127.0.0.1:5000/v2.0/
+# admin_tenant_name = %SERVICE_TENANT_NAME%
+# admin_user = %SERVICE_USER%
+# admin_password = %SERVICE_PASSWORD%
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# ================= Security Options ==========================
+
 # AES key for encrypting store 'location' metadata, including
 # -- if used -- Swift or S3 credentials
 # Should be set to a random string of length 16, 24 or 32 bytes
 #metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+filesystem_store_datadir = /var/lib/glance/images/
+
--- a/components/openstack/glance/files/glance-scrubber.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance-scrubber.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/glance-scrubber %m">
       <method_context>
--- a/components/openstack/glance/files/glance.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/files/glance.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -9,6 +9,7 @@
 solaris.admin.edit/etc/glance/logging.conf,\
 solaris.admin.edit/etc/glance/policy.json,\
 solaris.smf.manage.glance,\
-solaris.smf.value.glance
+solaris.smf.value.glance;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Image Management
--- a/components/openstack/glance/glance.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/glance.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,9 +28,9 @@
 set name=pkg.summary value="OpenStack Glance"
 set name=pkg.description \
     value="OpenStack Glance provides services for discovering, registering, and retrieving virtual machine images. Glance has a RESTful API that allows querying of VM image metadata as well as retrieval of the actual image. VM images made available through Glance can be stored in a variety of locations from simple file systems to object-storage systems like OpenStack Swift."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=com.oracle.info.description value="Glance, the OpenStack image service"
-set name=com.oracle.info.tpno value=16269
+set name=com.oracle.info.tpno value=17717
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -39,25 +39,26 @@
 set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
 set name=info.upstream value="OpenStack <[email protected]>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
-set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/055
+set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/055 \
+    value=PSARC/2014/207
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 dir  path=etc/glance owner=glance group=glance mode=0700
 file etc/glance-api-paste.ini path=etc/glance/glance-api-paste.ini \
-    owner=glance group=glance mode=0644 overlay=allow preserve=true
+    owner=glance group=glance mode=0644 overlay=allow preserve=renamenew
 file files/glance-api.conf path=etc/glance/glance-api.conf owner=glance \
-    group=glance mode=0644 overlay=allow preserve=true
+    group=glance mode=0644 overlay=allow preserve=renamenew
 file etc/glance-cache.conf path=etc/glance/glance-cache.conf owner=glance \
-    group=glance mode=0644 overlay=allow preserve=true
+    group=glance mode=0644 overlay=allow preserve=renamenew
 file etc/glance-registry-paste.ini path=etc/glance/glance-registry-paste.ini \
-    owner=glance group=glance mode=0644 overlay=allow preserve=true
-file etc/glance-registry.conf path=etc/glance/glance-registry.conf \
-    owner=glance group=glance mode=0644 overlay=allow preserve=true
+    owner=glance group=glance mode=0644 overlay=allow preserve=renamenew
+file files/glance-registry.conf path=etc/glance/glance-registry.conf \
+    owner=glance group=glance mode=0644 overlay=allow preserve=renamenew
 file files/glance-scrubber.conf path=etc/glance/glance-scrubber.conf \
-    owner=glance group=glance mode=0644 overlay=allow preserve=true
+    owner=glance group=glance mode=0644 overlay=allow preserve=renamenew
 file etc/logging.cnf.sample path=etc/glance/logging.conf owner=glance \
-    group=glance mode=0644 overlay=allow preserve=true
+    group=glance mode=0644 overlay=allow preserve=renamenew
 file etc/policy.json path=etc/glance/policy.json owner=glance group=glance \
-    mode=0644 overlay=allow preserve=true
+    mode=0644 overlay=allow preserve=renamenew
 file etc/schema-image.json path=etc/glance/schema-image.json owner=glance \
     group=glance
 file files/glance.auth_attr \
@@ -88,6 +89,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
 file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
 file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/glance-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/__init__.py
@@ -99,14 +102,17 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/middleware/cache.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/middleware/cache_manage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/middleware/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/middleware/gzip.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/middleware/version_negotiation.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/property_protections.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/controller.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/filters.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/images.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/members.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v1/upload_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v2/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v2/image_data.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v2/image_members.py
@@ -115,6 +121,17 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v2/router.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/v2/schemas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/api/versions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/cache_cleaner.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/cache_manage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/cache_prefetcher.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/cache_pruner.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/control.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/manage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/registry.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/replicator.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/cmd/scrubber.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/auth.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/client.py
@@ -122,11 +139,16 @@
     pkg.depend.bypass-generate=.*/paste.*
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/crypt.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/ordereddict.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/property_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/common/wsgi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/context.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/registry/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/registry/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/simple/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/simple/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/__init__.py
@@ -168,6 +190,13 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/023_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/024_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/025_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/028_owner_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migrate_repo/versions/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/migration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/db/sqlalchemy/models.py
@@ -184,24 +213,53 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/image_cache/drivers/xattr.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/image_cache/prefetcher.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/image_cache/pruner.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ar/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/bg_BG/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/bs/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ca/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/cs/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/da/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/de/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/en_AU/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/en_GB/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/en_US/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/es/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/es_MX/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/fi_FI/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/fil/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/fr/LC_MESSAGES/glance.po
-file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/glance.pot
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/hi/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/hr/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/hu/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/id/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/it/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/it_IT/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ja/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ka_GE/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/kn/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ko/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ko_KR/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ms/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/nb/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ne/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/nl_NL/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/pl_PL/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/pt/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/pt_BR/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ro/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ru/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/ru_RU/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/sk/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/sl_SI/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/sw_KE/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/tl/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/tl_PH/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/tr/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/tr_TR/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/uk/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/vi_VN/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/zh_CN/LC_MESSAGES/glance.po
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/zh_HK/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/locale/zh_TW/LC_MESSAGES/glance.po
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/notifier/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/notifier/notify_kombu.py
@@ -212,11 +270,18 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/README
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/eventlet_backdoor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/excutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/fileutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/importutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/jsonutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/lockutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/loopingcall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/network_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/notifier/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/notifier/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/notifier/log_notifier.py
@@ -226,26 +291,39 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/notifier/rpc_notifier2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/notifier/test_notifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/setup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/processutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/threadgroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/timeutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/uuidutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/glance/openstack/common/version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/quota/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/v1/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/v1/images.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/v1/members.py
-file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/v2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/api/v2/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v1/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v1/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v1/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v2/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/registry/client/v2/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/schema.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/cinder.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/filesystem.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/gridfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/http.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/location.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/rbd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/s3.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/scrubber.py
+file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/sheepdog.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/store/swift.py
 file path=usr/lib/python$(PYVER)/vendor-packages/glance/version.py
 dir  path=var/lib/glance owner=glance group=glance mode=0700
@@ -263,9 +341,19 @@
 # to flush this out.
 depend type=group fmri=library/python/swiftclient-26
 
+# force a dependency on argparse; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/argparse-26
+
 # force a dependency on boto; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/boto-26
 
+# force a dependency on cinderclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/cinderclient-26
+
+# force a dependency on greenlet; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/greenlet-26
+
 # force a dependency on httplib2; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/httplib2-26
 
@@ -275,6 +363,9 @@
 # force a dependency on jsonschema; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/jsonschema-26
 
+# force a dependency on keystoneclient; used via a paste.deploy filter
+depend type=require fmri=library/python/keystoneclient-26
+
 # force a dependency on kombu; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/kombu-26
 
@@ -285,11 +376,17 @@
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-26
+
+# force a dependency on pyopenssl; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pyopenssl-26
+
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-26
 
-# force a dependency on setuptools; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/setuptools-26
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-26
 
 # force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/sqlalchemy-26
--- a/components/openstack/glance/patches/01-nopycrypto.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/patches/01-nopycrypto.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -4,19 +4,8 @@
 Convert urlsafe_encrypt() and urlsafe_decrypt() to use M2Crypto instead
 of PyCrypto.
 
---- glance-2013.1.4/glance.egg-info/requires.txt.orig	Thu Jan 16 22:08:47 2014
-+++ glance-2013.1.4/glance.egg-info/requires.txt	Thu Jan 16 22:23:01 2014
-@@ -11,7 +11,7 @@
- sqlalchemy-migrate>=0.7
- httplib2
- kombu
--pycrypto>=2.1.0alpha1
-+M2Crypto>=0.21.1
- iso8601>=0.1.4
- oslo.config>=1.1.0
- python-swiftclient>=1.2,<2
---- glance-2013.1.4/glance/common/crypt.py.orig	Thu Oct 17 11:22:18 2013
-+++ glance-2013.1.4/glance/common/crypt.py	Thu Jan 16 22:42:41 2014
+--- glance-2013.2.3/glance/common/crypt.py.orig	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/common/crypt.py	2014-05-19 03:47:07.005226253 -0700
 @@ -4,6 +4,8 @@
  # Copyright 2011 OpenStack LLC.
  # All Rights Reserved.
@@ -26,20 +15,20 @@
  #    Licensed under the Apache License, Version 2.0 (the "License"); you may
  #    not use this file except in compliance with the License. You may obtain
  #    a copy of the License at
-@@ -21,12 +23,27 @@
+@@ -21,10 +23,26 @@
  """
  
  import base64
 +import os
++
++from M2Crypto.EVP import Cipher
++
++from glance.common import exception
++
  
 -from Crypto.Cipher import AES
 -from Crypto import Random
 -from Crypto.Random import random
-+from M2Crypto.EVP import Cipher
- 
-+from glance.common import exception
- 
-+
 +def _key_to_alg(key):
 +    """Return a M2Crypto-compatible AES-CBC algorithm name given a key."""
 +    aes_algs = {
@@ -53,11 +42,10 @@
 +        msg = ('Invalid AES key length, %d bits') % keylen
 +        raise exception.Invalid(msg)
 +    return aes_algs[keylen]
-+
+ 
+ 
  def urlsafe_encrypt(key, plaintext, blocksize=16):
-     """
-     Encrypts plaintext. Resulting ciphertext will contain URL-safe characters
-@@ -36,20 +53,12 @@
+@@ -36,20 +54,12 @@
  
      :returns : Resulting ciphertext
      """
@@ -82,7 +70,7 @@
      return base64.urlsafe_b64encode(init_vector + padded)
  
  
-@@ -63,6 +72,7 @@
+@@ -63,6 +73,7 @@
      """
      # Cast from unicode
      ciphertext = base64.urlsafe_b64decode(str(ciphertext))
@@ -93,14 +81,3 @@
 +    padded = cipher.update(ciphertext[16:])
 +    padded = padded + cipher.final()
 +    return padded
---- glance-2013.1.4/tools/pip-requires.orig	Thu Oct 17 11:22:19 2013
-+++ glance-2013.1.4/tools/pip-requires	Thu Jan 16 22:22:56 2014
-@@ -15,7 +15,7 @@
- sqlalchemy-migrate>=0.7
- httplib2
- kombu
--pycrypto>=2.1.0alpha1
-+M2Crypto>=0.21.1
- iso8601>=0.1.4
- oslo.config>=1.1.0
- 
--- a/components/openstack/glance/patches/02-zfs-uar-formats.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/glance/patches/02-zfs-uar-formats.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -1,40 +1,20 @@
 In-house patch to add 'uar' and 'zfs' container and disk formats to
 registered image types.  Patch has not yet been submitted upstream.
 
-diff --git a/glance/api/v1/images.py b/glance/api/v1/images.py
-index 4993a28..a4efe16 100644
---- a/glance/api/v1/images.py
-+++ b/glance/api/v1/images.py
-@@ -55,9 +55,9 @@ CONF = cfg.CONF
- LOG = logging.getLogger(__name__)
- SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS
- SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS
--CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
-+CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf', 'uar']
- DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
--                'iso']
-+                'iso', 'zfs']
- 
+--- glance-2013.2.3/glance/domain/__init__.py.orig	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/domain/__init__.py	2014-04-10 00:27:51.161800689 -0700
+@@ -23,12 +23,12 @@
  
- def validate_image_meta(req, values):
-diff --git a/glance/api/v2/images.py b/glance/api/v2/images.py
-index 3121b25..32090c9 100644
---- a/glance/api/v2/images.py
-+++ b/glance/api/v2/images.py
-@@ -520,14 +520,14 @@ _BASE_PROPERTIES = {
-         'type': 'string',
-         'description': _(''),
-         'type': 'string',
--        'enum': ['bare', 'ovf', 'ami', 'aki', 'ari'],
-+        'enum': ['bare', 'ovf', 'ami', 'aki', 'ari', 'uar'],
-     },
-     'disk_format': {
-         'type': 'string',
-         'description': _(''),
-         'type': 'string',
-         'enum': ['raw', 'vhd', 'vmdk', 'vdi', 'iso', 'qcow2',
--                 'aki', 'ari', 'ami'],
-+                 'aki', 'ari', 'ami', 'zfs'],
-     },
-     'created_at': {
-         'type': 'string',
+ image_format_opts = [
+     cfg.ListOpt('container_formats',
+-                default=['ami', 'ari', 'aki', 'bare', 'ovf'],
++                default=['ami', 'ari', 'aki', 'bare', 'ovf', 'uar'],
+                 help=_("Supported values for the 'container_format' "
+                        "image attribute")),
+     cfg.ListOpt('disk_formats',
+                 default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
+-                         'vdi', 'iso'],
++                         'vdi', 'iso', 'zfs'],
+                 help=_("Supported values for the 'disk_format' "
+                        "image attribute")),
+ ]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/glance/patches/03-CVE-2014-0162.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,219 @@
+This proposed upstream patch addresses CVE-2014-0162 and is tracked
+under Launchpad bug 1298698. Although it's been addressed in Icehouse
+2014.1, the patch below has not yet been released for Havana.
+
+commit 13069a4017d36a549576a21ca3ec5b15c411effc
+Author: Zhi Yan Liu <[email protected]>
+Date:   Sat Mar 29 03:35:35 2014 +0800
+
+    To prevent remote code injection on Sheepdog store
+    
+    Change-Id: Iae92eaf9eb023f36a1bab7c20ea41c985f2bf51b
+    Signed-off-by: Zhi Yan Liu <[email protected]>
+
+diff --git a/glance/store/sheepdog.py b/glance/store/sheepdog.py
+index d10aea7..2f75441 100644
+--- a/glance/store/sheepdog.py
++++ b/glance/store/sheepdog.py
+@@ -25,6 +25,7 @@ from glance.common import exception
+ from glance.openstack.common import excutils
+ import glance.openstack.common.log as logging
+ from glance.openstack.common import processutils
++from glance.openstack.common import uuidutils
+ import glance.store
+ import glance.store.base
+ import glance.store.location
+@@ -32,7 +33,7 @@ import glance.store.location
+ 
+ LOG = logging.getLogger(__name__)
+ 
+-DEFAULT_ADDR = 'localhost'
++DEFAULT_ADDR = '127.0.0.1'
+ DEFAULT_PORT = '7000'
+ DEFAULT_CHUNKSIZE = 64  # in MiB
+ 
+@@ -63,18 +64,14 @@ class SheepdogImage:
+         self.chunk_size = chunk_size
+ 
+     def _run_command(self, command, data, *params):
+-        cmd = ("collie vdi %(command)s -a %(addr)s -p %(port)s %(name)s "
+-               "%(params)s" %
+-               {"command": command,
+-                "addr": self.addr,
+-                "port": self.port,
+-                "name": self.name,
+-                "params": " ".join(map(str, params))})
++        cmd = ["collie", "vdi"]
++        cmd.extend(command)
++        cmd.extend(["-a", self.addr, "-p", self.port, self.name])
++        cmd.extend(params)
+ 
+         try:
+-            return processutils.execute(
+-                cmd, process_input=data, shell=True)[0]
+-        except processutils.ProcessExecutionError as exc:
++            return processutils.execute(*cmd, process_input=data)[0]
++        except (processutils.ProcessExecutionError, OSError) as exc:
+             LOG.error(exc)
+             raise glance.store.BackendException(exc)
+ 
+@@ -84,7 +81,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi list -r -a address -p port image
+         """
+-        out = self._run_command("list -r", None)
++        out = self._run_command(["list", "-r"], None)
+         return long(out.split(' ')[3])
+ 
+     def read(self, offset, count):
+@@ -94,7 +91,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi read -a address -p port image offset len
+         """
+-        return self._run_command("read", None, str(offset), str(count))
++        return self._run_command(["read"], None, str(offset), str(count))
+ 
+     def write(self, data, offset, count):
+         """
+@@ -103,7 +100,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi write -a address -p port image offset len
+         """
+-        self._run_command("write", data, str(offset), str(count))
++        self._run_command(["write"], data, str(offset), str(count))
+ 
+     def create(self, size):
+         """
+@@ -111,7 +108,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi create -a address -p port image size
+         """
+-        self._run_command("create", None, str(size))
++        self._run_command(["create"], None, str(size))
+ 
+     def delete(self):
+         """
+@@ -119,7 +116,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi delete -a address -p port image
+         """
+-        self._run_command("delete", None)
++        self._run_command(["delete"], None)
+ 
+     def exist(self):
+         """
+@@ -127,7 +124,7 @@ class SheepdogImage:
+ 
+         Sheepdog Usage: collie vdi list -r -a address -p port image
+         """
+-        out = self._run_command("list -r", None)
++        out = self._run_command(["list", "-r"], None)
+         if not out:
+             return False
+         else:
+@@ -138,7 +135,7 @@ class StoreLocation(glance.store.location.StoreLocation):
+     """
+     Class describing a Sheepdog URI. This is of the form:
+ 
+-        sheepdog://image
++        sheepdog://image-id
+ 
+     """
+ 
+@@ -149,10 +146,14 @@ class StoreLocation(glance.store.location.StoreLocation):
+         return "sheepdog://%s" % self.image
+ 
+     def parse_uri(self, uri):
+-        if not uri.startswith('sheepdog://'):
+-            raise exception.BadStoreUri(uri, "URI must start with %s://" %
+-                                        'sheepdog')
+-        self.image = uri[11:]
++        valid_schema = 'sheepdog://'
++        if not uri.startswith(valid_schema):
++            raise exception.BadStoreUri(_("URI must start with %s://") %
++                                        valid_schema)
++        self.image = uri[len(valid_schema):]
++        if not uuidutils.is_uuid_like(self.image):
++            raise exception.BadStoreUri(_("URI must contains well-formated "
++                                          "image id"))
+ 
+ 
+ class ImageIterator(object):
+@@ -192,7 +193,7 @@ class Store(glance.store.base.Store):
+ 
+         try:
+             self.chunk_size = CONF.sheepdog_store_chunk_size * 1024 * 1024
+-            self.addr = CONF.sheepdog_store_address
++            self.addr = CONF.sheepdog_store_address.strip()
+             self.port = CONF.sheepdog_store_port
+         except cfg.ConfigFileValueError as e:
+             reason = _("Error in store configuration: %s") % e
+@@ -200,10 +201,18 @@ class Store(glance.store.base.Store):
+             raise exception.BadStoreConfiguration(store_name='sheepdog',
+                                                   reason=reason)
+ 
++        if ' ' in self.addr:
++            reason = (_("Invalid address configuration of sheepdog store: %s")
++                      % self.addr)
++            LOG.error(reason)
++            raise exception.BadStoreConfiguration(store_name='sheepdog',
++                                                  reason=reason)
++
+         try:
+-            processutils.execute("collie", shell=True)
+-        except processutils.ProcessExecutionError as exc:
+-            reason = _("Error in store configuration: %s") % exc
++            cmd = ["collie", "vdi", "list", "-a", self.addr, "-p", self.port]
++            processutils.execute(*cmd)
++        except Exception as e:
++            reason = _("Error in store configuration: %s") % e
+             LOG.error(reason)
+             raise exception.BadStoreConfiguration(store_name='sheepdog',
+                                                   reason=reason)
+diff --git a/glance/tests/unit/test_sheepdog_store.py b/glance/tests/unit/test_sheepdog_store.py
+index 8eef86b..bea7e29 100644
+--- a/glance/tests/unit/test_sheepdog_store.py
++++ b/glance/tests/unit/test_sheepdog_store.py
+@@ -57,4 +57,5 @@ class TestStore(base.StoreClearingUnitTest):
+                           'fake_image_id',
+                           utils.LimitingReader(StringIO.StringIO('xx'), 1),
+                           2)
+-        self.assertEqual(called_commands, ['list -r', 'create', 'delete'])
++        self.assertEqual([['list', '-r'], ['create'], ['delete']],
++                         called_commands)
+diff --git a/glance/tests/unit/test_store_location.py b/glance/tests/unit/test_store_location.py
+index 7eec171..2464ebb 100644
+--- a/glance/tests/unit/test_store_location.py
++++ b/glance/tests/unit/test_store_location.py
+@@ -52,7 +52,7 @@ class TestStoreLocation(base.StoreClearingUnitTest):
+             'rbd://imagename',
+             'rbd://fsid/pool/image/snap',
+             'rbd://%2F/%2F/%2F/%2F',
+-            'sheepdog://imagename',
++            'sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c',
+             'cinder://12345678-9012-3455-6789-012345678901',
+         ]
+ 
+@@ -367,15 +367,18 @@ class TestStoreLocation(base.StoreClearingUnitTest):
+         """
+         Test the specific StoreLocation for the Sheepdog store
+         """
+-        uri = 'sheepdog://imagename'
++        uri = 'sheepdog://244e75f1-9c69-4167-9db7-1aa7d1973f6c'
+         loc = glance.store.sheepdog.StoreLocation({})
+         loc.parse_uri(uri)
+-        self.assertEqual('imagename', loc.image)
++        self.assertEqual('244e75f1-9c69-4167-9db7-1aa7d1973f6c', loc.image)
+ 
+-        bad_uri = 'sheepdog:/image'
++        bad_uri = 'sheepdog:/244e75f1-9c69-4167-9db7-1aa7d1973f6c'
+         self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
+ 
+-        bad_uri = 'http://image'
++        bad_uri = 'http://244e75f1-9c69-4167-9db7-1aa7d1973f6c'
++        self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
++
++        bad_uri = 'image; name'
+         self.assertRaises(exception.BadStoreUri, loc.parse_uri, bad_uri)
+ 
+     def test_cinder_store_good_location(self):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/glance/patches/04-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,71 @@
+In-house patch to remove unnecessary dependencies from Glance's
+requirements files. The specific reasons are as follows:
+
+lxml		No longer applicable
+		(upstream commit b7f9120cd4623d8284c4edfea760ef10e1193492)
+
+passlib		No longer applicable
+		(upstream commit b31874284212d49560ddf4f36e5c9ca5d221f605)
+
+pycrypto	Not applicable to Solaris (M2Crypto used instead)
+
+wsgiref		No longer applicable
+		(upstream commit b31874284212d49560ddf4f36e5c9ca5d221f605)
+
+--- glance-2013.2.3/glance.egg-info/requires.txt.orig	2014-04-03 11:46:54.000000000 -0700
++++ glance-2013.2.3/glance.egg-info/requires.txt	2014-05-24 23:04:42.210143004 -0700
+@@ -6,20 +6,16 @@
+ PasteDeploy>=1.5.0
+ Routes>=1.12.3
+ WebOb>=1.2.3,<1.3
+-wsgiref>=0.1.2
+ boto>=2.4.0,!=2.13.0
+ sqlalchemy-migrate>=0.7.2
+ httplib2
+ kombu>=2.4.8
+-pycrypto>=2.6
+ iso8601>=0.1.8
+ oslo.config>=1.2.0
+ python-swiftclient>=1.5,<2.0.0
+-lxml>=2.3
+ Paste
+-passlib
+ jsonschema>=1.3.0,!=1.4.0
+ python-cinderclient>=1.0.6
+ python-keystoneclient>=0.3.2
+ pyOpenSSL>=0.11
+-six>=1.4.1
+\ No newline at end of file
++six>=1.4.1
+--- glance-2013.2.3/requirements.txt.orig	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/requirements.txt	2014-05-24 23:05:14.053235541 -0700
+@@ -12,29 +12,20 @@
+ PasteDeploy>=1.5.0
+ Routes>=1.12.3
+ WebOb>=1.2.3,<1.3
+-wsgiref>=0.1.2
+ argparse
+ boto>=2.4.0,!=2.13.0
+ sqlalchemy-migrate>=0.7.2
+ httplib2
+ kombu>=2.4.8
+-pycrypto>=2.6
+ iso8601>=0.1.8
+ oslo.config>=1.2.0
+ 
+ # For Swift storage backend.
+ python-swiftclient>=1.5,<2.0.0
+ 
+-# Note you will need gcc buildtools installed and must
+-# have installed libxml headers for lxml to be successfully
+-# installed using pip, therefore you will need to install the
+-# libxml2-dev and libxslt-dev Ubuntu packages.
+-lxml>=2.3
+-
+ # For paste.util.template used in keystone.common.template
+ Paste
+ 
+-passlib
+ jsonschema>=1.3.0,!=1.4.0
+ python-cinderclient>=1.0.6
+ python-keystoneclient>=0.3.2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/glance/patches/05-launchpad-1255556.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,119 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+still has not yet been released for Havana.  It has been modified to
+apply cleanly to our current Havana implementation.
+
+commit 1f6381a73f5c99f1f731d6c4f9defb91bd2d042d
+Author: Flavio Percoco <[email protected]>
+Date:   Thu Nov 28 16:17:13 2013 +0100
+
+    Don't enable all stores by default
+    
+    Glance currently enables all stores by default. This patch changes that
+    by removing all stores that require manual configuration and leaving
+    those that work right out of the box.
+    
+    Current behavior causes a lot of confusion to users since most of those
+    stores print errors when they're not configured correctly. All extra
+    stores should be enabled explicitly by users.
+    
+    This fix makes tests use http locations. All other locations besides the
+    default ones should be tested in their own test suites.
+    
+    DocImpact
+    Closes-bug: #1255556
+    Change-Id: I82073352641d3eb2ab3d6e9a6b64afc99a30dcc7
+
+--- glance-2013.2.3/etc/glance-api.conf.~1~	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/etc/glance-api.conf	2014-05-29 13:47:44.689586507 -0700
+@@ -13,13 +13,14 @@
+ 
+ # List of which store classes and store class locations are
+ # currently known to glance at startup.
++# Existing but disabled stores:
++#      glance.store.rbd.Store,
++#      glance.store.s3.Store,
++#      glance.store.swift.Store,
++#      glance.store.sheepdog.Store,
++#      glance.store.cinder.Store,
+ #known_stores = glance.store.filesystem.Store,
+-#               glance.store.http.Store,
+-#               glance.store.rbd.Store,
+-#               glance.store.s3.Store,
+-#               glance.store.swift.Store,
+-#               glance.store.sheepdog.Store,
+-#               glance.store.cinder.Store,
++#               glance.store.http.Store
+ 
+ 
+ # Maximum image size (in bytes) that may be uploaded through the
+--- glance-2013.2.3/glance/store/__init__.py.~1~	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/store/__init__.py	2014-05-29 13:53:14.827604452 -0700
+@@ -38,12 +38,7 @@
+     cfg.ListOpt('known_stores',
+                 default=[
+                     'glance.store.filesystem.Store',
+-                    'glance.store.http.Store',
+-                    'glance.store.rbd.Store',
+-                    'glance.store.s3.Store',
+-                    'glance.store.swift.Store',
+-                    'glance.store.sheepdog.Store',
+-                    'glance.store.cinder.Store',
++                    'glance.store.http.Store'
+                 ],
+                 help=_('List of which store classes and store class locations '
+                        'are currently known to glance at startup.')),
+--- glance-2013.2.3/glance/tests/unit/test_s3_store.py.~1~	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/tests/unit/test_s3_store.py	2014-05-29 13:43:01.104336073 -0700
+@@ -41,7 +41,8 @@
+            's3_store_access_key': 'user',
+            's3_store_secret_key': 'key',
+            's3_store_host': 'localhost:8080',
+-           's3_store_bucket': 'glance'}
++           's3_store_bucket': 'glance',
++           'known_stores': ['glance.store.s3.Store']}
+ 
+ 
+ # We stub out as little as possible to ensure that the code paths
+--- glance-2013.2.3/glance/tests/unit/test_store_image.py.~1~	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/tests/unit/test_store_image.py	2014-05-29 13:43:01.104846210 -0700
+@@ -20,7 +20,7 @@
+ from glance.tests import utils
+ 
+ 
+-BASE_URI = 'swift+http://storeurl.com/container'
++BASE_URI = 'http://storeurl.com/container'
+ UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
+ UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'
+ USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
+--- glance-2013.2.3/glance/tests/unit/test_store_location.py.~2~	2014-05-29 13:43:01.087604346 -0700
++++ glance-2013.2.3/glance/tests/unit/test_store_location.py	2014-05-29 15:38:15.306762870 -0700
+@@ -30,6 +30,18 @@
+ 
+     def setUp(self):
+         self.config(default_store='file')
++
++        # NOTE(flaper87): Each store should test
++        # this in their test suite.
++        self.config(known_stores=[
++            "glance.store.filesystem.Store",
++            "glance.store.http.Store",
++            "glance.store.rbd.Store",
++            "glance.store.s3.Store",
++            "glance.store.swift.Store",
++            "glance.store.sheepdog.Store",
++            "glance.store.cinder.Store",
++        ])
+         super(TestStoreLocation, self).setUp()
+ 
+     def test_get_location_from_uri_back_to_uri(self):
+--- glance-2013.2.3/glance/tests/unit/utils.py.~1~	2014-04-03 11:43:55.000000000 -0700
++++ glance-2013.2.3/glance/tests/unit/utils.py	2014-05-29 13:43:01.105795472 -0700
+@@ -36,7 +36,7 @@
+ USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754'
+ USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd'
+ 
+-BASE_URI = 'swift+http://storeurl.com/container'
++BASE_URI = 'http://storeurl.com/container'
+ 
+ 
+ def get_fake_request(path='', method='POST', is_admin=False, user=USER1,
--- a/components/openstack/horizon/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		horizon
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:fb24b6d8b38d199a8ab0174335df63c7612e74a02e4f0883ee8a3181ce07b4ab
+    sha256:de9b87ee62d8b28792399be0fc867ba99618eaaad289cf9842b5c7084e12620f
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	utility/horizon
@@ -57,7 +57,7 @@
 	 $(CP) files/branding/theme/_stylesheets.html $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/openstack_dashboard/static/solaris/theme; \
 	 $(CP) files/overrides.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/openstack_dashboard); \
 	 $(PYTHON) -m compileall $(PROTO_DIR)/$(PYTHON_VENDOR_PACKAGES)
-
+	
 # common targets
 build:		$(BUILD_NO_ARCH)
 
--- a/components/openstack/horizon/files/branding/css/solaris.css	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/files/branding/css/solaris.css	Wed Jun 11 17:13:12 2014 -0700
@@ -1,4 +1,4 @@
-/*!
+/*
  * Bootstrap v2.0.1
  *
  * Copyright 2012 Twitter, Inc
@@ -123,11 +123,11 @@
   background-color: #ffffff;
 }
 a {
-  color: #dd4814;
+  color: #005D9D;
   text-decoration: none;
 }
 a:hover {
-  color: #dd4814;
+  color: #005D9D;
   text-decoration: underline;
 }
 .row {
@@ -350,7 +350,7 @@
 }
 h3 {
   line-height: 27px;
-  font-size: 18px;
+  font-size: 16px;
 }
 h3 small {
   font-size: 14px;
@@ -1278,11 +1278,11 @@
 [class^="icon-"],
 [class*=" icon-"] {
   display: inline-block;
-  width: 14px;
-  height: 14px;
+  width: 0px;
+  height: 0px;
   line-height: 14px;
   vertical-align: text-top;
-  xbackground-image: url('/static/bootstrap/img/glyphicons-halflings.png?531d4b607365');
+  /*xbackground-image: url('/static/bootstrap/img/glyphicons-halflings.png?531d4b607365');*/
   background-position: 14px 14px;
   background-repeat: no-repeat;
   *margin-right: .3em;
@@ -1292,7 +1292,7 @@
   *margin-left: 0;
 }
 .icon-white {
-  xbackground-image: url('/static/bootstrap/img/glyphicons-halflings-white.png?11118ae8db79');
+  /*xbackground-image: url('/static/bootstrap/img/glyphicons-halflings-white.png?11118ae8db79');*/
 }
 .icon-glass {
   background-position: 0      0;
@@ -1658,7 +1658,7 @@
   position: relative;
 }
 .dropdown-toggle {
-  *margin-bottom: -3px;
+  border: 1px solid #eeeeee;
 }
 .dropdown-toggle:active,
 .open .dropdown-toggle {
@@ -1749,7 +1749,8 @@
 .dropdown.open {
   *z-index: 1000;
 }
-.dropdown.open .dropdown-toggle {
+.dropdown.open .dropdown-toggle,
+.dropdown.open .dropdown-toggle:hover  {
   color: #ffffff;
   background: #ccc;
   background: rgba(0, 0, 0, 0.3);
@@ -1973,9 +1974,9 @@
   background-color: #c44012;
 
 }
-
+li.active .btn-primary,
 .btn-primary {
-    color: #fff;
+    color: #ffffff;
     border: 1px solid #0074CD;
     background: #0074CD;
     font-weight: bold;
@@ -2180,6 +2181,10 @@
   *zoom: 1;
   margin: 0px;
 }
+.topologyNavi .btn-group {
+	margin-left: 5px;
+	float: right;
+}
 .btn-group:before,
 .btn-group:after {
   display: table;
@@ -2192,7 +2197,7 @@
   *margin-left: 0;
 }
 .btn-group + .btn-group {
-  margin-left: 5px;
+  /*margin-left: 5px;*/
 }
 .btn-toolbar {
   margin-top: 9px;
@@ -2426,16 +2431,32 @@
 .nav-pills:after {
   clear: both;
 }
+.nav-pills {
+  margin: 0px;
+  padding: 5px 12px; 
+  width: 289px !important; 
+  border: 1px solid #d5dfe6;
+  border-top: 1px solid #eeeeee;
+  border-bottom: none;
+}
+.right .fake_table .members .nav-pills {
+	width: 304px !important;
+}
+.nav-pills:first-of-type {
+  border-top: 1px solid #d5dfe6;
+}
 .nav-tabs > li,
 .nav-pills > li {
   float: left;
 }
-.nav-tabs > li > a,
-.nav-pills > li > a {
+.nav-tabs > li > a {
   line-height: 33px;
   text-align: center;
 }
-
+.nav-pills > li > a {
+  line-height: 14px;
+  text-align: center;
+}
 .nav-tabs > li {
   margin-bottom: -1px;
   margin-left: -1px;
@@ -2455,19 +2476,26 @@
   border-top: none;
   cursor: default;
 }
+ul.nav-pills li.active {
+	float: right;
+}
 .nav-pills > li > a {
-  padding-top: 8px;
-  padding-bottom: 8px;
+  padding-top: 6px;
+  padding-bottom: 10px;
   margin-top: 2px;
   margin-bottom: 2px;
   -webkit-border-radius: 5px;
   -moz-border-radius: 5px;
   border-radius: 5px;
 }
-.nav-pills .active > a,
+.nav-pills .active > a {
+  color: #ffffff;
+  background-color: #0074CD;
+  height: 14px;
+  width: 8px;  
+}
 .nav-pills .active > a:hover {
-  color: #ffffff;
-  background-color: #0088cc;
+  background-color: #0160C1;
 }
 .nav-stacked > li {
   float: none;
@@ -2510,13 +2538,15 @@
   border-width: 1px;
 }
 .nav-pills .dropdown-menu {
-  -webkit-border-radius: 4px;
-  -moz-border-radius: 4px;
-  border-radius: 4px;
+  -webkit-border-radius: 0px;
+  -moz-border-radius: 0px;
+  border-radius: 0px;
 }
 .nav-tabs .dropdown-toggle .caret,
 .nav-pills .dropdown-toggle .caret {
-  border-top-color: #0088cc;
+  /*border-top-color: #000000;*/
+  border-top: 4px solid #005580;
+  opacity: 0.3;
   margin-top: 6px;
 }
 .nav-tabs .dropdown-toggle:hover .caret,
@@ -2525,10 +2555,10 @@
 }
 .nav-tabs .active .dropdown-toggle .caret,
 .nav-pills .active .dropdown-toggle .caret {
-  border-top-color: #333333;
+  border-top: 4px solid #005580;
 }
 .nav > .dropdown.active > a:hover {
-  color: #000000;
+  color: #005580;
   cursor: pointer;
 }
 .nav-tabs .open .dropdown-toggle,
@@ -3755,7 +3785,7 @@
   font-style: normal;
 }
 a {
-  color: #dd4814;
+  color: #005D9D;
 }
 ul {
   list-style: none;
@@ -3767,6 +3797,7 @@
 #main_content {
   padding-left: 247px;   
   padding-right: 20px;
+  min-width: 840px;
 }
 .topbar {
   background: #f2f2f2;
@@ -3909,6 +3940,12 @@
   color: #8EACB7;
   text-shadow: none;
 }
+.nav li a.dropdown-toggle {
+  color: #8EACB7;
+}
+.nav li a.dropdown-toggle:hover {
+  color: #005580;
+}
 .container-fluid {
   padding-left: 0;
 }
@@ -4234,7 +4271,8 @@
 }
 .table_actions .table_search {
   /*display: inline-block;*/
- padding-bottom: 8px;
+  padding-bottom: 8px;
+  margin-left: 142px; 
   width: 267px;
 }
 
@@ -4381,7 +4419,7 @@
 }
 .left {
   float: left;
-  width: 347px;
+  width: 315px;
   margin-right: 15px;
 }
 .left form {
@@ -4389,7 +4427,7 @@
 }
 .right {
   float: left;
-  width: 308px;
+  width: 330px;
 }
 
 .workflow ul.nav-tabs {
@@ -4415,7 +4453,7 @@
 	background: url(../img/content_tab_bkgd_selected.png) repeat-x;
 	font-weight: bold;
 	border: 1px solid #C4CED8;
-	border-top: 1px solid #FFFFFF;
+	border-top: 1px solid #1274D1;
 	border-bottom: 1px solid #FFFFFF;
 }
 
@@ -4537,6 +4575,9 @@
   width: 10px;
 }
 /* Actions dropdown */
+th.actions_column {
+  width: 150px;	
+}
 td.actions_column {
   width: 150px;
   padding: 10px;
@@ -4969,33 +5010,49 @@
 .project_membership input {
   background: url('../../dashboard/img/search.png?781fb162b111') no-repeat 105px 5px whiteSmoke;
 }
+.membership .fake_table_header, 
 .project_membership .fake_table_header {
-  background-color: #F1F1F1;
+  background-color: #F2F4F7;
   width: 306px;
   height: 38px;
-  padding-top: 15px;
-  border: 1px solid #DDD;
+  padding-top: 10px;
+  border: 1px solid #D5DFE6;
   border-bottom: none;
 }
+.membership .fake_table,
 .project_membership .fake_table {
-  margin-left: 5px;
   width: 315px;
 }
-.project_membership .fake_table ul.no_results {
-  width: 298px;
-}
+.membership .fake_table ul.no_results,
+.project_membership .left .fake_table ul.no_results {
+  width: 313px;
+  line-height: 36px;
+  border: 1px solid #d5dfe6;  
+
+}
+.membership .fake_table ul.btn-group:hover,
 .project_membership .fake_table ul.btn-group:hover {
   background-color: #DDD;
 }
+.membership .left .fake_table_header,
 .project_membership .left .fake_table_header {
-  width: 318px;
-}
+  width: 313px;
+}
+.membership .right .fake_table_header,
 .project_membership .right .fake_table_header {
-  width: 318px;
-  margin-left: -15px;
-}
+  width: 328px;
+}
+.membership .right .fake_table ul.no_results,
 .project_membership .right .fake_table ul.no_results {
-  margin-left: -20px;
+  /*margin-left: -20px;*/
+  line-height: 36px;
+  width: 328px;
+  border: 1px solid #d5dfe6;
+}
+ul.no_results li {
+  padding: 5px 12px;
+  height: 36px;
+  line-height: 36px;
 }
 .project_membership .member {
   padding: 10px;
@@ -5004,18 +5061,21 @@
 .project_membership .project_members {
   margin-left: -20px;
 }
-.project_membership .project_members ul.btn-group,
+.membership .project_members ul.btn-group,
 .project_membership .available_users ul.btn-group {
   width: 308px;
 }
+.membership .dark_stripe,
 .project_membership .dark_stripe {
   background-color: #F9F9F9;
 }
+.membership .light_stripe, 
 .project_membership .light_stripe {
   background-color: white;
 }
+.membership .last_stripe,
 .project_membership .last_stripe {
-  border-bottom: 1px solid #DDD;
+  border-bottom: 1px solid #d5dfe6;
 }
 .project_membership .filter {
   width: 120px;
@@ -5027,6 +5087,7 @@
   padding: 10px;
   color: #08C;
 }
+.membership .role_dropdown li, 
 .project_membership .role_dropdown li {
   cursor: pointer;
   background: none;
@@ -5042,13 +5103,27 @@
   box-shadow: none;
   z-index: 99999;
 }
+.membership .role_dropdown li:hover, 
 .project_membership .role_dropdown li:hover {
   background-color: #EBECED;
 }
+.membership .nav .role_options, 
 .project_membership .nav .role_options {
   float: right;
   padding-right: 5px;
 }
+.membership input[type="text"] {
+	width: 180px;
+	float: right;
+	margin-right: 12px;
+} 
+.members_title {
+	margin-left: 12px;
+	position: relative;
+	top: 6px;
+
+}
+
 /* Inline user creation */
 .add_user_btn {
   display: inline;
@@ -5636,8 +5711,10 @@
   background-size: 14px 14px;
 }
 .launchButtons {
+  float: right;
   text-align: right;
   margin: 10px 0px 15px 10px;
+  clear: right;	
 }
 .launchButtons a.btn {
   margin-left: 10px;
@@ -5700,7 +5777,7 @@
   margin: 0px -30px 0px 0px;
   padding: 0px;
   position: relative;
-  top: 107px;
+  top: 100px;
 }
 
 #user_info, #user_info a {
@@ -5848,6 +5925,7 @@
   margin-left: 23px;
   color: #000000;
   vertical-align: top;
+  font-weight: 500;  
 }
 
 .sidebar a { font-size: 14px; }
@@ -5931,7 +6009,9 @@
 /* static pages */
 .static_page, .quota-dynamic { margin-top: 80px; }
 
-.static_page, .quota-dynamic h3 { margin-bottom: 20px; }
+.static_page, .quota-dynamic h3 { 
+  margin-bottom: 20px;
+}
 
 .static_page, .quota-dynamic .progress_bar {
   max-width: 50%;
@@ -5954,4 +6034,41 @@
 	color:#000000;
 	border-left: 1px solid #EEEEEE;
 }
+.topologyNavi {
+  height: 80px;
+}
+.quota-dynamic {
+  height: 200px; 
+  min-width: 840px;
+}
+.d3_quota_bar {
+  float: left;	
+  width: 160px;
+  text-align: center;
+  position: relative;
+  left: -30px;
+}
+#date_form h3 {
+	margin-bottom: 19px;
+}
 
+#volumes, #volume-types {
+  position: relative;
+  top: -8px;
+}
+td.normal_column, td.actions_column {
+	font-size: 12px;
+	color:#000000;
+	border-left: 1px solid #EEEEEE;
+}
+.display_name {
+	line-height: 36px;
+}
+.nodata {
+	display: none;
+}
+
+#no_update_flavor_access_members li {
+  line-height: 24px;
+  height: 48px;
+}
\ No newline at end of file
--- a/components/openstack/horizon/files/local_settings.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/files/local_settings.py	Wed Jun 11 17:13:12 2014 -0700
@@ -4,9 +4,16 @@
 
 from openstack_dashboard import exceptions
 
-DEBUG = False
+DEBUG = False
 TEMPLATE_DEBUG = DEBUG
 
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+
 # Set SSL proxy settings:
 # For Django 1.4+ pass this header from the proxy after terminating the SSL,
 # and don't forget to strip it from the client's request.
@@ -25,12 +32,32 @@
 LOGOUT_URL = '/horizon/auth/logout/'
 LOGIN_REDIRECT_URL = '/horizon'
 
-# Set STATIC_ROOT directly.
-STATIC_ROOT = "/var/lib/openstack_dashboard/static"
+STATIC_ROOT = '/var/lib/openstack_dashboard/static'
 
 # Enable Solaris theme
 TEMPLATE_DIRS = ('/var/lib/openstack_dashboard/static/solaris/theme', )
 
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, The identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+#     "identity": 3
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO", "VNC" or "SPICE"
+# CONSOLE_TYPE = "AUTO"
+
 # Default OpenStack Dashboard configuration.
 HORIZON_CONFIG = {
     'dashboards': ('project', 'admin', 'settings',),
@@ -62,7 +89,7 @@
 # Turn off browser autocompletion for the login form if so desired.
 # HORIZON_CONFIG["password_autocomplete"] = "off"
 
-LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+LOCAL_PATH = '/var/lib/openstack_dashboard'
 
 # Set custom secret key:
 # You can either set it to a specific value or you can let horizion generate a
@@ -74,7 +101,7 @@
 # requests routed to the same dashboard instance or you set the same SECRET_KEY
 # for all of them.
 from horizon.utils import secret_key
-SECRET_KEY = secret_key.generate_key()
+SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store'))
 
 # We recommend you use memcached for development; otherwise after every reload
 # of the django development server, you will have to login again. To use
@@ -111,11 +138,14 @@
 
 OPENSTACK_HOST = "127.0.0.1"
 OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
 
 # Disable SSL certificate checks (useful for self-signed certificates):
 # OPENSTACK_SSL_NO_VERIFY = True
 
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
 # The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
 # capabilities of the auth backend for Keystone.
 # If Keystone has been configured to use LDAP as the auth backend then set
@@ -125,29 +155,61 @@
 OPENSTACK_KEYSTONE_BACKEND = {
     'name': 'native',
     'can_edit_user': True,
-    'can_edit_project': True
+    'can_edit_group': True,
+    'can_edit_project': True,
+    'can_edit_domain': True,
+    'can_edit_role': True
 }
 
 OPENSTACK_HYPERVISOR_FEATURES = {
     'can_set_mount_point': True,
+}
 
-    # NOTE: as of Grizzly this is not yet supported in Nova so enabling this
-    # setting will not do anything useful
-    'can_encrypt_volumes': False
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+    'enable_lb': False,
+    'enable_firewall': False,
+    'enable_quotas': True,
+    'enable_vpn': False,
+    # The profile_support option is used to detect if an external router can be
+    # configured via the dashboard. When using specific plugins the
+    # profile_support can be turned on if needed.
+    'profile_support': None,
+    #'profile_support': 'cisco',
 }
 
-# The OPENSTACK_QUANTUM_NETWORK settings can be used to enable optional
-# services provided by quantum.  Currently only the load balancer service
-# is available.
-OPENSTACK_QUANTUM_NETWORK = {
-    'enable_lb': False
-}
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+#     'image_formats': [
+#         ('', ''),
+#         ('aki', _('AKI - Amazon Kernel Image')),
+#         ('ami', _('AMI - Amazon Machine Image')),
+#         ('ari', _('ARI - Amazon Ramdisk Image')),
+#         ('iso', _('ISO - Optical Disk Image')),
+#         ('qcow2', _('QCOW2 - QEMU Emulator')),
+#         ('raw', _('Raw')),
+#         ('vdi', _('VDI')),
+#         ('vhd', _('VHD')),
+#         ('vmdk', _('VMDK'))
+#     ]
+# }
 
 # OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
 # in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'internalURL'.
+# external to the OpenStack environment. The default is 'publicURL'.
 #OPENSTACK_ENDPOINT_TYPE = "publicURL"
 
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None.  This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
 # The number of objects (Swift containers/objects or images) to display
 # on a single page before providing a paging element (a "more" link)
 # to paginate results.
@@ -158,6 +220,36 @@
 # of your entire OpenStack installation, and hopefully be in UTC.
 TIME_ZONE = "UTC"
 
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending.  Provide a callback method here
+# (and/or a flag for reverse sort) for the sorted() method if you'd
+# like a different behaviour.  For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+#     'key': my_awesome_callback_method,
+#     'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+#    'identity': 'keystone_policy.json',
+#    'compute': 'nova_policy.json'
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
 LOGGING = {
     'version': 1,
     # When set to True this will disable all logging except
@@ -189,27 +281,182 @@
         },
         'horizon': {
             'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
         },
         'openstack_dashboard': {
             'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
         },
         'novaclient': {
             'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'cinderclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
         },
         'keystoneclient': {
             'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
         },
         'glanceclient': {
             'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'neutronclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'heatclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'ceilometerclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'troveclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'swiftclient': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'openstack_auth': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
         },
         'nose.plugins.manager': {
             'handlers': ['console'],
+            'level': 'DEBUG',
             'propagate': False,
-        }
+        },
+        'django': {
+            'handlers': ['console'],
+            'level': 'DEBUG',
+            'propagate': False,
+        },
+        'iso8601': {
+            'handlers': ['null'],
+            'propagate': False,
+        },
     }
 }
+
+SECURITY_GROUP_RULES = {
+    'all_tcp': {
+        'name': 'ALL TCP',
+        'ip_protocol': 'tcp',
+        'from_port': '1',
+        'to_port': '65535',
+    },
+    'all_udp': {
+        'name': 'ALL UDP',
+        'ip_protocol': 'udp',
+        'from_port': '1',
+        'to_port': '65535',
+    },
+    'all_icmp': {
+        'name': 'ALL ICMP',
+        'ip_protocol': 'icmp',
+        'from_port': '-1',
+        'to_port': '-1',
+    },
+    'ssh': {
+        'name': 'SSH',
+        'ip_protocol': 'tcp',
+        'from_port': '22',
+        'to_port': '22',
+    },
+    'smtp': {
+        'name': 'SMTP',
+        'ip_protocol': 'tcp',
+        'from_port': '25',
+        'to_port': '25',
+    },
+    'dns': {
+        'name': 'DNS',
+        'ip_protocol': 'tcp',
+        'from_port': '53',
+        'to_port': '53',
+    },
+    'http': {
+        'name': 'HTTP',
+        'ip_protocol': 'tcp',
+        'from_port': '80',
+        'to_port': '80',
+    },
+    'pop3': {
+        'name': 'POP3',
+        'ip_protocol': 'tcp',
+        'from_port': '110',
+        'to_port': '110',
+    },
+    'imap': {
+        'name': 'IMAP',
+        'ip_protocol': 'tcp',
+        'from_port': '143',
+        'to_port': '143',
+    },
+    'ldap': {
+        'name': 'LDAP',
+        'ip_protocol': 'tcp',
+        'from_port': '389',
+        'to_port': '389',
+    },
+    'https': {
+        'name': 'HTTPS',
+        'ip_protocol': 'tcp',
+        'from_port': '443',
+        'to_port': '443',
+    },
+    'smtps': {
+        'name': 'SMTPS',
+        'ip_protocol': 'tcp',
+        'from_port': '465',
+        'to_port': '465',
+    },
+    'imaps': {
+        'name': 'IMAPS',
+        'ip_protocol': 'tcp',
+        'from_port': '993',
+        'to_port': '993',
+    },
+    'pop3s': {
+        'name': 'POP3S',
+        'ip_protocol': 'tcp',
+        'from_port': '995',
+        'to_port': '995',
+    },
+    'ms_sql': {
+        'name': 'MS SQL',
+        'ip_protocol': 'tcp',
+        'from_port': '1433',
+        'to_port': '1433',
+    },
+    'mysql': {
+        'name': 'MYSQL',
+        'ip_protocol': 'tcp',
+        'from_port': '3306',
+        'to_port': '3306',
+    },
+    'rdp': {
+        'name': 'RDP',
+        'ip_protocol': 'tcp',
+        'from_port': '3389',
+        'to_port': '3389',
+    },
+}
--- a/components/openstack/horizon/files/overrides.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/files/overrides.py	Wed Jun 11 17:13:12 2014 -0700
@@ -16,7 +16,6 @@
 Solaris-specific customizations for Horizon
 """
 
-from horizon import tabs
 from openstack_dashboard.dashboards.admin.networks.forms import CreateNetwork
 from openstack_dashboard.dashboards.admin.networks.ports.forms import \
     CreatePort
@@ -26,6 +25,8 @@
     DeleteSubnet, SubnetsTable
 from openstack_dashboard.dashboards.admin.networks.tables import \
     DeleteNetwork, NetworksTable
+from openstack_dashboard.dashboards.project.access_and_security.tabs import \
+    AccessAndSecurityTabs, APIAccessTab, FloatingIPsTab, KeypairsTab
 from openstack_dashboard.dashboards.project.instances.tabs import \
     InstanceDetailTabs, LogTab, OverviewTab
 from openstack_dashboard.dashboards.project.instances.workflows import \
@@ -45,13 +46,16 @@
 from openstack_dashboard.dashboards.project.networks.workflows import \
     CreateNetworkInfoAction, CreateSubnetDetailAction, CreateSubnetInfoAction
 
-# remove PostCreationStep from LaunchInstance
+# remove VolumeOptions and PostCreationStep from LaunchInstance
 create_instance.LaunchInstance.default_steps = \
     (create_instance.SelectProjectUser,
      create_instance.SetInstanceDetails,
      create_instance.SetAccessControls,
      create_instance.SetNetwork)
 
+# Remove the Security Groups tab from Project/Access and Security
+AccessAndSecurityTabs.tabs = (KeypairsTab, FloatingIPsTab, APIAccessTab)
+
 # remove the 'Console' tab from Instance Detail
 InstanceDetailTabs.tabs = (OverviewTab, LogTab)
 
--- a/components/openstack/horizon/horizon.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/horizon.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -45,7 +45,7 @@
 set name=pkg.summary value="OpenStack Horizon"
 set name=pkg.description \
     value="OpenStack Horizon is the canonical implementation of Openstack's Dashboard, which provides a web based user interface to OpenStack services including Nova, Swift, Keystone, etc."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -61,7 +61,7 @@
 file files/openstack-dashboard-tls.conf \
     path=etc/apache2/2.2/samples-conf.d/openstack-dashboard-tls.conf
 file files/local_settings.py path=etc/openstack_dashboard/local_settings.py \
-    mode=0644 overlay=allow pkg.tmp.autopyc=false preserve=true
+    mode=0644 overlay=allow pkg.tmp.autopyc=false preserve=renamenew
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
@@ -97,40 +97,28 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/forms/fields.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/forms/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/loaders.py
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/bg_BG/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/bg_BG/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ca/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/cs/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en/LC_MESSAGES/djangojs.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en_AU/LC_MESSAGES/django.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en_AU/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en_GB/LC_MESSAGES/django.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/en_GB/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/es/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/es/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/fi_FI/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/fr/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/fr/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/hu/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/it/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/it/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ja/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ja/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ka_GE/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ko_KR/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ko_KR/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/nl_NL/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/nl_NL/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pl/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pl/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pt/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pt/LC_MESSAGES/djangojs.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pl_PL/LC_MESSAGES/django.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pl_PL/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pt_BR/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/pt_BR/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ru/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/ru/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/vi_VN/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/zh_CN/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/zh_CN/LC_MESSAGES/djangojs.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/zh_HK/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/zh_TW/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/locale/zh_TW/LC_MESSAGES/djangojs.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/management/__init__.py
@@ -141,24 +129,30 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/middleware.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/site_urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/bootstrap/js/bootstrap-datepicker.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/bootstrap/js/bootstrap.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/bootstrap/js/bootstrap.min.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.communication.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.conf.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.cookies.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.d3linechart.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.d3piechart.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.firewalls.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.forms.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.heattop.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.instances.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.membership.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.messages.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.modals.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.networktopology.js
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.projects.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.quota.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.tables.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.tabs.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.templates.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.users.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/js/horizon.utils.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/d3.v3.min.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/hogan-2.0.0.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/jquery/jquery-ui-1.9.2.custom.min.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/jquery/jquery.cookie.js
@@ -168,6 +162,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/json2.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/qunit/qunit.css
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/qunit/qunit.js
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/rickshaw.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/spin.jquery.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/spin.js
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon/lib/underscore/underscore-min.js
@@ -194,8 +189,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/_subnav_list.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_alert_message.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_loading.html
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_membership.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_modal.html
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_project_user.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_script_loader.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/_table_row.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/client_side/template.html
@@ -207,12 +202,13 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_data_table_row_actions.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_data_table_table_actions.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_detail_table.html
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_domain_page_header.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_form_fields.html
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_limit_summary.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_modal.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_modal_form.html
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_modal_form_add_members.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_page_header.html
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_progress_bar.html
-file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_quota_summary.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_region_selector.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_resource_browser.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_sidebar.html
@@ -220,6 +216,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_tab_group.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_usage_summary.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_workflow.html
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_workflow_base.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_workflow_step.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/common/_workflow_step_update_members.html
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templates/horizon/qunit.html
@@ -228,6 +225,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/branding.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/horizon.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/parse_date.py
+file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/shellfilter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/sizeformat.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/templatetags/truncate_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/horizon/utils/__init__.py
@@ -246,18 +244,44 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/ceilometer.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/cinder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/fwaas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/glance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/heat.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/keystone.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/lbaas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/network_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/neutron.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/nova.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/quantum.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/swift.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/trove.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/api/vpn.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/conf/keystone_policy.json
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/conf/nova_policy.json
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/context_processors.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/dashboard.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/templates/defaults/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/defaults/workflows.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/templates/domains/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/domains/workflows.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/extras/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/extras/forms.py
@@ -265,13 +289,10 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/extras/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/extras/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/extras/views.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/_create.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/_edit.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/_update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/create.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/edit.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/extras/_create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/extras/_edit.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/extras/_index.html
@@ -279,9 +300,34 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/extras/edit.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/extras/index.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/templates/flavors/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/flavors/workflows.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/_add_non_member.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/_create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/_update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/add_non_member.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/manage.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/templates/groups/update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/groups/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/templates/hypervisors/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/hypervisors/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/images/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/images/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/images/panel.py
@@ -309,6 +355,14 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/instances/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/instances/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/instances/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/templates/metering/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/templates/metering/stats.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/metering/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/forms.py
@@ -333,9 +387,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/ports/_update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/ports/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/ports/update.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/subnets/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/subnets/index.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/subnets/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/templates/networks/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/networks/urls.py
@@ -348,27 +400,26 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/overview/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/overview/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/tables.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_add_user.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_create.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_create_user.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_quotas.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_update.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/_update_members.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/add_user.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/create.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/create_user.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/index.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/quotas.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/usage.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/templates/projects/users.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/projects/workflows.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/templates/roles/_create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/templates/roles/_update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/templates/roles/create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/templates/roles/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/templates/roles/update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/roles/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/routers/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/routers/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/admin/routers/ports/__init__.py
@@ -419,7 +470,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/urls.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/floating_ips/workflows.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/keypairs/__init__.py
@@ -440,7 +490,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/api_access/openrc.sh.template
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/floating_ips/_allocate.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/floating_ips/allocate.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/floating_ips/associate.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/index.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/keypairs/_create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/keypairs/_import.html
@@ -449,9 +498,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/keypairs/import.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/_add_rule.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/_create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/_update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/add_rule.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/detail.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/templates/access_and_security/security_groups/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/access_and_security/views.py
@@ -460,17 +511,79 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/_container_detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/_copy.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/_create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/_object_detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/_upload.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/container_detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/copy.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/object_detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/templates/containers/upload.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/containers/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/dashboard.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/templates/database_backups/_backup_details_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/templates/database_backups/backup.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/templates/database_backups/details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/templates/database_backups/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/workflows/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/database_backups/workflows/create_backup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/_detail_overview.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/_detail_users.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/_launch_details_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/_launch_initialize_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/_launch_restore_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/detail.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/launch.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/templates/databases/update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/workflows/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/databases/workflows/create_instance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_firewall_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_insert_rule_to_policy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_policy_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_remove_rule_from_policy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_rule_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_update_rule_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_update_rules.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_updatefirewall.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_updatepolicy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/_updaterule.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/addfirewall.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/addpolicy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/addrule.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/details_tabs.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/insert_rule_to_policy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/remove_rule_from_policy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/updatefirewall.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/updatepolicy.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/templates/firewalls/updaterule.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/firewalls/workflows.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/images/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/images/forms.py
@@ -482,7 +595,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/forms.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/views.py
@@ -499,35 +611,40 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/templates/images_and_snapshots/snapshots/detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tabs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/tabs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_detail_console.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_detail_log.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_detail_overview.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_flavors_and_quotas.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_instance_ips.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_launch_customize_help.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_launch_details_help.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_launch_network_help.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_launch_volumes_help.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_rebuild.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/_update_networks.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/index.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/launch.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/update.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/templates/instances/rebuild.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/workflows/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/workflows/create_instance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/workflows/resize_instance.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/instances/workflows/update_instance.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/panel.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/tables.py
@@ -538,19 +655,37 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_monitors_tab.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_pool_details.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_pools_tab.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_updatemember.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_updatemonitor.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_updatepool.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_updatevip.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/_vip_details.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/addmember.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/addmonitor.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/addpool.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/addvip.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/details_tabs.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/updatemember.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/updatemonitor.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/updatepool.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/templates/loadbalancers/updatevip.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/loadbalancers/workflows.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/instances/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/instances/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/ports/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/ports/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/routers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/routers/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/_create_router.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/_post_massage.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/_svg_element.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/client_side/_balloon_container.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/client_side/_balloon_device.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/client_side/_balloon_port.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/create_router.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/iframe.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/templates/network_topology/index.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/network_topology/views.py
@@ -574,7 +709,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/_detail_overview.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/_network_ips.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/_update.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/index.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/ports/_detail_overview.html
@@ -583,10 +717,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/ports/detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/ports/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/subnets/_detail_overview.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/subnets/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/subnets/detail.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/subnets/index.html
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/subnets/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/templates/networks/update.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/networks/urls.py
@@ -622,6 +754,31 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/routers/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/routers/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/routers/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/mappings.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/sro.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_events.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_overview.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_resources.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_topology.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_resource_info.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_resource_overview.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_select_template.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/_stack_info.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/create.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/detail.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/resource.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/templates/stacks/select_template.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/stacks/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/panel.py
@@ -631,6 +788,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/_create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/_create_snapshot.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/_detail_overview.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/_limits.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/attach.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/create.html
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/templates/volumes/create_snapshot.html
@@ -639,9 +797,49 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/tests.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/volumes/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/_ikepolicy_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/_ipsecpolicy_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/_ipsecsiteconnection_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/_vpnservice_details.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/details_tabs.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/templates/vpn/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/project/vpn/workflows.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/dashboard.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/tabs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/_create_network_profile.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/_update_network_profile.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/create_network_profile.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/network_profile/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/policy_profile/index.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/templates/nexus1000v/update_network_profile.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/router/nexus1000v/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/dashboard.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/forms.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/panel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/templates/password/_change.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/templates/password/change.html
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/tests.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/urls.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/password/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/user/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/user/forms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/user/panel.py
@@ -651,36 +849,67 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/user/urls.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/dashboards/settings/user/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/hooks.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/local/__init__.py
 link \
     path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/local/local_settings.py \
     target=../../../../../../etc/openstack_dashboard/local_settings.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/bg_BG/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/ca/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/cs/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/en/LC_MESSAGES/django.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/en_AU/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/en_GB/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/es/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/fi_FI/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/fr/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/hu/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/it/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/ja/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/ka_GE/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/ko_KR/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/nl_NL/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/pl/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/pt/LC_MESSAGES/django.mo
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/pl_PL/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/pt_BR/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/ru/LC_MESSAGES/django.mo
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/vi_VN/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/zh_CN/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/locale/zh_TW/LC_MESSAGES/django.mo
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/setup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/config/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/config/generator.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/eventlet_backdoor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/excutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/fileutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/gettextutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/jsonutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/loopingcall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/network_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/log_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/no_op_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/rpc_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/rpc_notifier2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/notifier/test_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/amqp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/dispatcher.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/impl_fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/impl_kombu.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/impl_qpid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/impl_zmq.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/matchmaker.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/matchmaker_redis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/matchmaker_ring.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/serializer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/rpc/zmq_receiver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/threadgroup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/timeutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/openstack/common/uuidutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/overrides.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/policy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/settings.py
 dir  path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static \
     owner=webservd group=webservd
@@ -696,6 +925,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/close.less
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/code.less
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/component-animations.less
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/datepicker.less
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/dropdowns.less
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/forms.less
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/bootstrap/less/grid.less
@@ -726,18 +956,40 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/fonts/Anivers_Regular-webfont.ttf
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/fonts/Anivers_Regular-webfont.woff
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/action_required.png
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/db-gray.gif
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/db-gray.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/db-green.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/db-red.svg
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/drag.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/drop_arrow.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/favicon.ico
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/lb-gray.gif
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/lb-gray.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/lb-green.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/lb-red.svg
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/loading.gif
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/logo-splash.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/logo.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/right_droparrow.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/router.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/search.png
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/server-gray.gif
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/server-gray.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/server-green.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/server-red.svg
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/server.png
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/stack-gray.gif
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/stack-gray.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/stack-green.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/stack-red.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/unknown-gray.gif
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/unknown-gray.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/unknown-green.svg
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/unknown-red.svg
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/img/up_arrow.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/less/horizon.less
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/less/horizon_charts.less
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/dashboard/less/rickshaw.css
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/solaris/css/solaris.css
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/solaris/img/OpenStack_Dashboard_txt.png
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/static/solaris/img/Openstack_banner.png
@@ -762,69 +1014,101 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/usage/quotas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/usage/tables.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/usage/views.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/utils/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/utils/filters.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/views.py
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_dashboard/wsgi/django.wsgi
+dir  path=var/lib/openstack_dashboard owner=webservd group=webservd
 link path=var/lib/openstack_dashboard/static/horizon \
     target=../../../../../usr/lib/python$(PYVER)/vendor-packages/horizon/static/horizon
 license horizon.license license="Apache v2.0" \
     com.oracle.info.description="Horizon, the OpenStack web based user interface service" \
-    com.oracle.info.name=$(COMPONENT_NAME) com.oracle.info.tpno=16292 \
+    com.oracle.info.name=$(COMPONENT_NAME) com.oracle.info.tpno=17862 \
     com.oracle.info.version=$(IPS_COMPONENT_VERSION)
 license solaris.css.license license="Apache v2.0 (Solaris theme)" \
     com.oracle.info.description="the Solaris theme for the OpenStack Dashboard" \
     com.oracle.info.name=$(COMPONENT_NAME) com.oracle.info.tpno=16775 \
     com.oracle.info.version=1.0
 
+# force a group dependency on the optional anyjson; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/anyjson-26
+
+# force a group dependency on the optional troveclient; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/troveclient-26
+
 # force a dependency on the Python runtime
 depend type=require fmri=__TBD pkg.debug.depend.file=python$(PYVER) \
     pkg.debug.depend.path=usr/bin
 
-# force dependency on cinderclient; pkgdepend work is needed to flush this out
+# force a dependency on ceilometerclient; pkgdepend work is needed to flush this
+# out
+depend type=require fmri=library/python/ceilometerclient-26
+
+# force a dependency on cinderclient; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/cinderclient-26
 
-# force dependency on django; pkgdepend work is needed to flush this out
+# force a dependency on django; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/django-26
 
-# force dependency on django_compressor; one of the applications defined in
+# force a dependency on django_compressor; one of the applications defined in
 # settings.py
 depend type=require fmri=library/python/django_compressor-26
 
-# force dependency on django_openstack_auth; pkgdepend work is needed to flush
+# force a dependency on django_openstack_auth; pkgdepend work is needed to flush
 # this out
 depend type=require fmri=library/python/django_openstack_auth-26
 
-# force dependency on glanceclient; pkgdepend work is needed to flush this out
+# force a dependency on glanceclient; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/glanceclient-26
 
-# force dependency on keystoneclient; pkgdepend work is needed to flush this out
+# force a dependency on greenlet; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/greenlet-26
+
+# force a dependency on heatclient; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/heatclient-26
+
+# force a dependency on iso8601; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/iso8601-26
+
+# force a dependency on keystoneclient; pkgdepend work is needed to flush this
+# out
 depend type=require fmri=library/python/keystoneclient-26
 
-# force dependency on lesscpy; one of the filters defined in settings.py
+# force a dependency on kombu; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/kombu-26
+
+# force a dependency on lesscpy; one of the filters defined in settings.py
 depend type=require fmri=library/python/lesscpy-26
 
-# force dependency on lockfile; pkgdepend work is needed to flush this out
+# force a dependency on lockfile; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/lockfile-26
 
-# force dependency on netaddr; pkgdepend work is needed to flush this out
+# force a dependency on netaddr; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/netaddr-26
 
-# force dependency on novaclient; pkgdepend work is needed to flush this out
+# force a dependency on neutronclient; pkgdepend work is needed to flush this
+# out
+depend type=require fmri=library/python/neutronclient-26
+
+# force a dependency on novaclient; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/novaclient-26
 
-# force dependency on pytz; pkgdepend work is needed to flush this out
+# force a dependency on pbr; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/pbr-26
+
+# force a dependency on pytz; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/pytz-26
 
-# force dependency on quantumclient; pkgdepend work is needed to flush this out
-depend type=require fmri=library/python/quantumclient-26
+# force a dependency on six; pkgdepend work is needed to flush this out
+depend type=require fmri=library/python/six-26
 
-# force dependency on setuptools; pkgdepend work is needed to flush this out
-depend type=require fmri=library/python/setuptools-26
-
-# force dependency on swiftclient; pkgdepend work is needed to flush this out
+# force a dependency on swiftclient; pkgdepend work is needed to flush this out
 depend type=require fmri=library/python/swiftclient-26
 
 # force a dependency on the Apache web server
 depend type=require fmri=web/server/apache-22
 
-# force dependency on the apache-wsgi module
+# force a dependency on the apache-wsgi module
 depend type=require fmri=web/server/apache-22/module/apache-wsgi-26
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/01-CVE-2014-0157.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,145 @@
+Upstream patch for CVE-2014-0157.  This issue is fixed in Icehouse
+2014.1 and Havana 2013.2.4.
+
+From 54ec015f720a4379e8ffc34345b3a7bf36b6f15b Mon Sep 17 00:00:00 2001
+From: CristianFiorentino <[email protected]>
+Date: Mon, 10 Mar 2014 17:36:31 -0300
+Subject: [PATCH] Introduces escaping in Horizon/Orchestration
+
+1) Escape help_text a second time to avoid bootstrap tooltip XSS issue
+
+The "Description" parameter in a Heat template is used to populate
+a help_text tooltip in the dynamically generated Heat form. Bootstrap
+inserts this tooltip into the DOM using .html() which undoes any
+escaping we do in Django (it should be using .text()).
+
+This was fixed by forcing the help_text content to be escaped a second
+time. The issue itself is mitigated in bootstrap.js release 2.0.3
+(ours is currently 2.0.1).
+
+2) Properly escape untrusted Heat template 'outputs'
+
+The 'outputs' parameter in a Heat template was included in a Django
+template with HTML autoescaping turned off. Malicious HTML content
+could be included in a Heat template and would be rendered by Horizon
+when details about a created stack were displayed.
+
+This was fixed by not disabling autoescaping and explicitly escaping
+untrusted values in any strings that are later marked "safe" to render
+without further escaping.
+
+Conflicts:
+	openstack_dashboard/dashboards/project/stacks/mappings.py
+
+Change-Id: Icd9f9d9ca77068b12227d77469773a325c840001
+Closes-Bug: #1289033
+Co-Authored-By: Kieran Spear <[email protected]>
+---
+ horizon/templates/horizon/common/_form_fields.html |    7 ++++++-
+ .../dashboards/project/stacks/mappings.py          |   10 ++++++++--
+ .../stacks/templates/stacks/_detail_overview.html  |    3 +--
+ .../dashboards/project/stacks/tests.py             |   17 +++++++++++------
+ 4 files changed, 26 insertions(+), 11 deletions(-)
+
+diff --git a/horizon/templates/horizon/common/_form_fields.html b/horizon/templates/horizon/common/_form_fields.html
+index 3567614..f6fb98f 100644
+--- a/horizon/templates/horizon/common/_form_fields.html
++++ b/horizon/templates/horizon/common/_form_fields.html
+@@ -14,7 +14,12 @@
+         <span class="help-inline">{{ error }}</span>
+       {% endfor %}
+     {% endif %}
+-    <span class="help-block">{{ field.help_text }}</span>
++    {% comment %}
++    Escape help_text a second time here, to avoid an XSS issue in bootstrap.js.
++    This can most likely be removed once we upgrade bootstrap.js past 2.0.2.
++    Note: the spaces are necessary here.
++    {% endcomment %}
++    <span class="help-block">{% filter force_escape %} {{ field.help_text }} {% endfilter %} </span>
+     <div class="input">
+       {{ field }}
+     </div>
+diff --git a/openstack_dashboard/dashboards/project/stacks/mappings.py b/openstack_dashboard/dashboards/project/stacks/mappings.py
+index 0353291..f1389c5 100644
+--- a/openstack_dashboard/dashboards/project/stacks/mappings.py
++++ b/openstack_dashboard/dashboards/project/stacks/mappings.py
+@@ -19,6 +19,8 @@ import urlparse
+ 
+ from django.core.urlresolvers import reverse  # noqa
+ from django.template.defaultfilters import register  # noqa
++from django.utils import html
++from django.utils import safestring
+ 
+ from openstack_dashboard.api import swift
+ 
+@@ -76,11 +78,15 @@ def stack_output(output):
+     if not output:
+         return u''
+     if isinstance(output, dict) or isinstance(output, list):
+-        return u'<pre>%s</pre>' % json.dumps(output, indent=2)
++        json_string = json.dumps(output, indent=2)
++        safe_output = u'<pre>%s</pre>' % html.escape(json_string)
++        return safestring.mark_safe(safe_output)
+     if isinstance(output, basestring):
+         parts = urlparse.urlsplit(output)
+         if parts.netloc and parts.scheme in ('http', 'https'):
+-            return u'<a href="%s" target="_blank">%s</a>' % (output, output)
++            url = html.escape(output)
++            safe_link = u'<a href="%s" target="_blank">%s</a>' % (url, url)
++            return safestring.mark_safe(safe_link)
+     return unicode(output)
+ 
+ 
+diff --git a/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_overview.html b/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_overview.html
+index f4756e0..33fe783 100644
+--- a/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_overview.html
++++ b/openstack_dashboard/dashboards/project/stacks/templates/stacks/_detail_overview.html
+@@ -36,9 +36,8 @@
+     <dt>{{ output.output_key }}</dt>
+     <dd>{{ output.description }}</dd>
+     <dd>
+-    {% autoescape off %}
+     {{ output.output_value|stack_output }}
+-    {% endautoescape %}</dd>
++    </dd>
+     {% endfor %}
+   </dl>
+ </div>
+diff --git a/openstack_dashboard/dashboards/project/stacks/tests.py b/openstack_dashboard/dashboards/project/stacks/tests.py
+index 408d86f..986e3e0 100644
+--- a/openstack_dashboard/dashboards/project/stacks/tests.py
++++ b/openstack_dashboard/dashboards/project/stacks/tests.py
+@@ -16,6 +16,7 @@ import json
+ 
+ from django.core.urlresolvers import reverse  # noqa
+ from django import http
++from django.utils import html
+ 
+ from mox import IsA  # noqa
+ 
+@@ -77,12 +78,16 @@ class MappingsTests(test.TestCase):
+         self.assertEqual(u'foo', mappings.stack_output('foo'))
+         self.assertEqual(u'', mappings.stack_output(None))
+ 
+-        self.assertEqual(
+-            u'<pre>[\n  "one", \n  "two", \n  "three"\n]</pre>',
+-            mappings.stack_output(['one', 'two', 'three']))
+-        self.assertEqual(
+-            u'<pre>{\n  "foo": "bar"\n}</pre>',
+-            mappings.stack_output({'foo': 'bar'}))
++        outputs = ['one', 'two', 'three']
++        expected_text = """[\n  "one", \n  "two", \n  "three"\n]"""
++
++        self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
++                         mappings.stack_output(outputs))
++
++        outputs = {'foo': 'bar'}
++        expected_text = """{\n  "foo": "bar"\n}"""
++        self.assertEqual(u'<pre>%s</pre>' % html.escape(expected_text),
++                         mappings.stack_output(outputs))
+ 
+         self.assertEqual(
+             u'<a href="http://www.example.com/foo" target="_blank">'
+-- 
+1.7.9.5
+
--- a/components/openstack/horizon/patches/01-remove-nodejs.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-Upstream Change I89ed102f: Drop NodeJS dependency in favor of
-pure-python lesscpy https://review.openstack.org/#/c/37473/
-
-Fixed in Havana 2013.2
-
---- horizon-2013.1.4/openstack_dashboard/settings.py.orig  2013-12-03 09:39:07.042100957 -0700
-+++ horizon-2013.1.4/openstack_dashboard/settings.py 2013-12-03 09:39:34.835630769 -0700
-@@ -112,9 +112,8 @@
-     'django.contrib.staticfiles.finders.AppDirectoriesFinder',
- )
-
--less_binary = os.path.join(BIN_DIR, 'less', 'lessc')
- COMPRESS_PRECOMPILERS = (
--    ('text/less', (less_binary + ' {infile} {outfile}')),
-+    ('text/less', ('lesscpy {infile}')),
- )
-
- COMPRESS_CSS_FILTERS = (
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/02-launchpad-1264228.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,71 @@
+This proposed upstream patch addresses
+
+	18562372 Failed to create a new project under Horizon
+
+and is tracked under Launchpad bug 1264228. Although it's been
+addressed in Icehouse 2014.1, the patch below is still not yet released
+for Havana.  It has been modified to apply cleanly to our current
+Havana implementation.
+
+From e02eaab30996af7e8770cd651bd8aa7d504358a8 Mon Sep 17 00:00:00 2001
+From: JiaHao Li <[email protected]>
+Date: Thu, 26 Dec 2013 15:37:14 +0800
+Subject: [PATCH] Sync OPENSTACK_KEYSTONE_DEFAULT_ROLE with keystone
+
+For now, keystone default role is _member_, while horizon set
+OPENSTACK_KEYSTONE_DEFAULT_ROLE to Member. It will really be user
+friendly to modify horizon default value to _member_ to sync with
+keystone's default setting.
+
+Conflicts:
+
+    doc/source/topics/settings.rst
+
+Change-Id: I55d15e6cfb74e52e933c5a44efd6c27930415738
+Closes-Bug: #1264228
+(cherry picked from commit 0aacc44f324c3db049f912da1f84d93c1142cb37)
+---
+ doc/source/topics/settings.rst                     |    2 +-
+ .../local/local_settings.py.example                |    2 +-
+ openstack_dashboard/test/settings.py               |    2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/doc/source/topics/settings.rst b/doc/source/topics/settings.rst
+index 1f5eeea..2b2eabd 100644
+--- a/doc/source/topics/settings.rst
++++ b/doc/source/topics/settings.rst
+@@ -176,7 +176,7 @@ If you do not have multiple regions you should use the ``OPENSTACK_HOST`` and
+ ``OPENSTACK_KEYSTONE_DEFAULT_ROLE``
+ -----------------------------------
+ 
+-Default: "Member"
++Default: "_member_"
+ 
+ The name of the role which will be assigned to a user when added to a project.
+ This name must correspond to a role name in Keystone.
+diff --git a/openstack_dashboard/local/local_settings.py.example b/openstack_dashboard/local/local_settings.py.example
+index a1a8474..92fcc16 100644
+--- a/openstack_dashboard/local/local_settings.py.example
++++ b/openstack_dashboard/local/local_settings.py.example
+@@ -126,7 +126,7 @@ EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+ 
+ OPENSTACK_HOST = "127.0.0.1"
+ OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
++OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+ 
+ # Disable SSL certificate checks (useful for self-signed certificates):
+ # OPENSTACK_SSL_NO_VERIFY = True
+diff --git a/openstack_dashboard/test/settings.py b/openstack_dashboard/test/settings.py
+index 85f470d..08086a2 100644
+--- horizon-2013.2.3/openstack_dashboard/test/settings.py.~1~	2014-04-03 11:45:53.000000000 -0700
++++ horizon-2013.2.3/openstack_dashboard/test/settings.py	2014-05-19 11:50:14.914650963 -0700
+@@ -85,7 +85,7 @@
+ }
+ 
+ OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0"
+-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
++OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+ 
+ OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
+ OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'test_domain'
--- a/components/openstack/horizon/patches/02-update-flavor-form-length.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-In-house patch to increase the size of the form field for Flavor Names, Flavor
-Keys, and Flavor Values from to 50 characters.  Patch has not yet been
-submitted upstream
-
---- horizon-2013.1.4/openstack_dashboard/dashboards/admin/flavors/forms.py.orig   2014-03-04 14:05:10.040319808 -0700
-+++ horizon-2013.1.4/openstack_dashboard/dashboards/admin/flavors/forms.py    2014-03-04 14:05:14.940287716 -0700
-@@ -34,7 +34,7 @@
-
- class CreateFlavor(forms.SelfHandlingForm):
-     name = forms.RegexField(label=_("Name"),
--                            max_length=25,
-+                            max_length=50,
-                             regex=r'^[\w\.\- ]+$',
-                             error_messages={'invalid': _('Name may only '
-                                 'contain letters, numbers, underscores, '
-
---- horizon-2013.1.4/openstack_dashboard/dashboards/admin/flavors/extras/forms.py.orig   2014-03-04 14:06:58.958564688 -0700
-+++ horizon-2013.1.4/openstack_dashboard/dashboards/admin/flavors/extras/forms.py    2014-03-04 14:07:08.193582887 -0700
-@@ -31,8 +31,8 @@
-
-
- class CreateExtraSpec(forms.SelfHandlingForm):
--    key = forms.CharField(max_length="25", label=_("Key"))
--    value = forms.CharField(max_length="25", label=_("Value"))
-+    key = forms.CharField(max_length="50", label=_("Key"))
-+    value = forms.CharField(max_length="50", label=_("Value"))
-     flavor_id = forms.CharField(widget=forms.widgets.HiddenInput)
-
-     def handle(self, request, data):
-@@ -49,8 +49,8 @@
-
-
- class EditExtraSpec(forms.SelfHandlingForm):
--    key = forms.CharField(max_length="25", label=_("Key"))
--    value = forms.CharField(max_length="25", label=_("Value"))
-+    key = forms.CharField(max_length="50", label=_("Key"))
-+    value = forms.CharField(max_length="50", label=_("Value"))
-     flavor_id = forms.CharField(widget=forms.widgets.HiddenInput)
-
-     def handle(self, request, data):
--- a/components/openstack/horizon/patches/03-CVE-2013-6858.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-Upstream patch fixed in Havana 2013.2.1
-
-commit b14debc73132d1253220192e110f00f62ddb8bbc
-Author: Rob Raymond <[email protected]>
-Date:   Mon Nov 4 12:12:40 2013 -0700
-
-    Fix bug by escaping strings from Nova before displaying them
-    
-    Fixes bug #1247675
-    
-    (cherry-picked from commit b8ff480)
-    Change-Id: I3637faafec1e1fba081533ee020f4ee218fea101
-
-diff --git a/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py b/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py
-index 2311e5c..17a4fb5 100644
---- a/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py
-+++ b/openstack_dashboard/dashboards/project/images_and_snapshots/volume_snapshots/tables.py
-@@ -17,6 +17,7 @@
- import logging
- 
- from django.core.urlresolvers import reverse
-+from django.utils import html
- from django.utils import safestring
- from django.utils.http import urlencode
- from django.utils.translation import ugettext_lazy as _
-@@ -68,6 +69,7 @@ class SnapshotVolumeNameColumn(tables.Column):
-         request = self.table.request
-         volume_name = api.cinder.volume_get(request,
-                                             snapshot.volume_id).display_name
-+        volume_name = html.escape(volume_name)
-         return safestring.mark_safe(volume_name)
- 
-     def get_link_url(self, snapshot):
-diff --git a/openstack_dashboard/dashboards/project/volumes/tables.py b/openstack_dashboard/dashboards/project/volumes/tables.py
-index b14145b..e5426c1 100644
---- a/openstack_dashboard/dashboards/project/volumes/tables.py
-+++ b/openstack_dashboard/dashboards/project/volumes/tables.py
-@@ -19,7 +19,7 @@ import logging
- from django.core.urlresolvers import reverse, NoReverseMatch
- from django.template.defaultfilters import title
- from django.utils import safestring
--from django.utils.html import strip_tags
-+from django.utils import html
- from django.utils.translation import ugettext_lazy as _
- 
- from horizon import exceptions
-@@ -111,7 +111,7 @@ def get_attachment_name(request, attachment):
-                                          "attachment information."))
-     try:
-         url = reverse("horizon:project:instances:detail", args=(server_id,))
--        instance = '<a href="%s">%s</a>' % (url, name)
-+        instance = '<a href="%s">%s</a>' % (url, html.escape(name))
-     except NoReverseMatch:
-         instance = name
-     return instance
-@@ -132,7 +132,7 @@ class AttachmentColumn(tables.Column):
-             # without the server name...
-             instance = get_attachment_name(request, attachment)
-             vals = {"instance": instance,
--                    "dev": attachment["device"]}
-+                    "dev": html.escape(attachment["device"])}
-             attachments.append(link % vals)
-         return safestring.mark_safe(", ".join(attachments))
- 
-@@ -225,7 +225,7 @@ class AttachmentsTable(tables.DataTable):
-     def get_object_display(self, attachment):
-         instance_name = get_attachment_name(self.request, attachment)
-         vals = {"dev": attachment['device'],
--                "instance_name": strip_tags(instance_name)}
-+                "instance_name": html.escape(instance_name)}
-         return _("%(dev)s on instance %(instance_name)s") % vals
- 
-     def get_object_by_id(self, obj_id):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/03-launchpad-1254694.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,41 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+still has not yet been released for Havana.  It has been modified to
+apply cleanly to our current Havana implementation.
+
+commit 05ba5f81c390efbb6e5eb98db62f8f2599389ddb
+Author: Matt Fischer <[email protected]>
+Date:   Thu Jan 9 15:35:38 2014 -0700
+
+    Don't reset LOCAL_PATH if it's already set
+    
+    If LOCAL_PATH is set in local_settings, then settings should
+    not override it, even if SECRET_KEY is unset.
+    
+    Change-Id: I6aca03f65afddffb6cdd00e4084a8ab9b2255ef1
+    Closes-Bug: #1254694
+
+diff --git a/openstack_dashboard/settings.py b/openstack_dashboard/settings.py
+--- horizon-2013.2.3/openstack_dashboard/settings.py.~1~	2014-04-03 11:45:53.000000000 -0700
++++ horizon-2013.2.3/openstack_dashboard/settings.py	2014-05-25 13:48:46.166574128 -0700
+@@ -206,6 +206,7 @@
+ }
+ 
+ SECRET_KEY = None
++LOCAL_PATH = None
+ 
+ try:
+     from local.local_settings import *  # noqa
+@@ -216,9 +217,11 @@
+ # file is present. See local_settings.py.example for full documentation on the
+ # horizon.utils.secret_key module and its use.
+ if not SECRET_KEY:
++    if not LOCAL_PATH:
++        LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
++                                  'local')
++
+     from horizon.utils import secret_key
+-    LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
+-                              'local')
+     SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH,
+                                                        '.secret_key_store'))
+ 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/horizon/patches/04-blue-piechart.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,23 @@
+Internal only patch to change the color of pie charts drawn by
+Horizon to blue.  This patch is a recommendation from the UX design
+team and will not be committed upstream.
+
+--- horizon-2013.2.3/horizon/static/horizon/js/horizon.d3piechart.js.orig  2014-06-11 13:19:35.874027396 -0600
++++ horizon-2013.2.3/horizon/static/horizon/js/horizon.d3piechart.js       2014-06-11 13:19:42.692287410 -0600
+@@ -17,9 +17,13 @@
+   r: 45,
+   bkgrnd: "#F2F2F2",
+   frgrnd: "#006CCF",
+-  full: "#D0342B",
+-  nearlyfull: "orange",
+-
++  full: "#006CCF",
+  nearlyfull: "#006CCF",
++  
++  // frgrnd: "#006CCF",
++  // full: "#D0342B",
++  // nearlyfull: "orange",
++  
+   init: function() {
+     var self = this;
+ 
--- a/components/openstack/horizon/patches/04-lauchpad-1187129.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-From d7b8c1eeb6c36800a891584ad948b4b5ab1f52bf Mon Sep 17 00:00:00 2001
-From: armando-migliaccio <[email protected]>
-Date: Mon, 3 Jun 2013 13:57:44 -0700
-Subject: [PATCH] Fix bug in port creation form
-
-'network_name' is not a recognized Quantum attribute, so passing it
-in the request to the Quantum Server causes an error. In the end this
-is not required because the network is identified by its id. So
-this patch removes it (as it's done in other cases as well).
-
-Fixes bug #1187129
-
-Change-Id: Ie18c7bd504f8c241d002d6a050b8bcc59ed9fc03
-
-Fixed in Havana 2013.2
----
- .../dashboards/admin/networks/ports/forms.py       |    1 +
- .../dashboards/admin/networks/tests.py             |    2 --
- 2 files changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/openstack_dashboard/dashboards/admin/networks/ports/forms.py b/openstack_dashboard/dashboards/admin/networks/ports/forms.py
-index 7a1f45d..939952b 100644
---- a/openstack_dashboard/dashboards/admin/networks/ports/forms.py
-+++ b/openstack_dashboard/dashboards/admin/networks/ports/forms.py
-@@ -57,6 +57,7 @@ class CreatePort(forms.SelfHandlingForm):
-             network = api.quantum.network_get(request, data['network_id'])
-             data['tenant_id'] = network.tenant_id
-             data['admin_state_up'] = data['admin_state']
-+            del data['network_name']
-             del data['admin_state']
- 
-             port = api.quantum.port_create(request, **data)
-diff --git a/openstack_dashboard/dashboards/admin/networks/tests.py b/openstack_dashboard/dashboards/admin/networks/tests.py
-index d6c0123..9c2cd19 100644
---- a/openstack_dashboard/dashboards/admin/networks/tests.py
-+++ b/openstack_dashboard/dashboards/admin/networks/tests.py
-@@ -651,7 +651,6 @@ class NetworkPortTests(test.BaseAdminViewTests):
-         api.quantum.port_create(IsA(http.HttpRequest),
-                                 tenant_id=network.tenant_id,
-                                 network_id=network.id,
--                                network_name=network.name,
-                                 name=port.name,
-                                 admin_state_up=port.admin_state_up,
-                                 device_id=port.device_id,
-@@ -688,7 +687,6 @@ class NetworkPortTests(test.BaseAdminViewTests):
-         api.quantum.port_create(IsA(http.HttpRequest),
-                                 tenant_id=network.tenant_id,
-                                 network_id=network.id,
--                                network_name=network.name,
-                                 name=port.name,
-                                 admin_state_up=port.admin_state_up,
-                                 device_id=port.device_id,
--- a/components/openstack/horizon/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/horizon/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,1 +1,3 @@
+library/python/eventlet-26
+library/python/oslo.config-26
 runtime/python-26
--- a/components/openstack/keystone/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		keystone
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:3673f5d7c1c19fca7529934308e2d9a6efa55bf7d100d20de1aa85e431d259b2
+    sha256:0d27a32c6c211706f8b13aafe2fd51c7ddbea97897be90663fd8c2527ef56032
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/keystone
--- a/components/openstack/keystone/files/keystone.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/files/keystone.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -13,8 +13,8 @@
 
 # The base endpoint URLs for keystone that are advertised to clients
 # (NOTE: this does NOT affect how keystone listens for connections)
-# public_endpoint = http://localhost:%(public_port)d/
-# admin_endpoint = http://localhost:%(admin_port)d/
+# public_endpoint = http://localhost:%(public_port)s/
+# admin_endpoint = http://localhost:%(admin_port)s/
 
 # The port number which the OpenStack Compute service listens on
 # compute_port = 8774
@@ -34,6 +34,15 @@
 # member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
 # member_role_name = _member_
 
+# enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)
+# max_request_body_size = 114688
+
+# limit the sizes of user & tenant ID/names
+# max_param_size = 64
+
+# similar to max_param_size, but provides an exception for token values
+# max_token_size = 8192
+
 # === Logging Options ===
 # Print debugging output
 # (includes plaintext request logging, potentially including passwords)
@@ -73,9 +82,65 @@
 # or a module with notify() method:
 # onready = keystone.common.systemd
 
+# === Notification Options ===
+
+# Notifications can be sent when users or projects are created, updated or
+# deleted. There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and no_op (no notifications
+# sent, the default)
+
+# notification_driver can be defined multiple times
+# Do nothing driver (the default)
+# notification_driver = keystone.openstack.common.notifier.no_op_notifier
+# Logging driver example (not enabled by default)
+# notification_driver = keystone.openstack.common.notifier.log_notifier
+# RPC driver example (not enabled by default)
+# notification_driver = keystone.openstack.common.notifier.rpc_notifier
+
+# Default notification level for outgoing notifications
+# default_notification_level = INFO
+
+# Default publisher_id for outgoing notifications; included in the payload.
+# default_publisher_id =
+
+# AMQP topics to publish to when using the RPC notification driver.
+# Multiple values can be specified by separating with commas.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# === RPC Options ===
+
+# For Keystone, these options apply only when the RPC notification driver is
+# used.
+
+# The messaging module to use, defaults to kombu.
+# rpc_backend = keystone.openstack.common.rpc.impl_kombu
+
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+
+# Modules of exceptions that are permitted to be recreated upon receiving
+# exception data from an rpc call.
+# allowed_rpc_exception_modules = keystone.openstack.common.exception,nova.exception,cinder.exception,exceptions
+
+# If True, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# AMQP exchange to connect to if using RabbitMQ or Qpid
+# control_exchange = openstack
+
 [sql]
 # The SQLAlchemy connection string used to connect to the database
-# connection = sqlite:////var/lib/keystone/keystone.sqlite
+connection = sqlite:////var/lib/keystone/keystone.sqlite
 
 # the timeout before idle sql connections are reaped
 # idle_timeout = 200
@@ -90,6 +155,20 @@
 # There is nothing special about this domain, other than the fact that it must
 # exist to order to maintain support for your v2 clients.
 # default_domain_id = default
+#
+# A subset (or all) of domains can have their own identity driver, each with
+# their own partial configuration file in a domain configuration directory.
+# Only values specific to the domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# domain_specific_drivers_enabled to True to enable.
+# domain_specific_drivers_enabled = False
+# domain_config_dir = /etc/keystone/domains
+
+# Maximum supported length for user passwords; decrease to improve performance.
+# max_password_length = 4096
+
+[credential]
+# driver = keystone.credential.backends.sql.Credential
 
 [trust]
 # driver = keystone.trust.backends.sql.Trust
@@ -97,6 +176,11 @@
 # delegation and impersonation features can be optionally disabled
 # enabled = True
 
+[os_inherit]
+# role-assignment inheritance to projects from owning domain can be
+# optionally enabled
+# enabled = False
+
 [catalog]
 # dynamic, sql-based backend (supports API/CLI-based management commands)
 # driver = keystone.catalog.backends.sql.Catalog
@@ -106,33 +190,132 @@
 
 # template_file = default_catalog.templates
 
+[endpoint_filter]
+# extension for creating associations between project and endpoints in order to
+# provide a tailored catalog for project-scoped token requests.
+# driver = keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+# return_all_endpoints_if_no_filter = True
+
 [token]
-# driver = keystone.token.backends.kvs.Token
+# Provides token persistence.
+# driver = keystone.token.backends.sql.Token
+
+# Controls the token construction, validation, and revocation operations.
+# Core providers are keystone.token.providers.[pki|uuid].Provider
+# provider =
 
 # Amount of time a token should remain valid (in seconds)
 # expiration = 86400
 
+# External auth mechanisms that should add bind information to token.
+# eg kerberos, x509
+# bind =
+
+# Enforcement policy on tokens presented to keystone with bind information.
+# One of disabled, permissive, strict, required or a specifically required bind
+# mode e.g. kerberos or x509 to require binding to that authentication.
+# enforce_token_bind = permissive
+
+# Token specific caching toggle. This has no effect unless the global caching
+# option is set to True
+# caching = True
+
+# Token specific cache time-to-live (TTL) in seconds.
+# cache_time =
+
+# Revocation-List specific cache time-to-live (TTL) in seconds.
+# revocation_cache_time = 3600
+
+[cache]
+# Global cache functionality toggle.
+# enabled = False
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name
+# config_prefix = cache.keystone
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region.
+# This applies to any cached method that doesn't have an explicit cache
+# expiration time defined for it.
+# expiration_time = 600
+
+# Dogpile.cache backend module. It is recommended that Memcache
+# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production
+# deployments.  Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend.
+# backend = keystone.common.cache.noop
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend.
+# Example format: <argname>:<value>
+# backend_argument =
+
+# Proxy Classes to import that will affect the way the dogpile.cache backend
+# functions.  See the dogpile.cache documentation on changing-backend-behavior.
+# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2
+# proxies =
+
+# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This
+# is toggle-able for debugging purposes, it is highly recommended to always
+# leave this set to True.
+# use_key_mangler = True
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls)
+# This is only really useful if you need to see the specific cache-backend
+# get/set/delete calls with the keys/values.  Typically this should be left
+# set to False.
+# debug_cache_backend = False
+
 [policy]
 # driver = keystone.policy.backends.sql.Policy
 
 [ec2]
 # driver = keystone.contrib.ec2.backends.kvs.Ec2
 
+[assignment]
+# driver =
+
+# Assignment specific caching toggle. This has no effect unless the global
+# caching option is set to True
+# caching = True
+
+# Assignment specific cache time-to-live (TTL) in seconds.
+# cache_time =
+
+[oauth1]
+# driver = keystone.contrib.oauth1.backends.sql.OAuth1
+
+# The Identity service may include expire attributes.
+# If no such attribute is included, then the token lasts indefinitely.
+# Specify how quickly the request token will expire (in seconds)
+# request_token_duration = 28800
+# Specify how quickly the access token will expire (in seconds)
+# access_token_duration = 86400
+
 [ssl]
 #enable = True
-#certfile = /etc/keystone/ssl/certs/keystone.pem
-#keyfile = /etc/keystone/ssl/private/keystonekey.pem
-#ca_certs = /etc/keystone/ssl/certs/ca.pem
-#cert_required = True
+#certfile = /etc/keystone/pki/certs/ssl_cert.pem
+#keyfile = /etc/keystone/pki/private/ssl_key.pem
+#ca_certs = /etc/keystone/pki/certs/cacert.pem
+#ca_key = /etc/keystone/pki/private/cakey.pem
+#key_size = 1024
+#valid_days = 3650
+#cert_required = False
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
 
 [signing]
-#token_format = PKI
-#certfile = /etc/keystone/ssl/certs/signing_cert.pem
-#keyfile = /etc/keystone/ssl/private/signing_key.pem
-#ca_certs = /etc/keystone/ssl/certs/ca.pem
-#key_size = 1024
+# Deprecated in favor of provider in the [token] section
+# Allowed values are PKI or UUID
+#token_format =
+
+#certfile = /etc/keystone/pki/certs/signing_cert.pem
+#keyfile = /etc/keystone/pki/private/signing_key.pem
+#ca_certs = /etc/keystone/pki/certs/cacert.pem
+#ca_key = /etc/keystone/pki/private/cakey.pem
+#key_size = 2048
 #valid_days = 3650
-#ca_password = None
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
 
 [ldap]
 # url = ldap://localhost
@@ -158,7 +341,6 @@
 # user_tree_dn = ou=Users,dc=example,dc=com
 # user_filter =
 # user_objectclass = inetOrgPerson
-# user_domain_id_attribute = businessCategory
 # user_id_attribute = cn
 # user_name_attribute = sn
 # user_mail_attribute = email
@@ -166,14 +348,15 @@
 # user_enabled_attribute = enabled
 # user_enabled_mask = 0
 # user_enabled_default = True
-# user_attribute_ignore = tenant_id,tenants
+# user_attribute_ignore = default_project_id,tenants
+# user_default_project_id_attribute =
 # user_allow_create = True
 # user_allow_update = True
 # user_allow_delete = True
 # user_enabled_emulation = False
 # user_enabled_emulation_dn =
 
-# tenant_tree_dn = ou=Groups,dc=example,dc=com
+# tenant_tree_dn = ou=Projects,dc=example,dc=com
 # tenant_filter =
 # tenant_objectclass = groupOfNames
 # tenant_domain_id_attribute = businessCategory
@@ -212,91 +395,34 @@
 # group_allow_update = True
 # group_allow_delete = True
 
+# ldap TLS options
+# if both tls_cacertfile and tls_cacertdir are set then
+# tls_cacertfile will be used and tls_cacertdir is ignored
+# valid options for tls_req_cert are demand, never, and allow
+# use_tls = False
+# tls_cacertfile =
+# tls_cacertdir =
+# tls_req_cert = demand
+
+# Additional attribute mappings can be used to map ldap attributes to internal
+# keystone attributes. This allows keystone to fulfill ldap objectclass
+# requirements. An example to map the description and gecos attributes to a
+# user's name would be:
+# user_additional_attribute_mapping = description:name, gecos:name
+#
+# domain_additional_attribute_mapping =
+# group_additional_attribute_mapping =
+# role_additional_attribute_mapping =
+# project_additional_attribute_mapping =
+# user_additional_attribute_mapping =
+
 [auth]
-methods = password,token
+methods = external,password,token,oauth1
+#external = keystone.auth.plugins.external.ExternalDefault
 password = keystone.auth.plugins.password.Password
 token = keystone.auth.plugins.token.Token
-
-[filter:debug]
-paste.filter_factory = keystone.common.wsgi:Debug.factory
-
-[filter:token_auth]
-paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
-
-[filter:admin_token_auth]
-paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
-
-[filter:xml_body]
-paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
-
-[filter:json_body]
-paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
-
-[filter:user_crud_extension]
-paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
-
-[filter:crud_extension]
-paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
-
-[filter:ec2_extension]
-paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
-
-[filter:s3_extension]
-paste.filter_factory = keystone.contrib.s3:S3Extension.factory
-
-[filter:url_normalize]
-paste.filter_factory = keystone.middleware:NormalizingFilter.factory
-
-[filter:sizelimit]
-paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
-
-[filter:stats_monitoring]
-paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
-
-[filter:stats_reporting]
-paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
+oauth1 = keystone.auth.plugins.oauth1.OAuth
 
-[filter:access_log]
-paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
-
-[app:public_service]
-paste.app_factory = keystone.service:public_app_factory
-
-[app:service_v3]
-paste.app_factory = keystone.service:v3_app_factory
-
-[app:admin_service]
-paste.app_factory = keystone.service:admin_app_factory
-
-[pipeline:public_api]
-pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
-
-[pipeline:admin_api]
-pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
-
-[pipeline:api_v3]
-pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
-
-[app:public_version_service]
-paste.app_factory = keystone.service:public_version_app_factory
-
-[app:admin_version_service]
-paste.app_factory = keystone.service:admin_version_app_factory
-
-[pipeline:public_version_api]
-pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
-
-[pipeline:admin_version_api]
-pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
-
-[composite:main]
-use = egg:Paste#urlmap
-/v2.0 = public_api
-/v3 = api_v3
-/ = public_version_api
-
-[composite:admin]
-use = egg:Paste#urlmap
-/v2.0 = admin_api
-/v3 = api_v3
-/ = admin_version_api
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+config_file = keystone-paste.ini
--- a/components/openstack/keystone/files/keystone.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/files/keystone.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -5,6 +5,7 @@
 solaris.admin.edit/etc/keystone/logging.conf,\
 solaris.admin.edit/etc/keystone/policy.json,\
 solaris.smf.manage.keystone,\
-solaris.smf.value.keystone
+solaris.smf.value.keystone;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Identity Management
--- a/components/openstack/keystone/files/keystone.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/files/keystone.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -29,6 +29,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/keystone %m">
       <method_context>
--- a/components/openstack/keystone/keystone.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/keystone.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,10 +28,10 @@
 set name=pkg.summary value="OpenStack Keystone"
 set name=pkg.description \
     value="OpenStack Keystone is a service that provides Identity, Token, Catalog, and Policy services for use specifically by projects in the OpenStack family."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=com.oracle.info.description \
     value="Keystone, the OpenStack identity service"
-set name=com.oracle.info.tpno value=16293
+set name=com.oracle.info.tpno value=17866
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -40,17 +40,28 @@
 set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
 set name=info.upstream value="OpenStack <[email protected]>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
-set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/048
+set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/048 \
+    value=PSARC/2014/209
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 dir  path=etc/keystone owner=keystone group=keystone mode=0700
 file etc/default_catalog.templates path=etc/keystone/default_catalog.templates \
-    owner=keystone group=keystone mode=0644 overlay=allow preserve=true
+    owner=keystone group=keystone mode=0644 overlay=allow preserve=renamenew
+file etc/keystone-paste.ini path=etc/keystone/keystone-paste.ini \
+    owner=keystone group=keystone mode=0644 overlay=allow preserve=renamenew
 file files/keystone.conf path=etc/keystone/keystone.conf owner=keystone \
-    group=keystone mode=0644 overlay=allow preserve=true
+    group=keystone mode=0644 overlay=allow preserve=renamenew
 file etc/logging.conf.sample path=etc/keystone/logging.conf owner=keystone \
-    group=keystone mode=0644 overlay=allow preserve=true
+    group=keystone mode=0644 overlay=allow preserve=renamenew
 file etc/policy.json path=etc/keystone/policy.json owner=keystone \
-    group=keystone mode=0644 overlay=allow preserve=true
+    group=keystone mode=0644 overlay=allow preserve=renamenew
+file etc/policy.v3cloudsample.json path=etc/keystone/policy.v3cloudsample.json \
+    owner=keystone group=keystone mode=0644 overlay=allow preserve=renamenew
+file files/keystone.auth_attr \
+    path=etc/security/auth_attr.d/cloud:openstack:keystone group=sys
+file files/keystone.exec_attr \
+    path=etc/security/exec_attr.d/cloud:openstack:keystone group=sys
+file files/keystone.prof_attr \
+    path=etc/security/prof_attr.d/cloud:openstack:keystone group=sys
 file files/keystone.xml path=lib/svc/manifest/application/openstack/keystone.xml
 file files/keystone path=lib/svc/method/keystone
 file path=usr/bin/keystone-manage
@@ -65,14 +76,21 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/backends/kvs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/backends/ldap.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/backends/sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/assignment/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/controllers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/plugins/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/plugins/external.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/plugins/oauth1.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/plugins/password.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/plugins/token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/routers.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/auth/token_factory.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/catalog/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/catalog/backends/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/catalog/backends/kvs.py
@@ -84,16 +102,21 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/clean.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/cli.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/bufferedhttp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/cache/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/cache/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/cache/backends/noop.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/cache/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/cms.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/controller.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/dependency.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/environment/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/environment/eventlet_server.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/extension.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/kvs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/ldap/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/ldap/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/ldap/fakeldap.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/logging.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/openssl.py
@@ -101,7 +124,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/serializer.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/core.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/legacy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/README
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/manage.py
@@ -131,10 +153,23 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/032_username_length.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migrate_repo/versions/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migration.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/nova.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/util.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/sql/migration_helpers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/systemd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/common/wsgi.py
@@ -145,10 +180,39 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/admin_crud/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/admin_crud/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/backends/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/backends/kvs.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/backends/sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/controllers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/ec2/routers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/backends/catalog_sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/backends/sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/configuration.rst
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/controllers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/migrate_repo/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/migrate_repo/migrate.cfg
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/migrate_repo/versions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/endpoint_filter/routers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/example/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/example/migrate_repo/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/example/migrate_repo/migrate.cfg
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/example/migrate_repo/versions/001_example_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/example/migrate_repo/versions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/backends/sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/controllers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/migrate.cfg
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/migrate_repo/versions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/oauth1/routers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/s3/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/s3/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/stats/__init__.py
@@ -158,36 +222,138 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/user_crud/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/contrib/user_crud/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/controllers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/backends/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/backends/sql.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/controllers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/credential/routers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/exception.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/kvs.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/ldap/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/ldap/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/ldap.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/pam.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/backends/sql.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/controllers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/identity/routers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ar/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/bg_BG/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/bs/LC_MESSAGES/keystone.po
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ca/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/cs/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/da/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/de/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/en_AU/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/en_GB/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/en_US/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/es/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/es_MX/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/fi_FI/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/fil/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/fr/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/hi/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/hr/LC_MESSAGES/keystone.po
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/hu/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/id/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/it/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/it_IT/LC_MESSAGES/keystone.po
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ja/LC_MESSAGES/keystone.po
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/keystone.pot
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ka_GE/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/kn/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ko/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ms/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/nb/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ne/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/nl_NL/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/pl_PL/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/pt/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ro/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ru/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/ru_RU/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/sk/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/sl_SI/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/sw_KE/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/tl/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/tl_PH/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/uk/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/vi_VN/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/zh_HK/LC_MESSAGES/keystone.po
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/middleware/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/middleware/auth_token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/middleware/core.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/middleware/ec2_token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/middleware/s3_token.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/notifications.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/README
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/crypto/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/crypto/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/session.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/test_migrations.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/db/sqlalchemy/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/eventlet_backdoor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/excutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fileutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fixture/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fixture/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fixture/lockutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fixture/mockpatch.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/fixture/moxstubout.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/importutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/jsonutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/lockutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/log_handler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/loopingcall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/network_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/log_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/no_op_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/rpc_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/rpc_notifier2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/notifier/test_notifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/setup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/amqp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/dispatcher.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/impl_fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/impl_kombu.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/impl_qpid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/impl_zmq.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/matchmaker.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/matchmaker_redis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/matchmaker_ring.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/securemessage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/serializer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/rpc/zmq_receiver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/sslutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/threadgroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/timeutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/openstack/common/uuidutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/policy/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/policy/backends/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/policy/backends/rules.py
@@ -197,7 +363,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/policy/routers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/routers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystone/test.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/backends/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/backends/kvs.py
@@ -205,6 +370,10 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/backends/sql.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/controllers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/provider.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/providers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/providers/pki.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/providers/uuid.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/token/routers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/trust/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystone/trust/backends/__init__.py
@@ -224,6 +393,16 @@
 # flush this out.
 depend type=group fmri=library/python/anyjson-26
 
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-26
+
+# force a dependency on dogpile.cache; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/dogpile.cache-26
+
+# force a dependency on greenlet; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/greenlet-26
+
 # force a dependency on iso8601; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/iso8601-26
 
@@ -231,11 +410,14 @@
 # out.
 depend type=require fmri=library/python/keystoneclient-26
 
+# force a dependency on kombu; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/kombu-26
+
 # force a dependency on lxml; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/lxml-26
 
-# force a dependency on oslo.config; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/oslo.config-26
+# force a dependency on netaddr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/netaddr-26
 
 # force a dependency on passlib; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/passlib-26
@@ -244,11 +426,14 @@
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-26
+
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-26
 
-# force a dependency on setuptools; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/setuptools-26
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-26
 
 # force a dependency on webob; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/webob-26
--- a/components/openstack/keystone/patches/01-ec2_token-import-only.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-Upstream patch fixed in Icehouse-1
-
-The following upstream patch was cherrypicked out of the following
-commit and is required for a subsequent patch to apply cleanly.
-
-commit 0f6f386c74754bc2e94a177d00d74bc074eba2fd
-Author: Dolph Mathews <[email protected]>
-Date:   Thu May 23 15:09:14 2013 -0500
-
-    import only modules (flake8 H302)
-    
-    Change-Id: I0fa6fc6bf9d51b60fa987a0040168f3f0ef78a4a
-
-diff --git a/keystone/middleware/ec2_token.py b/keystone/middleware/ec2_token.py
-index daac10a..7cd007c 100644
---- a/keystone/middleware/ec2_token.py
-+++ b/keystone/middleware/ec2_token.py
-@@ -22,7 +22,7 @@ Starting point for routing EC2 requests.
- 
- """
- 
--from urlparse import urlparse
-+import urlparse
- 
- from eventlet.green import httplib
- import webob.dec
-@@ -73,7 +73,7 @@ class EC2Token(wsgi.Middleware):
-         # Disable 'has no x member' pylint error
-         # for httplib and urlparse
-         # pylint: disable-msg=E1101
--        o = urlparse(FLAGS.keystone_ec2_url)
-+        o = urlparse.urlparse(FLAGS.keystone_ec2_url)
-         if o.scheme == 'http':
-             conn = httplib.HTTPConnection(o.netloc)
-         else:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/01-launchpad-1244304.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,113 @@
+This upstream patch addresses Launchpad bug 1244304. Although it's been
+addressed in Icehouse 2014.1, the patch below is still not yet released
+for Havana.
+
+commit 380894fa4a9329c963fad7dbd17fd9b94b1918a8
+Author: Kun Huang <[email protected]>
+Date:   Fri Oct 25 00:26:43 2013 +0800
+
+    remove 8888 port in sample_data.sh
+    
+    Swift now doesn't use 8888 port as any default service. And the origin
+    sample_data.sh always let new users to fail and modify swift bind port.
+    So switching 8888 port here to 8080 could generate sample data without
+    any change for building swift + keystone environment.
+    
+    Closes-Bug: #1244304
+    Change-Id: If58f9f025f57565733aa25efc2fdf06865781391
+
+diff --git a/keystone/tests/test_token_provider.py b/keystone/tests/test_token_provider.py
+index c93bd73..8b2c212 100644
+--- a/keystone/tests/test_token_provider.py
++++ b/keystone/tests/test_token_provider.py
+@@ -92,10 +92,10 @@ SAMPLE_V2_TOKEN = {
+             {
+                 "endpoints": [
+                     {
+-                        "adminURL": "http://localhost:8888/v1",
++                        "adminURL": "http://localhost:8080/v1",
+                         "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+-                        "internalURL": "http://localhost:8888/v1/AUTH_01257",
+-                        "publicURL": "http://localhost:8888/v1/AUTH_01257",
++                        "internalURL": "http://localhost:8080/v1/AUTH_01257",
++                        "publicURL": "http://localhost:8080/v1/AUTH_01257",
+                         "region": "RegionOne"
+                     }
+                 ],
+@@ -202,19 +202,19 @@ SAMPLE_V3_TOKEN = {
+                         "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                         "interface": "admin",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1"
++                        "url": "http://localhost:8080/v1"
+                     },
+                     {
+                         "id": "43bef154594d4ccb8e49014d20624e1d",
+                         "interface": "internal",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1/AUTH_01257"
++                        "url": "http://localhost:8080/v1/AUTH_01257"
+                     },
+                     {
+                         "id": "e63b5f5d7aa3493690189d0ff843b9b3",
+                         "interface": "public",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1/AUTH_01257"
++                        "url": "http://localhost:8080/v1/AUTH_01257"
+                     }
+                 ],
+                 "id": "a669e152f1104810a4b6701aade721bb",
+@@ -399,10 +399,10 @@ SAMPLE_V2_TOKEN_WITH_EMBEDED_VERSION = {
+             {
+                 "endpoints": [
+                     {
+-                        "adminURL": "http://localhost:8888/v1",
++                        "adminURL": "http://localhost:8080/v1",
+                         "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+-                        "internalURL": "http://localhost:8888/v1/AUTH_01257",
+-                        "publicURL": "http://localhost:8888/v1/AUTH_01257",
++                        "internalURL": "http://localhost:8080/v1/AUTH_01257",
++                        "publicURL": "http://localhost:8080/v1/AUTH_01257",
+                         "region": "RegionOne"
+                     }
+                 ],
+@@ -509,19 +509,19 @@ SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION = {
+                         "id": "7bd0c643e05a4a2ab40902b2fa0dd4e6",
+                         "interface": "admin",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1"
++                        "url": "http://localhost:8080/v1"
+                     },
+                     {
+                         "id": "43bef154594d4ccb8e49014d20624e1d",
+                         "interface": "internal",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1/AUTH_01257"
++                        "url": "http://localhost:8080/v1/AUTH_01257"
+                     },
+                     {
+                         "id": "e63b5f5d7aa3493690189d0ff843b9b3",
+                         "interface": "public",
+                         "region": "RegionOne",
+-                        "url": "http://localhost:8888/v1/AUTH_01257"
++                        "url": "http://localhost:8080/v1/AUTH_01257"
+                     }
+                 ],
+                 "id": "a669e152f1104810a4b6701aade721bb",
+diff --git a/tools/sample_data.sh b/tools/sample_data.sh
+index d09502d..65030b5 100755
+--- a/tools/sample_data.sh
++++ b/tools/sample_data.sh
+@@ -215,9 +215,9 @@ keystone service-create --name=swift \
+                         --description="Swift Service")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $SWIFT_SERVICE \
+-        --publicurl   "http://$CONTROLLER_PUBLIC_ADDRESS:8888/v1/AUTH_\$(tenant_id)s" \
+-        --adminurl    "http://$CONTROLLER_ADMIN_ADDRESS:8888/v1" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8888/v1/AUTH_\$(tenant_id)s"
++        --publicurl   "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
++        --adminurl    "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" \
++        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
+ fi
+ 
+ # create ec2 creds and parse the secret and access key returned
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/02-launchpad-1178740.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,72 @@
+This upstream patch addresses Launchpad bug 1178740. Although it's been
+addressed in Icehouse 2014.1, the patch below is still not yet released
+for Havana.
+
+commit 4f800bbe7f5ba07895a4cb32c14007e2f1818bd7
+Author: Lei Zhang <[email protected]>
+Date:   Tue Oct 8 17:40:37 2013 +0800
+
+    remove the nova dependency in the ec2_token middleware
+    
+    Change-Id: I34812522b55e38d3ea030638bbae75d65f507c90
+    Closes-Bug: #1178740
+
+diff --git a/keystone/middleware/ec2_token.py b/keystone/middleware/ec2_token.py
+index 0cd5841..4e58eac 100644
+--- a/keystone/middleware/ec2_token.py
++++ b/keystone/middleware/ec2_token.py
+@@ -25,18 +25,22 @@ Starting point for routing EC2 requests.
+ import urlparse
+ 
+ from eventlet.green import httplib
++from oslo.config import cfg
+ import webob.dec
+ import webob.exc
+ 
+-from nova import flags
+-from nova import utils
+-from nova import wsgi
++from keystone.common import config
++from keystone.common import wsgi
++from keystone.openstack.common import jsonutils
+ 
++keystone_ec2_opts = [
++    cfg.StrOpt('keystone_ec2_url',
++               default='http://localhost:5000/v2.0/ec2tokens',
++               help='URL to get token from ec2 request.'),
++]
+ 
+-FLAGS = flags.FLAGS
+-flags.DEFINE_string('keystone_ec2_url',
+-                    'http://localhost:5000/v2.0/ec2tokens',
+-                    'URL to get token from ec2 request.')
++CONF = config.CONF
++CONF.register_opts(keystone_ec2_opts)
+ 
+ 
+ class EC2Token(wsgi.Middleware):
+@@ -67,13 +71,13 @@ class EC2Token(wsgi.Middleware):
+                 'params': auth_params,
+             }
+         }
+-        creds_json = utils.dumps(creds)
++        creds_json = jsonutils.dumps(creds)
+         headers = {'Content-Type': 'application/json'}
+ 
+         # Disable 'has no x member' pylint error
+         # for httplib and urlparse
+         # pylint: disable-msg=E1101
+-        o = urlparse.urlparse(FLAGS.keystone_ec2_url)
++        o = urlparse.urlparse(CONF.keystone_ec2_url)
+         if o.scheme == 'http':
+             conn = httplib.HTTPConnection(o.netloc)
+         else:
+@@ -86,7 +90,7 @@ class EC2Token(wsgi.Middleware):
+         #             having keystone return token, tenant,
+         #             user, and roles from this call.
+ 
+-        result = utils.loads(response)
++        result = jsonutils.loads(response)
+         try:
+             token_id = result['access']['token']['id']
+         except (AttributeError, KeyError):
--- a/components/openstack/keystone/patches/02-remove-nova-depend.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-Upstream patch fixed in Icehouse-1
-
-commit 4f800bbe7f5ba07895a4cb32c14007e2f1818bd7
-Author: Lei Zhang <[email protected]>
-Date:   Tue Oct 8 17:40:37 2013 +0800
-
-    remove the nova dependency in the ec2_token middleware
-    
-    Change-Id: I34812522b55e38d3ea030638bbae75d65f507c90
-    Closes-Bug: #1178740
-
-diff --git a/keystone/middleware/ec2_token.py b/keystone/middleware/ec2_token.py
-index 0cd5841..4e58eac 100644
---- a/keystone/middleware/ec2_token.py
-+++ b/keystone/middleware/ec2_token.py
-@@ -25,18 +25,22 @@ Starting point for routing EC2 requests.
- import urlparse
- 
- from eventlet.green import httplib
-+from oslo.config import cfg
- import webob.dec
- import webob.exc
- 
--from nova import flags
--from nova import utils
--from nova import wsgi
-+from keystone.common import config
-+from keystone.common import wsgi
-+from keystone.openstack.common import jsonutils
- 
-+keystone_ec2_opts = [
-+    cfg.StrOpt('keystone_ec2_url',
-+               default='http://localhost:5000/v2.0/ec2tokens',
-+               help='URL to get token from ec2 request.'),
-+]
- 
--FLAGS = flags.FLAGS
--flags.DEFINE_string('keystone_ec2_url',
--                    'http://localhost:5000/v2.0/ec2tokens',
--                    'URL to get token from ec2 request.')
-+CONF = config.CONF
-+CONF.register_opts(keystone_ec2_opts)
- 
- 
- class EC2Token(wsgi.Middleware):
-@@ -67,13 +71,13 @@ class EC2Token(wsgi.Middleware):
-                 'params': auth_params,
-             }
-         }
--        creds_json = utils.dumps(creds)
-+        creds_json = jsonutils.dumps(creds)
-         headers = {'Content-Type': 'application/json'}
- 
-         # Disable 'has no x member' pylint error
-         # for httplib and urlparse
-         # pylint: disable-msg=E1101
--        o = urlparse.urlparse(FLAGS.keystone_ec2_url)
-+        o = urlparse.urlparse(CONF.keystone_ec2_url)
-         if o.scheme == 'http':
-             conn = httplib.HTTPConnection(o.netloc)
-         else:
-@@ -86,7 +90,7 @@ class EC2Token(wsgi.Middleware):
-         #             having keystone return token, tenant,
-         #             user, and roles from this call.
- 
--        result = utils.loads(response)
-+        result = jsonutils.loads(response)
-         try:
-             token_id = result['access']['token']['id']
-         except (AttributeError, KeyError):
--- a/components/openstack/keystone/patches/03-CVE-2013-6391.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-Upstream patch fixed in Havana 2013.2
-
-commit 8fcc18c42bde2db34e4b29236dc2e971d40f146b
-Author: Steven Hardy <[email protected]>
-Date:   Sun Oct 13 10:44:52 2013 +0100
-
-    Fix v2 token user ref with trust impersonation=True
-    
-    The v2 token controller incorrectly checks for a string instead
-    of a boolean, which results in the wrong user ID (trustee, when
-    it should be the trustor) when impersonation=True.  So fix the
-    comparison and tests, adding a test which illustrates the issue.
-    
-    This patchset also closes the gap that allows EC2 credentials to
-    be issued from trust-scoped tokens, allowing privilege escalation
-    since EC2 tokens have no concept of trust-scoping/role
-    restrictions in the Grizzly release.
-    
-    Change-Id: Ic94f30f2354c9fda20531bb598387368fde8a096
-    Closes-Bug: #1239303
-    Related-Bug: #1242597
-
-diff --git a/keystone/contrib/ec2/core.py b/keystone/contrib/ec2/core.py
-index 246587a..2ef9820 100644
---- a/keystone/contrib/ec2/core.py
-+++ b/keystone/contrib/ec2/core.py
-@@ -207,6 +207,9 @@ class Ec2Controller(controller.V2Controller):
-         if not self._is_admin(context):
-             self._assert_identity(context, user_id)
- 
-+        # Disallow trust-scoped tokens from creating credentials.
-+        self._assert_not_trust_scoped(context)
-+
-         self._assert_valid_user_id(context, user_id)
-         self._assert_valid_project_id(context, tenant_id)
- 
-@@ -308,6 +311,22 @@ class Ec2Controller(controller.V2Controller):
-         except exception.Forbidden:
-             return False
- 
-+    def _assert_not_trust_scoped(self, context):
-+        try:
-+            token_ref = self.token_api.get_token(
-+                context, token_id=context['token_id'])
-+        except exception.TokenNotFound as e:
-+            raise exception.Unauthorized(e)
-+
-+        # NOTE(morganfainberg): In Grizzly, it is not allowed to use a
-+        # trust scoped token to create an EC2 credential, this is due to
-+        # privilege escalation possibility (there is no way to correlate
-+        # the trust to the EC2 credential and limit roles to the trust).
-+        if 'trust' in token_ref:
-+            raise exception.Forbidden()
-+        if 'trust_id' in token_ref.get('metadata', {}):
-+            raise exception.Forbidden()
-+
-     def _assert_owner(self, context, user_id, credential_id):
-         """Ensure the provided user owns the credential.
- 
-diff --git a/keystone/token/controllers.py b/keystone/token/controllers.py
-index 1ae1d4f..e42ca7d 100644
---- a/keystone/token/controllers.py
-+++ b/keystone/token/controllers.py
-@@ -201,7 +201,7 @@ class Auth(controller.V2Controller):
-                 context, trust_ref['trustee_user_id'])
-             if not trustee_user_ref['enabled']:
-                 raise exception.Forbidden()()
--            if trust_ref['impersonation'] == 'True':
-+            if trust_ref['impersonation'] is True:
-                 current_user_ref = trustor_user_ref
-             else:
-                 current_user_ref = trustee_user_ref
-diff --git a/tests/test_auth.py b/tests/test_auth.py
-index 3d4ec87..8a810a4 100644
---- a/tests/test_auth.py
-+++ b/tests/test_auth.py
-@@ -19,6 +19,7 @@ import uuid
- 
- from keystone import auth
- from keystone import config
-+from keystone.contrib import ec2
- from keystone import exception
- from keystone import identity
- from keystone.openstack.common import timeutils
-@@ -517,7 +518,7 @@ class AuthWithTrust(AuthTest):
-         self.sample_data = {'trustor_user_id': self.trustor['id'],
-                             'trustee_user_id': self.trustee['id'],
-                             'project_id': self.tenant_bar['id'],
--                            'impersonation': 'True',
-+                            'impersonation': True,
-                             'roles': [{'id': self.role_browser['id']},
-                                       {'name': self.role_member['name']}]}
-         expires_at = timeutils.strtime(timeutils.utcnow() +
-@@ -525,7 +526,7 @@ class AuthWithTrust(AuthTest):
-                                        fmt=TIME_FORMAT)
-         self.create_trust(expires_at=expires_at)
- 
--    def create_trust(self, expires_at=None, impersonation='True'):
-+    def create_trust(self, expires_at=None, impersonation=True):
-         username = self.trustor['name'],
-         password = 'foo2'
-         body_dict = _build_user_auth(username=username, password=password)
-@@ -586,20 +587,42 @@ class AuthWithTrust(AuthTest):
-             self.assertIn(role['id'], role_ids)
- 
-     def test_create_trust_no_impersonation(self):
--        self.create_trust(expires_at=None, impersonation='False')
-+        self.create_trust(expires_at=None, impersonation=False)
-         self.assertEquals(self.new_trust['trustor_user_id'],
-                           self.trustor['id'])
-         self.assertEquals(self.new_trust['trustee_user_id'],
-                           self.trustee['id'])
--        self.assertEquals(self.new_trust['impersonation'],
--                          'False')
-+        self.assertIs(self.new_trust['impersonation'], False)
-         auth_response = self.fetch_v2_token_from_trust()
-         token_user = auth_response['access']['user']
-         self.assertEquals(token_user['id'],
-                           self.new_trust['trustee_user_id'])
--
-         #TODO Endpoints
- 
-+    def test_create_trust_impersonation(self):
-+        self.create_trust(expires_at=None)
-+        self.assertEqual(self.new_trust['trustor_user_id'], self.trustor['id'])
-+        self.assertEqual(self.new_trust['trustee_user_id'], self.trustee['id'])
-+        self.assertIs(self.new_trust['impersonation'], True)
-+        auth_response = self.fetch_v2_token_from_trust()
-+        token_user = auth_response['access']['user']
-+        self.assertEqual(token_user['id'], self.new_trust['trustor_user_id'])
-+
-+    def test_disallow_ec2_credential_from_trust_scoped_token(self):
-+        ec2_manager = ec2.Manager()
-+        self.ec2_controller = ec2.Ec2Controller()
-+        self.test_create_trust_impersonation()
-+        auth_response = self.fetch_v2_token_from_trust()
-+        # ensure it is not possible to create an ec2 token from a trust
-+        context = {'token_id': auth_response['access']['token']['id'],
-+                   'is_admin': False}
-+
-+        self.assertRaises(exception.Forbidden,
-+                          self.ec2_controller.create_credential,
-+                          context=context,
-+                          user_id=self.user_foo['id'],
-+                          tenant_id=self.tenant_bar['id'])
-+
-     def test_token_from_trust_wrong_user_fails(self):
-         new_trust = self.create_trust()
-         request_body = self.build_v2_token_request('FOO', 'foo2')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/03-sample-data-sh.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,214 @@
+In-house patch to the sample_data.sh script installed in
+/usr/demo/openstack/keystone in order to support all of the standard
+services and to allow customization of the individual service
+endpoints.  This is a Solaris-specific patch and is not suitable for upstream.
+
+It also includes a change to use the standard Solaris tr(1) rather than
+GNU sed.
+
+--- keystone-2013.2.3/tools/sample_data.sh.orig	2014-05-27 09:17:02.379736817 -0700
++++ keystone-2013.2.3/tools/sample_data.sh	2014-05-27 11:09:25.741756254 -0700
+@@ -2,6 +2,8 @@
+ 
+ # Copyright 2013 OpenStack Foundation
+ #
++# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
++#
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+@@ -23,8 +25,8 @@
+ # and the administrative API.  It will get the admin_token (SERVICE_TOKEN)
+ # and admin_port from keystone.conf if available.
+ #
+-# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment variable.
+-# Use this with the Catalog Templated backend.
++# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment
++# variable.  Use this with the Catalog Templated backend.
+ #
+ # A EC2-compatible credential is created for the admin user and
+ # placed in etc/ec2rc.
+@@ -36,22 +38,48 @@
+ # service              nova      admin
+ # service              ec2       admin
+ # service              swift     admin
++# service              cinder    admin
++# service              neutron   admin
++
++# By default, passwords used are those in the OpenStack Install and Deploy
++# Manual.  One can override these (publicly known, and hence, insecure)
++# passwords by setting the appropriate environment variables. A common default
++# password for all the services can be used by setting the "SERVICE_PASSWORD"
++# environment variable.
+ 
+-# By default, passwords used are those in the OpenStack Install and Deploy Manual.
+-# One can override these (publicly known, and hence, insecure) passwords by setting the appropriate
+-# environment variables. A common default password for all the services can be used by
+-# setting the "SERVICE_PASSWORD" environment variable.
++PATH=/usr/bin
+ 
+ ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
+ NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}}
+ GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}}
+ EC2_PASSWORD=${EC2_PASSWORD:-${SERVICE_PASSWORD:-ec2}}
+ SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}}
++CINDER_PASSWORD=${CINDER_PASSWORD:-${SERVICE_PASSWORD:-cinder}}
++NEUTRON_PASSWORD=${NEUTRON_PASSWORD:-${SERVICE_PASSWORD:-neutron}}
+ 
+ CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost}
+ CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost}
+ CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost}
+ 
++NOVA_PUBLIC_ADDRESS=${NOVA_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++NOVA_ADMIN_ADDRESS=${NOVA_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++NOVA_INTERNAL_ADDRESS=${NOVA_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++GLANCE_PUBLIC_ADDRESS=${GLANCE_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++GLANCE_ADMIN_ADDRESS=${GLANCE_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++GLANCE_INTERNAL_ADDRESS=${GLANCE_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++EC2_PUBLIC_ADDRESS=${EC2_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++EC2_ADMIN_ADDRESS=${EC2_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++EC2_INTERNAL_ADDRESS=${EC2_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++SWIFT_PUBLIC_ADDRESS=${SWIFT_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++SWIFT_ADMIN_ADDRESS=${SWIFT_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++SWIFT_INTERNAL_ADDRESS=${SWIFT_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++CINDER_PUBLIC_ADDRESS=${CINDER_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++CINDER_ADMIN_ADDRESS=${CINDER_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++CINDER_INTERNAL_ADDRESS=${CINDER_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++NEUTRON_PUBLIC_ADDRESS=${NEUTRON_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
++NEUTRON_ADMIN_ADDRESS=${NEUTRON_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
++NEUTRON_INTERNAL_ADDRESS=${NEUTRON_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
++
+ TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+ KEYSTONE_CONF=${KEYSTONE_CONF:-/etc/keystone/keystone.conf}
+ if [[ -r "$KEYSTONE_CONF" ]]; then
+@@ -67,8 +95,8 @@
+ 
+ # Extract some info from Keystone's configuration file
+ if [[ -r "$KEYSTONE_CONF" ]]; then
+-    CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2)
+-    CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2)
++    CONFIG_SERVICE_TOKEN=$(tr -d '[\t ]' < $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2)
++    CONFIG_ADMIN_PORT=$(tr -d '[\t ]' < $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2)
+ fi
+ 
+ export SERVICE_TOKEN=${SERVICE_TOKEN:-$CONFIG_SERVICE_TOKEN}
+@@ -136,6 +164,22 @@
+                        --role-id $ADMIN_ROLE \
+                        --tenant-id $SERVICE_TENANT
+ 
++CINDER_USER=$(get_id keystone user-create --name=cinder \
++                                          --pass="${CINDER_PASSWORD}" \
++                                          --tenant-id $SERVICE_TENANT)
++
++keystone user-role-add --user-id $CINDER_USER \
++                       --role-id $ADMIN_ROLE \
++                       --tenant-id $SERVICE_TENANT
++
++NEUTRON_USER=$(get_id keystone user-create --name=neutron \
++                                           --pass="${NEUTRON_PASSWORD}" \
++                                           --tenant-id $SERVICE_TENANT)
++
++keystone user-role-add --user-id $NEUTRON_USER \
++                       --role-id $ADMIN_ROLE \
++                       --tenant-id $SERVICE_TENANT
++
+ #
+ # Keystone service
+ #
+@@ -159,23 +203,23 @@
+                         --description="Nova Compute Service")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $NOVA_SERVICE \
+-        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+-        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s"
++        --publicurl "http://$NOVA_PUBLIC_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
++        --adminurl "http://$NOVA_ADMIN_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
++        --internalurl "http://$NOVA_INTERNAL_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s"
+ fi
+ 
+ #
+ # Volume service
+ #
+ VOLUME_SERVICE=$(get_id \
+-keystone service-create --name=volume \
++keystone service-create --name=cinder \
+                         --type=volume \
+-                        --description="Nova Volume Service")
++                        --description="Cinder Volume Service")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $VOLUME_SERVICE \
+-        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
+-        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
++        --publicurl "http://$CINDER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
++        --adminurl "http://$CINDER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
++        --internalurl "http://$CINDER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
+ fi
+ 
+ #
+@@ -187,9 +231,9 @@
+                         --description="Glance Image Service")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $GLANCE_SERVICE \
+-        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9292" \
+-        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9292" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292"
++        --publicurl "http://$GLANCE_PUBLIC_ADDRESS:9292" \
++        --adminurl "http://$GLANCE_ADMIN_ADDRESS:9292" \
++        --internalurl "http://$GLANCE_INTERNAL_ADDRESS:9292"
+ fi
+ 
+ #
+@@ -201,9 +245,9 @@
+                         --description="EC2 Compatibility Layer")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $EC2_SERVICE \
+-        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8773/services/Cloud" \
+-        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8773/services/Admin" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud"
++        --publicurl "http://$EC2_PUBLIC_ADDRESS:8773/services/Cloud" \
++        --adminurl "http://$EC2_ADMIN_ADDRESS:8773/services/Admin" \
++        --internalurl "http://$EC2_INTERNAL_ADDRESS:8773/services/Cloud"
+ fi
+ 
+ #
+@@ -212,15 +256,34 @@
+ SWIFT_SERVICE=$(get_id \
+ keystone service-create --name=swift \
+                         --type="object-store" \
+-                        --description="Swift Service")
++                        --description="Swift Object Store Service")
+ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+     keystone endpoint-create --region RegionOne --service-id $SWIFT_SERVICE \
+-        --publicurl   "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
+-        --adminurl    "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" \
+-        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
++        --publicurl "http://$SWIFT_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
++        --adminurl "http://$SWIFT_ADMIN_ADDRESS:8080/v1" \
++        --internalurl "http://$SWIFT_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
++fi
++
++#
++# Neutron service
++#
++NEUTRON_SERVICE=$(get_id \
++keystone service-create --name=neutron \
++                        --type=network \
++                        --description="Neutron Network Service")
++if [[ -z "$DISABLE_ENDPOINTS" ]]; then
++    keystone endpoint-create --region RegionOne --service-id $NEUTRON_SERVICE \
++        --publicurl "http://$NEUTRON_PUBLIC_ADDRESS:9696/" \
++        --adminurl "http://$NEUTRON_ADMIN_ADDRESS:9696/" \
++        --internalurl "http://$NEUTRON_INTERNAL_ADDRESS:9696/"
+ fi
+ 
+ # create ec2 creds and parse the secret and access key returned
++unset SERVICE_ENDPOINT SERVICE_TOKEN
++export OS_AUTH_URL=http://localhost:5000/v2.0
++export OS_PASSWORD="${ADMIN_PASSWORD}"
++export OS_TENANT_NAME=demo
++export OS_USERNAME=admin
+ RESULT=$(keystone ec2-credentials-create --tenant-id=$SERVICE_TENANT --user-id=$ADMIN_USER)
+ ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'`
+ ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'`
--- a/components/openstack/keystone/patches/04-CVE-2013-4477.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-Upstream patch fixed in Havana 2013.2.1
-
-commit 82dcde08f60c45002955875664a3cf82d1d211bc
-Author: Brant Knudson <[email protected]>
-Date:   Mon Oct 21 15:21:12 2013 -0500
-
-    Fix remove role assignment adds role using LDAP assignment
-    
-    When using the LDAP assignment backend, attempting to remove a
-    role assignment when the role hadn't been used before would
-    actually add the role assignment and would not return a
-    404 Not Found like the SQL backend.
-    
-    This change makes it so that when attempt to remove a role that
-    wasn't assigned then 404 Not Found is returned.
-    
-    Closes-Bug: #1242855
-    Change-Id: I28ccd26cc4bb1a241d0363d0ab52d2c11410e8b3
-    (cherry picked from commit c6800ca1ac984c879e75826df6694d6199444ea0)
-    (cherry picked from commit b17e7bec768bd53d3977352486378698a3db3cfa)
-    (cherry picked from commit 4221b6020e6b0b42325d8904d7b8a22577a6acc0)
-
-diff --git a/keystone/identity/backends/ldap/core.py b/keystone/identity/backends/ldap/core.py
-index 8ac7395..3d016c0 100644
---- a/keystone/identity/backends/ldap/core.py
-+++ b/keystone/identity/backends/ldap/core.py
-@@ -704,21 +704,10 @@ class RoleApi(common_ldap.BaseLdap, ApiShimMixin):
-         try:
-             conn.modify_s(role_dn, [(ldap.MOD_DELETE,
-                                      self.member_attribute, user_dn)])
--        except ldap.NO_SUCH_OBJECT:
--            if tenant_id is None or self.get(role_id) is None:
--                raise exception.RoleNotFound(role_id=role_id)
--            attrs = [('objectClass', [self.object_class]),
--                     (self.member_attribute, [user_dn])]
--
--            if self.use_dumb_member:
--                attrs[1][1].append(self.dumb_member)
--            try:
--                conn.add_s(role_dn, attrs)
--            except Exception as inst:
--                raise inst
--
--        except ldap.NO_SUCH_ATTRIBUTE:
--            raise exception.UserNotFound(user_id=user_id)
-+        except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
-+            raise exception.RoleNotFound(message=_(
-+                'Cannot remove role that has not been granted, %s') %
-+                role_id)
- 
-     def get_role_assignments(self, tenant_id):
-         conn = self.get_connection()
-diff --git a/tests/test_backend.py b/tests/test_backend.py
-index d4c2e6c..1af3c16 100644
---- a/tests/test_backend.py
-+++ b/tests/test_backend.py
-@@ -57,6 +57,15 @@ class IdentityTests(object):
-         user_refs = self.identity_api.get_project_users(self.tenant_bar['id'])
-         self.assertNotIn(self.user_two['id'], [x['id'] for x in user_refs])
- 
-+    def test_remove_user_role_not_assigned(self):
-+        # Expect failure if attempt to remove a role that was never assigned to
-+        # the user.
-+        self.assertRaises(exception.RoleNotFound,
-+                          self.identity_api.remove_role_from_user_and_project,
-+                          tenant_id=self.tenant_bar['id'],
-+                          user_id=self.user_two['id'],
-+                          role_id=self.role_other['id'])
-+
-     def test_authenticate_bad_user(self):
-         self.assertRaises(AssertionError,
-                           self.identity_api.authenticate,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/04-CVE-2014-2828.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,64 @@
+Upstream patch for bug 1300274.
+
+Fixed in Havana 2013.2.4, Icehouse 2014.1
+
+From: Florent Flament <[email protected]>
+Date: Tue, 1 Apr 2014 12:48:22 +0000 (+0000)
+Subject: Sanitizes authentication methods received in requests.
+X-Git-Url: https://review.openstack.org/gitweb?p=openstack%2Fkeystone.git;a=commitdiff_plain;h=e364ba5b12de8e4c11bd80bcca903f9615dcfc2e
+
+Sanitizes authentication methods received in requests.
+
+When a user authenticates against Identity V3 API, he can specify
+multiple authentication methods. This patch removes duplicates, which
+could have been used to achieve DoS attacks.
+
+Closes-Bug: 1300274
+(cherry picked from commit ef868ad92c00e23a4a5e9eb71e3e0bf5ae2fff0c)
+Cherry-pick from https://review.openstack.org/#/c/84425/
+
+Change-Id: I6e60324309baa094a5e54b012fb0fc528fea72ab
+---
+
+diff --git a/keystone/auth/controllers.py b/keystone/auth/controllers.py
+index c3399df..4944316 100644
+--- a/keystone/auth/controllers.py
++++ b/keystone/auth/controllers.py
+@@ -225,7 +225,13 @@ class AuthInfo(object):
+         :returns: list of auth method names
+ 
+         """
+-        return self.auth['identity']['methods'] or []
++        # Sanitizes methods received in request's body
++        # Filters out duplicates, while keeping elements' order.
++        method_names = []
++        for method in self.auth['identity']['methods']:
++            if method not in method_names:
++                method_names.append(method)
++        return method_names
+ 
+     def get_method_data(self, method):
+         """Get the auth method payload.
+diff --git a/keystone/tests/test_v3_auth.py b/keystone/tests/test_v3_auth.py
+index d07e6ae..e89e29f 100644
+--- a/keystone/tests/test_v3_auth.py
++++ b/keystone/tests/test_v3_auth.py
+@@ -81,6 +81,18 @@ class TestAuthInfo(test_v3.RestfulTestCase):
+                           None,
+                           auth_data)
+ 
++    def test_get_method_names_duplicates(self):
++        auth_data = self.build_authentication_request(
++            token='test',
++            user_id='test',
++            password='test')['auth']
++        auth_data['identity']['methods'] = ['password', 'token',
++                                            'password', 'password']
++        context = None
++        auth_info = auth.controllers.AuthInfo(context, auth_data)
++        self.assertEqual(auth_info.get_method_names(),
++                         ['password', 'token'])
++
+     def test_get_method_data_invalid_method(self):
+         auth_data = self.build_authentication_request(
+             user_id='test',
--- a/components/openstack/keystone/patches/05-CVE-2014-2237.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,177 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.2, Icehouse
-
-From a411c944af78c36f2fdb87d305ba452dc52d7ed3 Mon Sep 17 00:00:00 2001
-From: Morgan Fainberg <[email protected]>
-Date: Fri, 21 Feb 2014 14:09:04 -0800
-Subject: [PATCH] Ensure tokens are added to both Trustor and Trustee indexes
-
-Tokens are now added to both the Trustor and Trustee user-token-index
-so that bulk token revocations (e.g. password change) of the trustee
-will work as expected. This is a backport of the basic code that was
-used in the Icehouse-vintage Dogpile Token KVS backend that resolves
-this issue by merging the handling of memcache and KVS backends into
-the same logic.
-
-Change-Id: I3e19e4a8fc1e11cef6db51d364e80061e97befa7
-Closes-Bug: #1260080
-
----
- keystone/token/backends/memcache.py |   27 +++++++++++++++++------
- tests/test_backend.py               |   41 ++++++++++++++++++++++++++++++++++-
- tests/test_backend_kvs.py           |    2 ++
- tests/test_backend_memcache.py      |    3 +++
- 4 files changed, 65 insertions(+), 8 deletions(-)
-
-diff --git a/keystone/token/backends/memcache.py b/keystone/token/backends/memcache.py
-index c2c9b51..dc5c34e 100644
---- a/keystone/token/backends/memcache.py
-+++ b/keystone/token/backends/memcache.py
-@@ -62,6 +62,15 @@ class Token(token.Driver):
-         return token_ref
- 
-     def create_token(self, token_id, data):
-+
-+        def update_index(user_id, token_data):
-+            user_key = self._prefix_user_id(user_id)
-+            if not self.client.append(user_key, ',%s' % token_data):
-+                if not self.client.add(user_key, token_data):
-+                    if not self.client.append(user_key, ',%s' % token_data):
-+                        msg = _('Unable to add token user list.')
-+                        raise exception.UnexpectedError(msg)
-+
-         data_copy = copy.deepcopy(data)
-         ptk = self._prefix_token_id(token.unique_id(token_id))
-         if not data_copy.get('expires'):
-@@ -73,15 +82,19 @@ class Token(token.Driver):
-             expires_ts = utils.unixtime(data_copy['expires'])
-             kwargs['time'] = expires_ts
-         self.client.set(ptk, data_copy, **kwargs)
-+        token_data = jsonutils.dumps(token_id)
-         if 'id' in data['user']:
--            token_data = jsonutils.dumps(token_id)
-             user_id = data['user']['id']
--            user_key = self._prefix_user_id(user_id)
--            if not self.client.append(user_key, ',%s' % token_data):
--                if not self.client.add(user_key, token_data):
--                    if not self.client.append(user_key, ',%s' % token_data):
--                        msg = _('Unable to add token user list.')
--                        raise exception.UnexpectedError(msg)
-+            update_index(user_id, token_data)
-+
-+        if CONF.trust.enabled and data.get('trust_id'):
-+            if 'access' in data_copy:
-+                trustee_user_id = data_copy['access']['trust'][
-+                    'trustee_user_id']
-+            else:
-+                trustee_user_id = data_copy['OS-TRUST:trust'][
-+                    'trustee_user_id']
-+            update_index(trustee_user_id, token_data)
-         return copy.deepcopy(data_copy)
- 
-     def _add_to_revocation_list(self, token_id, token_data):
-diff --git a/tests/test_backend.py b/tests/test_backend.py
-index 1af3c16..19caa0c 100644
---- a/tests/test_backend.py
-+++ b/tests/test_backend.py
-@@ -2096,7 +2096,8 @@ class TokenTests(object):
-                           self.token_api.delete_token, token_id)
- 
-     def create_token_sample_data(self, tenant_id=None, trust_id=None,
--                                 user_id="testuserid"):
-+                                 user_id='testuserid',
-+                                 trustee_user_id='testuserid2'):
-         token_id = self._create_token_id()
-         data = {'id': token_id, 'a': 'b',
-                 'user': {'id': user_id}}
-@@ -2104,6 +2105,11 @@ class TokenTests(object):
-             data['tenant'] = {'id': tenant_id, 'name': tenant_id}
-         if trust_id is not None:
-             data['trust_id'] = trust_id
-+            data.setdefault('access', {}).setdefault('trust', {})
-+            # Testuserid2 is used here since a trustee will be different in
-+            # the cases of impersonation and therefore should not match the
-+            # token's user_id.
-+            data['access']['trust']['trustee_user_id'] = trustee_user_id
-         self.token_api.create_token(token_id, data)
-         return token_id
- 
-@@ -2290,6 +2296,39 @@ class TokenTests(object):
-         for t in self.token_api.list_revoked_tokens():
-             self.assertIn('expires', t)
- 
-+    def test_token_in_trustee_and_trustor_token_list(self):
-+        self.opt_in_group('trust',
-+                          enabled=True)
-+        trustor = self.user_foo
-+        trustee = self.user_two
-+        trust_id = uuid.uuid4().hex
-+        trust_info = {'trustor_user_id': trustor['id'],
-+                      'trustee_user_id': trustee['id'],
-+                      'project_id': self.tenant_bar['id'],
-+                      'expires_at': timeutils.
-+                      parse_isotime('2031-02-18T18:10:00Z'),
-+                      'impersonation': True}
-+        self.trust_api.create_trust(trust_id, trust_info,
-+                                    roles=[{'id': 'member'},
-+                                           {'id': 'other'},
-+                                           {'id': 'browser'}])
-+
-+        token_id = self.create_token_sample_data(
-+            tenant_id=self.tenant_bar['id'],
-+            trust_id=trust_id,
-+            user_id=trustor['id'],
-+            trustee_user_id=trustee['id'])
-+
-+        # Ensure the token id exists in both the trustor and trustee token
-+        # lists
-+
-+        self.assertIn(token_id,
-+                      self.token_api.list_tokens(self.user_two['id'],
-+                                                 trust_id=trust_id))
-+        self.assertIn(token_id,
-+                      self.token_api.list_tokens(self.user_foo['id'],
-+                                                 trust_id=trust_id))
-+
- 
- class TrustTests(object):
-     def create_sample_trust(self, new_id):
-diff --git a/tests/test_backend_kvs.py b/tests/test_backend_kvs.py
-index f3a8ece..15a87b5 100644
---- a/tests/test_backend_kvs.py
-+++ b/tests/test_backend_kvs.py
-@@ -73,6 +73,8 @@ class KvsToken(test.TestCase, test_backend.TokenTests):
-     def setUp(self):
-         super(KvsToken, self).setUp()
-         self.token_api = token_kvs.Token(db={})
-+        self.load_backends()
-+        self.load_fixtures(default_fixtures)
- 
- 
- class KvsTrust(test.TestCase, test_backend.TrustTests):
-diff --git a/tests/test_backend_memcache.py b/tests/test_backend_memcache.py
-index 9fbaeb9..6339e6f 100644
---- a/tests/test_backend_memcache.py
-+++ b/tests/test_backend_memcache.py
-@@ -18,6 +18,7 @@ import uuid
- 
- import memcache
- 
-+import default_fixtures
- from keystone.common import utils
- from keystone.openstack.common import timeutils
- from keystone import test
-@@ -75,8 +76,10 @@ class MemcacheClient(object):
- class MemcacheToken(test.TestCase, test_backend.TokenTests):
-     def setUp(self):
-         super(MemcacheToken, self).setUp()
-+        self.load_backends()
-         fake_client = MemcacheClient()
-         self.token_api = token_memcache.Token(client=fake_client)
-+        self.load_fixtures(default_fixtures)
- 
-     def test_create_unicode_token_id(self):
-         token_id = unicode(self._create_token_id())
--- 
-1.7.9.5
-
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/keystone/patches/05-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,41 @@
+In-house patch to remove unnecessary dependencies from Keystone's
+requirements files. The specific reasons are as follows:
+
+oauth2		Not applicable to Solaris (being replaced with oauthlib)
+		(upstream commit bed88a2e724f5f23a1c839b7872b1bc56f059df5)
+
+pam		No longer applicable
+		(upstream commit ff1b41c15b2d65443234ccdb60565d6f6709f879)
+
+--- keystone-2013.2.3/keystone.egg-info/requires.txt.orig	2014-04-03 11:49:55.000000000 -0700
++++ keystone-2013.2.3/keystone.egg-info/requires.txt	2014-05-24 23:09:25.817082422 -0700
+@@ -1,5 +1,4 @@
+ pbr>=0.5.21,<1.0
+-pam>=0.1.4
+ WebOb>=1.2.3,<1.3
+ eventlet>=0.13.0
+ greenlet>=0.3.2
+@@ -15,5 +14,4 @@
+ python-keystoneclient>=0.3.2
+ oslo.config>=1.2.0
+ Babel>=1.3
+-oauth2
+-dogpile.cache>=0.5.0
+\ No newline at end of file
++dogpile.cache>=0.5.0
+
+--- keystone-2013.2.3/requirements.txt.orig	2014-04-03 11:47:39.000000000 -0700
++++ keystone-2013.2.3/requirements.txt	2014-05-24 23:09:35.044573447 -0700
+@@ -1,6 +1,5 @@
+ # keystone dependencies
+ pbr>=0.5.21,<1.0
+-pam>=0.1.4
+ WebOb>=1.2.3,<1.3
+ eventlet>=0.13.0
+ greenlet>=0.3.2
+@@ -16,5 +15,4 @@
+ python-keystoneclient>=0.3.2
+ oslo.config>=1.2.0
+ Babel>=1.3
+-oauth2
+ dogpile.cache>=0.5.0
--- a/components/openstack/keystone/patches/06-sample-data-sh.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,201 +0,0 @@
-In-house patch to the sample_data.sh script installed in
-/usr/demo/openstack/keystone in order to support all of the standard
-services and to allow customization of the individual service
-endpoints.  Solaris-specific patch and is not suitable for upstream
-
-It also includes a change to use the standard Solaris tr(1) rather than
-GNU sed.
-
---- keystone-2013.1.4/tools/sample_data.sh.~1~	2013-10-17 11:23:46.000000000 -0700
-+++ keystone-2013.1.4/tools/sample_data.sh	2014-03-07 23:39:03.065369827 -0800
-@@ -23,8 +23,8 @@
- # and the administrative API.  It will get the admin_token (SERVICE_TOKEN)
- # and admin_port from keystone.conf if available.
- #
--# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment variable.
--# Use this with the Catalog Templated backend.
-+# Disable creation of endpoints by setting DISABLE_ENDPOINTS environment
-+# variable.  Use this with the Catalog Templated backend.
- #
- # A EC2-compatible credential is created for the admin user and
- # placed in etc/ec2rc.
-@@ -36,22 +36,48 @@
- # service              nova      admin
- # service              ec2       admin
- # service              swift     admin
-+# service              cinder    admin
-+# service              neutron   admin
- 
--# By default, passwords used are those in the OpenStack Install and Deploy Manual.
--# One can override these (publicly known, and hence, insecure) passwords by setting the appropriate
--# environment variables. A common default password for all the services can be used by
--# setting the "SERVICE_PASSWORD" environment variable.
-+# By default, passwords used are those in the OpenStack Install and Deploy
-+# Manual.  One can override these (publicly known, and hence, insecure)
-+# passwords by setting the appropriate environment variables. A common default
-+# password for all the services can be used by setting the "SERVICE_PASSWORD"
-+# environment variable.
-+
-+PATH=/usr/bin
- 
- ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
- NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}}
- GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}}
- EC2_PASSWORD=${EC2_PASSWORD:-${SERVICE_PASSWORD:-ec2}}
- SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}}
-+CINDER_PASSWORD=${CINDER_PASSWORD:-${SERVICE_PASSWORD:-cinder}}
-+NEUTRON_PASSWORD=${NEUTRON_PASSWORD:-${SERVICE_PASSWORD:-neutron}}
- 
- CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost}
- CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost}
- CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost}
- 
-+NOVA_PUBLIC_ADDRESS=${NOVA_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+NOVA_ADMIN_ADDRESS=${NOVA_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+NOVA_INTERNAL_ADDRESS=${NOVA_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+GLANCE_PUBLIC_ADDRESS=${GLANCE_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+GLANCE_ADMIN_ADDRESS=${GLANCE_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+GLANCE_INTERNAL_ADDRESS=${GLANCE_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+EC2_PUBLIC_ADDRESS=${EC2_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+EC2_ADMIN_ADDRESS=${EC2_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+EC2_INTERNAL_ADDRESS=${EC2_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+SWIFT_PUBLIC_ADDRESS=${SWIFT_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+SWIFT_ADMIN_ADDRESS=${SWIFT_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+SWIFT_INTERNAL_ADDRESS=${SWIFT_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+CINDER_PUBLIC_ADDRESS=${CINDER_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+CINDER_ADMIN_ADDRESS=${CINDER_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+CINDER_INTERNAL_ADDRESS=${CINDER_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+NEUTRON_PUBLIC_ADDRESS=${NEUTRON_PUBLIC_ADDRESS:-$CONTROLLER_PUBLIC_ADDRESS}
-+NEUTRON_ADMIN_ADDRESS=${NEUTRON_ADMIN_ADDRESS:-$CONTROLLER_ADMIN_ADDRESS}
-+NEUTRON_INTERNAL_ADDRESS=${NEUTRON_INTERNAL_ADDRESS:-$CONTROLLER_INTERNAL_ADDRESS}
-+
- TOOLS_DIR=$(cd $(dirname "$0") && pwd)
- KEYSTONE_CONF=${KEYSTONE_CONF:-/etc/keystone/keystone.conf}
- if [[ -r "$KEYSTONE_CONF" ]]; then
-@@ -67,8 +93,8 @@
- 
- # Extract some info from Keystone's configuration file
- if [[ -r "$KEYSTONE_CONF" ]]; then
--    CONFIG_SERVICE_TOKEN=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2)
--    CONFIG_ADMIN_PORT=$(sed 's/[[:space:]]//g' $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2)
-+    CONFIG_SERVICE_TOKEN=$(tr -d '[\t ]' < $KEYSTONE_CONF | grep ^admin_token= | cut -d'=' -f2)
-+    CONFIG_ADMIN_PORT=$(tr -d '[\t ]' < $KEYSTONE_CONF | grep ^admin_port= | cut -d'=' -f2)
- fi
- 
- export SERVICE_TOKEN=${SERVICE_TOKEN:-$CONFIG_SERVICE_TOKEN}
-@@ -136,6 +162,22 @@
-                        --role-id $ADMIN_ROLE \
-                        --tenant-id $SERVICE_TENANT
- 
-+CINDER_USER=$(get_id keystone user-create --name=cinder \
-+                                          --pass="${CINDER_PASSWORD}" \
-+                                          --tenant-id $SERVICE_TENANT)
-+
-+keystone user-role-add --user-id $CINDER_USER \
-+                       --role-id $ADMIN_ROLE \
-+                       --tenant-id $SERVICE_TENANT
-+
-+NEUTRON_USER=$(get_id keystone user-create --name=neutron \
-+                                           --pass="${NEUTRON_PASSWORD}" \
-+                                           --tenant-id $SERVICE_TENANT)
-+
-+keystone user-role-add --user-id $NEUTRON_USER \
-+                       --role-id $ADMIN_ROLE \
-+                       --tenant-id $SERVICE_TENANT
-+
- #
- # Keystone service
- #
-@@ -159,23 +201,23 @@
-                         --description="Nova Compute Service")
- if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-     keystone endpoint-create --region RegionOne --service-id $NOVA_SERVICE \
--        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
--        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
--        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s"
-+        --publicurl "http://$NOVA_PUBLIC_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
-+        --adminurl "http://$NOVA_ADMIN_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s" \
-+        --internalurl "http://$NOVA_INTERNAL_ADDRESS:\$(compute_port)s/v1.1/\$(tenant_id)s"
- fi
- 
- #
- # Volume service
- #
- VOLUME_SERVICE=$(get_id \
--keystone service-create --name=volume \
-+keystone service-create --name=cinder \
-                         --type=volume \
--                        --description="Nova Volume Service")
-+                        --description="Cinder Volume Service")
- if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-     keystone endpoint-create --region RegionOne --service-id $VOLUME_SERVICE \
--        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
--        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
--        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
-+        --publicurl "http://$CINDER_PUBLIC_ADDRESS:8776/v1/\$(tenant_id)s" \
-+        --adminurl "http://$CINDER_ADMIN_ADDRESS:8776/v1/\$(tenant_id)s" \
-+        --internalurl "http://$CINDER_INTERNAL_ADDRESS:8776/v1/\$(tenant_id)s"
- fi
- 
- #
-@@ -187,9 +229,9 @@
-                         --description="Glance Image Service")
- if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-     keystone endpoint-create --region RegionOne --service-id $GLANCE_SERVICE \
--        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9292" \
--        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9292" \
--        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9292"
-+        --publicurl "http://$GLANCE_PUBLIC_ADDRESS:9292" \
-+        --adminurl "http://$GLANCE_ADMIN_ADDRESS:9292" \
-+        --internalurl "http://$GLANCE_INTERNAL_ADDRESS:9292"
- fi
- 
- #
-@@ -201,9 +243,9 @@
-                         --description="EC2 Compatibility Layer")
- if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-     keystone endpoint-create --region RegionOne --service-id $EC2_SERVICE \
--        --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:8773/services/Cloud" \
--        --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:8773/services/Admin" \
--        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8773/services/Cloud"
-+        --publicurl "http://$EC2_PUBLIC_ADDRESS:8773/services/Cloud" \
-+        --adminurl "http://$EC2_ADMIN_ADDRESS:8773/services/Admin" \
-+        --internalurl "http://$EC2_INTERNAL_ADDRESS:8773/services/Cloud"
- fi
- 
- #
-@@ -212,15 +254,30 @@
- SWIFT_SERVICE=$(get_id \
- keystone service-create --name=swift \
-                         --type="object-store" \
--                        --description="Swift Service")
-+                        --description="Swift Object Store Service")
- if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-     keystone endpoint-create --region RegionOne --service-id $SWIFT_SERVICE \
--        --publicurl   "http://$CONTROLLER_PUBLIC_ADDRESS:8888/v1/AUTH_\$(tenant_id)s" \
--        --adminurl    "http://$CONTROLLER_ADMIN_ADDRESS:8888/v1" \
--        --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:8888/v1/AUTH_\$(tenant_id)s"
-+        --publicurl   "http://$SWIFT_PUBLIC_ADDRESS:8080/v1/AUTH_\$(tenant_id)s" \
-+        --adminurl    "http://$SWIFT_ADMIN_ADDRESS:8080/v1" \
-+        --internalurl "http://$SWIFT_INTERNAL_ADDRESS:8080/v1/AUTH_\$(tenant_id)s"
-+fi
-+
-+#
-+# Neutron service
-+#
-+NEUTRON_SERVICE=$(get_id \
-+keystone service-create --name=neutron \
-+                        --type=network \
-+                        --description="Neutron Network Service")
-+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
-+    keystone endpoint-create --region RegionOne --service-id $NEUTRON_SERVICE \
-+        --publicurl "http://$NEUTRON_PUBLIC_ADDRESS:9696/" \
-+        --adminurl "http://$NEUTRON_ADMIN_ADDRESS:9696/" \
-+        --internalurl "http://$NEUTRON_INTERNAL_ADDRESS:9696/"
- fi
- 
- # create ec2 creds and parse the secret and access key returned
-+unset SERVICE_ENDPOINT SERVICE_TOKEN
- RESULT=$(keystone ec2-credentials-create --tenant-id=$SERVICE_TENANT --user-id=$ADMIN_USER)
- ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'`
- ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'`
--- a/components/openstack/keystone/patches/07-CVE-2014-2828.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-Upstream patch for bug 1300274.
-
-Fixed in Havana 2013.2.4, Icehouse 2014.1
-
-From e364ba5b12de8e4c11bd80bcca903f9615dcfc2e Mon Sep 17 00:00:00 2001
-From: Florent Flament <[email protected]>
-Date: Tue, 1 Apr 2014 12:48:22 +0000
-Subject: Sanitizes authentication methods received in requests.
-
-When a user authenticates against Identity V3 API, he can specify
-multiple authentication methods. This patch removes duplicates, which
-could have been used to achieve DoS attacks.
-
-Closes-Bug: 1300274
-(cherry picked from commit ef868ad92c00e23a4a5e9eb71e3e0bf5ae2fff0c)
-Cherry-pick from https://review.openstack.org/#/c/84425/
-
-Change-Id: I6e60324309baa094a5e54b012fb0fc528fea72ab
-
---- keystone-2013.1.4/keystone/auth/controllers.py.orig	2014-04-10 14:46:27.890585026 -0600
-+++ keystone-2013.1.4/keystone/auth/controllers.py	2014-04-10 14:47:53.783687911 -0600
-@@ -228,7 +228,13 @@
-         :returns: list of auth method names
- 
-         """
--        return self.auth['identity']['methods']
-+        # Sanitizes methods received in request's body
-+        # Filters out duplicates, while keeping elements' order.
-+        method_names = []
-+        for method in self.auth['identity']['methods']:
-+            if method not in method_names:
-+                method_names.append(method)
-+        return method_names
- 
-     def get_method_data(self, method):
-         """ Get the auth method payload.
---- keystone-2013.1.4/tests/test_v3_auth.py.orig	2014-04-10 14:50:45.929495618 -0600
-+++ keystone-2013.1.4/tests/test_v3_auth.py	2014-04-10 14:50:48.764440233 -0600
-@@ -83,6 +83,17 @@
-                           None,
-                           auth_data)
- 
-+    def test_get_method_names_duplicates(self):
-+        auth_data = self.build_authentication_request(
-+            token='test',
-+            user_id='test',
-+            password='test')['auth']
-+        auth_data['identity']['methods'] = ['password', 'token',
-+                                            'password', 'password']
-+        context = None
-+        auth_info = auth.controllers.AuthInfo(context, auth_data)
-+        self.assertEqual(auth_info.get_method_names(),
-+                         ['password', 'token'])
- 
- class TestTokenAPIs(test_v3.RestfulTestCase):
-     def setUp(self):
--- a/components/openstack/keystone/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/keystone/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,9 +1,6 @@
-library/python-2/eventlet-26
-library/python-2/greenlet-26
-library/python-2/sqlalchemy-26
-library/python-2/sqlalchemy-migrate-26
 library/python/eventlet-26
-library/python/greenlet-26
+library/python/oslo.config-26
+library/python/pbr-26
 library/python/sqlalchemy-26
 library/python/sqlalchemy-migrate-26
 runtime/python-26
--- a/components/openstack/neutron/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,13 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		neutron
-COMPONENT_OLDNAME=	quantum
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
-COMPONENT_SRC=		$(COMPONENT_OLDNAME)-$(COMPONENT_VERSION)
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
+COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:3bd26ae7dabe6093a3cbe701ac8d7022fbdbe1d8231ab1c6866de388684e272c
+    sha256:116cc2ce9f2f5b2dcbd5a314d78a496b180a148dadd02a076ff664b0f3c20cd3
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/neutron
@@ -51,25 +50,22 @@
 
 # move all the proper files into place and construct .pyc files for them
 COMPONENT_POST_BUILD_ACTION += \
-	 ($(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent; \
-	 $(CP) files/agent/evs_l3_agent.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent; \
-	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/linux; \
-	 $(CP) files/agent/linux/device.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/linux; \
-	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/__init__.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/device.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/dhcp.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/interface.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/ipfilters_manager.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(CP) files/agent/solaris/net_lib.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/agent/solaris; \
-	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs; \
-	 $(TOUCH) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/__init__.py; \
-	 $(CP) files/evs/plugin.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs; \
-	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/db; \
-	 $(TOUCH) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/db/__init__.py; \
-	 $(CP) files/evs/db/api.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/db; \
-	 $(CP) files/evs/db/l3nat.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/db; \
-	 $(CP) files/evs/db/quotas_db.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/quantum/plugins/evs/db)
+	 ($(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent; \
+	 $(CP) files/agent/evs_l3_agent.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent; \
+	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(CP) files/agent/solaris/__init__.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(CP) files/agent/solaris/dhcp.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(CP) files/agent/solaris/interface.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(CP) files/agent/solaris/ipfilters_manager.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(CP) files/agent/solaris/net_lib.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/agent/solaris; \
+	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs; \
+	 $(TOUCH) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/__init__.py; \
+	 $(CP) files/evs/plugin.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs; \
+	 $(MKDIR) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/db; \
+	 $(TOUCH) $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/db/__init__.py; \
+	 $(CP) files/evs/db/api.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/db; \
+	 $(CP) files/evs/db/l3nat.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/db; \
+	 $(CP) files/evs/db/quotas_db.py $(PROTO_DIR)/usr/lib/python$(PYTHON_VERSION)/vendor-packages/neutron/plugins/evs/db)
 
 
 COMPONENT_POST_INSTALL_ACTION += \
--- a/components/openstack/neutron/files/agent/evs_l3_agent.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/agent/evs_l3_agent.py	Wed Jun 11 17:13:12 2014 -0700
@@ -21,22 +21,26 @@
 #
 
 """
-Based off generic l3_agent (quantum/agent/l3_agent) code
+Based off generic l3_agent (neutron/agent/l3_agent) code
 """
 
+import netaddr
+
 from oslo.config import cfg
 
-from quantum.agent import l3_agent
-from quantum.agent.solaris import interface
-from quantum.agent.solaris import ipfilters_manager
-from quantum.agent.solaris import net_lib
-from quantum.common import constants as l3_constants
-from quantum.openstack.common import log as logging
+from neutron.agent import l3_agent
+from neutron.agent.linux import utils
+from neutron.agent.solaris import interface
+from neutron.agent.solaris import ipfilters_manager
+from neutron.agent.solaris import net_lib
+from neutron.common import constants as l3_constants
+from neutron.openstack.common import log as logging
 
 
 LOG = logging.getLogger(__name__)
 INTERNAL_DEV_PREFIX = 'l3i'
 EXTERNAL_DEV_PREFIX = 'l3e'
+FLOATING_IP_CIDR_SUFFIX = '/32'
 
 
 class RouterInfo(object):
@@ -44,35 +48,52 @@
     def __init__(self, router_id, root_helper, use_namespaces, router):
         self.router_id = router_id
         self.ex_gw_port = None
+        self._snat_enabled = None
+        self._snat_action = None
         self.internal_ports = []
-        self.floating_ips = []
+        # We do not need either root_helper or namespace, so set them to None
+        self.root_helper = None
+        self.use_namespaces = None
+        # Invoke the setter for establishing initial SNAT action
         self.router = router
-        self.ipfilters_manager = ipfilters_manager.IpfiltersManager()
+        self.ipfilters_manager = ipfilters_manager.IPfiltersManager()
         self.routes = []
 
+    @property
+    def router(self):
+        return self._router
+
+    @router.setter
+    def router(self, value):
+        self._router = value
+        if not self._router:
+            return
+        # enable_snat by default if it wasn't specified by plugin
+        self._snat_enabled = self._router.get('enable_snat', True)
+        # Set a SNAT action for the router
+        if self._router.get('gw_port'):
+            self._snat_action = ('add_rules' if self._snat_enabled
+                                 else 'remove_rules')
+        elif self.ex_gw_port:
+            # Gateway port was removed, remove rules
+            self._snat_action = 'remove_rules'
+
+    def ns_name(self):
+        pass
+
+    def perform_snat_action(self, snat_callback, *args):
+        # Process SNAT rules for attached subnets
+        if self._snat_action:
+            snat_callback(self, self._router.get('gw_port'),
+                          *args, action=self._snat_action)
+        self._snat_action = None
+
 
 class EVSL3NATAgent(l3_agent.L3NATAgent):
 
     RouterInfo = RouterInfo
 
     OPTS = [
-        cfg.StrOpt('external_network_bridge', default='',
-                   help=_("Name of bridge used for external network "
-                          "traffic.")),
-        cfg.StrOpt('interface_driver',
-                   help=_("The driver used to manage the virtual "
-                          "interface.")),
-        cfg.BoolOpt('use_namespaces', default=False,
-                    help=_("Allow overlapping IP.")),
-        cfg.StrOpt('router_id',
-                   help=_("If namespaces is disabled, the l3 agent can only"
-                          " configure a router that has the matching router "
-                          "ID.")),
-        cfg.BoolOpt('handle_internal_only_routers', default=True,
-                    help=_("Agent should implement routers with no gateway")),
-        cfg.StrOpt('gateway_external_network_id', default='',
-                   help=_("UUID of external network for routers implemented "
-                          "by the agents.")),
         cfg.StrOpt('external_network_datalink', default='net0',
                    help=_("Name of the datalink that connects to "
                           "an external network.")),
@@ -84,9 +105,6 @@
     def __init__(self, host, conf=None):
         cfg.CONF.register_opts(self.OPTS)
         cfg.CONF.register_opts(interface.OPTS)
-        if not cfg.CONF.router_id:
-            raise SystemExit(_("router_id option needs to be set"))
-
         super(EVSL3NATAgent, self).__init__(host=host, conf=conf)
 
     def _router_added(self, router_id, router):
@@ -102,6 +120,92 @@
         self.process_router(ri)
         del self.router_info[router_id]
 
+    def process_router(self, ri):
+        ex_gw_port = self._get_ex_gw_port(ri)
+        internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
+        existing_port_ids = set([p['id'] for p in ri.internal_ports])
+        current_port_ids = set([p['id'] for p in internal_ports
+                                if p['admin_state_up']])
+        new_ports = [p for p in internal_ports if
+                     p['id'] in current_port_ids and
+                     p['id'] not in existing_port_ids]
+        old_ports = [p for p in ri.internal_ports if
+                     p['id'] not in current_port_ids]
+        for p in new_ports:
+            self._set_subnet_info(p)
+            ri.internal_ports.append(p)
+            self.internal_network_added(ri, p)
+
+        for p in old_ports:
+            ri.internal_ports.remove(p)
+            self.internal_network_removed(ri, p)
+
+        internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports]
+        # TODO(salv-orlando): RouterInfo would be a better place for
+        # this logic too
+        ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
+                         ri.ex_gw_port and ri.ex_gw_port['id'])
+
+        interface_name = None
+        if ex_gw_port_id:
+            interface_name = self.get_external_device_name(ex_gw_port_id)
+        if ex_gw_port and not ri.ex_gw_port:
+            self._set_subnet_info(ex_gw_port)
+            self.external_gateway_added(ri, ex_gw_port,
+                                        interface_name, internal_cidrs)
+        elif not ex_gw_port and ri.ex_gw_port:
+            self.external_gateway_removed(ri, ri.ex_gw_port,
+                                          interface_name, internal_cidrs)
+
+        # We don't need this since our IPnat rules are bi-directional
+        # Process SNAT rules for external gateway
+        # ri.perform_snat_action(self._handle_router_snat_rules,
+        #                       internal_cidrs, interface_name)
+
+        # Process DNAT rules for floating IPs
+        if ex_gw_port:
+            self.process_router_floating_ips(ri, ex_gw_port)
+
+        ri.ex_gw_port = ex_gw_port
+        ri.enable_snat = ri.router.get('enable_snat')
+        self.routes_updated(ri)
+
+    def process_router_floating_ips(self, ri, ex_gw_port):
+        """Configure the router's floating IPs
+        Configures floating ips using ipnat(1m) on the router's gateway device.
+
+        Cleans up floating ips that should not longer be configured.
+        """
+        ifname = self.get_external_device_name(ex_gw_port['id'])
+        ipintf = net_lib.IPInterface(ifname)
+        ipaddr_list = ipintf.ipaddr_list()['static']
+
+        # Clear out all ipnat rules for floating ips
+        ri.ipfilters_manager.remove_nat_rules(ri.ipfilters_manager.ipv4['nat'])
+
+        existing_cidrs = set([addr for addr in ipaddr_list])
+        new_cidrs = set()
+
+        # Loop once to ensure that floating ips are configured.
+        for fip in ri.router.get(l3_constants.FLOATINGIP_KEY, []):
+            fip_ip = fip['floating_ip_address']
+            fip_cidr = str(fip_ip) + FLOATING_IP_CIDR_SUFFIX
+
+            new_cidrs.add(fip_cidr)
+
+            if fip_cidr not in existing_cidrs:
+                ipintf.create_address(fip_cidr)
+
+            # Rebuild iptables rules for the floating ip.
+            fixed_cidr = str(fip['fixed_ip_address']) + '/32'
+            nat_rules = ['bimap %s %s -> %s' % (ifname, fixed_cidr, fip_cidr)]
+            ri.ipfilters_manager.add_nat_rules(nat_rules)
+
+        # Clean up addresses that no longer belong on the gateway interface.
+        for ip_cidr in existing_cidrs - new_cidrs:
+            if ip_cidr.endswith(FLOATING_IP_CIDR_SUFFIX):
+                ipintf.delete_address(ip_cidr)
+
     def get_internal_device_name(self, port_id):
         # Because of the way how dnsmasq works on Solaris, the length
         # of datalink name cannot exceed 16 (includes terminating nul
@@ -113,15 +217,57 @@
         return dname.replace('-', '_')
 
     def get_external_device_name(self, port_id):
+        # please see the comment above
         dname = (EXTERNAL_DEV_PREFIX + port_id)[:13]
         dname += '_0'
         return dname.replace('-', '_')
 
-    def external_gateway_added(self, ri, ex_gw_port, internal_cidrs):
-        pass
+    def external_gateway_added(self, ri, ex_gw_port,
+                               external_dlname, internal_cidrs):
 
-    def external_gateway_removed(self, ri, ex_gw_port, internal_cidrs):
-        pass
+        if not net_lib.Datalink.datalink_exists(external_dlname):
+            dl = net_lib.Datalink(external_dlname)
+            # need to determine the VLAN ID for the VNIC
+            evsname = ex_gw_port['network_id']
+            tenantname = ex_gw_port['tenant_id']
+            cmd = ['/usr/sbin/evsadm', 'show-evs', '-co', 'vid',
+                   '-f', 'tenant=%s' % tenantname, evsname]
+            try:
+                stdout = utils.execute(cmd)
+            except Exception as err:
+                LOG.error(_("Failed to retrieve the VLAN ID associated "
+                            "with the external network, and it is required "
+                            "to create external gateway port: %s") % err)
+                return
+            vid = stdout.splitlines()[0].strip()
+            if vid == "":
+                LOG.error(_("External Network does not has a VLAN ID "
+                            "associated with it, and it is required to "
+                            "create external gateway port"))
+                return
+            mac_address = ex_gw_port['mac_address']
+            dl.create_vnic(self.conf.external_network_datalink,
+                           mac_address=mac_address, vid=vid)
+        self.driver.init_l3(external_dlname, [ex_gw_port['ip_cidr']])
+
+        # TODO(gmoodalb): wrap route(1m) command within a class in net_lib.py
+        gw_ip = ex_gw_port['subnet']['gateway_ip']
+        if gw_ip:
+            cmd = ['/usr/bin/pfexec', '/usr/sbin/route', 'add', 'default',
+                   gw_ip]
+            utils.execute(cmd, check_exit_code=False)
+
+    def external_gateway_removed(self, ri, ex_gw_port,
+                                 external_dlname, internal_cidrs):
+
+        if net_lib.Datalink.datalink_exists(external_dlname):
+            self.driver.fini_l3(external_dlname)
+            self.driver.unplug(external_dlname)
+        gw_ip = ex_gw_port['subnet']['gateway_ip']
+        if gw_ip:
+            cmd = ['/usr/bin/pfexec', '/usr/sbin/route', 'delete', 'default',
+                   gw_ip]
+            utils.execute(cmd, check_exit_code=False)
 
     def _get_ippool_name(self, mac_address):
         # generate a unique-name for ippool(1m) from that last 3
@@ -129,7 +275,7 @@
         mac_suffix = mac_address.split(':')[3:]
         return int("".join(mac_suffix), 16)
 
-    def internal_network_added(self, ri, ex_gw_port, port):
+    def internal_network_added(self, ri, port):
 
         internal_dlname = self.get_internal_device_name(port['id'])
         if not net_lib.Datalink.datalink_exists(internal_dlname):
@@ -149,7 +295,7 @@
         for oip in ri.internal_ports:
             if oip['mac_address'] != port['mac_address']:
                 if (self.conf.allow_forwarding_between_networks and
-                    oip['tenant_id'] == port['tenant_id']):
+                        oip['tenant_id'] == port['tenant_id']):
                     continue
                 other_subnet_cidrs.append(oip['subnet']['cidr'])
                 ippool_name = self._get_ippool_name(oip['mac_address'])
@@ -162,7 +308,7 @@
                  (internal_dlname, subnet_cidr, new_ippool_name)]
         ri.ipfilters_manager.add_ipf_rules(rules)
 
-    def internal_network_removed(self, ri, ex_gw_port, port):
+    def internal_network_removed(self, ri, port):
         internal_dlname = self.get_internal_device_name(port['id'])
         if net_lib.Datalink.datalink_exists(internal_dlname):
             self.driver.fini_l3(internal_dlname)
@@ -177,35 +323,21 @@
         ri.ipfilters_manager.remove_ippool(ippool_name, None)
         for internal_port in ri.internal_ports:
             if (self.conf.allow_forwarding_between_networks and
-                internal_port['tenant_id'] == port['tenant_id']):
+                    internal_port['tenant_id'] == port['tenant_id']):
                 continue
             ippool_name = \
                 self._get_ippool_name(internal_port['mac_address'])
-            ri.ipfilters_manager.remove_ippool(ippool_name,
-                                               internal_port['subnet']['cidr'])
-
-    def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
-        floating_ipcidr = str(floating_ip) + '/32'
-        fixed_ipcidr = str(fixed_ip) + '/32'
-        #ifname = self.get_external_device_name(ex_gw_port['id'])
-        ifname = self.conf.external_network_datalink
-        ipif = net_lib.IPInterface(ifname)
-        ipif.create_address(floating_ipcidr)
+            subnet_cidr = internal_port['subnet']['cidr']
+            ri.ipfilters_manager.remove_ippool(ippool_name, [subnet_cidr])
 
-        nat_rules = ['bimap %s %s -> %s' %
-                     (ifname, fixed_ipcidr, floating_ipcidr)]
-        ri.ipfilters_manager.add_nat_rules(nat_rules)
-
-    def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
-        floating_ipcidr = str(floating_ip) + '/32'
-        fixed_ipcidr = str(fixed_ip) + '/32'
-        ifname = self.conf.external_network_datalink
-        ipif = net_lib.IPInterface(ifname)
-        ipif.delete_address(floating_ipcidr)
-
-        nat_rules = ['bimap %s %s -> %s' %
-                     (ifname, fixed_ipcidr, floating_ipcidr)]
-        ri.ipfilters_manager.remove_nat_rules(nat_rules)
+    def routers_updated(self, context, routers):
+        super(EVSL3NATAgent, self).routers_updated(context, routers)
+        if routers:
+            # If router's interface was removed, then the VNIC associated
+            # with that interface must be deleted immediately. The EVS
+            # plugin can delete the virtual port iff the VNIC associated
+            # with that virtual port is deleted first.
+            self._rpc_loop()
 
     def routes_updated(self, ri):
         pass
--- a/components/openstack/neutron/files/agent/linux/device.py	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import socket
-import uuid
-
-import netaddr
-from oslo.config import cfg
-
-from quantum.agent.common import config
-from quantum.agent.linux import interface
-from quantum.agent.linux import ip_lib
-from quantum.common import exceptions
-from quantum.openstack.common import importutils
-from quantum.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-NS_PREFIX = 'qdhcp-'
-METADATA_DEFAULT_PREFIX = 16
-METADATA_DEFAULT_IP = '169.254.169.254/%d' % METADATA_DEFAULT_PREFIX
-
-
-class DeviceManager(object):
-    OPTS = [
-        cfg.StrOpt('interface_driver',
-                   help=_("The driver used to manage the virtual interface."))
-    ]
-
-    def __init__(self, conf, plugin):
-        self.conf = conf
-        self.root_helper = config.get_root_helper(conf)
-        self.plugin = plugin
-        cfg.CONF.register_opts(DeviceManager.OPTS)
-        cfg.CONF.register_opts(interface.OPTS)
-        if not conf.interface_driver:
-            raise SystemExit(_('You must specify an interface driver'))
-        try:
-            self.driver = importutils.import_object(conf.interface_driver,
-                                                    conf)
-        except:
-            msg = _("Error importing interface driver "
-                    "'%s'") % conf.interface_driver
-            raise SystemExit(msg)
-
-    def get_interface_name(self, network, port=None):
-        """Return interface(device) name for use by the DHCP process."""
-        if not port:
-            device_id = self.get_device_id(network)
-            port = self.plugin.get_dhcp_port(network.id, device_id)
-        return self.driver.get_device_name(port)
-
-    def get_device_id(self, network):
-        """Return a unique DHCP device ID for this host on the network."""
-        # There could be more than one dhcp server per network, so create
-        # a device id that combines host and network ids
-
-        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
-        return 'dhcp%s-%s' % (host_uuid, network.id)
-
-    def _get_device(self, network):
-        """Return DHCP ip_lib device for this host on the network."""
-        device_id = self.get_device_id(network)
-        port = self.plugin.get_dhcp_port(network.id, device_id)
-        interface_name = self.get_interface_name(network, port)
-        namespace = NS_PREFIX + network.id
-        return ip_lib.IPDevice(interface_name,
-                               self.root_helper,
-                               namespace)
-
-    def _set_default_route(self, network):
-        """Sets the default gateway for this dhcp namespace.
-
-        This method is idempotent and will only adjust the route if adjusting
-        it would change it from what it already is.  This makes it safe to call
-        and avoids unnecessary perturbation of the system.
-        """
-        device = self._get_device(network)
-        gateway = device.route.get_gateway()
-
-        for subnet in network.subnets:
-            skip_subnet = (
-                subnet.ip_version != 4
-                or not subnet.enable_dhcp
-                or subnet.gateway_ip is None)
-
-            if skip_subnet:
-                continue
-
-            if gateway != subnet.gateway_ip:
-                m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
-                LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
-
-                device.route.add_gateway(subnet.gateway_ip)
-
-            return
-
-        # No subnets on the network have a valid gateway.  Clean it up to avoid
-        # confusion from seeing an invalid gateway here.
-        if gateway is not None:
-            msg = _('Removing gateway for dhcp netns on net %s')
-            LOG.debug(msg, network.id)
-
-            device.route.delete_gateway(gateway)
-
-    def setup(self, network, reuse_existing=False):
-        """Create and initialize a device for network's DHCP on this host."""
-        device_id = self.get_device_id(network)
-        port = self.plugin.get_dhcp_port(network.id, device_id)
-
-        interface_name = self.get_interface_name(network, port)
-
-        if self.conf.use_namespaces:
-            namespace = NS_PREFIX + network.id
-        else:
-            namespace = None
-
-        if ip_lib.device_exists(interface_name,
-                                self.root_helper,
-                                namespace):
-            if not reuse_existing:
-                raise exceptions.PreexistingDeviceFailure(
-                    dev_name=interface_name)
-
-            LOG.debug(_('Reusing existing device: %s.'), interface_name)
-        else:
-            self.driver.plug(network.id,
-                             port.id,
-                             interface_name,
-                             port.mac_address,
-                             namespace=namespace)
-        ip_cidrs = []
-        for fixed_ip in port.fixed_ips:
-            subnet = fixed_ip.subnet
-            net = netaddr.IPNetwork(subnet.cidr)
-            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
-            ip_cidrs.append(ip_cidr)
-
-        if (self.conf.enable_isolated_metadata and
-            self.conf.use_namespaces):
-            ip_cidrs.append(METADATA_DEFAULT_IP)
-
-        self.driver.init_l3(interface_name, ip_cidrs,
-                            namespace=namespace)
-
-        # ensure that the dhcp interface is first in the list
-        if namespace is None:
-            device = ip_lib.IPDevice(interface_name,
-                                     self.root_helper)
-            device.route.pullup_route(interface_name)
-
-        if self.conf.enable_metadata_network:
-            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
-            metadata_subnets = [s for s in network.subnets if
-                                netaddr.IPNetwork(s.cidr) in meta_cidr]
-            if metadata_subnets:
-                # Add a gateway so that packets can be routed back to VMs
-                device = ip_lib.IPDevice(interface_name,
-                                         self.root_helper,
-                                         namespace)
-                # Only 1 subnet on metadata access network
-                gateway_ip = metadata_subnets[0].gateway_ip
-                device.route.add_gateway(gateway_ip)
-        elif self.conf.use_namespaces:
-            self._set_default_route(network)
-
-        return interface_name
-
-    def update(self, network):
-        """Update device settings for the network's DHCP on this host."""
-        if self.conf.use_namespaces and not self.conf.enable_metadata_network:
-            self._set_default_route(network)
-
-    def destroy(self, network, device_name):
-        """Destroy the device used for the network's DHCP on this host."""
-        if self.conf.use_namespaces:
-            namespace = NS_PREFIX + network.id
-        else:
-            namespace = None
-
-        self.driver.unplug(device_name, namespace=namespace)
-
-        self.plugin.release_dhcp_port(network.id,
-                                      self.get_device_id(network))
--- a/components/openstack/neutron/files/agent/solaris/device.py	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,100 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# @author: Girish Moodalbail, Oracle, Inc.
-
-import socket
-import uuid
-
-import netaddr
-
-from oslo.config import cfg
-from quantum.agent.common import config
-from quantum.agent.solaris import interface
-from quantum.openstack.common import importutils
-from quantum.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class DeviceManager(object):
-    OPTS = [
-        cfg.StrOpt('interface_driver',
-                   help=_('The driver used to manage the virtual interface.'))
-    ]
-
-    def __init__(self, conf, plugin):
-        self.conf = conf
-        self.root_helper = config.get_root_helper(conf)
-        self.plugin = plugin
-        cfg.CONF.register_opts(DeviceManager.OPTS)
-        cfg.CONF.register_opts(interface.OPTS)
-        if not conf.interface_driver:
-            raise SystemExit(_('You must specify an interface driver.'))
-        try:
-            self.driver = importutils.import_object(conf.interface_driver,
-                                                    conf)
-        except ImportError as ie:
-            raise SystemExit(_('Error importing interface driver %s: %s')
-                             % (conf.interface_driver, ie))
-
-    def get_interface_name(self, network, port=None):
-        """Return interface(device) name for use by the DHCP process."""
-
-        if not port:
-            device_id = self.get_device_id(network)
-            port = self.plugin.get_dhcp_port(network.id, device_id)
-        return self.driver.get_device_name(port)
-
-    def get_device_id(self, network):
-        """Return a unique DHCP device ID for this host on the network."""
-
-        # There could be more than one dhcp server per network, so create
-        # a device id that combines host and network ids
-        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
-        return 'dhcp%s-%s' % (host_uuid, network.id)
-
-    def setup(self, network, reuse_existing=False):
-        """Create and initialize a device for network's DHCP on this host."""
-        device_id = self.get_device_id(network)
-        port = self.plugin.get_dhcp_port(network.id, device_id)
-
-        interface_name = self.get_interface_name(network, port)
-
-        self.driver.plug(network.tenant_id, network.id, port.id,
-                         interface_name)
-        ip_cidrs = []
-        for fixed_ip in port.fixed_ips:
-            subnet = fixed_ip.subnet
-            net = netaddr.IPNetwork(subnet.cidr)
-            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
-            ip_cidrs.append(ip_cidr)
-
-        self.driver.init_l3(interface_name, ip_cidrs)
-
-        return interface_name
-
-    def update(self, network):
-        """Update device settings for the network's DHCP on this host."""
-        pass
-
-    def destroy(self, network, device_name):
-        """Destroy the device used for the network's DHCP on this host."""
-
-        self.driver.fini_l3(device_name)
-        self.driver.unplug(device_name)
-        self.plugin.release_dhcp_port(network.id,
-                                      self.get_device_id(network))
--- a/components/openstack/neutron/files/agent/solaris/dhcp.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/dhcp.py	Wed Jun 11 17:13:12 2014 -0700
@@ -1,3 +1,8 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
 # Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,14 +23,21 @@
 import os
 import re
 import shutil
+import socket
 import StringIO
+import sys
+import uuid
 
 import netaddr
+from oslo.config import cfg
 
-from oslo.config import cfg
-from quantum.agent.linux import utils
-from quantum.openstack.common import log as logging
-from quantum.openstack.common import uuidutils
+from neutron.agent.linux import utils
+from neutron.agent.solaris import net_lib
+from neutron.common import exceptions
+from neutron.openstack.common import importutils
+from neutron.openstack.common import jsonutils
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
 
 LOG = logging.getLogger(__name__)
 
@@ -33,9 +45,6 @@
     cfg.StrOpt('dhcp_confs',
                default='$state_path/dhcp',
                help=_('Location to store DHCP server config files')),
-    cfg.IntOpt('dhcp_lease_time',
-               default=120,
-               help=_('Lifetime of a DHCP lease in seconds')),
     cfg.StrOpt('dhcp_domain',
                default='openstacklocal',
                help=_('Domain to use for building the hostnames')),
@@ -45,6 +54,12 @@
     cfg.StrOpt('dnsmasq_dns_server',
                help=_('Use another DNS server before any in '
                       '/etc/resolv.conf.')),
+    cfg.IntOpt(
+        'dnsmasq_lease_max',
+        default=(2 ** 24),
+        help=_('Limit number of leases to prevent a denial-of-service.')),
+    cfg.StrOpt('interface_driver',
+               help=_("The driver used to manage the virtual interface.")),
 ]
 
 IPV4 = 4
@@ -53,19 +68,50 @@
 TCP = 'tcp'
 DNS_PORT = 53
 DHCPV4_PORT = 67
-DHCPV6_PORT = 467
+DHCPV6_PORT = 547
+METADATA_DEFAULT_PREFIX = 16
+METADATA_DEFAULT_IP = '169.254.169.254'
+METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
+                                   METADATA_DEFAULT_PREFIX)
+METADATA_PORT = 80
+WIN2k3_STATIC_DNS = 249
+
+
+class DictModel(object):
+    """Convert dict into an object that provides attribute access to values."""
+    def __init__(self, d):
+        for key, value in d.iteritems():
+            if isinstance(value, list):
+                value = [DictModel(item) if isinstance(item, dict) else item
+                         for item in value]
+            elif isinstance(value, dict):
+                value = DictModel(value)
+
+            setattr(self, key, value)
+
+
+class NetModel(DictModel):
+
+    def __init__(self, use_namespaces, d):
+        super(NetModel, self).__init__(d)
+
+        self._ns_name = None
+
+    @property
+    def namespace(self):
+        return self._ns_name
 
 
 class DhcpBase(object):
     __metaclass__ = abc.ABCMeta
 
     def __init__(self, conf, network, root_helper='sudo',
-                 device_delegate=None, namespace=None, version=None):
+                 version=None, plugin=None):
         self.conf = conf
         self.network = network
-        self.root_helper = root_helper
-        self.device_delegate = device_delegate
-        self.namespace = namespace
+        self.root_helper = None
+        self.device_manager = DeviceManager(self.conf,
+                                            self.root_helper, plugin)
         self.version = version
 
     @abc.abstractmethod
@@ -80,18 +126,23 @@
         """Restart the dhcp service for the network."""
         self.disable(retain_port=True)
         self.enable()
+        self.device_manager.update(self.network)
 
     @abc.abstractproperty
     def active(self):
         """Boolean representing the running state of the DHCP server."""
 
     @abc.abstractmethod
+    def release_lease(self, mac_address, removed_ips):
+        """Release a DHCP lease."""
+
+    @abc.abstractmethod
     def reload_allocations(self):
         """Force the DHCP server to reload the assignment database."""
 
     @classmethod
     def existing_dhcp_networks(cls, conf, root_helper):
-        """Return a list of existing networks ids (ones we have configs for)"""
+        """Return a list of existing networks ids that we have configs for."""
 
         raise NotImplementedError
 
@@ -107,12 +158,15 @@
 
     def _enable_dhcp(self):
         """check if there is a subnet within the network with dhcp enabled."""
-        return any(s for s in self.network.subnets if s.enable_dhcp)
+        for subnet in self.network.subnets:
+            if subnet.enable_dhcp:
+                return True
+        return False
 
     def enable(self):
         """Enables DHCP for this network by spawning a local process."""
-        interface_name = self.device_delegate.setup(self.network,
-                                                    reuse_existing=True)
+        interface_name = self.device_manager.setup(self.network,
+                                                   reuse_existing=True)
         if self.active:
             self.restart()
         elif self._enable_dhcp():
@@ -125,10 +179,9 @@
 
         if self.active:
             cmd = ['kill', '-9', pid]
-            utils.execute(cmd)
-
+            utils.execute(cmd, self.root_helper)
             if not retain_port:
-                self.device_delegate.destroy(self.network, self.interface_name)
+                self.device_manager.destroy(self.network, self.interface_name)
 
         elif pid:
             LOG.debug(_('DHCP for %(net_id)s pid %(pid)d is stale, ignoring '
@@ -149,7 +202,7 @@
         conf_dir = os.path.join(confs_dir, self.network.id)
         if ensure_conf_dir:
             if not os.path.isdir(conf_dir):
-                os.makedirs(conf_dir, 0755)
+                os.makedirs(conf_dir, 0o755)
 
         return os.path.join(conf_dir, kind)
 
@@ -162,9 +215,9 @@
             with open(file_name, 'r') as f:
                 try:
                     return converter and converter(f.read()) or f.read()
-                except ValueError, e:
+                except ValueError:
                     msg = _('Unable to convert value in %s')
-        except IOError, e:
+        except IOError:
             msg = _('Unable to access %s')
 
         LOG.debug(msg % file_name)
@@ -181,7 +234,7 @@
         if pid is None:
             return False
 
-        cmd = ['pargs', pid]
+        cmd = ['/usr/bin/pargs', pid]
         try:
             return self.network.id in utils.execute(cmd)
         except RuntimeError:
@@ -204,42 +257,54 @@
 
 class Dnsmasq(DhcpLocalProcess):
     # The ports that need to be opened when security policies are active
-    # on the Quantum port used for DHCP.  These are provided as a convenience
+    # on the Neutron port used for DHCP.  These are provided as a convenience
     # for users of this class.
     PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
-             IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)]}
+             IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
+             }
 
     _TAG_PREFIX = 'tag%d'
 
-    QUANTUM_NETWORK_ID_KEY = 'QUANTUM_NETWORK_ID'
-    QUANTUM_RELAY_SOCKET_PATH_KEY = 'QUANTUM_RELAY_SOCKET_PATH'
+    NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
+    NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
+    MINIMUM_VERSION = 2.59
 
     @classmethod
     def check_version(cls):
-        # For Solaris, we rely on the packaging system to ensure a
-        # matching/supported version of dnsmasq
-        pass
+        ver = 0
+        try:
+            cmd = ['/usr/lib/inet/dnsmasq', '--version']
+            out = utils.execute(cmd)
+            ver = re.findall("\d+.\d+", out)[0]
+            is_valid_version = float(ver) >= cls.MINIMUM_VERSION
+            # For Solaris, we rely on the packaging system to ensure a
+            # matching/supported version of dnsmasq.
+            if not is_valid_version:
+                LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
+                              'DHCP AGENT MAY NOT RUN CORRECTLY! '
+                              'Please ensure that its version is %s '
+                              'or above!'), cls.MINIMUM_VERSION)
+        except (OSError, RuntimeError, IndexError, ValueError):
+            LOG.warning(_('Unable to determine dnsmasq version. '
+                          'Please ensure that its version is %s '
+                          'or above!'), cls.MINIMUM_VERSION)
+        return float(ver)
 
     @classmethod
     def existing_dhcp_networks(cls, conf, root_helper):
-        """Return a list of existing networks ids (ones we have configs for)"""
+        """Return a list of existing networks ids that we have configs for."""
 
         confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
 
-        class FakeNetwork:
-            def __init__(self, net_id):
-                self.id = net_id
-
         return [
             c for c in os.listdir(confs_dir)
-            if (uuidutils.is_uuid_like(c) and
-                cls(conf, FakeNetwork(c), root_helper).active)
+            if uuidutils.is_uuid_like(c)
         ]
 
     def spawn_process(self):
         """Spawns a Dnsmasq process for the network."""
         env = {
-            self.QUANTUM_NETWORK_ID_KEY: self.network.id
+            self.NEUTRON_NETWORK_ID_KEY: self.network.id,
         }
 
         cmd = [
@@ -252,14 +317,12 @@
             '--except-interface=lo0',
             '--pid-file=%s' % self.get_conf_file_name(
                 'pid', ensure_conf_dir=True),
-            # TODO(gmoodalb): calculate value from cidr (defaults to 150)
-            # '--dhcp-lease-max=%s' % ?,
             '--dhcp-hostsfile=%s' % self._output_hosts_file(),
             '--dhcp-optsfile=%s' % self._output_opts_file(),
-            # '--dhcp-script=%s' % self._lease_relay_script_path(),
             '--leasefile-ro',
         ]
 
+        possible_leases = 0
         for i, subnet in enumerate(self.network.subnets):
             # if a subnet is specified to have dhcp disabled
             if not subnet.enable_dhcp:
@@ -269,7 +332,7 @@
             else:
                 # TODO(gmoodalb): how do we indicate other options
                 # ra-only, slaac, ra-nameservers, and ra-stateless.
-                # We need to also set the DUID for DHCPv6 server to use
+                # We need to also set the DUID for the DHCPv6 server to use
                 macaddr_cmd = ['/usr/sbin/dladm', 'show-linkprop',
                                '-co', 'value', '-p', 'mac-address',
                                self.interface_name]
@@ -279,10 +342,24 @@
                 enterprise_id = '111'
                 cmd.append('--dhcp-duid=%s,%s' % (enterprise_id, uid))
                 mode = 'static'
-            cmd.append('--dhcp-range=set:%s,%s,%s,%ss' %
-                       (self._TAG_PREFIX % i,
-                        netaddr.IPNetwork(subnet.cidr).network,
-                        mode, self.conf.dhcp_lease_time))
+            if self.version >= self.MINIMUM_VERSION:
+                set_tag = 'set:'
+            else:
+                set_tag = ''
+
+            cidr = netaddr.IPNetwork(subnet.cidr)
+
+            cmd.append('--dhcp-range=%s%s,%s,%s,%ss' %
+                       (set_tag, self._TAG_PREFIX % i,
+                        cidr.network,
+                        mode,
+                        self.conf.dhcp_lease_duration))
+            possible_leases += cidr.size
+
+        # Cap the limit because creating lots of subnets can inflate
+        # this possible lease cap.
+        cmd.append('--dhcp-lease-max=%d' %
+                   min(possible_leases, self.conf.dnsmasq_lease_max))
 
         cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
         if self.conf.dnsmasq_dns_server:
@@ -290,7 +367,13 @@
 
         if self.conf.dhcp_domain:
             cmd.append('--domain=%s' % self.conf.dhcp_domain)
-        utils.execute(cmd)
+
+        # TODO(gmoodalb): prepend the env vars before command
+        utils.execute(cmd, self.root_helper)
+
+    def release_lease(self, mac_address, removed_ips):
+        # TODO(gmoodalb): we need to support dnsmasq's dhcp_release
+        pass
 
     def reload_allocations(self):
         """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
@@ -304,13 +387,13 @@
 
         self._output_hosts_file()
         self._output_opts_file()
-
         if self.active:
             cmd = ['kill', '-HUP', self.pid]
-            utils.execute(cmd)
+            utils.execute(cmd, self.root_helper)
         else:
             LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
         LOG.debug(_('Reloading allocations for network: %s'), self.network.id)
+        self.device_manager.update(self.network)
 
     def _output_hosts_file(self):
         """Writes a dnsmasq compatible hosts file."""
@@ -321,8 +404,17 @@
             for alloc in port.fixed_ips:
                 name = 'host-%s.%s' % (r.sub('-', alloc.ip_address),
                                        self.conf.dhcp_domain)
-                buf.write('%s,%s,%s\n' %
-                          (port.mac_address, name, alloc.ip_address))
+                set_tag = ''
+                if getattr(port, 'extra_dhcp_opts', False):
+                    if self.version >= self.MINIMUM_VERSION:
+                        set_tag = 'set:'
+
+                    buf.write('%s,%s,%s,%s%s\n' %
+                              (port.mac_address, name, alloc.ip_address,
+                               set_tag, port.id))
+                else:
+                    buf.write('%s,%s,%s\n' %
+                              (port.mac_address, name, alloc.ip_address))
 
         name = self.get_conf_file_name('host')
         utils.replace_file(name, buf.getvalue())
@@ -331,6 +423,9 @@
     def _output_opts_file(self):
         """Write a dnsmasq compatible options file."""
 
+        if self.conf.enable_isolated_metadata:
+            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
+
         options = []
         for i, subnet in enumerate(self.network.subnets):
             if not subnet.enable_dhcp:
@@ -340,29 +435,224 @@
                     self._format_option(i, 'dns-server',
                                         ','.join(subnet.dns_nameservers)))
 
-            host_routes = ["%s,%s" % (hr.destination, hr.nexthop)
-                           for hr in subnet.host_routes]
+            gateway = subnet.gateway_ip
+            host_routes = []
+            for hr in subnet.host_routes:
+                if hr.destination == "0.0.0.0/0":
+                    gateway = hr.nexthop
+                else:
+                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
+
+            # Add host routes for isolated network segments
+            enable_metadata = (
+                self.conf.enable_isolated_metadata
+                and not subnet.gateway_ip
+                and subnet.ip_version == 4)
+
+            if enable_metadata:
+                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
+                host_routes.append(
+                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
+                )
 
             if host_routes:
                 options.append(
                     self._format_option(i, 'classless-static-route',
                                         ','.join(host_routes)))
+                options.append(
+                    self._format_option(i, WIN2k3_STATIC_DNS,
+                                        ','.join(host_routes)))
 
             if subnet.ip_version == 4:
-                if subnet.gateway_ip:
-                    options.append(self._format_option(i, 'router',
-                                                       subnet.gateway_ip))
+                if gateway:
+                    options.append(self._format_option(i, 'router', gateway))
                 else:
                     options.append(self._format_option(i, 'router'))
 
+        for port in self.network.ports:
+            if getattr(port, 'extra_dhcp_opts', False):
+                options.extend(
+                    self._format_option(port.id, opt.opt_name, opt.opt_value)
+                    for opt in port.extra_dhcp_opts)
+
         name = self.get_conf_file_name('opts')
         utils.replace_file(name, '\n'.join(options))
         return name
 
-    def _format_option(self, index, option_name, *args):
-        return ','.join(('tag:' + self._TAG_PREFIX % index,
-                         'option:%s' % option_name) + args)
+    def _make_subnet_interface_ip_map(self):
+        # TODO(gmoodalb): need to complete this when we support metadata
+        pass
+
+    def _format_option(self, tag, option, *args):
+        """Format DHCP option by option name or code."""
+        if self.version >= self.MINIMUM_VERSION:
+            set_tag = 'tag:'
+        else:
+            set_tag = ''
+
+        option = str(option)
+
+        if isinstance(tag, int):
+            tag = self._TAG_PREFIX % tag
+
+        if not option.isdigit():
+            option = 'option:%s' % option
+
+        return ','.join((set_tag + tag, '%s' % option) + args)
 
     @classmethod
     def lease_update(cls):
+        network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
+        dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)
+
+        action = sys.argv[1]
+        if action not in ('add', 'del', 'old'):
+            sys.exit()
+
+        mac_address = sys.argv[2]
+        ip_address = sys.argv[3]
+
+        if action == 'del':
+            lease_remaining = 0
+        else:
+            lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))
+
+        data = dict(network_id=network_id, mac_address=mac_address,
+                    ip_address=ip_address, lease_remaining=lease_remaining)
+
+        if os.path.exists(dhcp_relay_socket):
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            sock.connect(dhcp_relay_socket)
+            sock.send(jsonutils.dumps(data))
+            sock.close()
+
+
+class DeviceManager(object):
+
+    def __init__(self, conf, root_helper, plugin):
+        self.conf = conf
+        self.root_helper = root_helper
+        self.plugin = plugin
+        if not conf.interface_driver:
+            raise SystemExit(_('You must specify an interface driver'))
+        try:
+            self.driver = importutils.import_object(
+                conf.interface_driver, conf)
+        except Exception as e:
+            msg = (_("Error importing interface driver '%(driver)s': "
+                   "%(inner)s") % {'driver': conf.interface_driver,
+                                   'inner': e})
+            raise SystemExit(msg)
+
+    def get_interface_name(self, network, port):
+        """Return interface(device) name for use by the DHCP process."""
+        return self.driver.get_device_name(port)
+
+    def get_device_id(self, network):
+        """Return a unique DHCP device ID for this host on the network."""
+        # There could be more than one dhcp server per network, so create
+        # a device id that combines host and network ids
+
+        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
+        return 'dhcp%s-%s' % (host_uuid, network.id)
+
+    def setup_dhcp_port(self, network):
+        """Create/update DHCP port for the host if needed and return port."""
+
+        device_id = self.get_device_id(network)
+        subnets = {}
+        dhcp_enabled_subnet_ids = []
+        for subnet in network.subnets:
+            if subnet.enable_dhcp:
+                dhcp_enabled_subnet_ids.append(subnet.id)
+                subnets[subnet.id] = subnet
+
+        dhcp_port = None
+        for port in network.ports:
+            port_device_id = getattr(port, 'device_id', None)
+            if port_device_id == device_id:
+                port_fixed_ips = []
+                for fixed_ip in port.fixed_ips:
+                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
+                                           'ip_address': fixed_ip.ip_address})
+                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
+                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
+
+                # If there are dhcp_enabled_subnet_ids here that means that
+                # we need to add those to the port and call update.
+                if dhcp_enabled_subnet_ids:
+                    port_fixed_ips.extend(
+                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
+                    dhcp_port = self.plugin.update_dhcp_port(
+                        port.id, {'port': {'fixed_ips': port_fixed_ips}})
+                else:
+                    dhcp_port = port
+                # break since we found port that matches device_id
+                break
+
+        # DHCP port has not yet been created.
+        if dhcp_port is None:
+            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
+                        ' does not yet exist.'), {'device_id': device_id,
+                                                  'network_id': network.id})
+            port_dict = dict(
+                name='',
+                admin_state_up=True,
+                device_id=device_id,
+                network_id=network.id,
+                tenant_id=network.tenant_id,
+                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
+            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
+
+        # Convert subnet_id to subnet dict
+        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
+                          ip_address=fixed_ip.ip_address,
+                          subnet=subnets[fixed_ip.subnet_id])
+                     for fixed_ip in dhcp_port.fixed_ips]
+
+        ips = [DictModel(item) if isinstance(item, dict) else item
+               for item in fixed_ips]
+        dhcp_port.fixed_ips = ips
+
+        return dhcp_port
+
+    def setup(self, network, reuse_existing=False):
+        """Create and initialize a device for network's DHCP on this host."""
+        port = self.setup_dhcp_port(network)
+        interface_name = self.get_interface_name(network, port)
+
+        if net_lib.Datalink.datalink_exists(interface_name):
+            if not reuse_existing:
+                raise exceptions.PreexistingDeviceFailure(
+                    dev_name=interface_name)
+
+            LOG.debug(_('Reusing existing device: %s.'), interface_name)
+        else:
+            self.driver.plug(network.tenant_id, network.id,
+                             port.id,
+                             interface_name)
+        ip_cidrs = []
+        for fixed_ip in port.fixed_ips:
+            subnet = fixed_ip.subnet
+            net = netaddr.IPNetwork(subnet.cidr)
+            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
+            ip_cidrs.append(ip_cidr)
+
+        if (self.conf.enable_isolated_metadata and self.conf.use_namespaces):
+            ip_cidrs.append(METADATA_DEFAULT_CIDR)
+
+        self.driver.init_l3(interface_name, ip_cidrs)
+
+        return interface_name
+
+    def update(self, network):
+        """Update device settings for the network's DHCP on this host."""
         pass
+
+    def destroy(self, network, device_name):
+        """Destroy the device used for the network's DHCP on this host."""
+
+        self.driver.fini_l3(device_name)
+        self.driver.unplug(device_name)
+        self.plugin.release_dhcp_port(network.id,
+                                      self.get_device_id(network))
--- a/components/openstack/neutron/files/agent/solaris/interface.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/interface.py	Wed Jun 11 17:13:12 2014 -0700
@@ -16,8 +16,8 @@
 
 from oslo.config import cfg
 
-from quantum.agent.linux import utils
-from quantum.agent.solaris import net_lib
+from neutron.agent.linux import utils
+from neutron.agent.solaris import net_lib
 
 
 OPTS = [
--- a/components/openstack/neutron/files/agent/solaris/ipfilters_manager.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/ipfilters_manager.py	Wed Jun 11 17:13:12 2014 -0700
@@ -19,10 +19,10 @@
 
 """Implements ipfilter and ipnat rules using Solaris utilities."""
 
-from quantum.agent.solaris import net_lib
+from neutron.agent.solaris import net_lib
 
 
-class IpfiltersManager(object):
+class IPfiltersManager(object):
     """Wrapper for Solaris IPF commands -- ipf(1m), ipnat(1m),
     and ippool(1m)."""
 
@@ -31,14 +31,14 @@
         self.ipv6 = {'filter': [], 'nat': []}
 
     def add_ippool(self, number, ip_cidrs):
-        ippool = net_lib.IppoolCommand(number)
+        ippool = net_lib.IPpoolCommand(number)
         if ip_cidrs:
             ippool.add_pool_nodes(ip_cidrs)
         else:
             ippool.add_pool()
 
     def remove_ippool(self, number, ip_cidrs):
-        ippool = net_lib.IppoolCommand(number)
+        ippool = net_lib.IPpoolCommand(number)
         if ip_cidrs:
             ippool.remove_pool_nodes(ip_cidrs)
         else:
@@ -47,7 +47,7 @@
     def add_nat_rules(self, rules, version='4'):
         # Solaris doesn't support IPv6 NAT rules
         assert version == '4'
-        ipnat = net_lib.IpnatCommand()
+        ipnat = net_lib.IPnatCommand()
         ipnat.add_rules(rules)
         # we successfully added the nat rules, update the local copy
         for rule in rules:
@@ -56,14 +56,14 @@
     def remove_nat_rules(self, rules, version='4'):
         # Solaris doesn't support IPv6 NAT rules
         assert version == '4'
-        ipnat = net_lib.IpnatCommand()
+        ipnat = net_lib.IPnatCommand()
         ipnat.remove_rules(rules)
         # we successfully removed the nat rules, update the local copy
         for rule in rules:
             self.ipv4['nat'].remove(rule)
 
     def add_ipf_rules(self, rules, version='4'):
-        ipf = net_lib.IpfilterCommand()
+        ipf = net_lib.IPfilterCommand()
         ipf.add_rules(rules, version)
         version_rules = (self.ipv4['filter'] if version == '4' else
                          self.ipv6['filter'])
@@ -71,7 +71,7 @@
             version_rules.append(rule)
 
     def remove_ipf_rules(self, rules, version='4'):
-        ipf = net_lib.IpfilterCommand()
+        ipf = net_lib.IPfilterCommand()
         ipf.remove_rules(rules, version)
         version_rules = (self.ipv4['filter'] if version == '4' else
                          self.ipv6['filter'])
--- a/components/openstack/neutron/files/agent/solaris/net_lib.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/agent/solaris/net_lib.py	Wed Jun 11 17:13:12 2014 -0700
@@ -19,7 +19,7 @@
 
 import netaddr
 
-from quantum.agent.linux import utils
+from neutron.agent.linux import utils
 
 
 class CommandBase(object):
@@ -60,18 +60,19 @@
         return ipaddr in stdout
 
     def ipaddr_list(self, filters=None):
-        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'type,addr,',
+        cmd = ['/usr/sbin/ipadm', 'show-addr', '-po', 'type,addr',
                self._ifname]
         stdout = self.execute(cmd)
         atype_addrs = stdout.strip().split('\n')
         result = {}
         for atype_addr in atype_addrs:
-            atype, addr = atype_addr.split(':')
+            atype, addr = atype_addr.split(':', 1)
             val = result.get(atype)
             if val is None:
                 result[atype] = []
                 val = result.get(atype)
-            val.append(addr)
+            # in the case of IPv6 addresses remove any escape '\' character
+            val.append(addr.replace("\\", ""))
         return result
 
     def create_address(self, ipaddr, addrobjname=None, temp=True):
@@ -167,12 +168,22 @@
 
         return self.execute_with_pfexec(cmd)
 
-    def create_vnic(self, lower_link, mac_address=None, temp=True):
+    def create_vnic(self, lower_link, mac_address=None, vid=None, temp=True):
         if self.datalink_exists(self._dlname):
             return
 
+        if vid:
+            # If the default_tag of lower_link is same as vid, then there
+            # is no need to set vid
+            cmd = ['/usr/sbin/dladm', 'show-linkprop', '-co', 'value',
+                   '-p', 'default_tag', lower_link]
+            stdout = utils.execute(cmd)
+            if stdout.splitlines()[0].strip() == vid:
+                vid = '0'
+        else:
+            vid = '0'
         cmd = ['/usr/sbin/dladm', 'create-vnic', '-l', lower_link,
-               '-m', mac_address, self._dlname]
+               '-m', mac_address, '-v', vid, self._dlname]
         if temp:
             cmd.append('-t')
 
@@ -186,7 +197,7 @@
         return self.execute_with_pfexec(cmd)
 
 
-class IppoolCommand(CommandBase):
+class IPpoolCommand(CommandBase):
     '''Wrapper around Solaris ippool(1m) command'''
 
     def __init__(self, pool_name, role='ipf', pool_type='tree'):
@@ -248,7 +259,7 @@
         self.execute_with_pfexec(cmd)
 
 
-class IpfilterCommand(CommandBase):
+class IPfilterCommand(CommandBase):
     '''Wrapper around Solaris ipf(1m) command'''
 
     def split_rules(self, rules):
@@ -282,7 +293,7 @@
         return self.execute_with_pfexec(cmd, process_input=process_input)
 
 
-class IpnatCommand(CommandBase):
+class IPnatCommand(CommandBase):
     '''Wrapper around Solaris ipnat(1m) command'''
 
     def add_rules(self, rules):
--- a/components/openstack/neutron/files/dhcp_agent.ini	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/dhcp_agent.ini	Wed Jun 11 17:13:12 2014 -0700
@@ -1,21 +1,20 @@
 [DEFAULT]
 # Show debugging output in log (sets DEBUG log level output)
-# debug = true
+# debug = False
 
-# The DHCP agent will resync its state with Quantum to recover from any
+# The DHCP agent will resync its state with Neutron to recover from any
 # transient notification or rpc errors. The interval is number of
 # seconds between attempts.
 # resync_interval = 5
 
-# The DHCP requires that an inteface driver be set.  Choose the one that best
-# matches you plugin.
-
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
 # Solaris Elastic Virtual Switch (EVS)
-interface_driver = quantum.agent.solaris.interface.SolarisVNICDriver
+interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
 
 # The agent can use other DHCP drivers.  Dnsmasq is the simplest and requires
 # no additional setup of the DHCP server.
-dhcp_driver = quantum.agent.solaris.dhcp.Dnsmasq
+dhcp_driver = neutron.agent.solaris.dhcp.Dnsmasq
 
 # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
 # iproute2 package that supports namespaces).
@@ -23,22 +22,47 @@
 
 # The DHCP server can assist with providing metadata support on isolated
 # networks. Setting this value to True will cause the DHCP server to append
-# specific host routes to the DHCP request.  The metadata service will only
-# be activated when the subnet gateway_ip is None.  The guest instance must
-# be configured to request host routes via DHCP (Option 121).
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
 # enable_isolated_metadata = False
 
 # Allows for serving metadata requests coming from a dedicated metadata
 # access network whose cidr is 169.254.169.254/16 (or larger prefix), and
-# is connected to a Quantum router from which the VMs send metadata
+# is connected to a Neutron router from which the VMs send metadata
 # request. In this case DHCP Option 121 will not be injected in VMs, as
 # they will be able to reach 169.254.169.254 through a router.
 # This option requires enable_isolated_metadata = True
 # enable_metadata_network = False
 
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+# dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+
+# Use another DNS server before any in /etc/resolv.conf.
+# dnsmasq_dns_server =
+
+# Limit number of leases to prevent a denial-of-service.
+# dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
 # DeviceManager provides methods to setup/destroy dhcp ports on the
 # host running DHCP agent
-devicemanager = quantum.agent.solaris.device.DeviceManager
+devicemanager = neutron.agent.solaris.device.DeviceManager
 
 # An URI that specifies an EVS controller. It is of the form
 # ssh://user@hostname, where user is the username to use to connect
--- a/components/openstack/neutron/files/evs/db/api.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/evs/db/api.py	Wed Jun 11 17:13:12 2014 -0700
@@ -23,11 +23,11 @@
 
 from oslo.config import cfg
 
-from quantum.context import ContextBase
-from quantum.db import model_base
+from neutron.context import ContextBase
+from neutron.db import model_base
 
 
-EVS_DB_BASE = declarative.declarative_base(cls=model_base.QuantumBaseV2)
+EVS_DB_BASE = declarative.declarative_base(cls=model_base.NeutronBaseV2)
 EVS_DB_ENGINE = None
 EVS_DB_MAKER = None
 
@@ -46,9 +46,7 @@
     global EVS_DB_ENGINE
     if not EVS_DB_ENGINE:
         sql_connection = cfg.CONF.DATABASE.sql_connection
-        if not sql_connection:
-            sql_connection = 'sqlite:////var/lib/quantum/quantum.sqlite'
-        EVS_DB_ENGINE = sa.create_engine(sql_connection, echo=True)
+        EVS_DB_ENGINE = sa.create_engine(sql_connection, echo=False)
         EVS_DB_BASE.metadata.create_all(EVS_DB_ENGINE)
 
 
@@ -63,7 +61,7 @@
 
 
 def get_evs_context(context):
-    """Overrides the Quantum DB session with EVS DB session"""
+    """Overrides the Neutron DB session with EVS DB session"""
 
     evs_context = EVSContext.from_dict(context.to_dict())
     evs_context.session = get_session()
--- a/components/openstack/neutron/files/evs/db/l3nat.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/evs/db/l3nat.py	Wed Jun 11 17:13:12 2014 -0700
@@ -21,19 +21,20 @@
 #
 
 """
-Based off generic l3_agent (quantum/agent/l3_agent) code
+Based off generic l3_agent (neutron/agent/l3_agent) code
 """
 
 import sqlalchemy as sa
 
-from quantum.api.v2 import attributes
-from quantum.common import constants as l3_constants
-from quantum.common import exceptions as q_exc
-from quantum.db import l3_db
-from quantum.extensions import l3
-from quantum.openstack.common import log as logging
-from quantum.openstack.common import uuidutils
-from quantum.plugins.evs.db import api as evs_db
+from neutron.api.v2 import attributes
+from neutron.common import constants as l3_constants
+from neutron.common import exceptions as q_exc
+from neutron.db import l3_db
+from neutron.extensions import l3
+from neutron.extensions import external_net
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import uuidutils
+from neutron.plugins.evs.db import api as evs_db
 
 
 LOG = logging.getLogger(__name__)
@@ -44,7 +45,7 @@
 
 
 class Router(evs_db.EVS_DB_BASE):
-    """Represents a v2 quantum router."""
+    """Represents a v2 neutron router."""
 
     id = sa.Column(sa.String(36), primary_key=True,
                    default=uuidutils.generate_uuid)
@@ -57,9 +58,11 @@
 
 
 class FloatingIP(evs_db.EVS_DB_BASE):
-    """Represents a floating IP, which may or many not be
-       allocated to a tenant, and may or may not be associated with
-       an internal port/ip address/router."""
+    """Represents a floating IP address.
+
+    This IP address may or may not be allocated to a tenant, and may or
+    may not be associated with an internal port/ip address/router.
+    """
 
     id = sa.Column(sa.String(36), primary_key=True,
                    default=uuidutils.generate_uuid)
@@ -78,16 +81,21 @@
     Router = Router
     FloatingIP = FloatingIP
 
-    def _make_router_dict(self, router, fields=None):
+    def _make_router_dict(self, router, fields=None,
+                          process_extensions=True):
         res = {'id': router['id'],
                'name': router['name'],
                'tenant_id': router['tenant_id'],
                'admin_state_up': router['admin_state_up'],
                'status': router['status'],
-               'external_gateway_info': None}
+               'external_gateway_info': None,
+               'gw_port_id': router['gw_port_id']}
         if router['gw_port_id']:
             nw_id = router['gw_port_network_id']
             res['external_gateway_info'] = {'network_id': nw_id}
+        if process_extensions:
+            self._apply_dict_extend_functions(
+                l3.ROUTERS, res, router)
         return self._fields(res, fields)
 
     def create_router(self, context, router):
@@ -98,13 +106,14 @@
         return super(EVS_L3_NAT_db_mixin, self).\
             update_router(evs_db.get_evs_context(context), id, router)
 
-    def _update_router_gw_info(self, context, router_id, info):
+    def _update_router_gw_info(self, context, router_id, info, router=None):
         """This method overrides the base class method and it's contents
         are exactly same as the base class method except that the Router
         DB columns are different for EVS and OVS"""
 
-        router = self._get_router(context, router_id)
+        router = router or self._get_router(context, router_id)
         gw_port_id = router['gw_port_id']
+        # network_id attribute is required by API, so it must be present
         gw_port_network_id = router['gw_port_network_id']
 
         network_id = info.get('network_id', None) if info else None
@@ -139,7 +148,7 @@
                                                   subnet['cidr'])
 
             # Port has no 'tenant-id', as it is hidden from user
-            gw_port = self.create_port(context.elevated(), {
+            gw_port = self._core_plugin.create_port(context.elevated(), {
                 'port':
                 {'tenant_id': '',  # intentionally not set
                  'network_id': network_id,
@@ -150,9 +159,10 @@
                  'admin_state_up': True,
                  'name': ''}})
 
-            if not len(gw_port['fixed_ips']):
-                self.delete_port(context.elevated(), gw_port['id'],
-                                 l3_port_check=False)
+            if not gw_port['fixed_ips']:
+                self._core_plugin.delete_port(context.elevated(),
+                                              gw_port['id'],
+                                              l3_port_check=False)
                 msg = (_('No IPs available for external network %s') %
                        network_id)
                 raise q_exc.BadRequest(resource='router', msg=msg)
@@ -195,7 +205,7 @@
                                  router_id, interface_info)
 
     def remove_router_interface(self, context, router_id, interface_info):
-        super(EVS_L3_NAT_db_mixin, self).\
+        return super(EVS_L3_NAT_db_mixin, self).\
             remove_router_interface(evs_db.get_evs_context(context),
                                     router_id, interface_info)
 
@@ -254,51 +264,14 @@
         super(EVS_L3_NAT_db_mixin, self).\
             disassociate_floatingips(evs_db.get_evs_context(context), port_id)
 
-    def _network_is_external(self, context, net_id):
-        try:
-            evs = self.get_network(context, net_id)
-            return evs[l3.EXTERNAL]
-        except:
-            return False
-
     def get_sync_data(self, context, router_ids=None, active=None):
         return super(EVS_L3_NAT_db_mixin, self).\
             get_sync_data(evs_db.get_evs_context(context), router_ids, active)
 
-    def get_external_network_id(self, context):
-        return super(EVS_L3_NAT_db_mixin, self).\
-            get_external_network_id(evs_db.get_evs_context(context))
-
-    def _get_tenant_id_for_create(self, context, resource):
-        if context.is_admin and 'tenant_id' in resource:
-            tenant_id = resource['tenant_id']
-        elif ('tenant_id' in resource and
-              resource['tenant_id'] != context.tenant_id):
-            reason = _('Cannot create resource for another tenant')
-            raise q_exc.AdminRequired(reason=reason)
-        else:
-            tenant_id = context.tenant_id
-        return tenant_id
-
     def _get_by_id(self, context, model, id):
         return context.session.query(model).\
             filter(model.id == id).one()
 
-    def _get_network(self, context, network_id):
-        return self.get_network(context, network_id)
-
-    def _get_subnet(self, context, subnet_id):
-        return self.get_subnet(context, subnet_id)
-
-    def _get_port(self, context, port_id):
-        return self.get_port(context, port_id)
-
-    def _delete_port(self, context, port_id):
-        return self.delete_port(context, port_id)
-
-    def _get_subnets_by_network(self, context, network_id):
-        return self.get_subnets(context, filters={'network_id': network_id})
-
     def allow_l3_port_deletion(self, context, port_id):
         """ If an L3 agent is using this port, then we need to send
         a notification to L3 agent to release the port before we can
--- a/components/openstack/neutron/files/evs/db/quotas_db.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/evs/db/quotas_db.py	Wed Jun 11 17:13:12 2014 -0700
@@ -22,9 +22,9 @@
 
 import sqlalchemy as sa
 
-from quantum.db import quota_db
-from quantum.openstack.common import uuidutils
-from quantum.plugins.evs.db import api as evs_db
+from neutron.db import quota_db
+from neutron.openstack.common import uuidutils
+from neutron.plugins.evs.db import api as evs_db
 
 
 class Quota(evs_db.EVS_DB_BASE):
--- a/components/openstack/neutron/files/evs/plugin.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/evs/plugin.py	Wed Jun 11 17:13:12 2014 -0700
@@ -23,22 +23,26 @@
 
 from oslo.config import cfg
 
-from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
-from quantum.api.rpc.agentnotifiers import l3_rpc_agent_api
-from quantum.api.v2 import attributes
-from quantum.common import constants as l3_constants
-from quantum.common import exceptions
-from quantum.common import rpc as q_rpc
-from quantum.common import topics
-from quantum.db import dhcp_rpc_base
-from quantum.db import l3_rpc_base
-from quantum.extensions import l3
-from quantum.extensions import providernet
-from quantum.openstack.common import log as logging
-from quantum.openstack.common import rpc
-from quantum.plugins.evs.db import api as evs_db
-from quantum.plugins.evs.db import l3nat as evs_l3nat
-from quantum import quantum_plugin_base_v2
+from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
+from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
+from neutron.api.v2 import attributes
+from neutron.common import constants as l3_constants
+from neutron.common import exceptions
+from neutron.common import rpc as q_rpc
+from neutron.common import topics
+from neutron.db import db_base_plugin_v2
+from neutron.db import dhcp_rpc_base
+from neutron.db import external_net_db
+from neutron.db import l3_rpc_base
+from neutron.extensions import external_net
+from neutron.extensions import providernet
+from neutron.openstack.common import lockutils
+from neutron.openstack.common import log as logging
+from neutron.openstack.common import rpc
+from neutron.plugins.common import constants as svc_constants
+from neutron.plugins.evs.db import api as evs_db
+from neutron.plugins.evs.db import l3nat as evs_l3nat
+from neutron.plugins.evs.db import quotas_db
 
 
 LOG = logging.getLogger(__name__)
@@ -48,7 +52,14 @@
                help=_("An URI that specifies an EVS controller"))
 ]
 
+evs_database_opts = [
+    cfg.StrOpt('sql_connection',
+               default='sqlite:////var/lib/neutron/neutron.sqlite',
+               help=_("An URI that specifies SQL connection")),
+]
+
 cfg.CONF.register_opts(evs_controller_opts, "EVS")
+cfg.CONF.register_opts(evs_database_opts, "DATABASE")
 
 # Maps OpenStack network resource attributes to EVS properties
 NETWORK_EVS_ATTRIBUTE_MAP = {
@@ -56,7 +67,7 @@
     'network_id': 'evs',
     'id': 'evs',
     'name': 'evs',
-    l3.EXTERNAL: 'OpenStack:' + l3.EXTERNAL,
+    external_net.EXTERNAL: 'OpenStack:' + external_net.EXTERNAL,
 }
 
 # Maps OpenStack subnet resource attributes to EVS' IPnet properties
@@ -81,14 +92,14 @@
 }
 
 
-class EVSControllerError(exceptions.QuantumException):
+class EVSControllerError(exceptions.NeutronException):
     message = _("EVS controller: %(errmsg)s")
 
     def __init__(self, evs_errmsg):
         super(EVSControllerError, self).__init__(errmsg=evs_errmsg)
 
 
-class EVSOpNotSupported(exceptions.QuantumException):
+class EVSOpNotSupported(exceptions.NeutronException):
     message = _("Operation not supported by EVS plugin: %(opname)s")
 
     def __init__(self, evs_errmsg):
@@ -97,7 +108,7 @@
 
 class EVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                       l3_rpc_base.L3RpcCallbackMixin):
-    RPC_API_VERSION = '1.0'
+    RPC_API_VERSION = '1.1'
 
     def create_rpc_dispatcher(self):
         '''Get the rpc dispatcher for this manager.
@@ -108,11 +119,12 @@
         return q_rpc.PluginRpcDispatcher([self])
 
 
-class EVSQuantumPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2,
+class EVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
+                         external_net_db.External_net_db_mixin,
                          evs_l3nat.EVS_L3_NAT_db_mixin):
     """Implements v2 Neutron Plug-in API specification.
 
-    All the quantum API calls to create/delete/retrieve Network/Subnet/Port
+    All the neutron API calls to create/delete/retrieve Network/Subnet/Port
     are forwarded to EVS controller through Solaris RAD. The RAD connection
     to EVS Controller is over SSH. In order that this plugin can communicate
     with EVS Controller non-interactively and securely, one should setup SSH
@@ -171,7 +183,14 @@
     |---------------------+------------------+------------------------------|
     """
 
-    _supported_extension_aliases = ["provider", "router", "quotas"]
+    # These attributes specify whether or not the plugin supports
+    # bulk/pagination/sorting operations.
+    __native_bulk_support = False
+    __native_pagination_support = False
+    __native_sorting_support = False
+
+    _supported_extension_aliases = ["provider", "external-net", "router",
+                                    "quotas"]
 
     def __init__(self):
         # Since EVS Framework does not support router and floatingip
@@ -196,11 +215,13 @@
 
     def _setup_rpc(self):
         # RPC support
-        self.topic = topics.PLUGIN
+        self.service_topics = {svc_constants.CORE: topics.PLUGIN,
+                               svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
         self.conn = rpc.create_connection(new=True)
         self.callbacks = EVSRpcCallbacks()
         self.dispatcher = self.callbacks.create_rpc_dispatcher()
-        self.conn.create_consumer(self.topic, self.dispatcher, fanout=False)
+        for svc_topic in self.service_topics.values():
+            self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
         # Consume from all consumers in a thread
         self.conn.consume_in_thread()
         self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
@@ -209,12 +230,6 @@
     def supported_extension_aliases(self):
         return self._supported_extension_aliases
 
-    def _fields(self, resource, fields):
-        if fields:
-            return dict(((key, item) for key, item in resource.iteritems()
-                         if key in fields))
-        return resource
-
     def _convert_evs_to_network(self, evs):
         """Converts an EVS structure into Neutron Network structure."""
 
@@ -224,14 +239,14 @@
         networkdict['subnets'] = ([ipnet.uuid for ipnet in evs.ipnets]
                                   if evs.ipnets else [])
         networkdict['tenant_id'] = evs.tenantname
-        networkdict[l3.EXTERNAL] = False
+        networkdict[external_net.EXTERNAL] = False
         for prop in evs.props:
             if prop.name == 'l2-type':
                 networkdict[providernet.NETWORK_TYPE] = prop.value
             if prop.name == 'vlanid' or prop.name == 'vni':
                 networkdict[providernet.SEGMENTATION_ID] = int(prop.value)
-            if prop.name == NETWORK_EVS_ATTRIBUTE_MAP[l3.EXTERNAL]:
-                networkdict[l3.EXTERNAL] = \
+            if prop.name == NETWORK_EVS_ATTRIBUTE_MAP[external_net.EXTERNAL]:
+                networkdict[external_net.EXTERNAL] = \
                     (True if prop.value == 'True' else False)
         # fixed values as EVS framework doesn't support this
         networkdict['admin_state_up'] = True
@@ -279,16 +294,16 @@
         gw_ip = netaddr.IPAddress(subnetdict['gateway_ip'])
         pools = []
         if gw_ip == start_ip:
-            pools.append({'start' : str(netaddr.IPAddress(start_ip + 1)),
+            pools.append({'start': str(netaddr.IPAddress(start_ip + 1)),
                           'end': str(netaddr.IPAddress(end_ip))})
         elif gw_ip == end_ip:
-            pools.append({'start' : str(netaddr.IPAddress(start_ip)),
+            pools.append({'start': str(netaddr.IPAddress(start_ip)),
                           'end': str(netaddr.IPAddress(end_ip - 1))})
         else:
             pools.append({'start': str(netaddr.IPAddress(start_ip)),
-                          'end' : str(netaddr.IPAddress(gw_ip - 1))})
+                          'end': str(netaddr.IPAddress(gw_ip - 1))})
             pools.append({'start': str(netaddr.IPAddress(gw_ip + 1)),
-                          'end' : str(netaddr.IPAddress(end_ip))})
+                          'end': str(netaddr.IPAddress(end_ip))})
 
         subnetdict['allocation_pools'] = pools
         subnetdict['shared'] = False
@@ -303,7 +318,7 @@
         portdict['id'] = vport.uuid
         portdict['name'] = vport.name
         portdict['network_id'] = vport.evsuuid
-        #TODO(gmoodalb): set to host/zonename/vnicname?
+        # TODO(gmoodalb): set to host/zonename/vnicname?
         portdict['device_id'] = ''
         portdict['device_owner'] = ''
         for prop in vport.props:
@@ -353,6 +368,17 @@
         for rsrc in rsrc_to_remove:
             rsrclist.remove(rsrc)
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_addIPnet(self, tenantname, evsname, ipnetname, propstr):
+        try:
+            pat = radcli.ADRGlobPattern(
+                {'name': evsname, 'tenant': tenantname})
+            evs = self._rc.get_object(evsbind.EVS(), pat)
+            ipnet = evs.addIPnet(propstr, ipnetname)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return ipnet
+
     def create_subnet(self, context, subnet):
         """Creates a subnet(IPnet) for a given network(EVS).
 
@@ -362,6 +388,10 @@
          connect to the EVS, through a VPort, will get an IP address from the
          IPnet associated with the EVS.
         """
+        if (subnet['subnet']['allocation_pools'] is not
+                attributes.ATTR_NOT_SPECIFIED):
+            # user specified --allocation-pool and we don't support it
+            raise EVSOpNotSupported(_("cannot use --allocation-pool"))
         ipnetname = subnet['subnet']['name']
         if not ipnetname:
             ipnetname = None
@@ -371,8 +401,9 @@
         # obtain the optional default router
         defrouter = subnet['subnet']['gateway_ip']
         if defrouter is None:
-            # user specified --no-gateway, we don't support it
+            # user specified --no-gateway and we don't support it
             raise EVSOpNotSupported(_("cannot use --no-gateway"))
+
         if defrouter is not attributes.ATTR_NOT_SPECIFIED:
             proplist.append('defrouter=%s' % (defrouter))
 
@@ -381,7 +412,7 @@
         if attributes.is_attr_set(nameservers):
             proplist.append('%s=%s' %
                             (SUBNET_IPNET_ATTRIBUTE_MAP['dns_nameservers'],
-                            ','.join(nameservers)))
+                             ','.join(nameservers)))
 
         # obtain the host routes
         hostroutes = subnet['subnet']['host_routes']
@@ -403,16 +434,10 @@
 
         # TODO(gmoodalb): extract the tenant id if an admin is creating for
         # someone else
-        try:
-            evsuuid = subnet['subnet']['network_id']
-            tenantname = subnet['subnet']['tenant_id']
-            pat = radcli.ADRGlobPattern(
-                {'name': evsuuid, 'tenant': tenantname})
-            evs = self._rc.get_object(evsbind.EVS(), pat)
-            ipnet = evs.addIPnet(propstr, ipnetname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        evsname = subnet['subnet']['network_id']
+        tenantname = subnet['subnet']['tenant_id']
+        ipnet = self.evs_controller_addIPnet(tenantname, evsname, ipnetname,
+                                             propstr)
         retval = self._convert_ipnet_to_subnet(ipnet)
 
         # notify dhcp agent of subnet creation
@@ -420,15 +445,10 @@
                                         'subnet.create.end')
         return retval
 
-    def update_subnet(self, context, id, subnet):
-        evs_rpccall_sync = subnet.pop('evs_rpccall_sync', False)
-        if not (set(subnet['subnet'].keys()) == set(('enable_dhcp',))):
-                raise EVSOpNotSupported(_("only enable_dhcp can be updated"))
-
-        propstr = "%s=%s" % (SUBNET_IPNET_ATTRIBUTE_MAP['enable_dhcp'],
-                             subnet['subnet']['enable_dhcp'])
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_updateIPnet(self, ipnetuuid, propstr):
         try:
-            pat = radcli.ADRGlobPattern({'uuid': id})
+            pat = radcli.ADRGlobPattern({'uuid': ipnetuuid})
             ipnetlist = self._rc.list_objects(evsbind.IPnet(), pat)
             assert len(ipnetlist) == 1
             ipnet = self._rc.get_object(ipnetlist[0])
@@ -436,6 +456,15 @@
         except radcli.ObjectError as oe:
             raise EVSControllerError(oe.get_payload().errmsg)
 
+    def update_subnet(self, context, id, subnet):
+        evs_rpccall_sync = subnet.pop('evs_rpccall_sync', False)
+        if not (set(subnet['subnet'].keys()) == set(('enable_dhcp',))):
+                raise EVSOpNotSupported(_("only subnets with enable_dhcp "
+                                          "set can be updated"))
+
+        propstr = "%s=%s" % (SUBNET_IPNET_ATTRIBUTE_MAP['enable_dhcp'],
+                             subnet['subnet']['enable_dhcp'])
+        self.evs_controller_updateIPnet(id, propstr)
         retval = self.get_subnet(context, id)
 
         # notify dhcp agent of subnet update
@@ -450,16 +479,28 @@
                                           topic=topics.DHCP_AGENT)
         return retval
 
-    def get_subnet(self, context, id, fields=None):
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getIPnet(self, ipnetuuid):
         try:
-            ipnetlist = self._evsc.getIPnetInfo('ipnet=%s' % (id))
+            ipnetlist = self._evsc.getIPnetInfo('ipnet=%s' % (ipnetuuid))
         except radcli.ObjectError as oe:
             raise EVSControllerError(oe.get_payload().errmsg)
+        return (ipnetlist[0] if ipnetlist else None)
 
-        if ipnetlist:
-            subnetdict = self._convert_ipnet_to_subnet(ipnetlist[0])
-            return self._fields(subnetdict, fields)
-        return {}
+    def get_subnet(self, context, id, fields=None):
+        ipnet = self.evs_controller_getIPnet(id)
+        if not ipnet:
+            return {}
+        subnetdict = self._convert_ipnet_to_subnet(ipnet)
+        return self._fields(subnetdict, fields)
+
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getIPnetList(self, filterstr):
+        try:
+            ipnetlist = self._evsc.getIPnetInfo(filterstr)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return ipnetlist
 
     def get_subnets(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
@@ -483,6 +524,8 @@
                 key = SUBNET_IPNET_ATTRIBUTE_MAP.get(key, key)
                 if isinstance(value, list):
                     value = ",".join([str(val) for val in value])
+                    if not value:
+                        continue
                 filterlist.append("%s=%s" % (key, value))
 
             if filterlist:
@@ -490,11 +533,8 @@
 
         LOG.debug(_("calling ListIPnet from get_subnets() filterstr: '%s'")
                   % (filterstr))
-        try:
-            ipnetlist = self._evsc.getIPnetInfo(filterstr)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
 
+        ipnetlist = self.evs_controller_getIPnetList(filterstr)
         self._apply_rsrc_props_filter(ipnetlist, ipnet_props)
 
         retme = []
@@ -505,7 +545,7 @@
         return retme
 
     def get_subnets_count(self, context, filters=None):
-        return len(self.get_ipnets(context, filters))
+        return len(self.get_subnets(context, filters))
 
     def _release_subnet_dhcp_port(self, context, subnet, delete_network):
         """Release any dhcp port associated with the subnet"""
@@ -526,29 +566,34 @@
             # the lone port is a dhcp port created by dhcp agent
             # it must be released before we can delete the subnet
             subnet_update = {'subnet': {'enable_dhcp': False},
-                                        'evs_rpccall_sync': True}
+                             'evs_rpccall_sync': True}
             self.update_subnet(context, subnet['id'], subnet_update)
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_removeIPnet(self, tenantname, evsname, ipnetuuid):
+        pat = radcli.ADRGlobPattern({'name': evsname, 'tenant': tenantname})
+        try:
+            evs = self._rc.get_object(evsbind.EVS(), pat)
+            evs.removeIPnet(ipnetuuid)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+
     def delete_subnet(self, context, id):
-        try:
-            subnet = self.get_subnet(context, id)
-            if not subnet:
-                return
-            pat = radcli.ADRGlobPattern({'name': subnet['network_id'],
-                                         'tenant': subnet['tenant_id']})
-            evs = self._rc.get_object(evsbind.EVS(), pat)
-            # If the subnet is dhcp_enabled, then the dhcp agent would have
-            # created a port connected to this subnet. We need to remove
-            # that port before we can proceed with subnet delete operation.
-            # Since, there is no subnet.delete.start event, we use an another
-            # approach of updating the subnet's enable_dhcp attribute to
-            # False that in turn sends a subnet.udpate notification. This
-            # results in DHCP agent releasing the port.
-            if subnet['enable_dhcp']:
+        subnet = self.get_subnet(context, id)
+        if not subnet:
+            return
+
+        # If the subnet is dhcp_enabled, then the dhcp agent would have
+        # created a port connected to this subnet. We need to remove
+        # that port before we can proceed with subnet delete operation.
+        # Since there is no subnet.delete.start event, we use another
+        # approach of updating the subnet's enable_dhcp attribute to
+        # False that in turn sends a subnet.update notification. This
+        # results in DHCP agent releasing the port.
+        if subnet['enable_dhcp']:
                 self._release_subnet_dhcp_port(context, subnet, False)
-            evs.removeIPnet(id)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
+        self.evs_controller_removeIPnet(subnet['tenant_id'],
+                                        subnet['network_id'], id)
 
         # notify dhcp agent
         payload = {
@@ -559,6 +604,14 @@
         }
         self.dhcp_agent_notifier.notify(context, payload, 'subnet.delete.end')
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_createEVS(self, tenantname, evsname, propstr):
+        try:
+            evs = self._evsc.createEVS(propstr, tenantname, evsname)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return evs
+
     def create_network(self, context, network):
         """Creates a network(EVS) for a given tenant.
 
@@ -596,35 +649,44 @@
                                            "provider:network_type '%s' not "
                                            "supported") % network_type)
 
-        router_external = network['network'][l3.EXTERNAL]
+        router_external = network['network'][external_net.EXTERNAL]
         if attributes.is_attr_set(router_external):
-            proplist.append("%s=%s" % (NETWORK_EVS_ATTRIBUTE_MAP[l3.EXTERNAL],
-                                       router_external))
+            proplist.append("%s=%s" %
+                            (NETWORK_EVS_ATTRIBUTE_MAP[external_net.EXTERNAL],
+                             router_external))
 
         propstr = None
         if proplist:
             propstr = ",".join(proplist)
 
-        try:
-            evs = self._evsc.createEVS(propstr, tenantname, evsname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        evs = self.evs_controller_createEVS(tenantname, evsname, propstr)
         return self._convert_evs_to_network(evs)
 
     def update_network(self, context, id, network):
         raise EVSOpNotSupported(_("net-update"))
 
-    def get_network(self, context, id, fields=None):
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getEVS(self, evsuuid):
         try:
-            evslist = self._evsc.getEVSInfo('evs=%s' % id)
+            evslist = self._evsc.getEVSInfo('evs=%s' % evsuuid)
         except radcli.ObjectError as oe:
             raise EVSControllerError(oe.getpayload().errmsg)
+        return (evslist[0] if evslist else None)
 
-        if evslist:
-            networkdict = self._convert_evs_to_network(evslist[0])
-            return self._fields(networkdict, fields)
-        return {}
+    def get_network(self, context, id, fields=None):
+        evs = self.evs_controller_getEVS(id)
+        if not evs:
+            return {}
+        networkdict = self._convert_evs_to_network(evs)
+        return self._fields(networkdict, fields)
+
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getEVSList(self, filterstr):
+        try:
+            evslist = self._evsc.getEVSInfo(filterstr)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return evslist
 
     def get_networks(self, context, filters=None, fields=None,
                      sorts=None, limit=None, marker=None, page_reverse=False):
@@ -649,6 +711,8 @@
                 key = NETWORK_EVS_ATTRIBUTE_MAP.get(key, key)
                 if isinstance(value, list):
                     value = ",".join([str(val) for val in value])
+                    if not value:
+                        continue
                 filterlist.append("%s=%s" % (key, value))
 
             if filterlist:
@@ -656,11 +720,7 @@
 
         LOG.debug(_("calling ListEVswitch from get_networks(): '%s'")
                   % (filterstr))
-        try:
-            evslist = self._evsc.getEVSInfo(filterstr)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        evslist = self.evs_controller_getEVSList(filterstr)
         self._apply_rsrc_props_filter(evslist, evs_props)
 
         retme = []
@@ -673,21 +733,46 @@
     def get_networks_count(self, context, filters=None):
         return len(self.get_networks(context, filters))
 
-    def delete_network(self, context, id):
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_deleteEVS(self, tenantname, evsuuid):
         try:
-            filters = dict(network_id=id)
-            subnets = self.get_subnets(context, filters=filters)
-            dhcp_subnets = [s for s in subnets if s['enable_dhcp']]
-            for subnet in dhcp_subnets:
-                self._release_subnet_dhcp_port(context, subnet, True)
-            self._evsc.deleteEVS(id, context.tenant_id)
+            self._evsc.deleteEVS(evsuuid, tenantname)
         except radcli.ObjectError as oe:
             raise EVSControllerError(oe.get_payload().errmsg)
 
+    def delete_network(self, context, id):
+        # Check if it is an external network and whether addresses in that
+        # network are being used for floating ips
+        evs = self.get_network(context, id)
+        if evs[external_net.EXTERNAL]:
+            filters = dict(evs=id)
+            portlist = self.get_ports(context, filters)
+            ports_with_deviceid = [port for port in portlist
+                                   if port['device_id'] != '']
+            if ports_with_deviceid:
+                raise exceptions.NetworkInUse(net_id=id)
+        filters = dict(network_id=id)
+        subnets = self.get_subnets(context, filters=filters)
+        dhcp_subnets = [s for s in subnets if s['enable_dhcp']]
+        for subnet in dhcp_subnets:
+            self._release_subnet_dhcp_port(context, subnet, True)
+        self.evs_controller_deleteEVS(context.tenant_id, id)
+
         # notify dhcp agent of network deletion
         self.dhcp_agent_notifier.notify(context, {'network': {'id': id}},
                                         'network.delete.end')
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_addVPort(self, tenantname, evsname, vportname, propstr):
+        try:
+            pat = radcli.ADRGlobPattern({'name': evsname,
+                                         'tenant': tenantname})
+            evs = self._rc.get_object(evsbind.EVS(), pat)
+            vport = evs.addVPort(propstr, vportname)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return vport
+
     def create_port(self, context, port):
         """Creates a port(VPort) for a given network(EVS).
 
@@ -725,25 +810,19 @@
         if attributes.is_attr_set(device_owner) and device_owner:
             proplist.append("%s=%s" %
                             (PORT_VPORT_ATTRIBUTE_MAP['device_owner'],
-                            device_owner))
+                             device_owner))
 
         propstr = None
         if proplist:
             propstr = ",".join(proplist)
 
-        try:
-            evsuuid = port['port']['network_id']
-            tenantname = port['port']['tenant_id']
-            # TODO(gmoodalb): -- pull it from the network_id!!
-            if not tenantname:
-                tenantname = context.tenant_id
-            pat = radcli.ADRGlobPattern({'name': evsuuid,
-                                         'tenant': tenantname})
-            evs = self._rc.get_object(evsbind.EVS(), pat)
-            vport = evs.addVPort(propstr, vportname)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        evsname = port['port']['network_id']
+        tenantname = port['port']['tenant_id']
+        # TODO(gmoodalb): -- pull it from the network_id!!
+        if not tenantname:
+            tenantname = context.tenant_id
+        vport = self.evs_controller_addVPort(tenantname, evsname, vportname,
+                                             propstr)
         retval = self._convert_vport_to_port(context, vport)
 
         # notify dhcp agent of port creation
@@ -751,6 +830,18 @@
                                         'port.create.end')
         return retval
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_updateVPort(self, vportuuid, proplist):
+        try:
+            pat = radcli.ADRGlobPattern({'uuid': vportuuid})
+            vportlist = self._rc.list_objects(evsbind.VPort(), pat)
+            assert len(vportlist) == 1
+            vport = self._rc.get_object(vportlist[0])
+            for prop in proplist:
+                vport.setProperty(prop)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+
     def update_port(self, context, id, port):
         # EVS does not allow updating certain attributes
         if not (set(port['port'].keys()) <=
@@ -773,21 +864,12 @@
                 device_owner = " "
             proplist.append("%s=%s" %
                             (PORT_VPORT_ATTRIBUTE_MAP['device_owner'],
-                            device_owner))
+                             device_owner))
 
         if not proplist:
             return dict()
 
-        try:
-            pat = radcli.ADRGlobPattern({'uuid': id})
-            vportlist = self._rc.list_objects(evsbind.VPort(), pat)
-            assert len(vportlist) == 1
-            vport = self._rc.get_object(vportlist[0])
-            for prop in proplist:
-                vport.setProperty(prop)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        self.evs_controller_updateVPort(id, proplist)
         retval = self.get_port(context, id)
 
         # notify dhcp agent of port update
@@ -795,16 +877,28 @@
                                         'port.update.end')
         return retval
 
-    def get_port(self, context, id, fields=None):
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getVPort(self, vportuuid):
         try:
-            vportlist = self._evsc.getVPortInfo('vport=%s' % (id))
+            vportlist = self._evsc.getVPortInfo('vport=%s' % (vportuuid))
         except radcli.ObjectError as oe:
             raise EVSControllerError(oe.get_payload().errmsg)
+        return (vportlist[0] if vportlist else None)
 
-        if vportlist:
-            portdict = self._convert_vport_to_port(context, vportlist[0])
-            return self._fields(portdict, fields)
-        return {}
+    def get_port(self, context, id, fields=None):
+        vport = self.evs_controller_getVPort(id)
+        if not vport:
+            return {}
+        portdict = self._convert_vport_to_port(context, vport)
+        return self._fields(portdict, fields)
+
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_getVPortList(self, filterstr):
+        try:
+            vportlist = self._evsc.getVPortInfo(filterstr)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+        return vportlist
 
     def get_ports(self, context, filters=None, fields=None,
                   sorts=None, limit=None, marker=None, page_reverse=False):
@@ -831,6 +925,8 @@
                 key = PORT_VPORT_ATTRIBUTE_MAP.get(key, key)
                 if isinstance(value, list):
                     value = ",".join([str(val) for val in value])
+                    if not value:
+                        continue
                 filterlist.append("%s=%s" % (key, value))
 
             if filterlist:
@@ -838,11 +934,7 @@
 
         LOG.debug(_("calling getVPortInfo from get_ports(): '%s'") %
                   (filterstr))
-        try:
-            vportlist = self._evsc.getVPortInfo(filterstr)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
-
+        vportlist = self.evs_controller_getVPortList(filterstr)
         self._apply_rsrc_props_filter(vportlist, vport_props)
 
         retme = []
@@ -872,28 +964,32 @@
             }
         }
         self.update_port(context, port['id'], port_update)
-        routers = self.get_sync_data(context.elevated(), [router_id])
         msg = l3_rpc_agent_api.L3AgentNotify.make_msg("routers_updated",
-                                                      routers=routers)
+                                                      routers=[router_id])
         l3_rpc_agent_api.L3AgentNotify.call(context, msg,
                                             topic=topics.L3_AGENT)
 
+    @lockutils.synchronized('evs-plugin', 'neutron-')
+    def evs_controller_removeVPort(self, tenantname, evsname, vportuuid):
+        try:
+            pat = radcli.ADRGlobPattern({'name': evsname,
+                                         'tenant': tenantname})
+            evs = self._rc.get_object(evsbind.EVS(), pat)
+            evs.removeVPort(vportuuid)
+        except radcli.ObjectError as oe:
+            raise EVSControllerError(oe.get_payload().errmsg)
+
     def delete_port(self, context, id, l3_port_check=True):
         if l3_port_check:
             self.prevent_l3_port_deletion(context, id)
-        try:
-            self.disassociate_floatingips(context, id)
-            port = self.get_port(context, id)
-            if not port:
-                return
-            if not l3_port_check:
-                self._release_l3agent_internal_port(context, port)
-            pat = radcli.ADRGlobPattern({'name': port['network_id'],
-                                         'tenant': port['tenant_id']})
-            evs = self._rc.get_object(evsbind.EVS(), pat)
-            evs.removeVPort(id)
-        except radcli.ObjectError as oe:
-            raise EVSControllerError(oe.get_payload().errmsg)
+        self.disassociate_floatingips(context, id)
+        port = self.get_port(context, id)
+        if not port:
+            return
+        if not l3_port_check:
+            self._release_l3agent_internal_port(context, port)
+        self.evs_controller_removeVPort(port['tenant_id'], port['network_id'],
+                                        id)
 
         # notify dhcp agent of port deletion
         payload = {
@@ -908,3 +1004,26 @@
     def update_fixed_ip_lease_expiration(self, context, network_id,
                                          ip_address, lease_remaining):
         pass
+
+    # needed for L3 agent support
+    def _get_network(self, context, network_id):
+        return self.get_network(context, network_id)
+
+    def _get_subnet(self, context, subnet_id):
+        return self.get_subnet(context, subnet_id)
+
+    def _get_port(self, context, port_id):
+        return self.get_port(context, port_id)
+
+    def _delete_port(self, context, port_id):
+        return self.delete_port(context, port_id)
+
+    def _get_subnets_by_network(self, context, network_id):
+        return self.get_subnets(context, filters={'network_id': network_id})
+
+    def _network_is_external(self, context, net_id):
+        try:
+            evs = self.get_network(context, net_id)
+            return evs[external_net.EXTERNAL]
+        except:
+            return False
--- a/components/openstack/neutron/files/evs_plugin.ini	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/evs_plugin.ini	Wed Jun 11 17:13:12 2014 -0700
@@ -8,4 +8,4 @@
 [DATABASE]
 # The SQLAlchemy connection string used to connect to the
 # database (string value). By default set to:
-# sql_connection = sqlite:////var/lib/quantum/quantum.sqlite
+# sql_connection = sqlite:////var/lib/neutron/neutron.sqlite
--- a/components/openstack/neutron/files/l3_agent.ini	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/l3_agent.ini	Wed Jun 11 17:13:12 2014 -0700
@@ -1,19 +1,20 @@
 [DEFAULT]
 # Show debugging output in log (sets DEBUG log level output)
-# debug = True
+# debug = False
 
-# L3 requires that an interface driver be set.  Choose the one that best
+# L3 requires that an interface driver be set. Choose the one that best
 # matches your plugin.
-
 # Solaris Elastic Virtual Switch (EVS)
-interface_driver = quantum.agent.solaris.interface.SolarisVNICDriver
+interface_driver = neutron.agent.solaris.interface.SolarisVNICDriver
 
 # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
 # iproute2 package that supports namespaces).
-# use_namespaces = False
+use_namespaces = False
 
 # If use_namespaces is set as False then the agent can only configure one
-# router.  This is done by setting the specific router_id.
+# router.
+
+# This is done by setting the specific router_id.
 # router_id =
 
 # Each L3 agent can be associated with at most one external network.  This
@@ -24,13 +25,13 @@
 
 # Indicates that this L3 agent should also handle routers that do not have
 # an external network gateway configured.  This option should be True only
-# for a single agent in a Quantum deployment, and may be False for all agents
+# for a single agent in a Neutron deployment, and may be False for all agents
 # if all routers must have an external network gateway
 # handle_internal_only_routers = True
 
 # Name of bridge used for external network traffic. This should be set to
 # empty value for the linux bridge
-# external_network_bridge =
+external_network_bridge =
 
 # seconds between re-sync routers' data if needed
 # periodic_interval = 40
@@ -39,6 +40,10 @@
 # starting agent
 # periodic_fuzzy_delay = 5
 
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+enable_metadata_proxy = False
+
 # Name of the datalink that connects to an external network. By default it's
 # set to net0.
 # external_network_datalink = net0
--- a/components/openstack/neutron/files/neutron-dhcp-agent	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron-dhcp-agent	Wed Jun 11 17:13:12 2014 -0700
@@ -15,10 +15,13 @@
 #    under the License.
 
 import os
+import re
 import sys
 
 import smf_include
 
+from subprocess import CalledProcessError, Popen, PIPE, check_call
+
 
 def start():
     # verify paths are valid
@@ -31,6 +34,42 @@
         "--config-file %s" % tuple(sys.argv[2:4])
     smf_include.smf_subprocess(cmd)
 
+
+def stop():
+    try:
+        # first kill the SMF contract
+        check_call(["/usr/bin/pkill", "-c", sys.argv[2]])
+    except CalledProcessError as err:
+        print "failed to kill the SMF contract: %s" % err
+        return smf_include.SMF_EXIT_ERR_FATAL
+
+    cmd = ["/usr/sbin/ipadm", "show-if", "-p", "-o", "ifname"]
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    output, error = p.communicate()
+    if p.returncode != 0:
+        print "failed to retrieve IP interface names"
+        return smf_include.SMF_EXIT_ERR_FATAL
+
+    ifnames = output.splitlines()
+    # DHCP agent datalinks are always 15 characters in length. They start with
+    # 'evs', end with '_0', and in between they are hexadecimal digits.
+    prog = re.compile('evs[0-9A-Fa-f\_]{10}_0')
+    for ifname in ifnames:
+        if not prog.search(ifname):
+            continue
+
+        try:
+            # first remove the IP
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ipadm", "delete-ip",
+                        ifname])
+            # next remove the VNIC
+            check_call(["/usr/bin/pfexec", "/usr/sbin/dladm", "delete-vnic",
+                        ifname])
+        except CalledProcessError as err:
+            print "failed to remove datalinks used by DHCP agent: %s" % err
+            return smf_include.SMF_EXIT_ERR_FATAL
+    return smf_include.SMF_EXIT_OK
+
 if __name__ == "__main__":
     os.putenv("LC_ALL", "C")
     smf_include.smf_main()
--- a/components/openstack/neutron/files/neutron-dhcp-agent.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron-dhcp-agent.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -32,41 +32,38 @@
 
     <!-- neutron-dhcp-agent uses dnsmasq which is incompatible with running
          BIND, ISC DHCP, and TFTP -->
-    <dependency name='exclude-bind'
-                grouping='exclude_all'
-                restart_on='none'
-                type='service'>
+    <dependency name='exclude-bind' grouping='exclude_all' restart_on='none'
+      type='service'>
       <service_fmri value='svc:/network/dns/server'/>
     </dependency>
 
-    <dependency name='exclude-dhcp-relay'
-                grouping='exclude_all'
-                restart_on='none'
-                type='service'>
+    <dependency name='exclude-dhcp-relay' grouping='exclude_all'
+      restart_on='none' type='service'>
       <service_fmri value='svc:/network/dhcp/relay'/>
     </dependency>
 
-    <dependency name='exclude-dhcp-server'
-                grouping='exclude_all'
-                restart_on='none'
-                type='service'>
+    <dependency name='exclude-dhcp-server' grouping='exclude_all'
+      restart_on='none' type='service'>
       <service_fmri value='svc:/network/dhcp/server'/>
     </dependency>
 
-    <dependency name='exclude-tftp'
-                grouping='exclude_all'
-                restart_on='none'
-                type='service'>
+    <dependency name='exclude-tftp' grouping='exclude_all' restart_on='none'
+      type='service'>
       <service_fmri value='svc:/network/tftp/udp6'/>
     </dependency>
 
-    <dependency name='exclude-dnsmasq'
-                grouping='exclude_all'
-                restart_on='none'
-                type='service'>
+    <dependency name='exclude-dnsmasq' grouping='exclude_all' restart_on='none'
+      type='service'>
       <service_fmri value='svc:/network/dnsmasq'/>
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/neutron-dhcp-agent %m %{config/config_path} %{config/dhcp_config_path}">
       <method_context>
@@ -74,8 +71,12 @@
           privileges='basic,{net_privaddr}:53/udp,{net_privaddr}:53/tcp,{net_privaddr}:67/udp,{net_privaddr}:69/udp,{net_privaddr}:547/udp,net_icmpaccess,sys_ip_config' />
       </method_context>
     </exec_method>
-    <exec_method timeout_seconds="60" type="method" name="stop"
-      exec=":kill"/>
+    <exec_method timeout_seconds="600" type="method" name="stop"
+      exec="/lib/svc/method/neutron-dhcp-agent %m %{restarter/contract}">
+      <method_context>
+        <method_credential user='neutron' group='neutron' />
+      </method_context>
+    </exec_method>
 
     <instance name='default' enabled='false'>
       <!-- to start/stop/refresh the service -->
@@ -88,7 +89,7 @@
 
       <property_group name='config' type='application'>
         <propval name='config_path' type='astring'
-          value='/etc/neutron/quantum.conf'/>
+          value='/etc/neutron/neutron.conf'/>
         <propval name='dhcp_config_path' type='astring'
           value='/etc/neutron/dhcp_agent.ini'/>
       </property_group>
--- a/components/openstack/neutron/files/neutron-l3-agent	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron-l3-agent	Wed Jun 11 17:13:12 2014 -0700
@@ -15,11 +15,13 @@
 #    under the License.
 
 import os
+import re
 import sys
 
+import netaddr
 import smf_include
 
-from subprocess import Popen, PIPE
+from subprocess import CalledProcessError, Popen, PIPE, check_call
 
 
 def start():
@@ -50,14 +52,112 @@
     v6fwding = "on" in output
 
     if not any((v4fwding, v6fwding)):
-        print "System-wide IPv4 or IPv6 (or both) forwarding must be enabled " \
-            "before enabling %s" % os.getenv("SMF_FMRI")
+        print "System-wide IPv4 or IPv6 (or both) forwarding must be " \
+              "enabled before enabling neutron-l3-agent"
         return smf_include.SMF_EXIT_ERR_CONFIG
 
     cmd = "/usr/lib/neutron/neutron-l3-agent --config-file %s " \
         "--config-file %s" % tuple(sys.argv[2:4])
     smf_include.smf_subprocess(cmd)
 
+
+def stop():
+    try:
+        # first kill the SMF contract
+        check_call(["/usr/bin/pkill", "-c", sys.argv[2]])
+    except CalledProcessError as err:
+        print "failed to kill the SMF contract: %s" % (err)
+        return smf_include.SMF_EXIT_ERR_FATAL
+    # remove VNICs associated with L3 agent
+    cmd = ["/usr/sbin/ipadm", "show-if", "-p", "-o", "ifname"]
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    output, error = p.communicate()
+    if p.returncode != 0:
+        print "failed to retrieve IP interface names"
+        return smf_include.SMF_EXIT_ERR_CONFIG
+
+    ifnames = output.splitlines()
+    # L3 agent datalinks are always 15 characters in length. They start
+    # with either 'l3i' or 'l3e', end with '_0', and in between they are
+    # hexadecimal digits.
+    prog = re.compile('l3[ie][0-9A-Fa-f\_]{10}_0')
+    for ifname in ifnames:
+        if not prog.search(ifname):
+            continue
+        try:
+            # first remove the IP
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ipadm", "delete-ip",
+                        ifname])
+            # next remove the VNIC
+            check_call(["/usr/bin/pfexec", "/usr/sbin/dladm", "delete-vnic",
+                        ifname])
+        except CalledProcessError as err:
+            print "failed to remove datalinks used by L3 agent: %s" % (err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    # remove IP Filter rules added by neutron-l3-agent
+    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipfstat", "-io"]
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    output, error = p.communicate()
+    if p.returncode != 0:
+        print "failed to retrieve IP Filter rules"
+        return smf_include.SMF_EXIT_ERR_FATAL
+
+    ipfilters = output.splitlines()
+    # L3 agent IP Filter rules are of the form
+    # block in quick on l3i64cbb496_a_0 from ... to pool/15417332
+    prog = re.compile('on l3i[0-9A-Fa-f\_]{10}_0')
+    ippool_names = []
+    for ipf in ipfilters:
+        if not prog.search(ipf):
+            continue
+        # capture the IP pool name
+        ippool_names.append(ipf.split('pool/')[1])
+
+        try:
+            # remove the IP Filter rule
+            p = Popen(["echo", ipf], stdout=PIPE)
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ipf", "-r", "-f", "-"],
+                       stdin=p.stdout)
+        except CalledProcessError as err:
+            print "failed to remove IP Filter rule %s: %s" % (ipf, err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    # remove IP Pools added by neutron-l3-agent
+    for ippool_name in ippool_names:
+        try:
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ippool", "-R",
+                        "-m", ippool_name, "-t", "tree"])
+        except CalledProcessError as err:
+            print "failed to remove IP Pool %s: %s" % (ippool_name, err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    # remove IP NAT rules added by neutron-l3-agent
+    cmd = ["/usr/bin/pfexec", "/usr/sbin/ipnat", "-lR"]
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+    output, error = p.communicate()
+    if p.returncode != 0:
+        print "failed to retrieve IP NAT rules"
+        return smf_include.SMF_EXIT_ERR_FATAL
+
+    ipnat_rules = output.splitlines()
+    # L3 agent IP NAT rules are of the form
+    # bimap l3e64ccc496_a_0 192.168.1.3/32 -> 172.16.10.3/32
+    prog = re.compile('l3e[0-9A-Fa-f\_]{10}_0')
+    for ipnat_rule in ipnat_rules:
+        if not prog.search(ipnat_rule):
+            continue
+        # remove the IP NAT rule
+        try:
+            p = Popen(["echo", ipnat_rule], stdout=PIPE)
+            check_call(["/usr/bin/pfexec", "/usr/sbin/ipnat", "-r", "-f", "-"],
+                       stdin=p.stdout)
+        except CalledProcessError as err:
+            print "failed to remove IP NAT rule %s: %s" % (ipnat_rule, err)
+            return smf_include.SMF_EXIT_ERR_FATAL
+
+    return smf_include.SMF_EXIT_OK
+
 if __name__ == "__main__":
     os.putenv("LC_ALL", "C")
     smf_include.smf_main()
--- a/components/openstack/neutron/files/neutron-l3-agent.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron-l3-agent.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -35,14 +35,25 @@
       <service_fmri value='svc:/network/ipfilter:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/neutron-l3-agent %m %{config/config_path} %{config/l3_config_path}">
       <method_context>
         <method_credential user='neutron' group='neutron' />
       </method_context>
     </exec_method>
-    <exec_method timeout_seconds="60" type="method" name="stop"
-      exec=":kill"/>
+    <exec_method timeout_seconds="600" type="method" name="stop"
+      exec="/lib/svc/method/neutron-l3-agent %m %{restarter/contract}">
+      <method_context>
+        <method_credential user='neutron' group='neutron' />
+      </method_context>
+    </exec_method>
 
     <instance name='default' enabled='false'>
       <!-- to start/stop/refresh the service -->
@@ -55,7 +66,7 @@
 
       <property_group name='config' type='application'>
         <propval name='config_path' type='astring'
-          value='/etc/neutron/quantum.conf'/>
+          value='/etc/neutron/neutron.conf'/>
         <propval name='l3_config_path' type='astring'
           value='/etc/neutron/l3_agent.ini'/>
       </property_group>
@@ -87,7 +98,7 @@
           required='true'>
           <common_name>
             <loctext xml:lang='C'>
-              Filesystem path to Neutron l3 .ini file
+              Filesystem path to Neutron L3 .ini file
             </loctext>
           </common_name>
         </prop_pattern>
--- a/components/openstack/neutron/files/neutron-server.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron-server.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/neutron-server %m %{config/config_path} %{config/plugin_config_path}">
       <method_context>
@@ -50,7 +57,7 @@
 
       <property_group name='config' type='application'>
         <propval name='config_path' type='astring'
-          value='/etc/neutron/quantum.conf'/>
+          value='/etc/neutron/neutron.conf'/>
         <propval name='plugin_config_path' type='astring'
           value='/etc/neutron/plugins/evs/evs_plugin.ini'/>
       </property_group>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/files/neutron.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,378 @@
+[DEFAULT]
+# Default log level is INFO
+# verbose and debug have the same result.
+# One of them will set DEBUG log level output
+# debug = False
+# verbose = False
+
+# Where to store Neutron state files.  This directory must be writable by the
+# user executing the agent.
+# state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog                           -> syslog
+# log_file and log_dir                 -> log_dir/log_file
+# (not log_file) and log_dir           -> log_dir/{binary_name}.log
+# use_stderr                           -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors                       -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+# log_dir =
+
+# publish_errors = False
+
+# Address to bind the API server
+# bind_host = 0.0.0.0
+
+# Port to bind the API server to
+# bind_port = 9696
+
+# Path to the extensions.  Note that this can be a colon-separated list of
+# paths.  For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# Neutron plugin provider module
+# The EVSNeutronPluginV2 Neutron plugin connects to the Solaris Elastic
+# Virtual Switch framework to provide virtual networking between Solaris
+# Zones.
+core_plugin = neutron.plugins.evs.plugin.EVSNeutronPluginV2
+
+# Advanced service modules
+# service_plugins =
+
+# Paste configuration file
+# api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+# Size of RPC thread pool
+# rpc_thread_pool_size = 64
+# Size of RPC connection pool
+# rpc_conn_pool_size = 30
+# Seconds to wait for a response from call or multicall
+# rpc_response_timeout = 60
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+# rpc_cast_timeout = 30
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# IP address of the RabbitMQ installation
+# rabbit_host = localhost
+# Password of the RabbitMQ server
+# rabbit_password = guest
+# Port where RabbitMQ server is running/listening
+# rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+# rabbit_userid = guest
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all).You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver. DHCP agents need it.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+# default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+# notification_topics = notifications
+
+# Default maximum number of items returned in a single response;
+# value == infinite and value < 0 mean no max limit, and value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will just return pagination_max_limit
+# of number of items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+# agent_down_time = 9
+# ===========  end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# ===========  end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn.  The default, 0, runs the
+# worker thread in the current process.  Greater than 0 launches that number of
+# child processes as workers.  The parent process manages them.
+# api_workers = 0
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+[quotas]
+# resource name(s) that are supported in quota features
+# quota_items = network,subnet,port
+
+# default number of resource allowed per tenant, minus for unlimited
+# default_quota = -1
+
+# number of networks allowed per tenant, and minus means unlimited
+# quota_network = 10
+
+# number of subnets allowed per tenant, and minus means unlimited
+# quota_subnet = 10
+
+# number of ports allowed per tenant, and minus means unlimited
+# quota_port = 50
+
+# number of security groups allowed per tenant, and minus means unlimited
+# quota_security_group = 10
+
+# number of security group rules allowed per tenant, and minus means unlimited
+# quota_security_group_rule = 100
+
+# default driver to use for quota checks
+# The default quota driver, neutron.db.quota_db.DbQuotaDriver, supports
+# static quotas defined in the [quotas] section. The Elastic Virtual
+# Switch plugin supports per-tenant quota limits via the quota extension
+# API. To enable per-tenant quotas, the quota_driver should be set to
+# neutron.plugins.evs.db.quotas_db.EVSDbQuotaDriver.
+quota_driver = neutron.plugins.evs.db.quotas_db.EVSDbQuotaDriver
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+# root_helper = sudo
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+# report_interval = 4
+
+# ===========  end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:[email protected]:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite://
+
+# The SQLAlchemy connection string used to connect to the slave database
+# slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+# max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+# retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+# min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# max_pool_size = 10
+
+# Timeout in seconds before idle sql connections are reaped
+# idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+# max_overflow = 20
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+# connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+# connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service type include LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# this is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
--- a/components/openstack/neutron/files/neutron.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/files/neutron.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -4,11 +4,12 @@
 solaris.admin.edit/etc/neutron/dhcp_agent.ini,\
 solaris.admin.edit/etc/neutron/l3_agent.ini,\
 solaris.admin.edit/etc/neutron/logging.conf,\
+solaris.admin.edit/etc/neutron/neutron.conf,\
 solaris.admin.edit/etc/neutron/plugins/evs/evs_plugin.ini,\
 solaris.admin.edit/etc/neutron/policy.json,\
-solaris.admin.edit/etc/neutron/quantum.conf,\
 solaris.smf.manage.neutron,\
-solaris.smf.value.neutron
+solaris.smf.value.neutron;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Network Management
 
--- a/components/openstack/neutron/files/quantum.conf	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,295 +0,0 @@
-[DEFAULT]
-# Default log level is INFO
-# verbose and debug has the same result.
-# One of them will set DEBUG log level output
-# debug = False
-# verbose = False
-
-# Where to store Quantum state files.  This directory must be writable by the
-# user executing the agent.
-# state_path = /var/lib/quantum
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog                           -> syslog
-# log_file and log_dir                 -> log_dir/log_file
-# (not log_file) and log_dir           -> log_dir/{binary_name}.log
-# use_stderr                           -> stderr
-# (not user_stderr) and (not log_file) -> stdout
-# publish_errors                       -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-# log_dir =
-
-# publish_errors = False
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port the bind the API server to
-bind_port = 9696
-
-# Path to the extensions.  Note that this can be a colon-separated list of
-# paths.  For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of quantum.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# Quantum plugin provider module
-# The EVSQuantumPluginV2 Neutron plugin connects to the Solaris Elastic
-# Virtual Switch framework to provide virtual networking between Solaris
-# Zones.
-core_plugin = quantum.plugins.evs.plugin.EVSQuantumPluginV2
-
-# Advanced service modules
-# service_plugins =
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-# auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4h octet is not 00, it will also used. The others will be
-# randomly generated.
-# 3 octet
-# base_mac = fa:16:3e:00:00:00
-# 4 octet
-# base_mac = fa:16:3e:4f:00:00
-
-# Maximum amount of retries to generate a unique MAC address
-# mac_generation_retries = 16
-
-# DHCP Lease duration (in seconds)
-# dhcp_lease_duration = 120
-
-# Allow sending resource operation notification to DHCP agent
-# dhcp_agent_notification = True
-
-# Enable or disable bulk create/update/delete operations
-# allow_bulk = True
-# Enable or disable pagination
-# allow_pagination = False
-# Enable or disable sorting
-# allow_sorting = False
-# Enable or disable overlapping IPs for subnets
-# Attention: the following parameter MUST be set to False if Quantum is
-# being used in conjunction with nova security groups and/or metadata service.
-# allow_overlapping_ips = False
-# Ensure that configured gateway is on subnet
-# force_gateway_on_subnet = False
-
-
-# RPC configuration options. Defined in rpc __init__
-# The messaging module to use, defaults to kombu.
-# rpc_backend = quantum.openstack.common.rpc.impl_kombu
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64,
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
-# Modules of exceptions that are permitted to be recreated
-# upon receiving exception data from an rpc call.
-# allowed_rpc_exception_modules = quantum.openstack.common.exception, nova.exception
-# AMQP exchange to connect to if using RabbitMQ or QPID
-control_exchange = quantum
-
-# If passed, use a fake RabbitMQ provider
-# fake_rabbit = False
-
-# Configuration options if sending notifications via kombu rpc (these are
-# the defaults)
-# SSL version to use (valid only if SSL enabled)
-# kombu_ssl_version =
-# SSL key file (valid only if SSL enabled)
-# kombu_ssl_keyfile =
-# SSL cert file (valid only if SSL enabled)
-# kombu_ssl_certfile =
-# SSL certification authority file (valid only if SSL enabled)'
-# kombu_ssl_ca_certs =
-# IP address of the RabbitMQ installation
-# rabbit_host = localhost
-# Password of the RabbitMQ server
-# rabbit_password = guest
-# Port where RabbitMQ server is running/listening
-# rabbit_port = 5672
-# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
-# rabbit_hosts = localhost:5672
-# User ID used for RabbitMQ connections
-# rabbit_userid = guest
-# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = /
-# Maximum retries with trying to connect to RabbitMQ
-# (the default of 0 implies an infinite retry count)
-# rabbit_max_retries = 0
-# RabbitMQ connection retry interval
-# rabbit_retry_interval = 1
-# Use HA queues in RabbitMQ (x-ha-policy: all).You need to
-# wipe RabbitMQ database when changing this option. (boolean value)
-# rabbit_ha_queues = false
-
-# QPID
-# rpc_backend=quantum.openstack.common.rpc.impl_qpid
-# Qpid broker hostname
-# qpid_hostname = localhost
-# Qpid broker port
-# qpid_port = 5672
-# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
-# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
-# qpid_hosts = localhost:5672
-# Username for qpid connection
-# qpid_username = ''
-# Password for qpid connection
-# qpid_password = ''
-# Space separated list of SASL mechanisms to use for auth
-# qpid_sasl_mechanisms = ''
-# Seconds between connection keepalive heartbeats
-# qpid_heartbeat = 60
-# Transport to use, either 'tcp' or 'ssl'
-# qpid_protocol = tcp
-# Disable Nagle algorithm
-# qpid_tcp_nodelay = True
-
-# ZMQ
-# rpc_backend=quantum.openstack.common.rpc.impl_zmq
-# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
-# The "host" option should point or resolve to this address.
-# rpc_zmq_bind_address = *
-
-# ============ Notification System Options =====================
-
-# Notifications can be sent when network/subnet/port are create, updated or deleted.
-# There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and
-# noop (no notifications sent, the default)
-
-# Notification_driver can be defined multiple times
-# Do nothing driver
-# notification_driver = quantum.openstack.common.notifier.no_op_notifier
-# Logging driver
-# notification_driver = quantum.openstack.common.notifier.log_notifier
-# RPC driver. DHCP agents needs it.
-notification_driver = quantum.openstack.common.notifier.rpc_notifier
-
-# default_notification_level is used to form actual topic name(s) or to set logging level
-default_notification_level = INFO
-
-# default_publisher_id is a part of the notification payload
-# host = myhost.com
-# default_publisher_id = $host
-
-# Defined in rpc_notifier, can be comma separated values.
-# The actual topic names will be %s.%(default_notification_level)s
-notification_topics = notifications
-
-# Default maximum number of items returned in a single response,
-# value == infinite and value < 0 means no max limit, and value must
-# greater than 0. If the number of items requested is greater than
-# pagination_max_limit, server will just return pagination_max_limit
-# of number of items.
-# pagination_max_limit = -1
-
-# Maximum number of DNS nameservers per subnet
-# max_dns_nameservers = 5
-
-# Maximum number of host routes per subnet
-# max_subnet_host_routes = 20
-
-# Maximum number of fixed ips per port
-# max_fixed_ips_per_port = 5
-
-# =========== items for agent management extension =============
-# Seconds to regard the agent as down.
-# agent_down_time = 5
-# ===========  end of items for agent management extension =====
-
-# =========== items for agent scheduler extension =============
-# Driver to use for scheduling network to DHCP agent
-# network_scheduler_driver = quantum.scheduler.dhcp_agent_scheduler.ChanceScheduler
-# Driver to use for scheduling router to a default L3 agent
-# router_scheduler_driver = quantum.scheduler.l3_agent_scheduler.ChanceScheduler
-
-# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# quantum server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to quantum server
-# router_auto_schedule = True
-# ===========  end of items for agent scheduler extension =====
-
-[QUOTAS]
-# resource name(s) that are supported in quota features
-# quota_items = network,subnet,port
-
-# default number of resource allowed per tenant, minus for unlimited
-# default_quota = -1
-
-# number of networks allowed per tenant, and minus means unlimited
-# quota_network = 10
-
-# number of subnets allowed per tenant, and minus means unlimited
-# quota_subnet = 10
-
-# number of ports allowed per tenant, and minus means unlimited
-# quota_port = 50
-
-# number of security groups allowed per tenant, and minus means unlimited
-# quota_security_group = 10
-
-# number of security group rules allowed per tenant, and minus means unlimited
-# quota_security_group_rule = 100
-
-# default driver to use for quota checks
-# The default quota driver supports static quotas defined in the [QUOTAS]
-# section. The Elastic Virtual Switch plugin supports per-tenant quota
-# limits via the quota extension API. To enable per-tenant quotas, the
-# quota_driver should be set to
-# quantum.plugins.evs.db.quotas_db.EVSDbQuotaDriver.
-# quota_driver = quantum.quota.ConfDriver
-
-[DEFAULT_SERVICETYPE]
-# Description of the default service type (optional)
-# description = "default service type"
-# Enter a service definition line for each advanced service provided
-# by the default service type.
-# Each service definition should be in the following format:
-# <service>:<plugin>[:driver]
-
-[AGENT]
-# Use "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly
-# root_helper = sudo
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server, should be less than
-# agent_down_time
-# report_interval = 4
-
-# ===========  end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
-signing_dir = /var/lib/quantum/keystone-signing
--- a/components/openstack/neutron/neutron.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/neutron.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,10 +28,10 @@
 set name=pkg.summary value="OpenStack Neutron"
 set name=pkg.description \
     value="Neutron provides an API to dynamically request and configure virtual networks. These networks connect 'interfaces' from other OpenStack services (e.g., VNICs from Nova VMs). The Neutron API supports extensions to provide advanced network capabilities (e.g., QoS, ACLs, network monitoring, etc)."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=com.oracle.info.description \
     value="Neutron, the OpenStack virtual network service"
-set name=com.oracle.info.tpno value=16305
+set name=com.oracle.info.tpno value=17716
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -44,19 +44,21 @@
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 dir  path=etc/neutron owner=neutron group=neutron mode=0700
 file etc/api-paste.ini path=etc/neutron/api-paste.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=true
+    group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/dhcp_agent.ini path=etc/neutron/dhcp_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=true
+    group=neutron mode=0644 overlay=allow preserve=renamenew
 file files/l3_agent.ini path=etc/neutron/l3_agent.ini owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=true
+    group=neutron mode=0644 overlay=allow preserve=renamenew
+file files/neutron.conf path=etc/neutron/neutron.conf owner=neutron \
+    group=neutron mode=0644 \
+    original_name=cloud/openstack/neutron:etc/neutron/quantum.conf \
+    overlay=allow preserve=renamenew
 dir  path=etc/neutron/plugins owner=neutron group=neutron
 dir  path=etc/neutron/plugins/evs owner=neutron group=neutron
 file files/evs_plugin.ini path=etc/neutron/plugins/evs/evs_plugin.ini \
-    owner=neutron group=neutron mode=0644 overlay=allow preserve=true
+    owner=neutron group=neutron mode=0644 overlay=allow preserve=renamenew
 file etc/policy.json path=etc/neutron/policy.json owner=neutron group=neutron \
-    mode=0644 overlay=allow preserve=true
-file files/quantum.conf path=etc/neutron/quantum.conf owner=neutron \
-    group=neutron mode=0644 overlay=allow preserve=true
+    mode=0644 overlay=allow preserve=renamenew
 link path=etc/quantum target=./neutron
 file files/neutron.auth_attr \
     path=etc/security/auth_attr.d/cloud:openstack:neutron group=sys
@@ -75,225 +77,463 @@
 file files/neutron-dhcp-agent path=lib/svc/method/neutron-dhcp-agent
 file files/neutron-l3-agent path=lib/svc/method/neutron-l3-agent
 file files/neutron-server path=lib/svc/method/neutron-server
-file usr/bin/quantum-dhcp-agent path=usr/lib/neutron/neutron-dhcp-agent \
+file usr/bin/neutron-dhcp-agent path=usr/lib/neutron/neutron-dhcp-agent \
     mode=0555
-file usr/bin/quantum-l3-agent path=usr/lib/neutron/neutron-l3-agent mode=0555
-file usr/bin/quantum-server path=usr/lib/neutron/neutron-server mode=0555
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/eager_resources.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file usr/bin/neutron-l3-agent path=usr/lib/neutron/neutron-l3-agent mode=0555
+file usr/bin/neutron-server path=usr/lib/neutron/neutron-server mode=0555
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/dhcp_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/evs_l3_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/firewall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l2population_rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/l3_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/async_process.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/daemon.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/dhcp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/external_process.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/interface.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ip_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/iptables_firewall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/iptables_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/ovs_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/linux/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/metadata/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/metadata/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/metadata/namespace_proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/netns_cleanup_util.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/ovs_cleanup_util.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/securitygroups_rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/dhcp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/interface.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/ipfilters_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/agent/solaris/net_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/api_common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/extensions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/attributes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/resource.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/v2/router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/versions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/views/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/api/views/versions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/auth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/cmd/usage_audit.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/legacy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/test_lib.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/topics.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/common/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/agents_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/agentschedulers_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/allowedaddresspairs_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/db_base_plugin_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/dhcp_rpc_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/external_net_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/extradhcpopt_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/extraroute_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/firewall/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/firewall/firewall_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_agentschedulers_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_gwmode_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/l3_rpc_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/loadbalancer/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/loadbalancer/loadbalancer_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/metering/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/metering/metering_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic.ini
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/common_ext_ops.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/env.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/script.py.mako
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1064e98b7917_nec_pf_port_del.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/11c6e18605c8_pool_monitor_status_.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/128e042a2b68_ext_gw_mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/13de305df56e_add_nec_pf_name.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/14f24494ca31_arista_ml2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/176a85fc7d79_add_portbindings_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/1efb85914233_allowedaddresspairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/20ae61555e95_ml2_gre_type_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2528ceb28230_nec_pf_netid_fix.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/263772d65691_cisco_db_cleanup_2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/27ef74513d33_quota_in_plumgrid_pl.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2a3bae1ceb8_nec_port_binding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/32a65f71af51_ml2_portbinding.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/32b517556ec9_remove_tunnelip_mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/338d7508968c_vpnaas_peer_address_.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/35c7c198ddea_lbaas_healthmon_del_status.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/38fc1f6789f8_cisco_n1kv_overlay.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/39cf3f799352_fwaas_havana_2_model.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3a520dd165d0_cisco_nexus_multi_switch.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3c6e57a23db4_add_multiprovider.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3cabb850f4a5_table_to_track_port_.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3cbf70257c28_nvp_mac_learning.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3d6fae8b70b0_nvp_lbaas_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/3ed8f075e38a_nvp_fwaas_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/40dffbf4b549_nvp_dist_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/46a0efbd8f0_cisco_n1kv_multisegm.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/477a4488d3f4_ml2_vxlan_type_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/4a666eb208c2_service_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/51b4de912379_cisco_nexus_ml2_mech.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/52c5e4a18807_lbaas_pool_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/52ff27f7567a_support_for_vpnaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/53bbd27ec841_extra_dhcp_opts_supp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/557edfc53098_new_service_types.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/569e98a8132b_metering.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5918cbddab04_add_tables_for_route.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/5ac71e65402c_ml2_initial.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/63afba73813_ovs_tunnelendpoints_id_unique.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/66a59a7f516_nec_openflow_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/86cf4d88bd3_remove_bigswitch_por.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/b7a8863760e_rm_cisco_vlan_bindin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/c88b6b5fea3_cisco_n1kv_tables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/f489cf14a79c_lbaas_havana.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/f9263d6df56_remove_dhcp_lease.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/folsom_initial.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/grizzly_release.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/alembic_migrations/versions/havana_release.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/migration/cli.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/model_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/models_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portbindings_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portbindings_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/portsecurity_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/quota_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/routedserviceinsertion_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/routerservicetype_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/securitygroups_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/securitygroups_rpc_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/servicetype_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/sqlalchemyutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/vpn/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/db/vpn/vpn_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/commands.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/debug_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/debug/shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/allowedaddresspairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/dhcpagentscheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/external_net.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/extra_dhcp_opt.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/extraroute.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/firewall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/flavor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3_ext_gw_mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/l3agentscheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/lbaas_agentscheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/loadbalancer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/metering.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/multiprovidernet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/portbindings.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/portsecurity.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/providernet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/quotasv2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/routedserviceinsertion.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/routerservicetype.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/securitygroup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/servicetype.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/extensions/vpnaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/hooks.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ar/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/bg_BG/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/bs/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ca/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/cs/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/da/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/de/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/en_AU/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/en_GB/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/en_US/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/es/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/es_MX/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/fi_FI/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/fil/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/fr/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/hi/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/hr/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/hu/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/id/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/it/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/it_IT/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ja/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ka_GE/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/kn/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ko/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ko_KR/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ms/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/nb/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ne/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/nl_NL/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/pl_PL/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/pt/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/pt_BR/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ro/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ru/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/ru_RU/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/sk/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/sl_SI/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/sw_KE/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/tl/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/tl_PH/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/tr_TR/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/uk/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/vi_VN/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/zh_CN/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/zh_HK/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/locale/zh_TW/LC_MESSAGES/neutron.po
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/neutron_plugin_base_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/sqlalchemy/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/sqlalchemy/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/sqlalchemy/session.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/db/sqlalchemy/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/eventlet_backdoor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/excutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/fileutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/gettextutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/jsonutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/lockutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/loopingcall.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/network_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/log_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/no_op_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/rabbit_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/rpc_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/rpc_notifier2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/notifier/test_notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/periodic_task.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/processutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rootwrap/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rootwrap/cmd.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rootwrap/filters.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rootwrap/wrapper.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/amqp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/dispatcher.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/impl_fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/impl_kombu.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/impl_qpid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/impl_zmq.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/matchmaker.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/matchmaker_redis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/matchmaker_ring.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/serializer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/rpc/zmq_receiver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/threadgroup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/timeutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/openstack/common/uuidutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/common/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/db/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/db/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/db/l3nat.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/db/quotas_db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/evs/plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/common/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/meta_db_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/meta_models_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/meta_neutron_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/metaplugin/proxy_neutron_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/common/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/driver_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/driver_context.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/credentials_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/mech_cisco_nexus.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/network_db_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/network_models_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus_db_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus_models_v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus_network_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/cisco/nexus_snippets.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/mech_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/l2pop/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/db.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_hyperv.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_linuxbridge.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mech_openvswitch.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/mechanism_ncs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_flat.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_gre.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_local.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_tunnel.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_vlan.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/drivers/type_vxlan.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/managers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/models.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/ml2/rpc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/plugins/openvswitch/common/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/policy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/quota.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/dhcp_agent_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/scheduler/l3_agent_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/server/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/firewall_agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/l3reference/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/l3reference/firewall_l3_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/varmour/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/varmour/varmour_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/varmour/varmour_router.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/agents/varmour/varmour_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/fwaas_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/linux/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/linux/iptables_fwaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/varmour/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/drivers/varmour/varmour_fwaas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/firewall/fwaas_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/README
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/l3_router/l3_router_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/agent_scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/constants.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/abstract_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/agent_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/agent_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/cfg.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/namespace_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/haproxy/plugin_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/noop/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/drivers/noop/noop_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/loadbalancer/plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/agents/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/agents/metering_agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/abstract_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/iptables/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/iptables/iptables_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/noop/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/drivers/noop/noop_driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/metering/metering_plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/provider_configuration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/service_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/agent.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/common/topics.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/device_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/device_drivers/ipsec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/device_drivers/template/openswan/ipsec.conf.template
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/device_drivers/template/openswan/ipsec.secret.template
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/plugin.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/service_drivers/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/services/vpn/service_drivers/ipsec.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/version.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutron/wsgi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/quantum/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/dhcp_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/evs_l3_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/firewall.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/l3_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/daemon.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/device.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/dhcp.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/external_process.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/interface.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/ip_lib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/iptables_firewall.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/iptables_manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/ovs_lib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/linux/utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/metadata/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/metadata/agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/metadata/namespace_proxy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/netns_cleanup_util.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/ovs_cleanup_util.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/rpc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/securitygroups_rpc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/device.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/dhcp.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/interface.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/ipfilters_manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/agent/solaris/net_lib.py
 file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/api_common.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/extensions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/rpc/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/rpc/agentnotifiers/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/rpc/agentnotifiers/l3_rpc_agent_api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/v2/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/v2/attributes.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/v2/base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/v2/resource.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/v2/router.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/versions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/views/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/api/views/versions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/quantum/auth.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/exceptions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/rpc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/test_lib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/topics.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/common/utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/context.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/agents_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/agentschedulers_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/db_base_plugin_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/dhcp_rpc_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/extraroute_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/l3_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/l3_rpc_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/loadbalancer/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/loadbalancer/loadbalancer_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/README
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic.ini
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/common_ext_ops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/env.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/script.py.mako
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/1149d7de0cfa_port_security.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/1341ed32cc1e_nvp_netbinding_update.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/1b693c095aa3_quota_ext_db_grizzly.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/1c33fa3cd1a1_extra_route_config.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/2c4af419145b_l3_support.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/363468ac592c_nvp_network_gw.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/38335592a0dc_nvp_portmap.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/3b54bf9e29f7_nec_plugin_sharednet.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/3cb5d900c5de_security_groups.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/45680af419f9_nvp_qos.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/4692d074d587_agent_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/48b6f43f7471_service_type.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/49332180ca96_ryu_plugin_update.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/511471cc46b_agent_ext_model_supp.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/54c2c487e913_lbaas.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/README
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/folsom_initial.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/alembic_migrations/versions/grizzly_release.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/migration/cli.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/model_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/models_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/portsecurity_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/quota_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/routedserviceinsertion_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/routerservicetype_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/securitygroups_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/securitygroups_rpc_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/servicetype_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/db/sqlalchemyutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/debug/README
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/debug/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/debug/commands.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/debug/debug_agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/debug/shell.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/agent.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/agentscheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/extraroute.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/flavor.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/l3.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/loadbalancer.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/portbindings.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/portsecurity.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/providernet.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/quotasv2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/routedserviceinsertion.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/routerservicetype.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/securitygroup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/extensions/servicetype.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/locale/ja/LC_MESSAGES/quantum.po
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/locale/quantum.pot
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/context.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/eventlet_backdoor.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/exception.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/excutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/fileutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/gettextutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/importutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/jsonutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/local.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/lockutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/log.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/loopingcall.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/network_utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/log_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/no_op_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/rabbit_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/rpc_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/rpc_notifier2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/notifier/test_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/periodic_task.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/processutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/amqp.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/common.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/dispatcher.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/impl_fake.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/impl_kombu.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/impl_qpid.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/impl_zmq.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/matchmaker.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/matchmaker_redis.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/proxy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/rpc/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/setup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/threadgroup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/timeutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/uuidutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/openstack/common/version.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/common/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/db/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/db/api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/db/l3nat.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/db/quotas_db.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/evs/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/agent/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/agent/api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/agent/manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/constants.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/drivers/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/drivers/haproxy/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/drivers/haproxy/cfg.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/drivers/haproxy/namespace_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/agent_loadbalancer/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/plugins/services/service_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/quantum_plugin_base_v2.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/quota.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/rootwrap/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/rootwrap/filters.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/rootwrap/wrapper.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/scheduler/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/scheduler/dhcp_agent_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/scheduler/l3_agent_scheduler.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/server/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/version.py
-file path=usr/lib/python$(PYVER)/vendor-packages/quantum/wsgi.py
 dir  path=var/lib/neutron owner=neutron group=neutron mode=0700
 link path=var/lib/quantum target=neutron
 group groupname=neutron gid=84
@@ -332,12 +572,12 @@
 # force a dependency on alembic; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/alembic-26
 
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-26
+
 # force a dependency on cliff; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/cliff-26
 
-# force a dependency on eventlet; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/eventlet-26
-
 # force a dependency on greenlet; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/greenlet-26
 
@@ -347,26 +587,47 @@
 # force a dependency on iso8601; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/iso8601-26
 
+# force a dependency on jinja2; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jinja2-26
+
+# force a dependency on jsonrpclib; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jsonrpclib-26
+
+# force a dependency on keystoneclient; used via a paste.deploy filter
+depend type=require fmri=library/python/keystoneclient-26
+
 # force a dependency on kombu; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/kombu-26
 
-# force a dependency on netaddr; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/netaddr-26
+# force a dependency on mako; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/mako-26
+
+# force a dependency on neutronclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/neutronclient-26
 
 # force a dependency on paste.deploy; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
-# force a dependency on quantumclient; pkgdepend work is needed to flush this
-# out.
-depend type=require fmri=library/python/quantumclient-26
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-26
+
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-26
 
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-26
 
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-26
+
 # force a dependency on sqlalchemy; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/sqlalchemy-26
 
+# force a dependency on stevedore; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/stevedore-26
+
 # force a dependency on webob; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/webob-26
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,76 @@
+--- neutron-2013.2.3/neutron/agent/dhcp_agent.py.orig	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/agent/dhcp_agent.py	2014-05-29 14:07:12.811163548 -0700
+@@ -3,6 +3,8 @@
+ # Copyright 2012 OpenStack Foundation
+ # All Rights Reserved.
+ #
++# Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
++#
+ #    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ #    not use this file except in compliance with the License. You may obtain
+ #    a copy of the License at
+@@ -16,15 +18,14 @@
+ #    under the License.
+ 
+ import os
++import platform
+ 
+ import eventlet
+ import netaddr
+ from oslo.config import cfg
+ 
+ from neutron.agent.common import config
+-from neutron.agent.linux import dhcp
+ from neutron.agent.linux import external_process
+-from neutron.agent.linux import interface
+ from neutron.agent import rpc as agent_rpc
+ from neutron.common import constants
+ from neutron.common import exceptions
+@@ -42,6 +43,9 @@
+ from neutron import service as neutron_service
+ 
+ LOG = logging.getLogger(__name__)
++# dynamic module import
++dhcp = None
++interface = None
+ 
+ 
+ class DhcpAgent(manager.Manager):
+@@ -602,6 +606,16 @@
+     cfg.CONF.register_opts(DhcpAgent.OPTS)
+     config.register_agent_state_opts_helper(cfg.CONF)
+     config.register_root_helper(cfg.CONF)
++    global dhcp
++    global interface
++    if platform.system() == "SunOS":
++        dhcp = importutils.import_module("neutron.agent.solaris.dhcp")
++        interface = \
++            importutils.import_module("neutron.agent.solaris.interface")
++    else:
++        dhcp = importutils.import_module("neutron.agent.linux.dhcp")
++        interface = \
++            importutils.import_module("neutron.agent.linux.interface")
+     cfg.CONF.register_opts(dhcp.OPTS)
+     cfg.CONF.register_opts(interface.OPTS)
+ 
+--- neutron-2013.2.3/neutron/db/dhcp_rpc_base.py.orig	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/db/dhcp_rpc_base.py	2014-05-29 14:07:45.666828914 -0700
+@@ -131,11 +131,13 @@
+                 for fixed_ip in port['fixed_ips']:
+                     if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
+                         dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
+-                port['fixed_ips'].extend(
+-                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
+-
+-                retval = plugin.update_port(context, port['id'],
+-                                            dict(port=port))
++                if dhcp_enabled_subnet_ids:
++                    port['fixed_ips'].extend(
++                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
++                    retval = plugin.update_port(context, port['id'],
++                                                dict(port=port))
++                else:
++                    retval = port
+ 
+         except n_exc.NotFound as e:
+             LOG.warning(e)
--- a/components/openstack/neutron/patches/01-neutron-no-pyudev.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-In-house patch to Neutron's setup.py file.  It contains requirement
-code which looks for the pyudev module.  Since this module is Linux
-specific, we need to exclude it.  Patch has not yet been submitted
-upstream.
-
---- quantum-2013.1.3/setup.py.orig   2013-09-19 10:57:28.784977728 -0600
-+++ quantum-2013.1.3/setup.py    2013-09-19 10:57:48.050712916 -0600
-@@ -24,6 +24,9 @@
-     requires.append('pywin32')
-     requires.append('wmi')
-     requires.remove('pyudev')
-+elif sys.platform == 'sunos5':
-+    requires.remove('pyudev')
-+    requires.remove('amqplib>=0.6.1')
-
- Name = 'quantum'
- Url = "https://launchpad.net/quantum"
--- a/components/openstack/neutron/patches/02-dhcp-agent-add-solaris.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,251 +0,0 @@
-In-house patch to split out the Linux specific implementation details of the
-Neutron DHCP agent into a separate module. This allows for a common
-main DHCP agent module on different platforms.  This patch has not yet
-been submitted upstream.
-
---- quantum-2013.1.4/quantum/agent/dhcp_agent.py.~1~	2013-10-17 11:24:18.000000000 -0700
-+++ quantum-2013.1.4/quantum/agent/dhcp_agent.py	2014-03-13 01:37:26.539103862 -0700
-@@ -17,7 +17,6 @@
- 
- import os
- import socket
--import uuid
- 
- import eventlet
- import netaddr
-@@ -26,11 +25,8 @@
- from quantum.agent.common import config
- from quantum.agent.linux import dhcp
- from quantum.agent.linux import external_process
--from quantum.agent.linux import interface
--from quantum.agent.linux import ip_lib
- from quantum.agent import rpc as agent_rpc
- from quantum.common import constants
--from quantum.common import exceptions
- from quantum.common import topics
- from quantum import context
- from quantum import manager
-@@ -58,6 +54,9 @@
-         cfg.StrOpt('dhcp_driver',
-                    default='quantum.agent.linux.dhcp.Dnsmasq',
-                    help=_("The driver used to manage the DHCP server.")),
-+        cfg.StrOpt('devicemanager',
-+                   default='quantum.agent.linux.device.DeviceManager',
-+                   help=_("The driver to manage OS specific devices.")),
-         cfg.BoolOpt('use_namespaces', default=True,
-                     help=_("Allow overlapping IP.")),
-         cfg.BoolOpt('enable_isolated_metadata', default=False,
-@@ -77,7 +76,9 @@
-         self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
-         ctx = context.get_admin_context_without_session()
-         self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx)
--        self.device_manager = DeviceManager(self.conf, self.plugin_rpc)
-+        self.device_manager = \
-+            importutils.import_object(self.conf.devicemanager,
-+                                      self.conf, self.plugin_rpc)
-         self.lease_relay = DhcpLeaseRelay(self.update_lease)
- 
-         self.dhcp_version = self.dhcp_driver_cls.check_version()
-@@ -494,167 +495,6 @@
-                 'ports': num_ports}
- 
- 
--class DeviceManager(object):
--    OPTS = [
--        cfg.StrOpt('interface_driver',
--                   help=_("The driver used to manage the virtual interface."))
--    ]
--
--    def __init__(self, conf, plugin):
--        self.conf = conf
--        self.root_helper = config.get_root_helper(conf)
--        self.plugin = plugin
--        if not conf.interface_driver:
--            raise SystemExit(_('You must specify an interface driver'))
--        try:
--            self.driver = importutils.import_object(conf.interface_driver,
--                                                    conf)
--        except:
--            msg = _("Error importing interface driver "
--                    "'%s'") % conf.interface_driver
--            raise SystemExit(msg)
--
--    def get_interface_name(self, network, port=None):
--        """Return interface(device) name for use by the DHCP process."""
--        if not port:
--            device_id = self.get_device_id(network)
--            port = self.plugin.get_dhcp_port(network.id, device_id)
--        return self.driver.get_device_name(port)
--
--    def get_device_id(self, network):
--        """Return a unique DHCP device ID for this host on the network."""
--        # There could be more than one dhcp server per network, so create
--        # a device id that combines host and network ids
--
--        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
--        return 'dhcp%s-%s' % (host_uuid, network.id)
--
--    def _get_device(self, network):
--        """Return DHCP ip_lib device for this host on the network."""
--        device_id = self.get_device_id(network)
--        port = self.plugin.get_dhcp_port(network.id, device_id)
--        interface_name = self.get_interface_name(network, port)
--        namespace = NS_PREFIX + network.id
--        return ip_lib.IPDevice(interface_name,
--                               self.root_helper,
--                               namespace)
--
--    def _set_default_route(self, network):
--        """Sets the default gateway for this dhcp namespace.
--
--        This method is idempotent and will only adjust the route if adjusting
--        it would change it from what it already is.  This makes it safe to call
--        and avoids unnecessary perturbation of the system.
--        """
--        device = self._get_device(network)
--        gateway = device.route.get_gateway()
--
--        for subnet in network.subnets:
--            skip_subnet = (
--                subnet.ip_version != 4
--                or not subnet.enable_dhcp
--                or subnet.gateway_ip is None)
--
--            if skip_subnet:
--                continue
--
--            if gateway != subnet.gateway_ip:
--                m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
--                LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
--
--                device.route.add_gateway(subnet.gateway_ip)
--
--            return
--
--        # No subnets on the network have a valid gateway.  Clean it up to avoid
--        # confusion from seeing an invalid gateway here.
--        if gateway is not None:
--            msg = _('Removing gateway for dhcp netns on net %s')
--            LOG.debug(msg, network.id)
--
--            device.route.delete_gateway(gateway)
--
--    def setup(self, network, reuse_existing=False):
--        """Create and initialize a device for network's DHCP on this host."""
--        device_id = self.get_device_id(network)
--        port = self.plugin.get_dhcp_port(network.id, device_id)
--
--        interface_name = self.get_interface_name(network, port)
--
--        if self.conf.use_namespaces:
--            namespace = NS_PREFIX + network.id
--        else:
--            namespace = None
--
--        if ip_lib.device_exists(interface_name,
--                                self.root_helper,
--                                namespace):
--            if not reuse_existing:
--                raise exceptions.PreexistingDeviceFailure(
--                    dev_name=interface_name)
--
--            LOG.debug(_('Reusing existing device: %s.'), interface_name)
--        else:
--            self.driver.plug(network.id,
--                             port.id,
--                             interface_name,
--                             port.mac_address,
--                             namespace=namespace)
--        ip_cidrs = []
--        for fixed_ip in port.fixed_ips:
--            subnet = fixed_ip.subnet
--            net = netaddr.IPNetwork(subnet.cidr)
--            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
--            ip_cidrs.append(ip_cidr)
--
--        if (self.conf.enable_isolated_metadata and
--            self.conf.use_namespaces):
--            ip_cidrs.append(METADATA_DEFAULT_IP)
--
--        self.driver.init_l3(interface_name, ip_cidrs,
--                            namespace=namespace)
--
--        # ensure that the dhcp interface is first in the list
--        if namespace is None:
--            device = ip_lib.IPDevice(interface_name,
--                                     self.root_helper)
--            device.route.pullup_route(interface_name)
--
--        if self.conf.enable_metadata_network:
--            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP)
--            metadata_subnets = [s for s in network.subnets if
--                                netaddr.IPNetwork(s.cidr) in meta_cidr]
--            if metadata_subnets:
--                # Add a gateway so that packets can be routed back to VMs
--                device = ip_lib.IPDevice(interface_name,
--                                         self.root_helper,
--                                         namespace)
--                # Only 1 subnet on metadata access network
--                gateway_ip = metadata_subnets[0].gateway_ip
--                device.route.add_gateway(gateway_ip)
--        elif self.conf.use_namespaces:
--            self._set_default_route(network)
--
--        return interface_name
--
--    def update(self, network):
--        """Update device settings for the network's DHCP on this host."""
--        if self.conf.use_namespaces and not self.conf.enable_metadata_network:
--            self._set_default_route(network)
--
--    def destroy(self, network, device_name):
--        """Destroy the device used for the network's DHCP on this host."""
--        if self.conf.use_namespaces:
--            namespace = NS_PREFIX + network.id
--        else:
--            namespace = None
--
--        self.driver.unplug(device_name, namespace=namespace)
--
--        self.plugin.release_dhcp_port(network.id,
--                                      self.get_device_id(network))
--
--
- class DictModel(object):
-     """Convert dict into an object that provides attribute access to values."""
-     def __init__(self, d):
-@@ -783,11 +623,11 @@
-     cfg.CONF.register_opts(DhcpAgent.OPTS)
-     config.register_agent_state_opts_helper(cfg.CONF)
-     config.register_root_helper(cfg.CONF)
--    cfg.CONF.register_opts(DeviceManager.OPTS)
-     cfg.CONF.register_opts(DhcpLeaseRelay.OPTS)
-     cfg.CONF.register_opts(dhcp.OPTS)
--    cfg.CONF.register_opts(interface.OPTS)
-     cfg.CONF(project='quantum')
-+    if not cfg.CONF.devicemanager:
-+        raise SystemExit(_('You must specify a devicemanager'))
-     config.setup_logging(cfg.CONF)
-     server = quantum_service.Service.create(
-         binary='quantum-dhcp-agent',
---- quantum-2013.1.4/quantum/db/dhcp_rpc_base.py.~1~	2013-10-17 11:24:18.000000000 -0700
-+++ quantum-2013.1.4/quantum/db/dhcp_rpc_base.py	2014-03-13 01:27:46.731450967 -0700
-@@ -97,11 +97,14 @@
-                 for fixed_ip in port['fixed_ips']:
-                     if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
-                         dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
--                port['fixed_ips'].extend(
--                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
-+                if dhcp_enabled_subnet_ids:
-+                    port['fixed_ips'].extend(
-+                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
- 
--                retval = plugin.update_port(context, port['id'],
--                                            dict(port=port))
-+                    retval = plugin.update_port(context, port['id'],
-+                                                dict(port=port))
-+                else:
-+                    retval = port
- 
-         except exc.NoResultFound:
-             pass
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/02-l3-agent-add-solaris.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,384 @@
+--- neutron-2013.2.3/neutron/agent/l3_agent.py.orig	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/agent/l3_agent.py	2014-06-02 02:31:42.660207857 -0700
+@@ -2,6 +2,8 @@
+ #
+ # Copyright 2012 Nicira Networks, Inc.  All rights reserved.
+ #
++# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
++#
+ #    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ #    not use this file except in compliance with the License. You may obtain
+ #    a copy of the License at
+@@ -15,8 +17,11 @@
+ #    under the License.
+ #
+ # @author: Dan Wendlandt, Nicira, Inc
++# @author: Girish Moodalbail, Oracle, Inc
+ #
+ 
++import platform
++
+ import eventlet
+ import netaddr
+ from oslo.config import cfg
+@@ -28,6 +33,8 @@
+ from neutron.agent.linux import iptables_manager
+ from neutron.agent.linux import utils
+ from neutron.agent import rpc as agent_rpc
++from neutron.agent.solaris import ipfilters_manager
++from neutron.agent.solaris import net_lib
+ from neutron.common import constants as l3_constants
+ from neutron.common import legacy
+ from neutron.common import topics
+@@ -150,6 +157,7 @@
+             Per rpc versioning rules,  it is backwards compatible.
+     """
+     RPC_API_VERSION = '1.1'
++    RouterInfo = RouterInfo
+ 
+     OPTS = [
+         cfg.StrOpt('external_network_bridge', default='br-ex',
+@@ -292,8 +300,8 @@
+                 raise
+ 
+     def _router_added(self, router_id, router):
+-        ri = RouterInfo(router_id, self.root_helper,
+-                        self.conf.use_namespaces, router)
++        ri = self.RouterInfo(router_id, self.root_helper,
++                             self.conf.use_namespaces, router)
+         self.router_info[router_id] = ri
+         if self.conf.use_namespaces:
+             self._create_router_namespace(ri)
+@@ -380,12 +388,11 @@
+         for p in new_ports:
+             self._set_subnet_info(p)
+             ri.internal_ports.append(p)
+-            self.internal_network_added(ri, p['network_id'], p['id'],
+-                                        p['ip_cidr'], p['mac_address'])
++            self.internal_network_added(ri, p)
+ 
+         for p in old_ports:
+             ri.internal_ports.remove(p)
+-            self.internal_network_removed(ri, p['id'], p['ip_cidr'])
++            self.internal_network_removed(ri, p)
+ 
+         internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports]
+         # TODO(salv-orlando): RouterInfo would be a better place for
+@@ -572,23 +579,24 @@
+             rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
+         return rules
+ 
+-    def internal_network_added(self, ri, network_id, port_id,
+-                               internal_cidr, mac_address):
+-        interface_name = self.get_internal_device_name(port_id)
++    def internal_network_added(self, ri, port):
++        interface_name = self.get_internal_device_name(port['id'])
+         if not ip_lib.device_exists(interface_name,
+                                     root_helper=self.root_helper,
+                                     namespace=ri.ns_name()):
+-            self.driver.plug(network_id, port_id, interface_name, mac_address,
++            self.driver.plug(port['network_id'], port['id'], interface_name,
++                             port['mac_address'],
+                              namespace=ri.ns_name(),
+                              prefix=INTERNAL_DEV_PREFIX)
++        internal_cidr = port['ip_cidr']
+ 
+         self.driver.init_l3(interface_name, [internal_cidr],
+                             namespace=ri.ns_name())
+         ip_address = internal_cidr.split('/')[0]
+         self._send_gratuitous_arp_packet(ri, interface_name, ip_address)
+ 
+-    def internal_network_removed(self, ri, port_id, internal_cidr):
+-        interface_name = self.get_internal_device_name(port_id)
++    def internal_network_removed(self, ri, port):
++        interface_name = self.get_internal_device_name(port['id'])
+         if ip_lib.device_exists(interface_name,
+                                 root_helper=self.root_helper,
+                                 namespace=ri.ns_name()):
+@@ -839,6 +847,8 @@
+     config.register_root_helper(conf)
+     conf.register_opts(interface.OPTS)
+     conf.register_opts(external_process.OPTS)
++    if platform.system() == "SunOS":
++        manager = 'neutron.agent.evs_l3_agent.EVSL3NATAgent'
+     conf(project='neutron')
+     config.setup_logging(conf)
+     legacy.modernize_quantum_config(conf)
+--- neutron-2013.2.3/neutron/db/l3_db.py.~1~	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/db/l3_db.py	2014-06-02 23:20:58.933635798 -0700
+@@ -2,6 +2,8 @@
+ 
+ # Copyright 2012 Nicira Networks, Inc.  All rights reserved.
+ #
++# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
++#
+ #    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ #    not use this file except in compliance with the License. You may obtain
+ #    a copy of the License at
+@@ -15,6 +17,7 @@
+ #    under the License.
+ #
+ # @author: Dan Wendlandt, Nicira, Inc
++# @author: Girish Moodalbail, Oracle, Inc
+ #
+ 
+ import netaddr
+@@ -56,7 +59,7 @@
+     status = sa.Column(sa.String(16))
+     admin_state_up = sa.Column(sa.Boolean)
+     gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id'))
+-    gw_port = orm.relationship(models_v2.Port)
++    gw_port = orm.relationship(models_v2.Port, lazy='joined')
+ 
+ 
+ class FloatingIP(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
+@@ -79,6 +82,8 @@
+     """Mixin class to add L3/NAT router methods to db_plugin_base_v2."""
+ 
+     l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotify
++    Router = Router
++    FloatingIP = FloatingIP
+ 
+     @property
+     def _core_plugin(self):
+@@ -86,7 +91,7 @@
+ 
+     def _get_router(self, context, id):
+         try:
+-            router = self._get_by_id(context, Router, id)
++            router = self._get_by_id(context, self.Router, id)
+         except exc.NoResultFound:
+             raise l3.RouterNotFound(router_id=id)
+         return router
+@@ -122,11 +127,11 @@
+         with context.session.begin(subtransactions=True):
+             # pre-generate id so it will be available when
+             # configuring external gw port
+-            router_db = Router(id=uuidutils.generate_uuid(),
+-                               tenant_id=tenant_id,
+-                               name=r['name'],
+-                               admin_state_up=r['admin_state_up'],
+-                               status="ACTIVE")
++            router_db = self.Router(id=uuidutils.generate_uuid(),
++                                    tenant_id=tenant_id,
++                                    name=r['name'],
++                                    admin_state_up=r['admin_state_up'],
++                                    status="ACTIVE")
+             context.session.add(router_db)
+             if has_gw_info:
+                 self._update_router_gw_info(context, router_db['id'], gw_info)
+@@ -237,7 +242,10 @@
+             if vpnservice:
+                 vpnservice.check_router_in_use(context, id)
+ 
+-            # delete any gw port
++            context.session.delete(router)
++
++            # Delete the gw port after the router has been removed to
++            # avoid a constraint violation.
+             device_filter = {'device_id': [id],
+                              'device_owner': [DEVICE_OWNER_ROUTER_GW]}
+             ports = self._core_plugin.get_ports(context.elevated(),
+@@ -246,7 +254,6 @@
+                 self._core_plugin._delete_port(context.elevated(),
+                                                ports[0]['id'])
+ 
+-            context.session.delete(router)
+         self.l3_rpc_notifier.router_deleted(context, id)
+ 
+     def get_router(self, context, id, fields=None):
+@@ -257,7 +264,7 @@
+                     sorts=None, limit=None, marker=None,
+                     page_reverse=False):
+         marker_obj = self._get_marker_obj(context, 'router', limit, marker)
+-        return self._get_collection(context, Router,
++        return self._get_collection(context, self.Router,
+                                     self._make_router_dict,
+                                     filters=filters, fields=fields,
+                                     sorts=sorts,
+@@ -266,14 +273,14 @@
+                                     page_reverse=page_reverse)
+ 
+     def get_routers_count(self, context, filters=None):
+-        return self._get_collection_count(context, Router,
++        return self._get_collection_count(context, self.Router,
+                                           filters=filters)
+ 
+     def _check_for_dup_router_subnet(self, context, router_id,
+                                      network_id, subnet_id, subnet_cidr):
+         try:
+-            rport_qry = context.session.query(models_v2.Port)
+-            rports = rport_qry.filter_by(device_id=router_id)
++            filters = {'device_id': [router_id]}
++            rports = self._core_plugin.get_ports(context, filters)
+             # It's possible these ports are on the same network, but
+             # different subnets.
+             new_ipnet = netaddr.IPNetwork(subnet_cidr)
+@@ -329,8 +336,11 @@
+                                                   port['network_id'],
+                                                   subnet['id'],
+                                                   subnet['cidr'])
+-                port.update({'device_id': router_id,
+-                             'device_owner': DEVICE_OWNER_ROUTER_INTF})
++                self._core_plugin.update_port(context,
++                                              interface_info['port_id'],
++                                              {'device_id': router_id,
++                                               'device_owner':
++                                               DEVICE_OWNER_ROUTER_INTF})
+         elif 'subnet_id' in interface_info:
+             subnet_id = interface_info['subnet_id']
+             subnet = self._core_plugin._get_subnet(context, subnet_id)
+@@ -372,7 +382,7 @@
+                                              subnet_id):
+         subnet_db = self._core_plugin._get_subnet(context, subnet_id)
+         subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
+-        fip_qry = context.session.query(FloatingIP)
++        fip_qry = context.session.query(self.FloatingIP)
+         for fip_db in fip_qry.filter_by(router_id=router_id):
+             if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr:
+                 raise l3.RouterInterfaceInUseByFloatingIP(
+@@ -409,22 +419,21 @@
+             subnet = self._core_plugin._get_subnet(context, subnet_id)
+             found = False
+ 
+-            try:
+-                rport_qry = context.session.query(models_v2.Port)
+-                ports = rport_qry.filter_by(
+-                    device_id=router_id,
+-                    device_owner=DEVICE_OWNER_ROUTER_INTF,
+-                    network_id=subnet['network_id'])
+-
+-                for p in ports:
+-                    if p['fixed_ips'][0]['subnet_id'] == subnet_id:
+-                        port_id = p['id']
+-                        self._core_plugin.delete_port(context, p['id'],
+-                                                      l3_port_check=False)
+-                        found = True
+-                        break
+-            except exc.NoResultFound:
++            filters = {
++                'device_id': router_id,
++                'device_owner': DEVICE_OWNER_ROUTER_INTF,
++                'network_id': subnet['network_id']
++            }
++            ports = self._core_plugin.get_ports(context, filters)
++            if not ports:
+                 pass
++            for p in ports:
++                if p['fixed_ips'][0]['subnet_id'] == subnet_id:
++                    port_id = p['id']
++                    self._core_plugin.delete_port(context, p['id'],
++                                                  l3_port_check=False)
++                    found = True
++                    break
+ 
+             if not found:
+                 raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
+@@ -444,7 +453,7 @@
+ 
+     def _get_floatingip(self, context, id):
+         try:
+-            floatingip = self._get_by_id(context, FloatingIP, id)
++            floatingip = self._get_by_id(context, self.FloatingIP, id)
+         except exc.NoResultFound:
+             raise l3.FloatingIPNotFound(floatingip_id=id)
+         return floatingip
+@@ -470,19 +479,22 @@
+             raise q_exc.BadRequest(resource='floatingip', msg=msg)
+ 
+         # find router interface ports on this network
+-        router_intf_qry = context.session.query(models_v2.Port)
+-        router_intf_ports = router_intf_qry.filter_by(
+-            network_id=internal_port['network_id'],
+-            device_owner=DEVICE_OWNER_ROUTER_INTF)
+-
++        router_intf_filter = {
++            'network_id': internal_port['network_id'],
++            'device_owner': DEVICE_OWNER_ROUTER_INTF
++        }
++        router_intf_ports = self._core_plugin.get_ports(
++            context, filters=router_intf_filter)
+         for intf_p in router_intf_ports:
+             if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id:
+                 router_id = intf_p['device_id']
+-                router_gw_qry = context.session.query(models_v2.Port)
+-                has_gw_port = router_gw_qry.filter_by(
+-                    network_id=external_network_id,
+-                    device_id=router_id,
+-                    device_owner=DEVICE_OWNER_ROUTER_GW).count()
++                filters = {
++                    'network_id': external_network_id,
++                    'device_id': router_id,
++                    'device_owner': DEVICE_OWNER_ROUTER_GW
++                }
++                has_gw_port = self._core_plugin.get_ports_count(
++                    context, filters)
+                 if has_gw_port:
+                     return router_id
+ 
+@@ -556,13 +568,13 @@
+                                                     floating_network_id)
+         # confirm that this router has a floating
+         # ip enabled gateway with support for this floating IP network
+-        try:
+-            port_qry = context.elevated().session.query(models_v2.Port)
+-            port_qry.filter_by(
+-                network_id=floating_network_id,
+-                device_id=router_id,
+-                device_owner=DEVICE_OWNER_ROUTER_GW).one()
+-        except exc.NoResultFound:
++        filters = {
++            'network_id': floating_network_id,
++            'device_id': router_id,
++            'device_owner': DEVICE_OWNER_ROUTER_GW
++        }
++        ports = self._core_plugin.get_ports(context.elevated(), filters)
++        if not ports:
+             raise l3.ExternalGatewayForFloatingIPNotFound(
+                 subnet_id=internal_subnet_id,
+                 port_id=internal_port['id'])
+@@ -580,7 +592,7 @@
+                 context,
+                 fip,
+                 floatingip_db['floating_network_id'])
+-            fip_qry = context.session.query(FloatingIP)
++            fip_qry = context.session.query(self.FloatingIP)
+             try:
+                 fip_qry.filter_by(
+                     fixed_port_id=fip['port_id'],
+@@ -628,7 +640,7 @@
+ 
+             floating_fixed_ip = external_port['fixed_ips'][0]
+             floating_ip_address = floating_fixed_ip['ip_address']
+-            floatingip_db = FloatingIP(
++            floatingip_db = self.FloatingIP(
+                 id=fip_id,
+                 tenant_id=tenant_id,
+                 floating_network_id=fip['floating_network_id'],
+@@ -697,7 +709,7 @@
+                 if key in filters:
+                     filters[val] = filters.pop(key)
+ 
+-        return self._get_collection(context, FloatingIP,
++        return self._get_collection(context, self.FloatingIP,
+                                     self._make_floatingip_dict,
+                                     filters=filters, fields=fields,
+                                     sorts=sorts,
+@@ -706,7 +718,7 @@
+                                     page_reverse=page_reverse)
+ 
+     def get_floatingips_count(self, context, filters=None):
+-        return self._get_collection_count(context, FloatingIP,
++        return self._get_collection_count(context, self.FloatingIP,
+                                           filters=filters)
+ 
+     def prevent_l3_port_deletion(self, context, port_id):
+@@ -737,7 +749,7 @@
+     def disassociate_floatingips(self, context, port_id):
+         with context.session.begin(subtransactions=True):
+             try:
+-                fip_qry = context.session.query(FloatingIP)
++                fip_qry = context.session.query(self.FloatingIP)
+                 floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one()
+                 router_id = floating_ip['router_id']
+                 floating_ip.update({'fixed_port_id': None,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/03-CVE-2014-0187.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,253 @@
+This upstream patch addresses CVE-2014-0187 and is tracked under
+Launchpad bug 1300785. It is addressed in Icehouse 2014.1.2 and Havana
+2013.2.4.
+
+commit 03eed8cd34cd4fb043c11fc99f6bb0b4fbd5728d
+Author: marios <[email protected]>
+Date:   Fri Nov 29 18:23:54 2013 +0200
+
+    Validate CIDR given as ip-prefix in security-group-rule-create
+    
+    There was no validation for the provided ip prefix. This just adds
+    a simple parse using netaddr and explodes with appropriate message.
+    Also makes sure ip prefix _is_ cidr (192.168.1.1-->192.168.1.1/32).
+    
+    Validation occurs at the attribute level (API model) as well as at
+    the db level, where the ethertype is validated against the ip_prefix
+    address type.
+    
+    Unit test cases added - bad prefix, unmasked prefix and incorrect
+    ethertype. Also adds attribute test cases for the added
+    convert_ip_prefix_to_cidr method
+    
+    Closes-Bug: 1255338
+    
+    Conflicts:
+    	neutron/tests/unit/test_security_groups_rpc.py
+    	neutron/tests/unit/test_extension_security_group.py
+    
+    Change-Id: I71fb8c887963a122a5bd8cfdda800026c1cd3954
+    (cherry picked from commit 65aa92b0348b7ab8413f359b00825610cdf66607)
+
+diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py
+index 88fa6e4..80a75d1 100644
+--- a/neutron/common/exceptions.py
++++ b/neutron/common/exceptions.py
+@@ -306,3 +306,7 @@ class NetworkVxlanPortRangeError(object):
+ class DeviceIDNotOwnedByTenant(Conflict):
+     message = _("The following device_id %(device_id)s is not owned by your "
+                 "tenant or matches another tenants router.")
++
++
++class InvalidCIDR(BadRequest):
++    message = _("Invalid CIDR %(input)s given as IP prefix")
+diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py
+index 2a7d2ef..8868546 100644
+--- a/neutron/db/securitygroups_db.py
++++ b/neutron/db/securitygroups_db.py
+@@ -16,6 +16,7 @@
+ #
+ # @author: Aaron Rosen, Nicira, Inc
+ 
++import netaddr
+ import sqlalchemy as sa
+ from sqlalchemy import orm
+ from sqlalchemy.orm import exc
+@@ -331,6 +332,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
+             new_rules.add(rule['security_group_id'])
+ 
+             self._validate_port_range(rule)
++            self._validate_ip_prefix(rule)
+ 
+             if rule['remote_ip_prefix'] and rule['remote_group_id']:
+                 raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix()
+@@ -411,6 +413,24 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
+                 if (i['security_group_rule'] == db_rule):
+                     raise ext_sg.SecurityGroupRuleExists(id=id)
+ 
++    def _validate_ip_prefix(self, rule):
++        """Check that a valid cidr was specified as remote_ip_prefix
++
++        No need to check that it is in fact an IP address as this is already
++        validated by attribute validators.
++        Check that rule ethertype is consistent with remote_ip_prefix ip type.
++        Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32).
++        """
++        input_prefix = rule['remote_ip_prefix']
++        if input_prefix:
++            addr = netaddr.IPNetwork(input_prefix)
++            # set input_prefix to always include the netmask:
++            rule['remote_ip_prefix'] = str(addr)
++            # check consistency of ethertype with addr version
++            if rule['ethertype'] != "IPv%d" % (addr.version):
++                raise ext_sg.SecurityGroupRuleParameterConflict(
++                    ethertype=rule['ethertype'], cidr=input_prefix)
++
+     def get_security_group_rules(self, context, filters=None, fields=None,
+                                  sorts=None, limit=None, marker=None,
+                                  page_reverse=False):
+diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py
+index 85d499a..3d10b5a 100644
+--- a/neutron/extensions/securitygroup.py
++++ b/neutron/extensions/securitygroup.py
+@@ -17,6 +17,7 @@
+ 
+ from abc import ABCMeta
+ from abc import abstractmethod
++import netaddr
+ 
+ from oslo.config import cfg
+ 
+@@ -102,6 +103,10 @@ class SecurityGroupRuleExists(qexception.InUse):
+     message = _("Security group rule already exists. Group id is %(id)s.")
+ 
+ 
++class SecurityGroupRuleParameterConflict(qexception.InvalidInput):
++    message = _("Conflicting value ethertype %(ethertype)s for CIDR %(cidr)s")
++
++
+ def convert_protocol(value):
+     if value is None:
+         return
+@@ -152,6 +157,16 @@ def convert_to_uuid_list_or_none(value_list):
+     return value_list
+ 
+ 
++def convert_ip_prefix_to_cidr(ip_prefix):
++    if not ip_prefix:
++        return
++    try:
++        cidr = netaddr.IPNetwork(ip_prefix)
++        return str(cidr)
++    except (TypeError, netaddr.AddrFormatError):
++        raise qexception.InvalidCIDR(input=ip_prefix)
++
++
+ def _validate_name_not_default(data, valid_values=None):
+     if data == "default":
+         raise SecurityGroupDefaultAlreadyExists()
+@@ -207,7 +222,8 @@ RESOURCE_ATTRIBUTE_MAP = {
+                       'convert_to': convert_ethertype_to_case_insensitive,
+                       'validate': {'type:values': sg_supported_ethertypes}},
+         'remote_ip_prefix': {'allow_post': True, 'allow_put': False,
+-                             'default': None, 'is_visible': True},
++                             'default': None, 'is_visible': True,
++                             'convert_to': convert_ip_prefix_to_cidr},
+         'tenant_id': {'allow_post': True, 'allow_put': False,
+                       'required_by_policy': True,
+                       'is_visible': True},
+diff --git a/neutron/tests/unit/test_extension_security_group.py b/neutron/tests/unit/test_extension_security_group.py
+index d53e140..f0b1636 100644
+--- a/neutron/tests/unit/test_extension_security_group.py
++++ b/neutron/tests/unit/test_extension_security_group.py
+@@ -21,11 +21,13 @@ import webob.exc
+ 
+ from neutron.api.v2 import attributes as attr
+ from neutron.common import constants as const
++from neutron.common import exceptions as n_exc
+ from neutron.common.test_lib import test_config
+ from neutron import context
+ from neutron.db import db_base_plugin_v2
+ from neutron.db import securitygroups_db
+ from neutron.extensions import securitygroup as ext_sg
++from neutron.tests import base
+ from neutron.tests.unit import test_db_plugin
+ 
+ DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_security_group.'
+@@ -413,6 +415,70 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
+             self.deserialize(self.fmt, res)
+             self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
+ 
++    def test_create_security_group_rule_invalid_ip_prefix(self):
++        name = 'webservers'
++        description = 'my webservers'
++        for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
++            with self.security_group(name, description) as sg:
++                sg_id = sg['security_group']['id']
++                remote_ip_prefix = bad_prefix
++                rule = self._build_security_group_rule(
++                    sg_id,
++                    'ingress',
++                    const.PROTO_NAME_TCP,
++                    '22', '22',
++                    remote_ip_prefix)
++                res = self._create_security_group_rule(self.fmt, rule)
++                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
++
++    def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
++        name = 'webservers'
++        description = 'my webservers'
++        test_addr = {'192.168.1.1/24': 'ipv4', '192.168.1.1/24': 'IPv6',
++                     '2001:db8:1234::/48': 'ipv6',
++                     '2001:db8:1234::/48': 'IPv4'}
++        for prefix, ether in test_addr.iteritems():
++            with self.security_group(name, description) as sg:
++                sg_id = sg['security_group']['id']
++                ethertype = ether
++                remote_ip_prefix = prefix
++                rule = self._build_security_group_rule(
++                    sg_id,
++                    'ingress',
++                    const.PROTO_NAME_TCP,
++                    '22', '22',
++                    remote_ip_prefix,
++                    None,
++                    None,
++                    ethertype)
++                res = self._create_security_group_rule(self.fmt, rule)
++                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
++
++    def test_create_security_group_rule_with_unmasked_prefix(self):
++        name = 'webservers'
++        description = 'my webservers'
++        addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
++                'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
++        for ip in addr:
++            with self.security_group(name, description) as sg:
++                sg_id = sg['security_group']['id']
++                ethertype = addr[ip]['ethertype']
++                remote_ip_prefix = ip
++                rule = self._build_security_group_rule(
++                    sg_id,
++                    'ingress',
++                    const.PROTO_NAME_TCP,
++                    '22', '22',
++                    remote_ip_prefix,
++                    None,
++                    None,
++                    ethertype)
++                res = self._create_security_group_rule(self.fmt, rule)
++                self.assertEqual(res.status_int, 201)
++                res_sg = self.deserialize(self.fmt, res)
++                prefix = res_sg['security_group_rule']['remote_ip_prefix']
++                self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
++
+     def test_create_security_group_rule_tcp_protocol_as_number(self):
+         name = 'webservers'
+         description = 'my webservers'
+@@ -1348,5 +1414,25 @@ class TestSecurityGroups(SecurityGroupDBTestCase):
+                 self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
+ 
+ 
++class TestConvertIPPrefixToCIDR(base.BaseTestCase):
++
++    def test_convert_bad_ip_prefix_to_cidr(self):
++        for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
++            self.assertRaises(n_exc.InvalidCIDR,
++                              ext_sg.convert_ip_prefix_to_cidr, val)
++        self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))
++
++    def test_convert_ip_prefix_no_netmask_to_cidr(self):
++        addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
++        for k, v in addr.iteritems():
++            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
++                             '%s/%s' % (k, v))
++
++    def test_convert_ip_prefix_with_netmask_to_cidr(self):
++        addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
++        for addr in addresses:
++            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(addr), addr)
++
++
+ class TestSecurityGroupsXML(TestSecurityGroups):
+     fmt = 'xml'
--- a/components/openstack/neutron/patches/03-l3-agent-add-solaris.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,410 +0,0 @@
-In-house patch to the Neutron L3 agent to allow an alternate implementation (in
-this case, EVS) of L3 and NAT support.  This patch has not yet been
-submitted upstream.
-
---- quantum-2013.1.4/quantum/agent/l3_agent.py.~1~	2013-10-17 11:24:18.000000000 -0700
-+++ quantum-2013.1.4/quantum/agent/l3_agent.py	2014-03-13 01:51:36.761165189 -0700
-@@ -3,6 +3,8 @@
- #
- # Copyright 2012 Nicira Networks, Inc.  All rights reserved.
- #
-+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
-+#
- #    Licensed under the Apache License, Version 2.0 (the "License"); you may
- #    not use this file except in compliance with the License. You may obtain
- #    a copy of the License at
-@@ -16,12 +18,14 @@
- #    under the License.
- #
- # @author: Dan Wendlandt, Nicira, Inc
-+# @author: Girish Moodalbail, Oracle, Inc
- #
- """
- 
- import eventlet
- from eventlet import semaphore
- import netaddr
-+import platform
- from oslo.config import cfg
- 
- from quantum.agent.common import config
-@@ -31,6 +35,8 @@
- from quantum.agent.linux import iptables_manager
- from quantum.agent.linux import utils
- from quantum.agent import rpc as agent_rpc
-+from quantum.agent.solaris import ipfilters_manager
-+from quantum.agent.solaris import net_lib
- from quantum.common import constants as l3_constants
- from quantum.common import topics
- from quantum.common import utils as common_utils
-@@ -113,6 +119,8 @@
- 
- class L3NATAgent(manager.Manager):
- 
-+    RouterInfo = RouterInfo
-+
-     OPTS = [
-         cfg.StrOpt('external_network_bridge', default='br-ex',
-                    help=_("Name of bridge used for external network "
-@@ -223,8 +231,8 @@
-                 raise
- 
-     def _router_added(self, router_id, router):
--        ri = RouterInfo(router_id, self.root_helper,
--                        self.conf.use_namespaces, router)
-+        ri = self.RouterInfo(router_id, self.root_helper,
-+                             self.conf.use_namespaces, router)
-         self.router_info[router_id] = ri
-         if self.conf.use_namespaces:
-             self._create_router_namespace(ri)
-@@ -303,14 +311,11 @@
-         for p in new_ports:
-             self._set_subnet_info(p)
-             ri.internal_ports.append(p)
--            self.internal_network_added(ri, ex_gw_port,
--                                        p['network_id'], p['id'],
--                                        p['ip_cidr'], p['mac_address'])
-+            self.internal_network_added(ri, ex_gw_port, p)
- 
-         for p in old_ports:
-             ri.internal_ports.remove(p)
--            self.internal_network_removed(ri, ex_gw_port, p['id'],
--                                          p['ip_cidr'])
-+            self.internal_network_removed(ri, ex_gw_port, p)
- 
-         internal_cidrs = [p['ip_cidr'] for p in ri.internal_ports]
- 
-@@ -470,16 +475,17 @@
-             rules.extend(self.internal_network_nat_rules(ex_gw_ip, cidr))
-         return rules
- 
--    def internal_network_added(self, ri, ex_gw_port, network_id, port_id,
--                               internal_cidr, mac_address):
--        interface_name = self.get_internal_device_name(port_id)
-+    def internal_network_added(self, ri, ex_gw_port, p):
-+
-+        interface_name = self.get_internal_device_name(p['id'])
-         if not ip_lib.device_exists(interface_name,
-                                     root_helper=self.root_helper,
-                                     namespace=ri.ns_name()):
--            self.driver.plug(network_id, port_id, interface_name, mac_address,
-+            self.driver.plug(p['network_id'], p['id'], interface_name,
-+                             p['mac_address'],
-                              namespace=ri.ns_name(),
-                              prefix=INTERNAL_DEV_PREFIX)
--
-+        internal_cidr = p['ip_cidr']
-         self.driver.init_l3(interface_name, [internal_cidr],
-                             namespace=ri.ns_name())
-         ip_address = internal_cidr.split('/')[0]
-@@ -492,8 +498,8 @@
-                 ri.iptables_manager.ipv4['nat'].add_rule(c, r)
-             ri.iptables_manager.apply()
- 
--    def internal_network_removed(self, ri, ex_gw_port, port_id, internal_cidr):
--        interface_name = self.get_internal_device_name(port_id)
-+    def internal_network_removed(self, ri, ex_gw_port, p):
-+        interface_name = self.get_internal_device_name(p['id'])
-         if ip_lib.device_exists(interface_name,
-                                 root_helper=self.root_helper,
-                                 namespace=ri.ns_name()):
-@@ -503,7 +509,7 @@
-         if ex_gw_port:
-             ex_gw_ip = ex_gw_port['fixed_ips'][0]['ip_address']
-             for c, r in self.internal_network_nat_rules(ex_gw_ip,
--                                                        internal_cidr):
-+                                                        p['ip_cidr']):
-                 ri.iptables_manager.ipv4['nat'].remove_rule(c, r)
-             ri.iptables_manager.apply()
- 
-@@ -742,16 +748,20 @@
- def main():
-     eventlet.monkey_patch()
-     conf = cfg.CONF
--    conf.register_opts(L3NATAgent.OPTS)
-     config.register_agent_state_opts_helper(conf)
-     config.register_root_helper(conf)
-     conf.register_opts(interface.OPTS)
-     conf.register_opts(external_process.OPTS)
-+    if platform.system() == "SunOS":
-+        manager = 'quantum.agent.evs_l3_agent.EVSL3NATAgent'
-+    else:
-+        conf.register_opts(L3NATAgent.OPTS)
-+        manager = 'quantum.agent.l3_agent.L3NATAgentWithStateReport'
-     conf(project='quantum')
-     config.setup_logging(conf)
-     server = quantum_service.Service.create(
-         binary='quantum-l3-agent',
-         topic=topics.L3_AGENT,
-         report_interval=cfg.CONF.AGENT.report_interval,
--        manager='quantum.agent.l3_agent.L3NATAgentWithStateReport')
-+        manager=manager)
-     service.launch(server).wait()
---- quantum-2013.1.4/quantum/db/l3_db.py.~1~	2013-10-17 11:24:18.000000000 -0700
-+++ quantum-2013.1.4/quantum/db/l3_db.py	2014-03-13 01:48:03.082634902 -0700
-@@ -2,6 +2,8 @@
- 
- # Copyright 2012 Nicira Networks, Inc.  All rights reserved.
- #
-+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
-+#
- #    Licensed under the Apache License, Version 2.0 (the "License"); you may
- #    not use this file except in compliance with the License. You may obtain
- #    a copy of the License at
-@@ -15,6 +17,7 @@
- #    under the License.
- #
- # @author: Dan Wendlandt, Nicira, Inc
-+# @author: Girish Moodalbail, Oracle, Inc
- #
- 
- import netaddr
-@@ -82,6 +85,9 @@
- class L3_NAT_db_mixin(l3.RouterPluginBase):
-     """Mixin class to add L3/NAT router methods to db_plugin_base_v2"""
- 
-+    Router = Router
-+    FloatingIP = FloatingIP
-+
-     def _network_model_hook(self, context, original_model, query):
-         query = query.outerjoin(ExternalNetwork,
-                                 (original_model.id ==
-@@ -117,7 +123,7 @@
- 
-     def _get_router(self, context, id):
-         try:
--            router = self._get_by_id(context, Router, id)
-+            router = self._get_by_id(context, self.Router, id)
-         except exc.NoResultFound:
-             raise l3.RouterNotFound(router_id=id)
-         except exc.MultipleResultsFound:
-@@ -148,11 +154,11 @@
-         with context.session.begin(subtransactions=True):
-             # pre-generate id so it will be available when
-             # configuring external gw port
--            router_db = Router(id=uuidutils.generate_uuid(),
--                               tenant_id=tenant_id,
--                               name=r['name'],
--                               admin_state_up=r['admin_state_up'],
--                               status="ACTIVE")
-+            router_db = self.Router(id=uuidutils.generate_uuid(),
-+                                    tenant_id=tenant_id,
-+                                    name=r['name'],
-+                                    admin_state_up=r['admin_state_up'],
-+                                    status="ACTIVE")
-             context.session.add(router_db)
-             if has_gw_info:
-                 self._update_router_gw_info(context, router_db['id'], gw_info)
-@@ -273,7 +279,7 @@
-                     sorts=None, limit=None, marker=None,
-                     page_reverse=False):
-         marker_obj = self._get_marker_obj(context, 'router', limit, marker)
--        return self._get_collection(context, Router,
-+        return self._get_collection(context, self.Router,
-                                     self._make_router_dict,
-                                     filters=filters, fields=fields,
-                                     sorts=sorts,
-@@ -282,15 +288,14 @@
-                                     page_reverse=page_reverse)
- 
-     def get_routers_count(self, context, filters=None):
--        return self._get_collection_count(context, Router,
-+        return self._get_collection_count(context, self.Router,
-                                           filters=filters)
- 
-     def _check_for_dup_router_subnet(self, context, router_id,
-                                      network_id, subnet_id, subnet_cidr):
-         try:
--            rport_qry = context.session.query(models_v2.Port)
--            rports = rport_qry.filter_by(
--                device_id=router_id).all()
-+            rports = self.get_ports(context,
-+                                    filters={'device_id': [router_id]})
-             # its possible these ports on on the same network, but
-             # different subnet
-             new_ipnet = netaddr.IPNetwork(subnet_cidr)
-@@ -348,8 +353,9 @@
-                                               port['network_id'],
-                                               subnet['id'],
-                                               subnet['cidr'])
--            port.update({'device_id': router_id,
--                         'device_owner': DEVICE_OWNER_ROUTER_INTF})
-+            self.update_port(context, interface_info['port_id'],
-+                             {'device_id': router_id,
-+                              'device_owner': DEVICE_OWNER_ROUTER_INTF})
-         elif 'subnet_id' in interface_info:
-             subnet_id = interface_info['subnet_id']
-             subnet = self._get_subnet(context, subnet_id)
-@@ -394,7 +400,7 @@
-                                              subnet_id):
-         subnet_db = self._get_subnet(context, subnet_id)
-         subnet_cidr = netaddr.IPNetwork(subnet_db['cidr'])
--        fip_qry = context.session.query(FloatingIP)
-+        fip_qry = context.session.query(self.FloatingIP)
-         for fip_db in fip_qry.filter_by(router_id=router_id):
-             if netaddr.IPAddress(fip_db['fixed_ip_address']) in subnet_cidr:
-                 raise l3.RouterInterfaceInUseByFloatingIP(
-@@ -440,22 +446,19 @@
-             subnet = self._get_subnet(context, subnet_id)
-             found = False
- 
--            try:
--                rport_qry = context.session.query(models_v2.Port)
--                ports = rport_qry.filter_by(
--                    device_id=router_id,
--                    device_owner=DEVICE_OWNER_ROUTER_INTF,
--                    network_id=subnet['network_id']).all()
--
--                for p in ports:
--                    if p['fixed_ips'][0]['subnet_id'] == subnet_id:
--                        port_id = p['id']
--                        _network_id = p['network_id']
--                        self.delete_port(context, p['id'], l3_port_check=False)
--                        found = True
--                        break
--            except exc.NoResultFound:
--                pass
-+            filters = {
-+                'device_id': router_id,
-+                'device_owner': DEVICE_OWNER_ROUTER_INTF,
-+                'network_id': subnet['network_id']
-+            }
-+            ports = self.get_ports(context, filters)
-+            for p in ports:
-+                if p['fixed_ips'][0]['subnet_id'] == subnet_id:
-+                    port_id = p['id']
-+                    _network_id = p['network_id']
-+                    self.delete_port(context, p['id'], l3_port_check=False)
-+                    found = True
-+                    break
- 
-             if not found:
-                 raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
-@@ -477,7 +480,7 @@
- 
-     def _get_floatingip(self, context, id):
-         try:
--            floatingip = self._get_by_id(context, FloatingIP, id)
-+            floatingip = self._get_by_id(context, self.FloatingIP, id)
-         except exc.NoResultFound:
-             raise l3.FloatingIPNotFound(floatingip_id=id)
-         except exc.MultipleResultsFound:
-@@ -505,19 +508,21 @@
-             raise q_exc.BadRequest(resource='floatingip', msg=msg)
- 
-         # find router interface ports on this network
--        router_intf_qry = context.session.query(models_v2.Port)
--        router_intf_ports = router_intf_qry.filter_by(
--            network_id=internal_port['network_id'],
--            device_owner=DEVICE_OWNER_ROUTER_INTF)
-+        router_intf_filter = {
-+            'network_id': internal_port['network_id'],
-+            'device_owner': DEVICE_OWNER_ROUTER_INTF
-+        }
-+        router_intf_ports = self.get_ports(context, filters=router_intf_filter)
- 
-         for intf_p in router_intf_ports:
-             if intf_p['fixed_ips'][0]['subnet_id'] == internal_subnet_id:
-                 router_id = intf_p['device_id']
--                router_gw_qry = context.session.query(models_v2.Port)
--                has_gw_port = router_gw_qry.filter_by(
--                    network_id=external_network_id,
--                    device_id=router_id,
--                    device_owner=DEVICE_OWNER_ROUTER_GW).count()
-+                filters = {
-+                    'network_id': external_network_id,
-+                    'device_id': router_id,
-+                    'device_owner': DEVICE_OWNER_ROUTER_GW
-+                }
-+                has_gw_port = self.get_ports_count(context, filters)
-                 if has_gw_port:
-                     return router_id
- 
-@@ -578,13 +583,13 @@
-                                                     floating_network_id)
-         # confirm that this router has a floating
-         # ip enabled gateway with support for this floating IP network
--        try:
--            port_qry = context.elevated().session.query(models_v2.Port)
--            ports = port_qry.filter_by(
--                network_id=floating_network_id,
--                device_id=router_id,
--                device_owner=DEVICE_OWNER_ROUTER_GW).one()
--        except exc.NoResultFound:
-+        filters = {
-+            'network_id': floating_network_id,
-+            'device_id': router_id,
-+            'device_owner': DEVICE_OWNER_ROUTER_GW
-+        }
-+        ports = self.get_ports(context.elevated(), filters)
-+        if not ports:
-             raise l3.ExternalGatewayForFloatingIPNotFound(
-                 subnet_id=internal_subnet_id,
-                 port_id=internal_port['id'])
-@@ -602,7 +607,7 @@
-                 context,
-                 fip,
-                 floatingip_db['floating_network_id'])
--            fip_qry = context.session.query(FloatingIP)
-+            fip_qry = context.session.query(self.FloatingIP)
-             try:
-                 fip_qry.filter_by(
-                     fixed_port_id=fip['port_id'],
-@@ -653,7 +658,7 @@
- 
-                 floating_fixed_ip = external_port['fixed_ips'][0]
-                 floating_ip_address = floating_fixed_ip['ip_address']
--                floatingip_db = FloatingIP(
-+                floatingip_db = self.FloatingIP(
-                     id=fip_id,
-                     tenant_id=tenant_id,
-                     floating_network_id=fip['floating_network_id'],
-@@ -731,7 +736,7 @@
-                 if key in filters:
-                     filters[val] = filters.pop(key)
- 
--        return self._get_collection(context, FloatingIP,
-+        return self._get_collection(context, self.FloatingIP,
-                                     self._make_floatingip_dict,
-                                     filters=filters, fields=fields,
-                                     sorts=sorts,
-@@ -740,7 +745,7 @@
-                                     page_reverse=page_reverse)
- 
-     def get_floatingips_count(self, context, filters=None):
--        return self._get_collection_count(context, FloatingIP,
-+        return self._get_collection_count(context, self.FloatingIP,
-                                           filters=filters)
- 
-     def prevent_l3_port_deletion(self, context, port_id):
-@@ -770,7 +775,7 @@
-     def disassociate_floatingips(self, context, port_id):
-         with context.session.begin(subtransactions=True):
-             try:
--                fip_qry = context.session.query(FloatingIP)
-+                fip_qry = context.session.query(self.FloatingIP)
-                 floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one()
-                 router_id = floating_ip['router_id']
-                 floating_ip.update({'fixed_port_id': None,
-@@ -874,14 +879,17 @@
-                            if it is None, all of routers will be queried.
-         @return: a list of dicted routers with dicted gw_port populated if any
-         """
--        router_query = context.session.query(Router)
-+        router_query = context.session.query(self.Router)
-         if router_ids:
-             if 1 == len(router_ids):
--                router_query = router_query.filter(Router.id == router_ids[0])
-+                router_query = \
-+                    router_query.filter(self.Router.id == router_ids[0])
-             else:
--                router_query = router_query.filter(Router.id.in_(router_ids))
-+                router_query = \
-+                    router_query.filter(self.Router.id.in_(router_ids))
-         if active is not None:
--            router_query = router_query.filter(Router.admin_state_up == active)
-+            router_query = \
-+                router_query.filter(self.Router.admin_state_up == active)
-         routers = router_query.all()
-         gw_port_ids = []
-         if not routers:
--- a/components/openstack/neutron/patches/04-CVE-2013-6419.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,219 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.1, Icehouse
-
-commit 67d7d9d617c64f41bb899c4ce525a66c84ccf071
-Author: Aaron Rosen <[email protected]>
-Date:   Mon Oct 7 15:34:38 2013 -0700
-
-    Add X-Tenant-ID to metadata request
-    
-    Previously, one could update a port's device_id to be that of
-    another tenant's instance_id and then be able to retrieve that
-    instance's metadata. In order to prevent this X-Tenant-ID is now
-    passed in the metadata request to nova and nova then checks that
-    X-Tenant-ID also matches the tenant_id for the instance against it's
-    database to ensure it's not being spoofed.
-    
-    DocImpact - When upgrading OpenStack nova and neturon, neutron
-                should be updated first (and neutron-metadata-agent
-                restarted before nova is upgraded) in order to minimize
-                downtime. This is because there is also a patch to nova
-                which has checks X-Tenant-ID against it's database
-                therefore neutron-metadata-agent needs to pass that
-                before nova is upgraded for metadata to work.
-    
-    Change-Id: I2b8fa2f561a7f2914608e68133abf15efa95015a
-    Closes-Bug: #1235450
-
-diff --git a/quantum/agent/metadata/agent.py b/quantum/agent/metadata/agent.py
-index 7bdfae8..e1abe93 100644
---- a/quantum/agent/metadata/agent.py
-+++ b/quantum/agent/metadata/agent.py
-@@ -83,9 +83,9 @@ class MetadataProxyHandler(object):
-         try:
-             LOG.debug(_("Request: %s"), req)
- 
--            instance_id = self._get_instance_id(req)
-+            instance_id, tenant_id = self._get_instance_and_tenant_id(req)
-             if instance_id:
--                return self._proxy_request(instance_id, req)
-+                return self._proxy_request(instance_id, tenant_id, req)
-             else:
-                 return webob.exc.HTTPNotFound()
- 
-@@ -95,7 +95,7 @@ class MetadataProxyHandler(object):
-                     'Please try your request again.')
-             return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
- 
--    def _get_instance_id(self, req):
-+    def _get_instance_and_tenant_id(self, req):
-         qclient = self._get_quantum_client()
- 
-         remote_address = req.headers.get('X-Forwarded-For')
-@@ -116,12 +116,14 @@ class MetadataProxyHandler(object):
-             fixed_ips=['ip_address=%s' % remote_address])['ports']
- 
-         if len(ports) == 1:
--            return ports[0]['device_id']
-+            return ports[0]['device_id'], ports[0]['tenant_id']
-+        return None, None
- 
--    def _proxy_request(self, instance_id, req):
-+    def _proxy_request(self, instance_id, tenant_id, req):
-         headers = {
-             'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
-             'X-Instance-ID': instance_id,
-+            'X-Tenant-ID': tenant_id,
-             'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
-         }
- 
-diff --git a/quantum/tests/unit/test_metadata_agent.py b/quantum/tests/unit/test_metadata_agent.py
-index c81a237..0e74bcb 100644
---- a/quantum/tests/unit/test_metadata_agent.py
-+++ b/quantum/tests/unit/test_metadata_agent.py
-@@ -54,8 +54,9 @@ class TestMetadataProxyHandler(base.BaseTestCase):
- 
-     def test_call(self):
-         req = mock.Mock()
--        with mock.patch.object(self.handler, '_get_instance_id') as get_id:
--            get_id.return_value = 'id'
-+        with mock.patch.object(self.handler,
-+                               '_get_instance_and_tenant_id') as get_ids:
-+            get_ids.return_value = ('instance_id', 'tenant_id')
-             with mock.patch.object(self.handler, '_proxy_request') as proxy:
-                 proxy.return_value = 'value'
- 
-@@ -64,21 +65,23 @@ class TestMetadataProxyHandler(base.BaseTestCase):
- 
-     def test_call_no_instance_match(self):
-         req = mock.Mock()
--        with mock.patch.object(self.handler, '_get_instance_id') as get_id:
--            get_id.return_value = None
-+        with mock.patch.object(self.handler,
-+                               '_get_instance_and_tenant_id') as get_ids:
-+            get_ids.return_value = None, None
-             retval = self.handler(req)
-             self.assertIsInstance(retval, webob.exc.HTTPNotFound)
- 
-     def test_call_internal_server_error(self):
-         req = mock.Mock()
--        with mock.patch.object(self.handler, '_get_instance_id') as get_id:
--            get_id.side_effect = Exception
-+        with mock.patch.object(self.handler,
-+                               '_get_instance_and_tenant_id') as get_ids:
-+            get_ids.side_effect = Exception
-             retval = self.handler(req)
-             self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
-             self.assertEqual(len(self.log.mock_calls), 2)
- 
--    def _get_instance_id_helper(self, headers, list_ports_retval,
--                                networks=None, router_id=None):
-+    def _get_instance_and_tenant_id_helper(self, headers, list_ports_retval,
-+                                           networks=None, router_id=None):
-         headers['X-Forwarded-For'] = '192.168.1.1'
-         req = mock.Mock(headers=headers)
- 
-@@ -86,8 +89,7 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-             return {'ports': list_ports_retval.pop(0)}
- 
-         self.qclient.return_value.list_ports.side_effect = mock_list_ports
--        retval = self.handler._get_instance_id(req)
--
-+        instance_id, tenant_id = self.handler._get_instance_and_tenant_id(req)
-         expected = [
-             mock.call(
-                 username=FakeConf.admin_user,
-@@ -114,7 +116,7 @@ class TestMetadataProxyHandler(base.BaseTestCase):
- 
-         self.qclient.assert_has_calls(expected)
- 
--        return retval
-+        return (instance_id, tenant_id)
- 
-     def test_get_instance_id_router_id(self):
-         router_id = 'the_id'
-@@ -125,13 +127,14 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-         networks = ['net1', 'net2']
-         ports = [
-             [{'network_id': 'net1'}, {'network_id': 'net2'}],
--            [{'device_id': 'device_id'}]
-+            [{'device_id': 'device_id', 'tenant_id': 'tenant_id'}]
-         ]
- 
-         self.assertEqual(
--            self._get_instance_id_helper(headers, ports, networks=networks,
--                                         router_id=router_id),
--            'device_id'
-+            self._get_instance_and_tenant_id_helper(headers, ports,
-+                                                    networks=networks,
-+                                                    router_id=router_id),
-+            ('device_id', 'tenant_id')
-         )
- 
-     def test_get_instance_id_router_id_no_match(self):
-@@ -145,10 +148,11 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-             [{'network_id': 'net1'}, {'network_id': 'net2'}],
-             []
-         ]
--
--        self.assertIsNone(
--            self._get_instance_id_helper(headers, ports, networks=networks,
--                                         router_id=router_id),
-+        self.assertEqual(
-+            self._get_instance_and_tenant_id_helper(headers, ports,
-+                                                    networks=networks,
-+                                                    router_id=router_id),
-+            (None, None)
-         )
- 
-     def test_get_instance_id_network_id(self):
-@@ -158,12 +162,14 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-         }
- 
-         ports = [
--            [{'device_id': 'device_id'}]
-+            [{'device_id': 'device_id',
-+              'tenant_id': 'tenant_id'}]
-         ]
- 
-         self.assertEqual(
--            self._get_instance_id_helper(headers, ports, networks=['the_id']),
--            'device_id'
-+            self._get_instance_and_tenant_id_helper(headers, ports,
-+                                                    networks=['the_id']),
-+            ('device_id', 'tenant_id')
-         )
- 
-     def test_get_instance_id_network_id_no_match(self):
-@@ -174,8 +180,10 @@ class TestMetadataProxyHandler(base.BaseTestCase):
- 
-         ports = [[]]
- 
--        self.assertIsNone(
--            self._get_instance_id_helper(headers, ports, networks=['the_id'])
-+        self.assertEqual(
-+            self._get_instance_and_tenant_id_helper(headers, ports,
-+                                                    networks=['the_id']),
-+            (None, None)
-         )
- 
-     def _proxy_request_test_helper(self, response_code=200, method='GET'):
-@@ -190,7 +198,8 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-             with mock.patch('httplib2.Http') as mock_http:
-                 mock_http.return_value.request.return_value = (resp, 'content')
- 
--                retval = self.handler._proxy_request('the_id', req)
-+                retval = self.handler._proxy_request('the_id', 'tenant_id',
-+                                                     req)
-                 mock_http.assert_has_calls([
-                     mock.call().request(
-                         'http://9.9.9.9:8775/the_path',
-@@ -198,7 +207,8 @@ class TestMetadataProxyHandler(base.BaseTestCase):
-                         headers={
-                             'X-Forwarded-For': '8.8.8.8',
-                             'X-Instance-ID-Signature': 'signed',
--                            'X-Instance-ID': 'the_id'
-+                            'X-Instance-ID': 'the_id',
-+                            'X-Tenant-ID': 'tenant_id'
-                         },
-                         body=body
-                     )]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/04-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,48 @@
+In-house patch to remove unnecessary dependencies from Neutron's
+requirements files. The specific reasons are as follows:
+
+amqplib		No longer applicable
+		(upstream commit 39ffcb0dd75d9dc5d8a249f2974a67f0def5204b)
+
+argparse	Not applicable to Solaris (Brocade specific)
+
+novaclient	No longer applicable
+
+pyudev		Not applicable to Solaris (Linux bridging specific)
+
+--- neutron-2013.2.3/neutron.egg-info/requires.txt.orig	2014-04-03 11:49:53.000000000 -0700
++++ neutron-2013.2.3/neutron.egg-info/requires.txt	2014-05-24 23:12:26.486505631 -0700
+@@ -1,9 +1,7 @@
+-pyudev
+ pbr>=0.5.21,<1.0
+ Paste
+ PasteDeploy>=1.5.0
+ Routes>=1.12.3
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+ Babel>=1.3
+ eventlet>=0.13.0
+@@ -23,4 +21,3 @@
+ six>=1.4.1
+ stevedore>=0.10
+ oslo.config>=1.2.0
+-python-novaclient>=2.15.0
+\ No newline at end of file
+
+--- neutron-2013.2.3/requirements.txt.orig	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/requirements.txt	2014-05-24 23:12:57.116696613 -0700
+@@ -3,9 +3,7 @@
+ Paste
+ PasteDeploy>=1.5.0
+ Routes>=1.12.3
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+-argparse
+ Babel>=1.3
+ eventlet>=0.13.0
+ greenlet>=0.3.2
+@@ -25,4 +23,3 @@
+ stevedore>=0.10
+ oslo.config>=1.2.0
+ 
+-python-novaclient>=2.15.0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/05-launchpad-1210121.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,102 @@
+This proposed upstream patch addresses Launchpad bug 1210121. Although
+it's been addressed in Icehouse 2014.1, the patch below is still not
+yet released for Havana.
+
+From dbbc8338770d2c340903e006dcb3c90c4aad7b29 Mon Sep 17 00:00:00 2001
+From: armando-migliaccio <[email protected]>
+Date: Thu, 13 Mar 2014 12:40:01 -0700
+Subject: [PATCH] Kill 'Skipping unknown group key: firewall_driver' log trace
+
+This is done by trying to import the option first. If this
+does not work, emit a warning instead as in most cases this is
+harmless for a number of reasons: a) the service might not
+even need the opt; b) if things do break down the line, we'll
+see bigger traces; c) it's not gonna be long for this legacy
+quantum/neutron stuff to be removed altogether.
+
+Closes-bug: 1210121
+
+Change-Id: I34917da9cb6117ee1d42140621c742f503279b6b
+(cherry picked from commit b5ee49623982530bfb3c3fe2eefb9d8ddb6353bc)
+---
+ neutron/common/legacy.py          |   20 ++++++++++++++++----
+ neutron/quota.py                  |    2 +-
+ neutron/tests/unit/test_legacy.py |    2 +-
+ 3 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/neutron/common/legacy.py b/neutron/common/legacy.py
+index cf37281..d387aa2 100644
+--- a/neutron/common/legacy.py
++++ b/neutron/common/legacy.py
+@@ -17,6 +17,8 @@
+ 
+ # @author Mark McClain (DreamHost)
+ 
++from oslo.config import cfg
++
+ from neutron.openstack.common import log as logging
+ 
+ LOG = logging.getLogger(__name__)
+@@ -45,11 +47,19 @@ def override_config(config, config_keys=None):
+         group = None
+         if not isinstance(key, basestring):
+             try:
+-                group, key = key
++                group, key, module_str = key
+                 old_value = getattr(getattr(config, group), key, None)
+             except AttributeError:
+-                LOG.error(_('Skipping unknown group key: %s'), key)
+-                continue
++                try:
++                    config.import_opt(key, module_str, group)
++                    old_value = getattr(getattr(config, group), key, None)
++                except (cfg.NoSuchOptError,
++                        cfg.NoSuchGroupError,
++                        AttributeError):
++                    LOG.warn(_('Key %(key)s in group %(group)s is unknown. '
++                               'It may not be defined or needed by this '
++                               'service.') % {'key': key, 'group': group})
++                    continue
+         else:
+             old_value = getattr(config, key, None)
+         if not old_value:
+@@ -77,7 +87,9 @@ def modernize_quantum_config(config):
+         'router_scheduler_driver',
+         'rpc_backend',
+         'service_plugins',
+-        ('SECURITYGROUP', 'firewall_driver'),
++        ('SECURITYGROUP',
++         'firewall_driver',
++         'neutron.agent.securitygroups_rpc'),
+     ]
+ 
+     override_config(config, config_keys)
+diff --git a/neutron/quota.py b/neutron/quota.py
+index 4111078..105be06 100644
+--- a/neutron/quota.py
++++ b/neutron/quota.py
+@@ -58,7 +58,7 @@ quota_opts = [
+ ]
+ # Register the configuration options
+ cfg.CONF.register_opts(quota_opts, 'QUOTAS')
+-legacy.override_config(cfg.CONF, [('QUOTAS', 'quota_driver')])
++legacy.override_config(cfg.CONF, [('QUOTAS', 'quota_driver', 'neutron.quota')])
+ 
+ 
+ class ConfDriver(object):
+diff --git a/neutron/tests/unit/test_legacy.py b/neutron/tests/unit/test_legacy.py
+index 539f7de..6723d06 100644
+--- a/neutron/tests/unit/test_legacy.py
++++ b/neutron/tests/unit/test_legacy.py
+@@ -71,7 +71,7 @@ class TestLegacyConfigOverride(base.BaseTestCase):
+ 
+     def test_override_config_group_key(self):
+         self.cfg(args=['--bar-baz=quantum'])
+-        legacy.override_config(self.cfg, [('bar', 'baz')])
++        legacy.override_config(self.cfg, [('bar', 'baz', 'mod')])
+         self.assertEqual(self.cfg.bar.baz, 'neutron')
+ 
+     def test_override_config_list_value(self):
+-- 
+1.7.9.2
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/neutron/patches/06-launchpad-1255441.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,49 @@
+Although the following patch has been addressed in Icehouse 2014.1, it
+has not yet been released for Havana.  It has been modified to apply
+cleanly to our current Havana implementation.
+
+commit 5b61df1f539d78cf9d164a142d731e471aa18d4e
+Author: Maru Newby <[email protected]>
+Date:   Wed Nov 27 07:57:48 2013 +0000
+
+    Stop logging unnecessary warning on context create
+    
+    The context was previously logging at the 'warn' level when unknown
+    kwargs were being passed to its __init__().  Since the agents were
+    passing tenant=None with each rpc request, this was generating an
+    unreasonable amount of log chatter that would not be useful to an
+    operator.  The fix is to log at the debug level instead so that
+    the operators don't see the output by default but developers can
+    still choose to.
+    
+    Change-Id: I5c328f628c597eb949c1fe67b23120d2b5d1c7da
+    Related-Bug: #1254530
+    Partial-Bug: #1255441
+
+--- neutron-2013.2.3/neutron/context.py.~1~	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/context.py	2014-06-08 12:01:16.420520735 -0700
+@@ -46,8 +46,8 @@
+             *only* deleted records are visible.
+         """
+         if kwargs:
+-            LOG.warn(_('Arguments dropped when creating '
+-                       'context: %s'), kwargs)
++            LOG.debug(_('Arguments dropped when creating '
++                        'context: %s'), kwargs)
+         super(ContextBase, self).__init__(user=user_id, tenant=tenant_id,
+                                           is_admin=is_admin)
+         self.read_deleted = read_deleted
+--- neutron-2013.2.3/neutron/tests/unit/test_neutron_context.py.~1~	2014-04-03 11:49:01.000000000 -0700
++++ neutron-2013.2.3/neutron/tests/unit/test_neutron_context.py	2014-06-08 12:10:04.483779074 -0700
+@@ -35,6 +35,11 @@
+         self.assertEqual('user_id', cxt.user_id)
+         self.assertEqual('tenant_id', cxt.project_id)
+ 
++    def test_neutron_context_create_logs_unknown_kwarg(self):
++        with mock.patch.object(context.LOG, 'debug') as mock_log:
++            context.Context('user_id', 'tenant_id', foo=None)
++        self.assertEqual(mock_log.call_count, 1)
++
+     def test_neutron_context_to_dict(self):
+         cxt = context.Context('user_id', 'tenant_id')
+         cxt_dict = cxt.to_dict()
--- a/components/openstack/neutron/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/neutron/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,5 +1,6 @@
+library/python/eventlet-26
+library/python/netaddr-26
 library/python/oslo.config-26
-library/python/setuptools-26
 network/ipfilter
 runtime/python-26
 service/network/dnsmasq
--- a/components/openstack/nova/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		nova
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	2013.1.4
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	2013.2.3
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:0491ec81552b9c407021941ea1c477d5bcd93ec1dcc66d5fc0c1cef594dac760
+    sha256:02902cb65b5adb0419c69cdb03ea2a0cfdfe8f7df342be44f3760d66cdecb61e
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/nova
@@ -48,8 +48,10 @@
 
 PKG_MACROS +=		PYVER=$(PYTHON_VERSIONS)
 
+# Replace the standard nova/virt/libvirt/__init__.py with an empty file.
 COMPONENT_POST_INSTALL_ACTION += \
-	($(MKDIR) $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/solariszones; \
+	($(CP) /dev/null $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/libvirt/__init__.py; \
+	 $(MKDIR) $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/solariszones; \
 	 $(CP) files/solariszones/__init__.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/solariszones; \
 	 $(CP) files/solariszones/driver.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/solariszones; \
 	 $(CP) files/solariszones/sysconfig.py $(PROTO_DIR)/usr/lib/python2.6/vendor-packages/nova/virt/solariszones); \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/files/api-paste.ini	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,127 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: meta
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/services/Cloud: ec2cloud
+
+[composite:ec2cloud]
+use = call:nova.api.auth:pipeline_factory
+noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
+keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:ec2keystoneauth]
+paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[filter:validator]
+paste.filter_factory = nova.api.ec2:Validator.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+#############
+# Openstack #
+#############
+
+[composite:osapi_compute]
+use = call:nova.api.openstack.urlmap:urlmap_factory
+/: oscomputeversions
+/v1.1: openstack_compute_api_v2
+/v2: openstack_compute_api_v2
+/v3: openstack_compute_api_v3
+
+[composite:openstack_compute_api_v2]
+use = call:nova.api.auth:pipeline_factory
+noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
+keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
+keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
+
+[composite:openstack_compute_api_v3]
+use = call:nova.api.auth:pipeline_factory
+noauth = faultwrap sizelimit noauth_v3 ratelimit_v3 osapi_compute_app_v3
+keystone = faultwrap sizelimit authtoken keystonecontext ratelimit_v3 osapi_compute_app_v3
+keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:noauth_v3]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
+
+[filter:ratelimit_v3]
+paste.filter_factory = nova.api.openstack.compute.plugins.v3.limits:RateLimitingMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
+
+[app:osapi_compute_app_v2]
+paste.app_factory = nova.api.openstack.compute:APIRouter.factory
+
+[app:osapi_compute_app_v3]
+paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
+
+[pipeline:oscomputeversions]
+pipeline = faultwrap oscomputeversionapp
+
+[app:oscomputeversionapp]
+paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+auth_uri = http://127.0.0.1:5000/v2.0
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+# signing_dir is configurable, but the default behavior of the authtoken
+# middleware should be sufficient.  It will create a temporary directory
+# in the home directory for the user the nova process is running as.
+#signing_dir = /var/lib/nova/keystone-signing
+# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
+auth_version = v2.0
--- a/components/openstack/nova/files/nova-api-ec2.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-api-ec2.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-api %m ec2">
       <method_context>
--- a/components/openstack/nova/files/nova-api-metadata.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-api-metadata.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-api %m metadata">
       <method_context>
--- a/components/openstack/nova/files/nova-api-osapi-compute.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-api-osapi-compute.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-api %m osapi_compute">
       <method_context>
--- a/components/openstack/nova/files/nova-cert.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-cert.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,18 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='conductor' grouping='optional_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/application/openstack/nova/nova-conductor' />
+    </dependency>
+
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-cert %m">
       <method_context>
--- a/components/openstack/nova/files/nova-compute.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-compute.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -29,15 +29,24 @@
       type='service'>
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
+
     <dependency name='evs' grouping='require_all' restart_on='error'
       type='service'>
       <service_fmri value='svc:/network/evs:default' />
     </dependency>
+
     <dependency name='conductor' grouping='optional_all' restart_on='error'
       type='service'>
       <service_fmri value='svc:/application/openstack/nova/nova-conductor' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <!-- increase the timeout to 120 seconds.  Nova's RPC calls have a timeout
          of 60 seconds so the start method needs to extend past that -->
     <exec_method timeout_seconds="120" type="method" name="start"
--- a/components/openstack/nova/files/nova-conductor.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-conductor.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-conductor %m">
       <method_context>
--- a/components/openstack/nova/files/nova-consoleauth.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-consoleauth.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-consoleauth %m">
       <method_context>
--- a/components/openstack/nova/files/nova-novncproxy.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-novncproxy.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-novncproxy %m">
       <method_context>
--- a/components/openstack/nova/files/nova-objectstore.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-objectstore.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-objectstore %m">
       <method_context>
--- a/components/openstack/nova/files/nova-scheduler.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova-scheduler.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,18 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='conductor' grouping='optional_all' restart_on='error'
+      type='service'>
+      <service_fmri value='svc:/application/openstack/nova/nova-conductor' />
+    </dependency>
+
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/nova-scheduler %m">
       <method_context>
--- a/components/openstack/nova/files/nova.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -52,15 +52,6 @@
 
 
 #
-# Options defined in nova.manager
-#
-
-# Some periodic tasks can be run in a separate process. Should
-# we run them here? (boolean value)
-#run_external_periodic_tasks=true
-
-
-#
 # Options defined in nova.netconf
 #
 
@@ -82,22 +73,16 @@
 #
 
 # If set, send compute.instance.update notifications on
-# instance state changes.  Valid values are False for no
-# notifications, True for notifications on any instance
-# changes. (boolean value)
-#notify_on_any_change=false
-
-# If set, send api.fault notifications on caught exceptions in
-# the API service. (boolean value)
-#notify_api_faults=false
-
-# If set, send compute.instance.update notifications on
 # instance state changes.  Valid values are None for no
 # notifications, "vm_state" for notifications on VM state
 # changes, or "vm_and_task_state" for notifications on VM and
 # task state changes. (string value)
 #notify_on_state_change=<None>
 
+# If set, send api.fault notifications on caught exceptions in
+# the API service. (boolean value)
+#notify_api_faults=false
+
 
 #
 # Options defined in nova.paths
@@ -143,6 +128,10 @@
 # number of floating ips allowed per project (integer value)
 #quota_floating_ips=10
 
+# number of fixed ips allowed per project (this should be at
+# least the number of instances allowed) (integer value)
+#quota_fixed_ips=-1
+
 # number of metadata items allowed per instance (integer
 # value)
 #quota_metadata_items=128
@@ -271,14 +260,11 @@
 #monkey_patch=false
 
 # List of modules/decorators to monkey patch (list value)
-#monkey_patch_modules=nova.api.ec2.cloud:nova.openstack.common.notifier.api.notify_decorator,nova.compute.api:nova.openstack.common.notifier.api.notify_decorator
+#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator
 
 # Length of generated instance admin passwords (integer value)
 #password_length=12
 
-# Whether to disable inter-process locks (boolean value)
-#disable_process_locking=false
-
 # time period to generate instance usages for.  Time period
 # must be hour, day, month or year (string value)
 #instance_usage_audit_period=month
@@ -325,12 +311,13 @@
 # Options defined in nova.api.auth
 #
 
-# whether to rate limit the api (boolean value)
-#api_rate_limit=true
+# whether to use per-user rate limiting for the api. (boolean
+# value)
+#api_rate_limit=false
 
 # The strategy to use for auth: noauth or keystone. (string
 # value)
-#auth_strategy=noauth
+auth_strategy=keystone
 
 # Treat X-Forwarded-For as the canonical remote address. Only
 # enable this if you have a sanitizing proxy. (boolean value)
@@ -398,18 +385,29 @@
 # drive (string value)
 #config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
 
+# Driver to use for vendor data (string value)
+#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData
+
 
 #
 # Options defined in nova.api.metadata.handler
 #
 
-# Set flag to indicate Quantum will proxy metadata requests
+# Set flag to indicate Neutron will proxy metadata requests
 # and resolve instance ids. (boolean value)
-#service_quantum_metadata_proxy=false
-
-# Shared secret to validate proxies Quantum metadata requests
+#service_neutron_metadata_proxy=false
+
+# Shared secret to validate proxies Neutron metadata requests
 # (string value)
-#quantum_metadata_proxy_shared_secret=
+#neutron_metadata_proxy_shared_secret=
+
+
+#
+# Options defined in nova.api.metadata.vendordata_json
+#
+
+# File to load json formated vendor data from (string value)
+#vendordata_jsonfile_path=<None>
 
 
 #
@@ -457,28 +455,19 @@
 
 
 #
-# Options defined in nova.api.openstack.compute.contrib.hide_server_addresses
-#
-
-# List of instance states that should hide network info (list
-# value)
-#osapi_hide_server_address_states=building
-
-
-#
 # Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
 #
 
-# Enables or disables quotaing of tenant networks (boolean
-# value)
+# Enables or disables quota checking for tenant networks
+# (boolean value)
 #enable_network_quota=false
 
 # Control for checking for default networks (string value)
-#use_quantum_default_nets=False
-
-# Default tenant id when creating quantum networks (string
+#use_neutron_default_nets=False
+
+# Default tenant id when creating neutron networks (string
 # value)
-#quantum_default_tenant_id=default
+#neutron_default_tenant_id=default
 
 
 #
@@ -490,6 +479,15 @@
 
 
 #
+# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses
+#
+
+# List of instance states that should hide network info (list
+# value)
+#osapi_hide_server_address_states=building
+
+
+#
 # Options defined in nova.api.openstack.compute.servers
 #
 
@@ -508,6 +506,15 @@
 
 
 #
+# Options defined in nova.cells.opts
+#
+
+# The full class name of the compute API class to use
+# (deprecated) (string value)
+#compute_api_class=nova.compute.api.API
+
+
+#
 # Options defined in nova.cert.rpcapi
 #
 
@@ -523,8 +530,8 @@
 # (string value)
 #vpn_image_id=0
 
-# Instance type for vpn instances (string value)
-#vpn_instance_type=m1.tiny
+# Flavor for vpn instances (string value)
+#vpn_flavor=m1.tiny
 
 # Template for cloudpipe instance boot script (string value)
 #boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template
@@ -541,20 +548,54 @@
 
 
 #
-# Options defined in nova.common.memorycache
-#
-
-# Memcached servers or None for in process cache. (list value)
-#memcached_servers=<None>
-
-
-#
-# Options defined in nova.compute
-#
-
-# The full class name of the compute API class to use (string
+# Options defined in nova.cmd.novnc
+#
+
+# Record sessions to FILE.[session_number] (boolean value)
+#record=false
+
+# Become a daemon (background process) (boolean value)
+#daemon=false
+
+# Disallow non-encrypted connections (boolean value)
+#ssl_only=false
+
+# Source is ipv6 (boolean value)
+#source_is_ipv6=false
+
+# SSL certificate file (string value)
+#cert=self.pem
+
+# SSL key file (if separate from cert) (string value)
+#key=<None>
+
+# Run webserver on same port. Serve files from DIR. (string
 # value)
-#compute_api_class=nova.compute.api.API
+#web=/usr/share/spice-html5
+
+
+#
+# Options defined in nova.cmd.novncproxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#novncproxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#novncproxy_port=6080
+
+
+#
+# Options defined in nova.cmd.spicehtml5proxy
+#
+
+# Host on which to listen for incoming requests (string value)
+#spicehtml5proxy_host=0.0.0.0
+
+# Port on which to listen for incoming requests (integer
+# value)
+#spicehtml5proxy_port=6082
 
 
 #
@@ -565,6 +606,10 @@
 # when testing in single-host environments. (boolean value)
 #allow_resize_to_same_host=false
 
+# Allow migrate machine to the same host. Useful when testing
+# in single-host environments. (boolean value)
+#allow_migrate_to_same_host=false
+
 # availability zone to use when user doesn't specify one
 # (string value)
 #default_schedule_zone=<None>
@@ -586,13 +631,20 @@
 # keys for the template are: name, uuid, count. (string value)
 #multi_instance_display_name_template=%(name)s-%(uuid)s
 
-
-#
-# Options defined in nova.compute.instance_types
-#
-
-# default instance type to use, testing only (string value)
-#default_instance_type=m1.small
+# Maximum number of devices that will result in a local image
+# being created on the hypervisor node. Setting this to 0
+# means nova will allow only boot from volume. A negative
+# number means unlimited. (integer value)
+#max_local_block_devices=3
+
+
+#
+# Options defined in nova.compute.flavors
+#
+
+# default flavor to use for the EC2 API only. The Nova API
+# does not support a default flavor. (string value)
+#default_flavor=m1.small
 
 
 #
@@ -627,9 +679,21 @@
 # rebooted (boolean value)
 #resume_guests_state_on_host_boot=false
 
+# Number of times to retry network allocation on failures
+# (integer value)
+#network_allocate_retries=0
+
+# The number of times to attempt to reap an instance's files.
+# (integer value)
+#maximum_instance_delete_attempts=5
+
 # interval to pull bandwidth usage info (integer value)
 #bandwidth_poll_interval=600
 
+# interval to sync power states between the database and the
+# hypervisor (integer value)
+#sync_power_state_interval=600
+
 # Number of seconds between instance info_cache self healing
 # updates (integer value)
 #heal_instance_info_cache_interval=60
@@ -650,6 +714,19 @@
 # value)
 #volume_usage_poll_interval=0
 
+# Interval in seconds for polling shelved instances to offload
+# (integer value)
+#shelved_poll_interval=3600
+
+# Time in seconds before a shelved instance is eligible for
+# removing from a host.  -1 never offload, 0 offload when
+# shelved (integer value)
+#shelved_offload_time=0
+
+# Interval in seconds for retrying failed instance file
+# deletes (integer value)
+#instance_delete_interval=300
+
 # Action to take if a running deleted instance is
 # detected.Valid options are 'noop', 'log' and 'reap'. Set to
 # 'noop' to disable. (string value)
@@ -707,6 +784,16 @@
 
 
 #
+# Options defined in nova.conductor.tasks.live_migrate
+#
+
+# Number of times to retry live-migration before failing. If
+# == -1, try until out of hosts. If == 0, only try once, no
+# retries. (integer value)
+#migrate_max_retries=-1
+
+
+#
 # Options defined in nova.console.manager
 #
 
@@ -855,7 +942,7 @@
 # value)
 #image_decryption_dir=/tmp
 
-# hostname or ip for openstack to use when accessing the s3
+# hostname or ip for OpenStack to use when accessing the s3
 # api (string value)
 #s3_host=$my_ip
 
@@ -890,7 +977,7 @@
 
 # The full class name of the network API class to use (string
 # value)
-network_api_class=nova.network.quantumv2.api.API
+network_api_class=nova.network.neutronv2.api.API
 
 
 #
@@ -1038,14 +1125,18 @@
 # the port for the metadata api port (integer value)
 #metadata_port=8775
 
-# Regular expression to match iptables rule that shouldalways
+# Regular expression to match iptables rule that should always
 # be on the top. (string value)
 #iptables_top_regex=
 
-# Regular expression to match iptables rule that shouldalways
+# Regular expression to match iptables rule that should always
 # be on the bottom. (string value)
 #iptables_bottom_regex=
 
+# The table that iptables to jump to when a packet is to be
+# dropped. (string value)
+#iptables_drop_action=DROP
+
 
 #
 # Options defined in nova.network.manager
@@ -1083,9 +1174,6 @@
 # Number of addresses in each private subnet (integer value)
 #network_size=256
 
-# Fixed IP address block (string value)
-#fixed_range=10.0.0.0/8
-
 # Fixed IPv6 address block (string value)
 #fixed_range_v6=fd00::/48
 
@@ -1121,10 +1209,12 @@
 
 # If True, send a dhcp release on instance termination
 # (boolean value)
-#force_dhcp_release=false
+#force_dhcp_release=true
 
 # If True in multi_host mode, all compute hosts share the same
-# dhcp address. (boolean value)
+# dhcp address. The same IP address used for DHCP will be
+# added on each nova-network node which is only visible to the
+# vms on the same host. (boolean value)
 #share_dhcp_address=false
 
 # If True, when a DNS entry must be updated, it sends a fanout
@@ -1144,50 +1234,57 @@
 
 
 #
-# Options defined in nova.network.quantumv2.api
-#
-
-# URL for connecting to quantum (string value)
-#quantum_url=http://127.0.0.1:9696
-
-# timeout value for connecting to quantum in seconds (integer
+# Options defined in nova.network.neutronv2.api
+#
+
+# URL for connecting to neutron (string value)
+#neutron_url=http://127.0.0.1:9696
+
+# timeout value for connecting to neutron in seconds (integer
 # value)
-#quantum_url_timeout=30
-
-# username for connecting to quantum in admin context (string
+#neutron_url_timeout=30
+
+# username for connecting to neutron in admin context (string
 # value)
-#quantum_admin_username=<None>
-
-# password for connecting to quantum in admin context (string
+#neutron_admin_username=<None>
+
+# password for connecting to neutron in admin context (string
 # value)
-#quantum_admin_password=<None>
-
-# tenant name for connecting to quantum in admin context
+#neutron_admin_password=<None>
+
+# tenant name for connecting to neutron in admin context
 # (string value)
-#quantum_admin_tenant_name=<None>
-
-# region name for connecting to quantum in admin context
+#neutron_admin_tenant_name=<None>
+
+# region name for connecting to neutron in admin context
 # (string value)
-#quantum_region_name=<None>
-
-# auth url for connecting to quantum in admin context (string
+#neutron_region_name=<None>
+
+# auth url for connecting to neutron in admin context (string
 # value)
-#quantum_admin_auth_url=http://localhost:5000/v2.0
+#neutron_admin_auth_url=http://localhost:5000/v2.0
 
 # if set, ignore any SSL validation issues (boolean value)
-#quantum_api_insecure=false
-
-# auth strategy for connecting to quantum in admin context
+#neutron_api_insecure=false
+
+# auth strategy for connecting to neutron in admin context
 # (string value)
-#quantum_auth_strategy=keystone
+#neutron_auth_strategy=keystone
 
 # Name of Integration Bridge used by Open vSwitch (string
 # value)
-#quantum_ovs_bridge=br-int
-
-# Number of seconds before querying quantum for extensions
+#neutron_ovs_bridge=br-int
+
+# Number of seconds before querying neutron for extensions
 # (integer value)
-#quantum_extension_sync_interval=600
+#neutron_extension_sync_interval=600
+
+# Location of ca certificates file to use for neutron client
+# requests. (string value)
+#neutron_ca_certificates_file=<None>
+
+# Use per-port DHCP options with Neutron (boolean value)
+#dhcp_options_enabled=false
 
 
 #
@@ -1210,55 +1307,6 @@
 # The full class name of the security API class (string value)
 #security_group_api=nova
 
-# The full class name of the security group handler class
-# (string value)
-#security_group_handler=nova.network.sg.NullSecurityGroupHandler
-
-
-#
-# Options defined in bin.nova-clear-rabbit-queues
-#
-
-# Queues to delete (multi valued)
-#queues=
-
-# delete nova exchange too. (boolean value)
-#delete_exchange=false
-
-
-#
-# Options defined in bin.nova-novncproxy
-#
-
-# Record sessions to FILE.[session_number] (boolean value)
-#record=false
-
-# Become a daemon (background process) (boolean value)
-#daemon=false
-
-# Disallow non-encrypted connections (boolean value)
-#ssl_only=false
-
-# Source is ipv6 (boolean value)
-#source_is_ipv6=false
-
-# SSL certificate file (string value)
-#cert=self.pem
-
-# SSL key file (if separate from cert) (string value)
-#key=<None>
-
-# Run webserver on same port. Serve files from DIR. (string
-# value)
-#web=/usr/share/novnc
-
-# Host on which to listen for incoming requests (string value)
-#novncproxy_host=0.0.0.0
-
-# Port on which to listen for incoming requests (integer
-# value)
-#novncproxy_port=6080
-
 
 #
 # Options defined in nova.objectstore.s3server
@@ -1275,69 +1323,28 @@
 
 
 #
-# Options defined in nova.openstack.common.db.api
-#
-
-# The backend to use for db (string value)
-#db_backend=sqlalchemy
-
-# Enable the experimental use of thread pooling for all DB API
-# calls (boolean value)
-#dbapi_use_tpool=false
-
-
-#
 # Options defined in nova.openstack.common.db.sqlalchemy.session
 #
 
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-#sql_connection=sqlite:////nova/openstack/common/db/$sqlite_db
-
 # the filename to use with sqlite (string value)
 #sqlite_db=nova.sqlite
 
-# timeout before idle sql connections are reaped (integer
-# value)
-#sql_idle_timeout=3600
-
-# If passed, use synchronous mode for sqlite (boolean value)
+# If true, use synchronous mode for sqlite (boolean value)
 #sqlite_synchronous=true
 
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
-#sql_min_pool_size=1
-
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
-#sql_max_pool_size=5
-
-# maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-#sql_max_retries=10
-
-# interval between retries of opening a sql connection
-# (integer value)
-#sql_retry_interval=10
-
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
-#sql_max_overflow=<None>
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-#sql_connection_debug=0
-
-# Add python stack traces to SQL as comment strings (boolean
-# value)
-#sql_connection_trace=false
-
 
 #
 # Options defined in nova.openstack.common.eventlet_backdoor
 #
 
-# port for eventlet backdoor to listen (integer value)
+# Enable eventlet backdoor. Acceptable values are 0, <port>
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number, <port> results in listening on the
+# specified port number and not enabling backdoorif it is in
+# use and <start>:<end> results in listening on the smallest
+# unused port number within the specified range of port
+# numbers. The chosen port is displayed in the service's log
+# file. (string value)
 #backdoor_port=<None>
 
 
@@ -1348,9 +1355,8 @@
 # Whether to disable inter-process locks (boolean value)
 #disable_process_locking=false
 
-# Directory to use for lock files. Default to a temp directory
-# (string value)
-#lock_path=<None>
+# Directory to use for lock files. (string value)
+lock_path=$state_path
 
 
 #
@@ -1368,13 +1374,9 @@
 # Log output to standard error (boolean value)
 #use_stderr=true
 
-# Default file mode used when creating log files (string
-# value)
-#logfile_mode=0644
-
 # format string to use for log messages with context (string
 # value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
 
 # format string to use for log messages without context
 # (string value)
@@ -1412,21 +1414,23 @@
 # (string value)
 #log_config=<None>
 
-# A logging.Formatter log message format string which may use
-# any of the available logging.LogRecord attributes. Default:
-# %(default)s (string value)
-#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated.  Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
 
 # Format string for %%(asctime)s in log records. Default:
 # %(default)s (string value)
 #log_date_format=%Y-%m-%d %H:%M:%S
 
-# (Optional) Name of log file to output to. If not set,
-# logging will go to stdout. (string value)
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
 #log_file=<None>
 
-# (Optional) The directory to keep log files in (will be
-# prepended to --log-file) (string value)
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
 #log_dir=<None>
 
 # Use syslog for logging. (boolean value)
@@ -1437,6 +1441,14 @@
 
 
 #
+# Options defined in nova.openstack.common.memorycache
+#
+
+# Memcached servers or None for in process cache. (list value)
+#memcached_servers=<None>
+
+
+#
 # Options defined in nova.openstack.common.notifier.api
 #
 
@@ -1450,18 +1462,27 @@
 
 # Default publisher_id for outgoing notifications (string
 # value)
-#default_publisher_id=$host
+#default_publisher_id=<None>
 
 
 #
 # Options defined in nova.openstack.common.notifier.rpc_notifier
 #
 
-# AMQP topic used for openstack notifications (list value)
+# AMQP topic used for OpenStack notifications (list value)
 #notification_topics=notifications
 
 
 #
+# Options defined in nova.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
 # Options defined in nova.openstack.common.rpc
 #
 
@@ -1485,7 +1506,7 @@
 
 # Modules of exceptions that are permitted to be recreatedupon
 # receiving exception data from an rpc call. (list value)
-#allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception,cinder.exception,exceptions
+#allowed_rpc_exception_modules=nova.exception,cinder.exception,exceptions
 
 # If passed, use a fake RabbitMQ provider (boolean value)
 #fake_rabbit=false
@@ -1499,17 +1520,20 @@
 # Options defined in nova.openstack.common.rpc.amqp
 #
 
-# Enable a fast single reply queue if using AMQP based RPC
-# like RabbitMQ or Qpid. (boolean value)
-#amqp_rpc_single_reply_queue=false
+# Use durable queues in amqp. (boolean value)
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
 
 
 #
 # Options defined in nova.openstack.common.rpc.impl_kombu
 #
 
-# SSL version to use (valid only if SSL enabled) (string
-# value)
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions (string value)
 #kombu_ssl_version=
 
 # SSL key file (valid only if SSL enabled) (string value)
@@ -1558,9 +1582,6 @@
 # value)
 #rabbit_max_retries=0
 
-# use durable queues in RabbitMQ (boolean value)
-#rabbit_durable_queues=false
-
 # use H/A queues in RabbitMQ (x-ha-policy: all).You need to
 # wipe RabbitMQ database when changing this option. (boolean
 # value)
@@ -1574,7 +1595,7 @@
 # Qpid broker hostname (string value)
 #qpid_hostname=localhost
 
-# Qpid broker port (string value)
+# Qpid broker port (integer value)
 #qpid_port=5672
 
 # Qpid HA cluster host:port pairs (list value)
@@ -1600,6 +1621,14 @@
 # Disable Nagle algorithm (boolean value)
 #qpid_tcp_nodelay=true
 
+# The qpid topology version to use.  Version 1 is what was
+# originally used by impl_qpid.  Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
 
 #
 # Options defined in nova.openstack.common.rpc.impl_zmq
@@ -1629,15 +1658,42 @@
 # Name of this node. Must be a valid hostname, FQDN, or IP
 # address. Must match "host" option, if running Nova. (string
 # value)
-#rpc_zmq_host=sorcha
+#rpc_zmq_host=nova
 
 
 #
 # Options defined in nova.openstack.common.rpc.matchmaker
 #
 
-# Matchmaker ring file (JSON) (string value)
-#matchmaker_ringfile=/etc/nova/matchmaker_ring.json
+# Heartbeat frequency (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+
+#
+# Options defined in nova.pci.pci_request
+#
+
+# An alias for a PCI passthrough device requirement. This
+# allows users to specify the alias in the extra_spec for a
+# flavor, without needing to repeat all the PCI property
+# requirements. For example: pci_alias = { "name":
+# "QuicAssist",   "product_id": "0443",   "vendor_id": "8086",
+# "device_type": "ACCEL" } defines an alias for the Intel
+# QuickAssist card. (multi valued)
+#pci_alias=
+
+
+#
+# Options defined in nova.pci.pci_whitelist
+#
+
+# White list of PCI devices available to VMs. For example:
+# pci_passthrough_whitelist =  [{"vendor_id": "8086",
+# "product_id": "0443"}] (multi valued)
+#pci_passthrough_whitelist=
 
 
 #
@@ -1669,8 +1725,11 @@
 # Options defined in nova.scheduler.filters.core_filter
 #
 
-# Virtual CPU to Physical CPU allocation ratio (floating point
-# value)
+# Virtual CPU to physical CPU allocation ratio which affects
+# all CPU filters. This configuration specifies a global ratio
+# for CoreFilter. For AggregateCoreFilter, it will fall back
+# to this configuration value if no per-aggregate setting
+# found. (floating point value)
 #cpu_allocation_ratio=16.0
 
 
@@ -1702,6 +1761,10 @@
 # Host reserved for specific images (list value)
 #isolated_hosts=
 
+# Whether to force isolated hosts to run only isolated images
+# (boolean value)
+#restrict_isolated_hosts_to_isolated_images=true
+
 
 #
 # Options defined in nova.scheduler.filters.num_instances_filter
@@ -1715,8 +1778,11 @@
 # Options defined in nova.scheduler.filters.ram_filter
 #
 
-# virtual ram to physical ram allocation ratio (floating point
-# value)
+# Virtual ram to physical ram allocation ratio which affects
+# all ram filters. This configuration specifies a global ratio
+# for RamFilter. For AggregateRamFilter, it will fall back to
+# this configuration value if no per-aggregate setting found.
+# (floating point value)
 #ram_allocation_ratio=1.5
 
 
@@ -1748,17 +1814,6 @@
 
 
 #
-# Options defined in nova.scheduler.multi
-#
-
-# Driver to use for scheduling compute calls (string value)
-#compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
-
-# Default driver to use for scheduling calls (string value)
-#default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-
-
-#
 # Options defined in nova.scheduler.rpcapi
 #
 
@@ -1776,24 +1831,6 @@
 
 
 #
-# Options defined in nova.scheduler.weights.least_cost
-#
-
-# Which cost functions the LeastCostScheduler should use (list
-# value)
-#least_cost_functions=<None>
-
-# How much weight to give the noop cost function (floating
-# point value)
-#noop_cost_fn_weight=1.0
-
-# How much weight to give the fill-first cost function. A
-# negative value will reverse behavior: e.g. spread-first
-# (floating point value)
-#compute_fill_first_cost_fn_weight=<None>
-
-
-#
 # Options defined in nova.scheduler.weights.ram
 #
 
@@ -1845,6 +1882,13 @@
 #virt_mkfs=linux=mkfs.ext3 -L %(fs_label)s -F %(target)s
 #virt_mkfs=windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s
 
+# Attempt to resize the filesystem by accessing the image over
+# a block device. This is done by the host and may not be
+# necessary if the image contains a recent version of cloud-
+# init. Possible mechanisms require the nbd driver (for qcow
+# and raw), or loop (for raw). (boolean value)
+#resize_fs_using_block_device=false
+
 
 #
 # Options defined in nova.virt.disk.mount.nbd
@@ -1855,13 +1899,23 @@
 
 
 #
+# Options defined in nova.virt.docker.driver
+#
+
+# Default TCP port to find the docker-registry container
+# (integer value)
+#docker_registry_default_port=5042
+
+
+#
 # Options defined in nova.virt.driver
 #
 
 # Driver to use for controlling virtualization. Options
 # include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
 # fake.FakeDriver, baremetal.BareMetalDriver,
-# vmwareapi.VMWareESXDriver (string value)
+# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string
+# value)
 compute_driver=solariszones.SolarisZonesDriver
 
 # The default format an ephemeral_volume will be formatted
@@ -1891,52 +1945,6 @@
 
 
 #
-# Options defined in nova.virt.hyperv.vif
-#
-
-# External virtual switch Name, if not provided, the first
-# external virtual switch is used (string value)
-#vswitch_name=<None>
-
-
-#
-# Options defined in nova.virt.hyperv.vmops
-#
-
-# Required for live migration among hosts with different CPU
-# features (boolean value)
-#limit_cpu_features=false
-
-# Sets the admin password in the config drive image (boolean
-# value)
-#config_drive_inject_password=false
-
-# qemu-img is used to convert between different image types
-# (string value)
-#qemu_img_cmd=qemu-img.exe
-
-# Attaches the Config Drive image as a cdrom drive instead of
-# a disk drive (boolean value)
-#config_drive_cdrom=false
-
-
-#
-# Options defined in nova.virt.hyperv.volumeops
-#
-
-# The number of times we retry on attaching volume  (integer
-# value)
-#hyperv_attaching_volume_retry_count=10
-
-# The seconds to wait between an volume attachment attempt
-# (integer value)
-#hyperv_wait_between_attach_retry=5
-
-# Force volumeutils v1 (boolean value)
-#force_volumeutils_v1=false
-
-
-#
 # Options defined in nova.virt.images
 #
 
@@ -2003,7 +2011,7 @@
 #libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
 
 # Libvirt handlers for remote volumes. (list value)
-#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver
 
 # Override the default disk prefix for the devices attached to
 # a server, which is dependent on libvirt_type. (valid options
@@ -2045,14 +2053,19 @@
 # ["file=directsync","block=none"] (list value)
 #disk_cachemodes=
 
+# Which pcpus can be used by vcpus of instance e.g:
+# "4-12,^8,15" (string value)
+#vcpu_pin_set=<None>
+
 
 #
 # Options defined in nova.virt.libvirt.imagebackend
 #
 
-# VM Images format. Acceptable values are: raw, qcow2, lvm,
-# default. If default is specified, then use_cow_images flag
-# is used instead of this one. (string value)
+# VM Images format. Acceptable values are: raw, qcow2,
+# lvm, rbd, default. If default is specified, then
+# use_cow_images flag is used instead of this one. (string
+# value)
 #libvirt_images_type=default
 
 # LVM Volume Group that is used for VM images, when you
@@ -2067,6 +2080,13 @@
 # snapshot copy-on-write blocks. (integer value)
 #libvirt_lvm_snapshot_size=1000
 
+# the RADOS pool in which rbd volumes are stored (string
+# value)
+#libvirt_images_rbd_pool=rbd
+
+# path to the ceph configuration file to use (string value)
+#libvirt_images_rbd_ceph_conf=
+
 
 #
 # Options defined in nova.virt.libvirt.imagecache
@@ -2135,6 +2155,10 @@
 # (integer value)
 #num_iscsi_scan_tries=3
 
+# number of times to rescan iSER target to find volume
+# (integer value)
+#num_iser_scan_tries=3
+
 # the RADOS client name for accessing rbd volumes (string
 # value)
 #rbd_user=<None>
@@ -2162,6 +2186,9 @@
 # use multipath connection of the iSCSI volume (boolean value)
 #libvirt_iscsi_use_multipath=false
 
+# use multipath connection of the iSER volume (boolean value)
+#libvirt_iser_use_multipath=false
+
 # Path or URL to Scality SOFS configuration file (string
 # value)
 #scality_sofs_config=<None>
@@ -2169,6 +2196,10 @@
 # Base dir where Scality SOFS shall be mounted (string value)
 #scality_sofs_mount_point=$state_path/scality
 
+# Protocols listed here will be accessed directly from QEMU.
+# Currently supported protocols: [gluster] (list value)
+#qemu_allowed_storage_drivers=
+
 
 #
 # Options defined in nova.virt.powervm.driver
@@ -2197,71 +2228,6 @@
 
 
 #
-# Options defined in nova.virt.vmwareapi.driver
-#
-
-# URL for connection to VMware ESX/VC host. Required if
-# compute_driver is vmwareapi.VMwareESXDriver or
-# vmwareapi.VMwareVCDriver. (string value)
-#vmwareapi_host_ip=<None>
-
-# Username for connection to VMware ESX/VC host. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver or
-# vmwareapi.VMwareVCDriver. (string value)
-#vmwareapi_host_username=<None>
-
-# Password for connection to VMware ESX/VC host. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver or
-# vmwareapi.VMwareVCDriver. (string value)
-#vmwareapi_host_password=<None>
-
-# Name of a VMware Cluster ComputeResource. Used only if
-# compute_driver is vmwareapi.VMwareVCDriver. (string value)
-#vmwareapi_cluster_name=<None>
-
-# The interval used for polling of remote tasks. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver or
-# vmwareapi.VMwareVCDriver. (floating point value)
-#vmwareapi_task_poll_interval=5.0
-
-# The number of times we retry on failures, e.g., socket
-# error, etc. Used only if compute_driver is
-# vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.
-# (integer value)
-#vmwareapi_api_retry_count=10
-
-# VNC starting port (integer value)
-#vnc_port=5900
-
-# Total number of VNC ports (integer value)
-#vnc_port_total=10000
-
-# VNC password (string value)
-#vnc_password=<None>
-
-# Whether to use linked clone (boolean value)
-#use_linked_clone=true
-
-
-#
-# Options defined in nova.virt.vmwareapi.vif
-#
-
-# Physical ethernet adapter name for vlan networking (string
-# value)
-#vmwareapi_vlan_interface=vmnic0
-
-
-#
-# Options defined in nova.virt.vmwareapi.vim
-#
-
-# Optional VIM Service WSDL Location e.g
-# http://<server>/vimService.wsdl
-#vmwareapi_wsdl_loc=<None>
-
-
-#
 # Options defined in nova.virt.xenapi.agent
 #
 
@@ -2283,18 +2249,27 @@
 # (string value)
 #xenapi_agent_path=usr/sbin/xe-update-networking
 
-# Disable XenAPI agent. Reduces the amount of time it takes
-# nova to detect that a VM has started, when that VM does not
-# have the agent installed (boolean value)
+# Disables the use of the XenAPI agent in any image regardless
+# of what image properties are present.  (boolean value)
 #xenapi_disable_agent=false
 
+# Determines if the xenapi agent should be used when the image
+# used does not contain a hint to declare if the agent is
+# present or not. The hint is a glance property
+# "xenapi_use_agent" that has the value "true" or "false".
+# Note that waiting for the agent when it is not present will
+# significantly increase server boot times. (boolean value)
+#xenapi_use_agent_default=false
+
 
 #
 # Options defined in nova.virt.xenapi.driver
 #
 
-# URL for connection to XenServer/Xen Cloud Platform. Required
-# if compute_driver=xenapi.XenAPIDriver (string value)
+# URL for connection to XenServer/Xen Cloud Platform. A
+# special value of unix://local can be used to connect to the
+# local unix socket.  Required if
+# compute_driver=xenapi.XenAPIDriver (string value)
 #xenapi_connection_url=<None>
 
 # Username for connection to XenServer/Xen Cloud Platform.
@@ -2348,64 +2323,8 @@
 
 
 #
-# Options defined in nova.virt.xenapi.pool
-#
-
-# To use for hosts with different CPUs (boolean value)
-#use_join_force=true
-
-
-#
-# Options defined in nova.virt.xenapi.vif
-#
-
-# Name of Integration Bridge used by Open vSwitch (string
-# value)
-#xenapi_ovs_integration_bridge=xapi1
-
-
-#
-# Options defined in nova.virt.xenapi.vm_utils
-#
-
-# Cache glance images locally. `all` will cache all images,
-# `some` will only cache images that have the image_property
-# `cache_in_nova=True`, and `none` turns off caching entirely
-# (string value)
-#cache_images=all
-
-# Default OS type (string value)
-#default_os_type=linux
-
-# Time to wait for a block device to be created (integer
-# value)
-#block_device_creation_timeout=10
-
-# Maximum size in bytes of kernel or ramdisk images (integer
-# value)
-#max_kernel_ramdisk_size=16777216
-
-# Filter for finding the SR to be used to install guest
-# instances on. The default value is the Local Storage in
-# default XenServer/XCP installations. To select an SR with a
-# different matching criteria, you could set it to other-
-# config:my_favorite_sr=true. On the other hand, to fall back
-# on the Default SR, as displayed by XenCenter, set this flag
-# to: default-sr:true (string value)
-#sr_matching_filter=other-config:i18n-key=local-storage
-
-# Whether to use sparse_copy for copying data on a resize down
-# (False will use standard dd). This speeds up resizes down
-# considerably since large runs of zeros won't have to be
-# rsynced (boolean value)
-#xenapi_sparse_copy=true
-
-# Maximum number of retries to unplug VBD (integer value)
-#xenapi_num_vbd_unplug_retries=10
-
-# Whether or not to download images via Bit Torrent
-# (all|some|none). (string value)
-#xenapi_torrent_images=none
+# Options defined in nova.virt.xenapi.image.bittorrent
+#
 
 # Base URL for torrent files. (string value)
 #xenapi_torrent_base_url=<None>
@@ -2438,6 +2357,83 @@
 
 
 #
+# Options defined in nova.virt.xenapi.pool
+#
+
+# To use for hosts with different CPUs (boolean value)
+#use_join_force=true
+
+
+#
+# Options defined in nova.virt.xenapi.vif
+#
+
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#xenapi_ovs_integration_bridge=xapi1
+
+
+#
+# Options defined in nova.virt.xenapi.vm_utils
+#
+
+# Cache glance images locally. `all` will cache all images,
+# `some` will only cache images that have the image_property
+# `cache_in_nova=True`, and `none` turns off caching entirely
+# (string value)
+#cache_images=all
+
+# Compression level for images, e.g., 9 for gzip -9. Range is
+# 1-9, 9 being most compressed but most CPU intensive on dom0.
+# (integer value)
+#xenapi_image_compression_level=<None>
+
+# Default OS type (string value)
+#default_os_type=linux
+
+# Time to wait for a block device to be created (integer
+# value)
+#block_device_creation_timeout=10
+
+# Maximum size in bytes of kernel or ramdisk images (integer
+# value)
+#max_kernel_ramdisk_size=16777216
+
+# Filter for finding the SR to be used to install guest
+# instances on. To use the Local Storage in default
+# XenServer/XCP installations set this flag to other-config
+# :i18n-key=local-storage. To select an SR with a different
+# matching criteria, you could set it to other-
+# config:my_favorite_sr=true. On the other hand, to fall back
+# on the Default SR, as displayed by XenCenter, set this flag
+# to: default-sr:true (string value)
+#sr_matching_filter=default-sr:true
+
+# Whether to use sparse_copy for copying data on a resize down
+# (False will use standard dd). This speeds up resizes down
+# considerably since large runs of zeros won't have to be
+# rsynced (boolean value)
+#xenapi_sparse_copy=true
+
+# Maximum number of retries to unplug VBD (integer value)
+#xenapi_num_vbd_unplug_retries=10
+
+# Whether or not to download images via Bit Torrent
+# (all|some|none). (string value)
+#xenapi_torrent_images=none
+
+# Name of network to use for booting iPXE ISOs (string value)
+#xenapi_ipxe_network_name=<None>
+
+# URL to the iPXE boot menu (string value)
+#xenapi_ipxe_boot_menu_url=<None>
+
+# Name and optionally path of the tool used for ISO image
+# creation (string value)
+#xenapi_ipxe_mkisofs_cmd=mkisofs
+
+
+#
 # Options defined in nova.virt.xenapi.vmops
 #
 
@@ -2449,9 +2445,9 @@
 # value)
 #xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
 
-# Object Store Driver used to handle image uploads. (string
+# Dom0 plugin driver used to handle image uploads. (string
 # value)
-#xenapi_image_upload_handler=nova.virt.xenapi.imageupload.glance.GlanceStore
+#xenapi_image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore
 
 
 #
@@ -2518,6 +2514,10 @@
 # region name of this node (string value)
 #os_region_name=<None>
 
+# Location of ca certificates file to use for cinder client
+# requests. (string value)
+#cinder_ca_certificates_file=<None>
+
 # Number of cinderclient retries on failed http calls (integer
 # value)
 #cinder_http_retries=3
@@ -2531,7 +2531,20 @@
 #cinder_cross_az_attach=true
 
 
-[HYPERV]
+#
+# Options defined in nova.virt.solariszones.driver
+#
+
+# Default path to Glance cache for Solaris Zones. (string
+# value)
+#glancecache_dirname=$state_path/images
+
+# Location where solariszones driver will store snapshots
+# before uploading them to image service (string value)
+#solariszones_snapshots_directory=$instances_path/snapshots
+
+
+[hyperv]
 
 #
 # Options defined in nova.virt.hyperv.pathutils
@@ -2545,6 +2558,123 @@
 #instances_path_share=
 
 
+#
+# Options defined in nova.virt.hyperv.utilsfactory
+#
+
+# Force V1 WMI utility classes (boolean value)
+#force_hyperv_utils_v1=false
+
+# Force V1 volume utility class (boolean value)
+#force_volumeutils_v1=false
+
+
+#
+# Options defined in nova.virt.hyperv.vif
+#
+
+# External virtual switch Name, if not provided, the first
+# external virtual switch is used (string value)
+#vswitch_name=<None>
+
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
+# Required for live migration among hosts with different CPU
+# features (boolean value)
+#limit_cpu_features=false
+
+# Sets the admin password in the config drive image (boolean
+# value)
+#config_drive_inject_password=false
+
+# qemu-img is used to convert between different image types
+# (string value)
+#qemu_img_cmd=qemu-img.exe
+
+# Attaches the Config Drive image as a cdrom drive instead of
+# a disk drive (boolean value)
+#config_drive_cdrom=false
+
+# Enables metrics collections for an instance by using
+# Hyper-V's metric APIs. Collected data can by retrieved by
+# other apps and services, e.g.: Ceilometer. Requires Hyper-V
+# / Windows Server 2012 and above (boolean value)
+#enable_instance_metrics_collection=false
+
+# Enables dynamic memory allocation (ballooning) when set to a
+# value greater than 1. The value expresses the ratio between
+# the total RAM assigned to an instance and its startup RAM
+# amount. For example a ratio of 2.0 for an instance with
+# 1024MB of RAM implies 512MB of RAM allocated at startup
+# (floating point value)
+#dynamic_memory_ratio=1.0
+
+
+#
+# Options defined in nova.virt.hyperv.volumeops
+#
+
+# The number of times to retry to attach a volume (integer
+# value)
+#volume_attach_retry_count=10
+
+# Interval between volume attachment attempts, in seconds
+# (integer value)
+#volume_attach_retry_interval=5
+
+# The number of times to retry checking for a disk mounted via
+# iSCSI. (integer value)
+#mounted_disk_query_retry_count=10
+
+# Interval between checks for a mounted iSCSI disk, in
+# seconds. (integer value)
+#mounted_disk_query_retry_interval=5
+
+
+[zookeeper]
+
+#
+# Options defined in nova.servicegroup.drivers.zk
+#
+
+# The ZooKeeper addresses for servicegroup service in the
+# format of host1:port,host2:port,host3:port (string value)
+#address=<None>
+
+# recv_timeout parameter for the zk session (integer value)
+#recv_timeout=4000
+
+# The prefix used in ZooKeeper to store ephemeral nodes
+# (string value)
+#sg_prefix=/servicegroups
+
+# Number of seconds to wait until retrying to join the session
+# (integer value)
+#sg_retry_interval=5
+
+
+[osapi_v3]
+
+#
+# Options defined in nova.api.openstack
+#
+
+# Whether the V3 API is enabled or not (boolean value)
+#enabled=false
+
+# A list of v3 API extensions to never load. Specify the
+# extension aliases here. (list value)
+#extensions_blacklist=
+
+# If the list is not empty then a v3 API extension will only
+# be loaded if it exists in this list. Specify the extension
+# aliases here. (list value)
+#extensions_whitelist=
+
+
 [conductor]
 
 #
@@ -2560,6 +2690,30 @@
 # full class name for the Manager for conductor (string value)
 #manager=nova.conductor.manager.ConductorManager
 
+# Number of workers for OpenStack Conductor service (integer
+# value)
+#workers=<None>
+
+
+[keymgr]
+
+#
+# Options defined in nova.keymgr
+#
+
+# The full class name of the key manager API class (string
+# value)
+#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager
+
+
+#
+# Options defined in nova.keymgr.conf_key_mgr
+#
+
+# Fixed key returned by key manager, specified in hex (string
+# value)
+#fixed_key=<None>
+
 
 [cells]
 
@@ -2614,6 +2768,21 @@
 # value)
 #call_timeout=60
 
+# Percentage of cell capacity to hold in reserve. Affects both
+# memory and disk utilization (floating point value)
+#reserve_percent=10.0
+
+# Type of cell: api or compute (string value)
+#cell_type=<None>
+
+# Number of seconds after which a lack of capability and
+# capacity updates signals the child cell is to be treated as
+# a mute. (integer value)
+#mute_child_interval=300
+
+# Seconds between bandwidth updates for cells. (integer value)
+#bandwidth_update_interval=600
+
 
 #
 # Options defined in nova.cells.rpc_driver
@@ -2629,6 +2798,16 @@
 # Options defined in nova.cells.scheduler
 #
 
+# Filter classes the cells scheduler should use.  An entry of
+# "nova.cells.filters.all_filters" maps to all cells filters
+# included with nova. (list value)
+#scheduler_filter_classes=nova.cells.filters.all_filters
+
+# Weigher classes the cells scheduler should use.  An entry of
+# "nova.cells.weights.all_weighers" maps to all cell weighers
+# included with nova. (list value)
+#scheduler_weight_classes=nova.cells.weights.all_weighers
+
 # How many retries when no cells are available. (integer
 # value)
 #scheduler_retries=10
@@ -2646,27 +2825,107 @@
 # value)
 #db_check_interval=60
 
-
-[zookeeper]
-
-#
-# Options defined in nova.servicegroup.drivers.zk
-#
-
-# The ZooKeeper addresses for servicegroup service in the
-# format of host1:port,host2:port,host3:port (string value)
-#address=<None>
-
-# recv_timeout parameter for the zk session (integer value)
-#recv_timeout=4000
-
-# The prefix used in ZooKeeper to store ephemeral nodes
-# (string value)
-#sg_prefix=/servicegroups
-
-# Number of seconds to wait until retrying to join the session
+# Configuration file from which to read cells configuration.
+# If given, overrides reading cells from the database. (string
+# value)
+#cells_config=<None>
+
+
+#
+# Options defined in nova.cells.weights.mute_child
+#
+
+# Multiplier used to weigh mute children.  (The value should
+# be negative.) (floating point value)
+#mute_weight_multiplier=-10.0
+
+# Weight value assigned to mute children.  (The value should
+# be positive.) (floating point value)
+#mute_weight_value=1000.0
+
+
+#
+# Options defined in nova.cells.weights.ram_by_instance_type
+#
+
+# Multiplier used for weighing ram.  Negative numbers mean to
+# stack vs spread. (floating point value)
+#ram_weight_multiplier=10.0
+
+
+[database]
+
+#
+# Options defined in nova.openstack.common.db.api
+#
+
+# The backend to use for db (string value)
+#backend=sqlalchemy
+
+# Enable the experimental use of thread pooling for all DB API
+# calls (boolean value)
+#use_tpool=false
+
+
+#
+# Options defined in nova.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#connection=sqlite:///$state_path/$sqlite_db
+
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
+
+# timeout before idle sql connections are reaped (integer
+# value)
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
 # (integer value)
-#sg_retry_interval=5
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#max_pool_size=<None>
+
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#max_retries=10
+
+# interval between retries of opening a sql connection
+# (integer value)
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+#pool_timeout=<None>
+
+
+[image_file_url]
+
+#
+# Options defined in nova.image.download.file
+#
+
+# A list of filesystems that will be configured in this file
+# under the sections image_file_url:<list entry name> (list
+# value)
+#filesystems=
 
 
 [baremetal]
@@ -2692,13 +2951,6 @@
 # Options defined in nova.virt.baremetal.driver
 #
 
-# Whether baremetal compute injects password or not (boolean
-# value)
-#inject_password=true
-
-# Template file for injected network (string value)
-#injected_network_template=$pybasedir/nova/virt/baremetal/interfaces.template
-
 # Baremetal VIF driver. (string value)
 #vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver
 
@@ -2737,7 +2989,7 @@
 
 # maximal number of retries for IPMI operations (integer
 # value)
-#ipmi_power_retry=5
+#ipmi_power_retry=10
 
 
 #
@@ -2757,7 +3009,7 @@
 
 # additional append parameters for baremetal PXE boot (string
 # value)
-#pxe_append_params=<None>
+#pxe_append_params=nofb nomodeset vga=normal
 
 # Template file for PXE configuration (string value)
 #pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template
@@ -2766,6 +3018,38 @@
 # value)
 #pxe_deploy_timeout=0
 
+# If set, pass the network configuration details to the
+# initramfs via cmdline. (boolean value)
+#pxe_network_config=false
+
+# This gets passed to Neutron as the bootfile dhcp parameter
+# when the dhcp_options_enabled is set. (string value)
+#pxe_bootfile_name=pxelinux.0
+
+
+#
+# Options defined in nova.virt.baremetal.tilera_pdu
+#
+
+# ip address of tilera pdu (string value)
+#tile_pdu_ip=10.0.100.1
+
+# management script for tilera pdu (string value)
+#tile_pdu_mgr=/tftpboot/pdu_mgr
+
+# power status of tilera PDU is OFF (integer value)
+#tile_pdu_off=2
+
+# power status of tilera PDU is ON (integer value)
+#tile_pdu_on=1
+
+# power status of tilera PDU (integer value)
+#tile_pdu_status=9
+
+# wait time in seconds until check the result after tilera
+# power operations (integer value)
+#tile_power_wait=9
+
 
 #
 # Options defined in nova.virt.baremetal.virtual_power_driver
@@ -2774,9 +3058,12 @@
 # ip or name to virtual power host (string value)
 #virtual_power_ssh_host=
 
+# Port to use for ssh to virtual power host (integer value)
+#virtual_power_ssh_port=22
+
 # base command to use for virtual power(vbox,virsh) (string
 # value)
-#virtual_power_type=vbox
+#virtual_power_type=virsh
 
 # user to execute virtual power commands as (string value)
 #virtual_power_host_user=
@@ -2784,6 +3071,9 @@
 # password for virtual power host_user (string value)
 #virtual_power_host_pass=
 
+# ssh key for virtual power host_user (string value)
+#virtual_power_host_key=<None>
+
 
 #
 # Options defined in nova.virt.baremetal.volume_driver
@@ -2805,10 +3095,45 @@
 # Options defined in nova.openstack.common.notifier.rpc_notifier2
 #
 
-# AMQP topic(s) used for openstack notifications (list value)
+# AMQP topic(s) used for OpenStack notifications (list value)
 #topics=notifications
 
 
+[matchmaker_redis]
+
+#
+# Options defined in nova.openstack.common.rpc.matchmaker_redis
+#
+
+# Host to locate redis (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server. (optional) (string value)
+#password=<None>
+
+
+[ssl]
+
+#
+# Options defined in nova.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
 [trusted_computing]
 
 #
@@ -2835,9 +3160,200 @@
 #attestation_auth_timeout=60
 
 
+[upgrade_levels]
+
+#
+# Options defined in nova.baserpc
+#
+
+# Set a version cap for messages sent to the base api in any
+# service (string value)
+#baseapi=<None>
+
+
+#
+# Options defined in nova.cells.rpc_driver
+#
+
+# Set a version cap for messages sent between cells services
+# (string value)
+#intercell=<None>
+
+
+#
+# Options defined in nova.cells.rpcapi
+#
+
+# Set a version cap for messages sent to local cells services
+# (string value)
+#cells=<None>
+
+
+#
+# Options defined in nova.cert.rpcapi
+#
+
+# Set a version cap for messages sent to cert services (string
+# value)
+#cert=<None>
+
+
+#
+# Options defined in nova.compute.rpcapi
+#
+
+# Set a version cap for messages sent to compute services. If
+# you plan to do a live upgrade from havana to icehouse, you
+# should set this option to "icehouse-compat" before beginning
+# the live upgrade procedure. (string value)
+#compute=<None>
+
+
+#
+# Options defined in nova.conductor.rpcapi
+#
+
+# Set a version cap for messages sent to conductor services
+# (string value)
+#conductor=<None>
+
+
+#
+# Options defined in nova.console.rpcapi
+#
+
+# Set a version cap for messages sent to console services
+# (string value)
+#console=<None>
+
+
+#
+# Options defined in nova.consoleauth.rpcapi
+#
+
+# Set a version cap for messages sent to consoleauth services
+# (string value)
+#consoleauth=<None>
+
+
+#
+# Options defined in nova.network.rpcapi
+#
+
+# Set a version cap for messages sent to network services
+# (string value)
+#network=<None>
+
+
+#
+# Options defined in nova.scheduler.rpcapi
+#
+
+# Set a version cap for messages sent to scheduler services
+# (string value)
+#scheduler=<None>
+
+
+[matchmaker_ring]
+
+#
+# Options defined in nova.openstack.common.rpc.matchmaker_ring
+#
+
+# Matchmaker ring file (JSON) (string value)
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
 [vmware]
 
 #
+# Options defined in nova.virt.vmwareapi.driver
+#
+
+# URL for connection to VMware ESX/VC host. Required if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
+#host_ip=<None>
+
+# Username for connection to VMware ESX/VC host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
+#host_username=<None>
+
+# Password for connection to VMware ESX/VC host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
+#host_password=<None>
+
+# Name of a VMware Cluster ComputeResource. Used only if
+# compute_driver is vmwareapi.VMwareVCDriver. (multi valued)
+#cluster_name=<None>
+
+# Regex to match the name of a datastore. Used only if
+# compute_driver is vmwareapi.VMwareVCDriver. (string value)
+#datastore_regex=<None>
+
+# The interval used for polling of remote tasks. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (floating point value)
+#task_poll_interval=5.0
+
+# The number of times we retry on failures, e.g., socket
+# error, etc. Used only if compute_driver is
+# vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.
+# (integer value)
+#api_retry_count=10
+
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# DEPRECATED. VNC password. The password-based access to VNC
+# consoles will be removed in the next release. The default
+# value will disable password protection on the VNC console.
+# (string value)
+#vnc_password=<None>
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
+
+#
+# Options defined in nova.virt.vmwareapi.vif
+#
+
+# Physical ethernet adapter name for vlan networking (string
+# value)
+#vlan_interface=vmnic0
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim
+#
+
+# Optional VIM Service WSDL Location e.g
+# http://<server>/vimService.wsdl. Optional over-ride to
+# default location for bug work-arounds (string value)
+#wsdl_location=<None>
+
+
+#
+# Options defined in nova.virt.vmwareapi.vim_util
+#
+
+# The maximum number of ObjectContent data objects that should
+# be returned in a single result. A positive value will cause
+# the operation to suspend the retrieval when the count of
+# objects reaches the specified maximum. The server may still
+# limit the count to something less than the configured value.
+# Any remaining objects may be retrieved with additional
+# requests. (integer value)
+#maximum_objects=100
+
+
+#
 # Options defined in nova.virt.vmwareapi.vmops
 #
 
@@ -2873,21 +3389,3 @@
 #keymap=en-us
 
 
-#
-# Options defined in nova.virt.solariszones.driver
-#
-
-# Default path to Glance cache for Solaris Zones. (string
-# value)
-#glancecache_dirname=/export/glance
-
-# Default path to Solaris Zone root file systems. (string
-# value)
-#zonepath_dirname=/export/zone
-
-# Location where solariszones driver will store snapshots
-# before uploading them to image service (string value)
-#solariszones_snapshots_directory=$instances_path/snapshots
-
-
-# Total option count: 587
--- a/components/openstack/nova/files/nova.exec_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova.exec_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -7,4 +7,6 @@
 nova-compute:solaris:cmd:RO::/usr/lib/rad/rad:\
 privs={zone}\:/etc/zones/*,{zone}\:/system/volatile/zones/*
 
+nova-compute:solaris:cmd:RO::/usr/sbin/fcinfo:privs=file_dac_read,sys_devices
+
 nova-compute:solaris:cmd:RO::/usr/sbin/iscsiadm:euid=0
--- a/components/openstack/nova/files/nova.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/nova.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -5,7 +5,8 @@
 solaris.admin.edit/etc/nova/nova.conf,\
 solaris.admin.edit/etc/nova/policy.json,\
 solaris.smf.manage.cinder,\
-solaris.smf.value.cinder
+solaris.smf.value.cinder;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Compute Management
 
--- a/components/openstack/nova/files/release	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/release	Wed Jun 11 17:13:12 2014 -0700
@@ -1,4 +1,4 @@
 [Nova]
 vendor = Oracle Solaris
 product = OpenStack Nova
-package = Grizzly 2013.1.4
+package = Havana 2013.2.3
--- a/components/openstack/nova/files/solariszones/driver.py	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/files/solariszones/driver.py	Wed Jun 11 17:13:12 2014 -0700
@@ -29,7 +29,6 @@
 import uuid
 
 import rad.bindings.com.oracle.solaris.rad.kstat as kstat
-import rad.bindings.com.oracle.solaris.rad.zonesbridge as zonesbridge
 import rad.bindings.com.oracle.solaris.rad.zonemgr as zonemgr
 import rad.client
 import rad.connect
@@ -37,6 +36,7 @@
 from solaris_install.archive import LOGFILE as ARCHIVE_LOGFILE
 from solaris_install.archive import UnifiedArchive
 from solaris_install.engine import InstallEngine
+from solaris_install.target.size import Size
 
 from eventlet import greenthread
 from lxml import etree
@@ -44,16 +44,19 @@
 
 from nova.compute import power_state
 from nova.compute import task_states
-from nova.compute import vm_mode
+from nova.compute import vm_states
 from nova import conductor
 from nova import context as nova_context
 from nova import exception
 from nova.image import glance
-from nova.network import quantumv2
+from nova.network import neutronv2
 from nova.openstack.common import fileutils
+from nova.openstack.common.gettextutils import _
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
-from nova import paths
+from nova.openstack.common import loopingcall
+from nova.openstack.common import processutils
+from nova.openstack.common import strutils
 from nova import utils
 from nova.virt import driver
 from nova.virt import event as virtevent
@@ -132,7 +135,7 @@
 
 def lookup_resource_property_value(zone, resource, prop, value):
     """Lookup specified property with value from specified Solaris Zone
-       resource. Returns property if matching value is found, else None
+    resource. Returns property if matching value is found, else None
     """
     try:
         resources = zone.getResources(zonemgr.Resource(resource))
@@ -149,20 +152,19 @@
 
 
 class ZoneConfig(object):
-    """ ZoneConfig - context manager for access zone configurations.
+    """ZoneConfig - context manager for access zone configurations.
     Automatically opens the configuration for a zone and commits any changes
     before exiting
     """
     def __init__(self, zone):
-        """ zone is a zonemgr object representing either a kernel zone or
+        """zone is a zonemgr object representing either a kernel zone or
         non-glboal zone.
         """
         self.zone = zone
         self.editing = False
 
     def __enter__(self):
-        """ enables the editing of the zone.
-        """
+        """enables the editing of the zone."""
         try:
             self.zone.editConfig()
             self.editing = True
@@ -173,7 +175,7 @@
             raise
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        """ looks for any kind of exception before exiting.  If one is found,
+        """looks for any kind of exception before exiting.  If one is found,
         cancel any configuration changes and reraise the exception.  If not,
         commit the new configuration.
         """
@@ -192,7 +194,7 @@
                 raise
 
     def setprop(self, resource, prop, value):
-        """ sets a property for an existing resource OR creates a new resource
+        """sets a property for an existing resource OR creates a new resource
         with the given property(s).
         """
         current = lookup_resource_property(self.zone, resource, prop)
@@ -215,8 +217,7 @@
             raise
 
     def addresource(self, resource, props=None):
-        """ creates a new resource with an optional property list.
-        """
+        """creates a new resource with an optional property list."""
         if props is None:
             props = []
 
@@ -229,8 +230,8 @@
             raise
 
     def removeresources(self, resource, props=None):
-        """ removes resources whose properties include the optional property
-            list specified in props.
+        """removes resources whose properties include the optional property
+        list specified in props.
         """
         if props is None:
             props = []
@@ -283,6 +284,9 @@
         self.virtapi = virtapi
         self._compute_event_callback = None
         self._conductor_api = conductor.API()
+        self._fc_hbas = None
+        self._fc_wwnns = None
+        self._fc_wwpns = None
         self._host_stats = {}
         self._initiator = None
         self._install_engine = None
@@ -312,10 +316,79 @@
 
     def init_host(self, host):
         """Initialize anything that is necessary for the driver to function,
-        including catching up with currently running VM's on the given host."""
+        including catching up with currently running VM's on the given host.
+        """
         # TODO(Vek): Need to pass context in for access to auth_token
+        self._init_rad()
 
-        self._init_rad()
+    def _get_fc_hbas(self):
+        """Get Fibre Channel HBA information."""
+        if self._fc_hbas:
+            return self._fc_hbas
+
+        out = None
+        try:
+            out, err = utils.execute('/usr/sbin/fcinfo', 'hba-port')
+        except processutils.ProcessExecutionError as err:
+            return []
+
+        if out is None:
+            raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
+
+        hbas = []
+        hba = {}
+        for line in out.splitlines():
+            line = line.strip()
+            # Collect the following hba-port data:
+            # 1: Port WWN
+            # 2: State (online|offline)
+            # 3: Node WWN
+            if line.startswith("HBA Port WWN:"):
+                # New HBA port entry
+                hba = {}
+                wwpn = line.split()[-1]
+                hba['port_name'] = wwpn
+                continue
+            elif line.startswith("Port Mode:"):
+                mode = line.split()[-1]
+                # Skip Target mode ports
+                if mode != 'Initiator':
+                    break
+            elif line.startswith("State:"):
+                state = line.split()[-1]
+                hba['port_state'] = state
+                continue
+            elif line.startswith("Node WWN:"):
+                wwnn = line.split()[-1]
+                hba['node_name'] = wwnn
+                continue
+            if len(hba) == 3:
+                hbas.append(hba)
+                hba = {}
+        self._fc_hbas = hbas
+        return self._fc_hbas
+
+    def _get_fc_wwnns(self):
+        """Get Fibre Channel WWNNs from the system, if any."""
+        hbas = self._get_fc_hbas()
+
+        wwnns = []
+        for hba in hbas:
+            if hba['port_state'] == 'online':
+                wwnn = hba['node_name']
+                wwnns.append(wwnn)
+        return wwnns
+
+    def _get_fc_wwpns(self):
+        """Get Fibre Channel WWPNs from the system, if any."""
+        hbas = self._get_fc_hbas()
+
+        wwpns = []
+        for hba in hbas:
+            if hba['port_state'] == 'online':
+                wwpn = hba['port_name']
+                wwpns.append(wwpn)
+        return wwpns
 
     def _get_iscsi_initiator(self):
         """ Return the iSCSI initiator node name IQN for this host """
@@ -327,6 +400,19 @@
         initiator_iqn = initiator_name_line.rsplit(' ', 1)[1]
         return initiator_iqn
 
+    def _get_zone_auto_install_state(self, zone_name):
+        """Returns the SMF state of the auto-installer service,
+           or None if auto-installer service is non-existent
+        """
+        try:
+            out, err = utils.execute('/usr/sbin/zlogin', '-S', zone_name,
+                                     '/usr/bin/svcs', '-H', '-o', 'state',
+                                     'auto-installer:default')
+            return out.strip()
+        except processutils.ProcessExecutionError as err:
+            # No auto-installer instance most likely.
+            return None
+
     def _get_zone_by_name(self, name):
         """Return a Solaris Zones object via RAD by name."""
         try:
@@ -336,7 +422,6 @@
             return None
         except Exception:
             raise
-
         return zone
 
     def _get_state(self, zone):
@@ -351,7 +436,7 @@
         """Return the maximum memory in KBytes allowed."""
         max_mem = lookup_resource_property(zone, 'capped-memory', 'physical')
         if max_mem is not None:
-            return utils.to_bytes(max_mem) / 1024
+            return strutils.to_bytes(max_mem) / 1024
 
         # If physical property in capped-memory doesn't exist, this may
         # represent a non-global zone so just return the system's total
@@ -422,7 +507,6 @@
         for named in kstat_object.fresh_snapshot().data.NAMED:
             kstat_data[named.name] = getattr(named.value,
                                              str(named.value.discriminant))
-
         return kstat_data
 
     def _get_cpu_time(self, zone):
@@ -455,7 +539,6 @@
             LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)")
                       % name)
             raise exception.InstanceNotFound(instance_id=name)
-
         return {
             'state':    self._get_state(zone),
             'max_mem':  self._get_max_mem(zone),
@@ -496,6 +579,18 @@
         """
         return instance_id in self.list_instances()
 
+    def estimate_instance_overhead(self, instance_info):
+        """Estimate the virtualization overhead required to build an instance
+        of the given flavor.
+
+        Defaults to zero, drivers should override if per-instance overhead
+        calculations are desired.
+
+        :param instance_info: Instance/flavor to calculate overhead for.
+        :returns: Dict of estimated overhead values.
+        """
+        return {'memory_mb': 0}
+
     def _get_list_zone_object(self):
         """Return a list of all Solaris Zones objects via RAD."""
         return self._rad_instance.list_objects(zonemgr.Zone())
@@ -538,7 +633,6 @@
             LOG.error(_("Unable to fetch Glance image: id %s: %s")
                       % (instance['image_ref'], reason))
             raise
-
         return image
 
     def _validate_image(self, image, instance):
@@ -602,10 +696,10 @@
 
     def _suri_from_volume_info(self, connection_info):
         """Returns a suri(5) formatted string based on connection_info
-           Currently supports local ZFS volume and iSCSI driver types.
+        Currently supports local ZFS volume and iSCSI driver types.
         """
         driver_type = connection_info['driver_volume_type']
-        if driver_type not in ['iscsi', 'local']:
+        if driver_type not in ['iscsi', 'fibre_channel', 'local']:
             raise exception.VolumeDriverNotFound(driver_type=driver_type)
         if driver_type == 'local':
             suri = 'dev:/dev/zvol/dsk/%s' % connection_info['volume_path']
@@ -621,7 +715,42 @@
                                                     data['target_iqn'],
                                                     data['target_lun'])
             # TODO(npower): need to handle CHAP authentication also
+        elif driver_type == 'fibre_channel':
+            data = connection_info['data']
+            target_wwn = data['target_wwn']
+            # Check for multiple target_wwn values in a list
+            if isinstance(target_wwn, list):
+                target_wwn = target_wwn[0]
+            # Ensure there's a fibre channel HBA.
+            hbas = self._get_fc_hbas()
+            if not hbas:
+                LOG.error(_("Cannot attach Fibre Channel volume '%s' because "
+                          "no Fibre Channel HBA initiators were found")
+                          % (target_wwn))
+                raise exception.InvalidVolume(reason="No host FC initiator")
 
+            target_lun = data['target_lun']
+            # If the volume was exported just a few seconds previously then
+            # it will probably not be visible to the local adapter yet.
+            # Invoke 'fcinfo remote-port' on all local HBA ports to trigger
+            # a refresh.
+            for wwpn in self._get_fc_wwpns():
+                utils.execute('/usr/sbin/fcinfo', 'remote-port',
+                              '-p', wwpn)
+
+            # Use suriadm(1M) to generate a Fibre Channel storage URI.
+            try:
+                out, err = utils.execute('/usr/sbin/suriadm', 'lookup-uri',
+                                         '-p', 'target=naa.%s' % target_wwn,
+                                         '-p', 'lun=%s' % target_lun)
+            except processutils.ProcessExecutionError as err:
+                LOG.error(_("Lookup failure of Fibre Channel volume '%s', lun "
+                          "%s: %s") % (target_wwn, target_lun, err.stderr))
+                raise
+
+            lines = out.split('\n')
+            # Use the long form SURI on the second output line.
+            suri = lines[1].strip()
         return suri
 
     def _set_global_properties(self, name, extra_specs, brand):
@@ -677,11 +806,12 @@
                 greenthread.sleep(1)
 
         except Exception as reason:
-            LOG.error(_("Unable to create root zpool volume for instance '%s':"
-                        "%s") % (instance['name'], reason))
+            LOG.error(_("Unable to create root zpool volume for instance '%s'"
+                        ": %s") % (instance['name'], reason))
             raise
 
         instance_uuid = instance['uuid']
+        volume_id = volume['id']
         # TODO(npower): Adequate for default boot device. We currently
         # ignore this value, but cinder gets stroppy about this if we set it to
         # None
@@ -689,9 +819,8 @@
 
         try:
             connector = self.get_volume_connector(instance)
-            connection_info = self._volume_api.initialize_connection(context,
-                                                                     volume,
-                                                                     connector)
+            connection_info = self._volume_api.initialize_connection(
+                context, volume_id, connector)
             # Check connection_info to determine if the provided volume is
             # local to this compute node. If it is, then don't use it for
             # Solaris branded zones in order to avoid a know ZFS deadlock issue
@@ -725,7 +854,8 @@
                                     "as a boot device for 'solaris' branded "
                                     "zones."))
                         delete_boot_volume = True
-                else:
+                # Assuming that fibre_channel is non-local
+                elif driver_type != 'fibre_channel':
                     # Some other connection type that we don't understand
                     # Let zone use some local fallback instead.
                     LOG.warning(_("Unsupported volume driver type '%s' "
@@ -735,11 +865,12 @@
 
             if delete_boot_volume:
                 LOG.warning(_("Volume '%s' is being discarded") % volume['id'])
-                self._volume_api.delete(context, volume)
+                self._volume_api.delete(context, volume_id)
                 return None
 
             # Notify Cinder DB of the volume attachment.
-            self._volume_api.attach(context, volume, instance_uuid, mountpoint)
+            self._volume_api.attach(context, volume_id, instance_uuid,
+                                    mountpoint)
             values = {
                 'instance_uuid': instance['uuid'],
                 'connection_info': jsonutils.dumps(connection_info),
@@ -749,7 +880,7 @@
                 'delete_on_termination': True,
                 'virtual_name': None,
                 'snapshot_id': None,
-                'volume_id': volume['id'],
+                'volume_id': volume_id,
                 'volume_size': instance['root_gb'],
                 'no_device': None}
             self._conductor_api.block_device_mapping_update_or_create(context,
@@ -758,10 +889,9 @@
         except Exception as reason:
             LOG.error(_("Unable to attach root zpool volume '%s' to instance "
                         "%s: %s") % (volume['id'], instance['name'], reason))
-            self._volume_api.detach(context, volume)
-            self._volume_api.delete(context, volume)
+            self._volume_api.detach(context, volume_id)
+            self._volume_api.delete(context, volume_id)
             raise
-
         return connection_info
 
     def _set_boot_device(self, name, connection_info, brand):
@@ -821,8 +951,7 @@
 
     def _set_network(self, context, name, instance, network_info, brand,
                      sc_dir):
-        """ add networking information to the zone.
-        """
+        """add networking information to the zone."""
         zone = self._get_zone_by_name(name)
         if zone is None:
             raise exception.InstanceNotFound(instance_id=name)
@@ -865,7 +994,7 @@
                     linkname = 'net%s' % id
 
             # create the required sysconfig file
-            network_plugin = quantumv2.get_client(context)
+            network_plugin = neutronv2.get_client(context)
             port = network_plugin.show_port(port_uuid)['port']
             subnet_uuid = port['fixed_ips'][0]['subnet_id']
             subnet = network_plugin.show_subnet(subnet_uuid)['subnet']
@@ -887,10 +1016,9 @@
                 zc.setprop('global', 'tenant', tenant_id)
 
     def _verify_sysconfig(self, sc_dir, instance):
-        """ verify the SC profile(s) passed in contain an entry for
+        """verify the SC profile(s) passed in contain an entry for
         system/config-user to configure the root account.  If an SSH key is
         specified, configure root's profile to use it.
-
         """
         usercheck = lambda e: e.attrib.get('name') == 'system/config-user'
         hostcheck = lambda e: e.attrib.get('name') == 'system/identity'
@@ -1103,17 +1231,74 @@
         # Attempt to provision a (Cinder) volume service backed boot volume
         connection_info = self._connect_boot_volume(context, instance,
                                                     extra_specs)
+        name = instance['name']
+
+        def _ai_health_check(zone):
+            # TODO(npower) A hung kernel zone installation will not always
+            # be detected by zoneadm in the host global zone, which locks
+            # out other zoneadm commands.
+            # Workaround:
+            # Check the state of the auto-installer:default SMF service in
+            # the kernel zone. If installation failed, it should be in the
+            # 'maintenance' state. Unclog zoneadm by executing a shutdown
+            # inside the kernel zone if that's the case.
+            # Eventually we'll be able to pass a boot option to the zone
+            # to have it automatically shutdown if the installation fails.
+            if instance['vm_state'] == vm_states.BUILDING:
+                if self._get_zone_auto_install_state(name) == 'maintenance':
+                    # Poweroff the zone. This will cause the current call to
+                    # self._install() to catch an exception and tear down
+                    # the kernel zone.
+                    LOG.error(_("Automated installation of instance '%s' "
+                              "failed. Powering off the kernel zone '%s'.")
+                              % (instance['display_name'], name))
+                    try:
+                        utils.execute('/usr/sbin/zlogin', '-S', name,
+                                      '/usr/sbin/poweroff')
+                    except processutils.ProcessExecutionError as err:
+                        # poweroff pulls the rug from under zlogin, so ignore
+                        # the anticipated error.
+                        pass
+                    finally:
+                        raise loopingcall.LoopingCallDone()
+                else:
+                    # Looks like it installed OK
+                    if zone.state == ZONE_STATE_INSTALLED:
+                        LOG.debug(_("Kernel zone '%s' (%s) state: %s.")
+                                  % (name, instance['display_name'],
+                                     zone.state))
+                        raise loopingcall.LoopingCallDone()
+                    else:
+                        return
+            else:
+                # Can't imagine why we'd get here under normal circumstances
+                LOG.warning(_("Unexpected vm_state during installation of "
+                            "'%s' (%s): %s. Zone state: %s")
+                            % (name, instance['display_name'],
+                               instance['vm_state'], zone.state))
+                raise loopingcall.LoopingCallDone()
 
         LOG.debug(_("creating zone configuration for '%s' (%s)") %
-                  (instance['name'], instance['display_name']))
+                  (name, instance['display_name']))
         self._create_config(context, instance, network_info,
                             connection_info, extra_specs, sc_dir)
         try:
-            self._install(instance, image, extra_specs, sc_dir)
+            zone = self._get_zone_by_name(name)
+            is_kz = lookup_resource_property_value(zone, "global", "brand",
+                                                   ZONE_BRAND_SOLARIS_KZ)
+            # Monitor kernel zone installation explicitly
+            if is_kz:
+                monitor = loopingcall.FixedIntervalLoopingCall(
+                    _ai_health_check, zone)
+                monitor.start(interval=15, initial_delay=60)
+                self._install(instance, image, extra_specs, sc_dir)
+                monitor.wait()
+            else:
+                self._install(instance, image, extra_specs, sc_dir)
             self._power_on(instance)
         except Exception as reason:
             LOG.error(_("Unable to spawn instance '%s' via zonemgr(3RAD): %s")
-                      % (instance['name'], reason))
+                      % (name, reason))
             self._uninstall(instance)
             self._delete_config(instance)
             raise
@@ -1129,7 +1314,24 @@
             if halt_type == 'SOFT':
                 zone.shutdown()
             else:
-                zone.halt()
+                # 'HARD'
+                # TODO(npower) See comments for _ai_health_check() for why
+                # it is sometimes necessary to poweroff from within the zone,
+                # until zoneadm and auto-install can perform this internally.
+                zprop = lookup_resource_property_value(zone, "global", "brand",
+                                                       ZONE_BRAND_SOLARIS_KZ)
+                if zprop and self._get_zone_auto_install_state(name):
+                    # Don't really care what state the install service is in.
+                    # Just shut it down ASAP.
+                    try:
+                        utils.execute('/usr/sbin/zlogin', '-S', name,
+                                      '/usr/sbin/poweroff')
+                    except processutils.ProcessExecutionError as err:
+                        # Poweroff pulls the rug from under zlogin, so ignore
+                        # the anticipated error.
+                        return
+                else:
+                    zone.halt()
             return
         except rad.client.ObjectError as reason:
             result = reason.get_payload()
@@ -1144,7 +1346,7 @@
             raise exception.InstancePowerOffFailure(reason=reason)
 
     def destroy(self, instance, network_info, block_device_info=None,
-                destroy_disks=True):
+                destroy_disks=True, context=None):
         """Destroy (shutdown and delete) the specified instance.
 
         If the instance is not found (for example if networking failed), this
@@ -1253,7 +1455,6 @@
                 for line in log.readlines():
                     fragment += line
                 console_str = fragment + console_str
-
         return console_str
 
     def get_console_output(self, instance):
@@ -1297,7 +1498,6 @@
             for key in kstat_data.keys():
                 if key not in ('class', 'crtime', 'snaptime'):
                     diagnostics[key] = kstat_data[key]
-
         return diagnostics
 
     def get_diagnostics(self, instance):
@@ -1309,17 +1509,18 @@
             LOG.error(_("Unable to find instance '%s' via zonemgr(3RAD)")
                       % name)
             raise exception.InstanceNotFound(instance_id=name)
-
         return self._get_zone_diagnostics(zone)
 
     def get_all_bw_counters(self, instances):
         """Return bandwidth usage counters for each interface on each
-           running VM"""
+           running VM.
+        """
         raise NotImplementedError()
 
     def get_all_volume_usage(self, context, compute_host_bdms):
         """Return usage info for volumes attached to vms on
-           a given host"""
+           a given host.
+        """
         raise NotImplementedError()
 
     def get_host_ip_addr(self):
@@ -1329,7 +1530,8 @@
         # TODO(Vek): Need to pass context in for access to auth_token
         return CONF.my_ip
 
-    def attach_volume(self, connection_info, instance, mountpoint):
+    def attach_volume(self, context, connection_info, instance, mountpoint,
+                      encryption=None):
         """Attach the disk to the instance at mountpoint using info."""
         # TODO(npower): Apply mountpoint in a meaningful way to the zone
         # (I don't think this is even possible for Solaris brand zones)
@@ -1349,7 +1551,8 @@
         with ZoneConfig(zone) as zc:
             zc.addresource("device", [zonemgr.Property("storage", suri)])
 
-    def detach_volume(self, connection_info, instance, mountpoint):
+    def detach_volume(self, connection_info, instance, mountpoint,
+                      encryption=None):
         """Detach the disk attached to the instance."""
         name = instance['name']
         zone = self._get_zone_by_name(name)
@@ -1374,11 +1577,16 @@
         with ZoneConfig(zone) as zc:
             zc.removeresources("device", [zonemgr.Property("storage", suri)])
 
-    def attach_interface(self, instance, image_meta, network_info):
+    def swap_volume(self, old_connection_info, new_connection_info,
+                    instance, mountpoint):
+        """Replace the disk attached to the instance."""
+        raise NotImplementedError()
+
+    def attach_interface(self, instance, image_meta, vif):
         """Attach an interface to the instance."""
         raise NotImplementedError()
 
-    def detach_interface(self, instance, network_info):
+    def detach_interface(self, instance, vif):
         """Detach an interface from the instance."""
         raise NotImplementedError()
 
@@ -1391,6 +1599,17 @@
         """
         raise NotImplementedError()
 
+    def live_snapshot(self, context, instance, image_id, update_task_state):
+        """
+        Live-snapshots the specified instance (includes ram and proc state).
+
+        :param context: security context
+        :param instance: Instance object as returned by DB layer.
+        :param image_id: Reference to a pre-created image that will
+                         hold the snapshot.
+        """
+        raise NotImplementedError()
+
     def snapshot(self, context, instance, image_id, update_task_state):
         """
         Snapshots the specified instance.
@@ -1485,14 +1704,23 @@
 
     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance,
-                         block_device_info=None):
-        """Completes a resize, turning on the migrated instance
+                         block_device_info=None, power_on=True):
+        """Completes a resize.
 
+        :param context: the context for the migration/resize
+        :param migration: the migrate/resize information
+        :param instance: the instance being migrated/resized
+        :param disk_info: the newly transferred disk information
         :param network_info:
            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
         :param image_meta: image object returned by nova.image.glance that
                            defines the image from which this instance
                            was created
+        :param resize_instance: True if the instance is being resized,
+                                False otherwise
+        :param block_device_info: instance volume block device info
+        :param power_on: True if the instance should be powered on, False
+                         otherwise
         """
         raise NotImplementedError()
 
@@ -1502,8 +1730,17 @@
         raise NotImplementedError()
 
     def finish_revert_migration(self, instance, network_info,
-                                block_device_info=None):
-        """Finish reverting a resize, powering back on the instance."""
+                                block_device_info=None, power_on=True):
+        """
+        Finish reverting a resize.
+
+        :param instance: the instance being migrated/resized
+        :param network_info:
+           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param block_device_info: instance volume block device info
+        :param power_on: True if the instance should be powered on, False
+                         otherwise
+        """
         # TODO(Vek): Need to pass context in for access to auth_token
         raise NotImplementedError()
 
@@ -1517,44 +1754,22 @@
         # TODO(Vek): Need to pass context in for access to auth_token
         raise NotImplementedError()
 
-    def _suspend(self, instance):
-        """Suspend a Solaris Zone."""
-        name = instance['name']
-        zone = self._get_zone_by_name(name)
-        if zone is None:
-            raise exception.InstanceNotFound(instance_id=name)
-
-        if self._uname[4] != 'i86pc':
-            # Only x86 platforms are currently supported.
-            raise NotImplementedError()
-
-        zprop = lookup_resource_property_value(zone, "global", "brand",
-                                               ZONE_BRAND_SOLARIS_KZ)
-        if not zprop:
-            # Only Solaris Kernel zones are currently supported.
-            raise NotImplementedError()
-
-        try:
-            zone.suspend()
-        except Exception as reason:
-            # TODO(dcomay): Try to recover in cases where zone has been
-            # resumed automatically.
-            LOG.error(_("Unable to suspend instance '%s' via zonemgr(3RAD): "
-                        "%s") % (name, reason))
-            raise exception.InstanceSuspendFailure(reason=reason)
-
     def suspend(self, instance):
         """suspend the specified instance."""
         # TODO(Vek): Need to pass context in for access to auth_token
-        self._suspend(instance)
+        raise NotImplementedError()
+
+    def resume(self, context, instance, network_info, block_device_info=None):
+        """
+        resume the specified instance.
 
-    def resume(self, instance, network_info, block_device_info=None):
-        """resume the specified instance."""
-        # TODO(Vek): Need to pass context in for access to auth_token
-        try:
-            self._power_on(instance)
-        except Exception as reason:
-            raise exception.InstanceResumeFailure(reason=reason)
+        :param context: the context for the resume
+        :param instance: the instance being resumed
+        :param network_info:
+           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param block_device_info: instance volume block device info
+        """
+        raise NotImplementedError()
 
     def resume_state_on_host_boot(self, context, instance, network_info,
                                   block_device_info=None):
@@ -1604,7 +1819,7 @@
         try:
             value = None
             (out, _err) = utils.execute('/usr/sbin/zpool', 'get', prop, zpool)
-        except exception.ProcessExecutionError as err:
+        except processutils.ProcessExecutionError as err:
             LOG.error(_("Failed to get property '%s' from zpool '%s': %s")
                       % (prop, zpool, err.stderr))
             return value
@@ -1612,7 +1827,6 @@
         zpool_prop = out.splitlines()[1].split()
         if zpool_prop[1] == prop:
             value = zpool_prop[2]
-
         return value
 
     def _update_host_stats(self):
@@ -1625,10 +1839,10 @@
         out, err = utils.execute('/usr/sbin/zfs', 'list', '-Ho', 'name', '/')
         root_zpool = out.split('/')[0]
         size = self._get_zpool_property('size', root_zpool)
-        if size is None:
+        if size is not None:
+            host_stats['local_gb'] = Size(size).get(Size.gb_units)
+        else:
             host_stats['local_gb'] = 0
-        else:
-            host_stats['local_gb'] = utils.to_bytes(size)/(1024 ** 3)
 
         # Account for any existing processor sets by looking at the the
         # number of CPUs not assigned to any processor sets.
@@ -1644,26 +1858,39 @@
         kstat_data = self._get_kstat_by_name('pages', 'unix', '0',
                                              'system_pages')
         if kstat_data is not None:
+            free_ram_mb = self._pages_to_kb(kstat_data['freemem']) / 1024
             host_stats['memory_mb_used'] = \
-                self._pages_to_kb((pages - kstat_data['freemem'])) / 1024
+                host_stats['memory_mb'] - free_ram_mb
         else:
             host_stats['memory_mb_used'] = 0
 
-        host_stats['local_gb_used'] = 0
+        free = self._get_zpool_property('free', root_zpool)
+        if free is not None:
+            free_disk_gb = Size(free).get(Size.gb_units)
+        else:
+            free_disk_gb = 0
+        host_stats['local_gb_used'] = host_stats['local_gb'] - free_disk_gb
+
         host_stats['hypervisor_type'] = 'solariszones'
         host_stats['hypervisor_version'] = int(self._uname[2].replace('.', ''))
         host_stats['hypervisor_hostname'] = self._uname[1]
+
         if self._uname[4] == 'i86pc':
             architecture = 'x86_64'
         else:
             architecture = 'sparc64'
-        host_stats['cpu_info'] = str({'arch': architecture})
+        cpu_info = {
+            'arch': architecture
+        }
+        host_stats['cpu_info'] = jsonutils.dumps(cpu_info)
+
         host_stats['disk_available_least'] = 0
 
         supported_instances = [
-            (architecture, 'solariszones', 'zones')
+            (architecture, 'solariszones', 'solariszones')
         ]
-        host_stats['supported_instances'] = supported_instances
+        host_stats['supported_instances'] = \
+            jsonutils.dumps(supported_instances)
 
         self._host_stats = host_stats
 
@@ -1671,7 +1898,7 @@
         """Retrieve resource information.
 
         This method is called when nova-compute launches, and
-        as part of a periodic task
+        as part of a periodic task that records the results in the DB.
 
         :param nodename:
             node which the caller want to get resources from
@@ -1693,7 +1920,7 @@
         resources['hypervisor_hostname'] = host_stats['hypervisor_hostname']
         resources['cpu_info'] = host_stats['cpu_info']
         resources['disk_available_least'] = host_stats['disk_available_least']
-
+        resources['supported_instances'] = host_stats['supported_instances']
         return resources
 
     def pre_live_migration(self, ctxt, instance_ref, block_device_info,
@@ -1731,6 +1958,15 @@
         """
         raise NotImplementedError()
 
+    def post_live_migration(self, ctxt, instance_ref, block_device_info):
+        """Post operation of live migration at source host.
+
+        :param ctxt: security context
+        :param instance_ref: instance object that was migrated
+        :param block_device_info: instance block device information
+        """
+        pass
+
     def post_live_migration_at_destination(self, ctxt, instance_ref,
                                            network_info,
                                            block_migration=False,
@@ -1744,6 +1980,33 @@
         """
         raise NotImplementedError()
 
+    def check_instance_shared_storage_local(self, ctxt, instance):
+        """Check if instance files located on shared storage.
+
+        This runs check on the destination host, and then calls
+        back to the source host to check the results.
+
+        :param ctxt: security context
+        :param instance: nova.db.sqlalchemy.models.Instance
+        """
+        raise NotImplementedError()
+
+    def check_instance_shared_storage_remote(self, ctxt, data):
+        """Check if instance files located on shared storage.
+
+        :param context: security context
+        :param data: result of check_instance_shared_storage_local
+        """
+        raise NotImplementedError()
+
+    def check_instance_shared_storage_cleanup(self, ctxt, data):
+        """Do cleanup on host after check_instance_shared_storage calls
+
+        :param ctxt: security context
+        :param data: result of check_instance_shared_storage_local
+        """
+        pass
+
     def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                            src_compute_info, dst_compute_info,
                                            block_migration=False,
@@ -1759,6 +2022,7 @@
         :param dst_compute_info: Info about the receiving machine
         :param block_migration: if true, prepare for block migration
         :param disk_over_commit: if true, allow disk over commit
+        :returns: a dict containing migration info (hypervisor-dependent)
         """
         raise NotImplementedError()
 
@@ -1781,6 +2045,7 @@
         :param context: security context
         :param instance_ref: nova.db.sqlalchemy.models.Instance
         :param dest_check_data: result of check_can_live_migrate_destination
+        :returns: a dict containing migration info (hypervisor-dependent)
         """
         raise NotImplementedError()
 
@@ -1951,7 +2216,8 @@
 
     def host_maintenance_mode(self, host, mode):
         """Start/Stop host maintenance window. On start, it triggers
-        guest VMs evacuation."""
+        guest VMs evacuation.
+        """
         raise NotImplementedError()
 
     def set_host_enabled(self, host, enabled):
@@ -1974,10 +2240,20 @@
         raise NotImplementedError()
 
     def get_host_stats(self, refresh=False):
-        """Return currently known host stats."""
-        if refresh:
+        """Return currently known host stats.
+
+        If the hypervisor supports pci passthrough, the returned
+        dictionary includes a key-value pair for it.
+        The key of pci passthrough device is "pci_passthrough_devices"
+        and the value is a json string for the list of assignable
+        pci devices. Each device is a dictionary, with mandatory
+        keys of 'address', 'vendor_id', 'product_id', 'dev_type',
+        'dev_id', 'label' and other optional device specific information.
+
+        Refer to the objects/pci_device.py for more idea of these keys.
+        """
+        if refresh or not self._host_stats:
             self._update_host_stats()
-
         return self._host_stats
 
     def block_stats(self, instance_name, disk_id):
@@ -2020,12 +2296,6 @@
         """
         raise NotImplementedError()
 
-    def legacy_nwinfo(self):
-        """True if the driver requires the legacy network_info format."""
-        # TODO(tr3buchet): update all subclasses and remove this method and
-        # related helpers.
-        return False
-
     def macs_for_instance(self, instance):
         """What MAC addresses must this instance have?
 
@@ -2055,6 +2325,30 @@
         """
         return None
 
+    def dhcp_options_for_instance(self, instance):
+        """Get DHCP options for this instance.
+
+        Some hypervisors (such as bare metal) require that instances boot from
+        the network, and manage their own TFTP service. This requires passing
+        the appropriate options out to the DHCP service. Most hypervisors can
+        use the default implementation which returns None.
+
+        This is called during spawn_instance by the compute manager.
+
+        Note that the format of the return value is specific to Quantum
+        client API.
+
+        :return: None, or a set of DHCP options, eg:
+                 [{'opt_name': 'bootfile-name',
+                   'opt_value': '/tftpboot/path/to/config'},
+                  {'opt_name': 'server-ip-address',
+                   'opt_value': '1.2.3.4'},
+                  {'opt_name': 'tftp-server',
+                   'opt_value': '1.2.3.4'}
+                 ]
+        """
+        pass
+
     def manage_image_cache(self, context, all_instances):
         """
         Manage the driver's local image cache.
@@ -2085,11 +2379,14 @@
 
         Connector information is a dictionary representing the ip of the
         machine that will be making the connection, the name of the iscsi
-        initiator and the hostname of the machine as follows::
+        initiator, the WWPN and WWNN values of the Fibre Channel initiator,
+        and the hostname of the machine as follows:
 
             {
                 'ip': ip,
                 'initiator': initiator,
+                'wwnns': wwnns,
+                'wwpns': wwpns,
                 'host': hostname
             }
         """
@@ -2104,9 +2401,26 @@
             LOG.warning(_("Could not determine iSCSI initiator name"),
                         instance=instance)
 
+        if not self._fc_wwnns:
+            self._fc_wwnns = self._get_fc_wwnns()
+            if not self._fc_wwnns or len(self._fc_wwnns) == 0:
+                LOG.debug(_('Could not determine Fibre Channel '
+                          'World Wide Node Names'),
+                          instance=instance)
+
+        if not self._fc_wwpns:
+            self._fc_wwpns = self._get_fc_wwpns()
+            if not self._fc_wwpns or len(self._fc_wwpns) == 0:
+                LOG.debug(_('Could not determine Fibre channel '
+                          'World Wide Port Names'),
+                          instance=instance)
+
+        if self._fc_wwnns and self._fc_wwpns:
+            connector["wwnns"] = self._fc_wwnns
+            connector["wwpns"] = self._fc_wwpns
         return connector
 
-    def get_available_nodes(self):
+    def get_available_nodes(self, refresh=False):
         """Returns nodenames of all nodes managed by the compute service.
 
         This method is for multi compute-nodes support. If a driver supports
@@ -2114,11 +2428,18 @@
         by the service. Otherwise, this method should return
         [hypervisor_hostname].
         """
-        stats = self.get_host_stats(refresh=True)
+        stats = self.get_host_stats(refresh=refresh)
         if not isinstance(stats, list):
             stats = [stats]
         return [s['hypervisor_hostname'] for s in stats]
 
+    def node_is_available(self, nodename):
+        """Return whether this compute service manages a particular node."""
+        if nodename in self.get_available_nodes():
+            return True
+        # Refresh and check again.
+        return nodename in self.get_available_nodes(refresh=True)
+
     def get_per_instance_usage(self):
         """Get information about instance resource usage.
 
@@ -2146,7 +2467,8 @@
         Register a callback to receive asynchronous event
         notifications from hypervisors. The callback will
         be invoked with a single parameter, which will be
-        an instance of the nova.virt.event.Event class."""
+        an instance of the nova.virt.event.Event class.
+        """
 
         self._compute_event_callback = callback
 
@@ -2155,10 +2477,11 @@
 
         Invokes the event callback registered by the
         compute manager to dispatch the event. This
-        must only be invoked from a green thread."""
+        must only be invoked from a green thread.
+        """
 
         if not self._compute_event_callback:
-            LOG.debug("Discarding event %s" % str(event))
+            LOG.debug(_("Discarding event %s") % str(event))
             return
 
         if not isinstance(event, virtevent.Event):
@@ -2166,8 +2489,67 @@
                 _("Event must be an instance of nova.virt.event.Event"))
 
         try:
-            LOG.debug("Emitting event %s" % str(event))
+            LOG.debug(_("Emitting event %s") % str(event))
             self._compute_event_callback(event)
         except Exception as ex:
-            LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
-                      % locals())
+            LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),
+                      {'event': event, 'ex': ex})
+
+    def delete_instance_files(self, instance):
+        """Delete any lingering instance files for an instance.
+
+        :returns: True if the instance was deleted from disk, False otherwise.
+        """
+        return True
+
+    @property
+    def need_legacy_block_device_info(self):
+        """Tell the caller if the driver requires legacy block device info.
+
+        Tell the caller whether we expect the legacy format of block
+        device info to be passed in to methods that expect it.
+        """
+        return True
+
+    def volume_snapshot_create(self, context, instance, volume_id,
+                               create_info):
+        """
+        Snapshots volumes attached to a specified instance.
+
+        :param context: request context
+        :param instance: Instance object that has the volume attached
+        :param volume_id: Volume to be snapshotted
+        :param create_info: The data needed for nova to be able to attach
+               to the volume.  This is the same data format returned by
+               Cinder's initialize_connection() API call.  In the case of
+               doing a snapshot, it is the image file Cinder expects to be
+               used as the active disk after the snapshot operation has
+               completed.  There may be other data included as well that is
+               needed for creating the snapshot.
+        """
+        raise NotImplementedError()
+
+    def volume_snapshot_delete(self, context, instance, volume_id,
+                               snapshot_id, delete_info):
+        """
+        Snapshots volumes attached to a specified instance.
+
+        :param context: request context
+        :param instance: Instance object that has the volume attached
+        :param volume_id: Attached volume associated with the snapshot
+        :param snapshot_id: The snapshot to delete.
+        :param delete_info: Volume backend technology specific data needed to
+               be able to complete the snapshot.  For example, in the case of
+               qcow2 backed snapshots, this would include the file being
+               merged, and the file being merged into (if appropriate).
+        """
+        raise NotImplementedError()
+
+    def default_root_device_name(self, instance, image_meta, root_bdm):
+        """Provide a default root device name for the driver."""
+        raise NotImplementedError()
+
+    def default_device_names_for_instance(self, instance, root_device_name,
+                                          *block_device_lists):
+        """Default the missing device names in the block device mapping."""
+        raise NotImplementedError()
--- a/components/openstack/nova/nova.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/nova.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -28,9 +28,9 @@
 set name=pkg.summary value="OpenStack Nova"
 set name=pkg.description \
     value="OpenStack Nova provides a cloud computing fabric controller, supporting a wide variety of virtualization technologies. In addition to its native API, it includes compatibility with the commonly encountered Amazon EC2 and S3 APIs."
-set name=pkg.human-version value="Grizzly $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Havana $(COMPONENT_VERSION)"
 set name=com.oracle.info.description value="Nova, the OpenStack compute service"
-set name=com.oracle.info.tpno value=16245
+set name=com.oracle.info.tpno value=17715
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -39,17 +39,20 @@
 set name=info.source-url value=$(COMPONENT_ARCHIVE_URL)
 set name=info.upstream value="OpenStack <[email protected]>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
-set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/049
+set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/049 \
+    value=PSARC/2014/210
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 dir  path=etc/nova owner=nova group=nova mode=0700
-file path=etc/nova/api-paste.ini owner=nova group=nova mode=0644 overlay=allow \
-    preserve=true
+file files/api-paste.ini path=etc/nova/api-paste.ini owner=nova group=nova \
+    mode=0644 overlay=allow preserve=renamenew
+file path=etc/nova/cells.json owner=nova group=nova mode=0644 overlay=allow \
+    preserve=renamenew
 file etc/nova/logging_sample.conf path=etc/nova/logging.conf owner=nova \
-    group=nova mode=0644 overlay=allow preserve=true
+    group=nova mode=0644 overlay=allow preserve=renamenew
 file files/nova.conf path=etc/nova/nova.conf owner=nova group=nova mode=0644 \
-    overlay=allow preserve=true
+    overlay=allow preserve=renamenew
 file path=etc/nova/policy.json owner=nova group=nova mode=0644 overlay=allow \
-    preserve=true
+    preserve=renamenew
 file files/release path=etc/nova/release owner=nova group=nova
 file files/nova.auth_attr path=etc/security/auth_attr.d/cloud:openstack:nova \
     group=sys
@@ -78,8 +81,7 @@
 file files/nova-conductor path=lib/svc/method/nova-conductor
 file files/nova-objectstore path=lib/svc/method/nova-objectstore
 file files/nova-scheduler path=lib/svc/method/nova-scheduler
-file path=usr/bin/nova-clear-rabbit-queues
-file path=usr/bin/nova-manage pkg.depend.bypass-generate=.*/bpython.*
+file path=usr/bin/nova-manage
 file usr/bin/nova-api-ec2 path=usr/lib/nova/nova-api-ec2 mode=0555
 file usr/bin/nova-api-metadata path=usr/lib/nova/nova-api-metadata mode=0555
 file usr/bin/nova-api-os-compute path=usr/lib/nova/nova-api-os-compute mode=0555
@@ -98,6 +100,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
 file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
 file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/nova-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/CA/geninter.sh
@@ -117,6 +121,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/metadata/base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/metadata/handler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/metadata/password.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/metadata/vendordata_json.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/auth.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/common.py
@@ -126,9 +131,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/admin_actions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/agents.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/aggregates.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/attach_interfaces.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/availability_zone.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/baremetal_nodes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/block_device_mapping_v2_boot.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/cell_capacities.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/cells.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/certificates.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -142,9 +149,15 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/disk_config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/evacuate.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_floating_ips.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_ips.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_ips_mac.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_quotas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_server_attributes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_services.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_status.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/extended_volumes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/fixed_ips.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/flavor_access.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/flavor_disabled.py
@@ -165,6 +178,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/instance_actions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/keypairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/migrations.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/multinic.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/multiple_create.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/networks_associate.py
@@ -179,11 +193,16 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/server_diagnostics.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/server_password.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/server_start_stop.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/server_usage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/services.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/shelve.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/simple_tenant_usage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/used_limits.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/used_limits_for_admin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/user_data.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/user_quotas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/virtual_interfaces.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/volume_attachment_update.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/contrib/volumes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/extensions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/flavors.py
@@ -191,6 +210,63 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/images.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/ips.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/limits.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/admin_actions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/admin_password.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/agents.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/aggregates.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/attach_interfaces.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/cells.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/certificates.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/config_drive.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/console_output.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/consoles.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/coverage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/deferred_delete.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/disk_config.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/evacuate.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/extended_availability_zone.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/extended_status.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/extended_volumes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/extension_info.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/flavor_access.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/flavor_manage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/flavor_rxtx.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/flavors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/hide_server_addresses.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/hosts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/hypervisors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/instance_actions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/instance_usage_audit_log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/ips.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/keypairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/limits.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/migrations.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/multinic.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/multiple_create.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/personalities.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/quota_classes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/quota_sets.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/remote_consoles.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/rescue.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/scheduler_hints.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/security_groups.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/server_diagnostics.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/server_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/server_password.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/server_usage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/servers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/services.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/shelve.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/simple_tenant_usage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/used_limits.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/user_data.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/plugins/v3/versions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/atom-link.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/atom.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v1.1/addresses.rng
@@ -209,6 +285,12 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v1.1/servers_index.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v1.1/version.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v1.1/versions.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/addresses.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/flavor.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/flavors.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/server.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/servers.rng
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/schemas/v3/servers_index.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/server_metadata.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/servers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/openstack/compute/versions.py
@@ -226,9 +308,13 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/sizelimit.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/api/validator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/availability_zones.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/baserpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/block_device.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/filters/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/filters/image_properties.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/filters/target_cell.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/messaging.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/opts.py
@@ -237,6 +323,10 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/scheduler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/state.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/weights/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/weights/mute_child.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/weights/ram_by_instance_type.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cells/weights/weight_offset.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cert/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cert/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cert/rpcapi.py
@@ -244,14 +334,37 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cloudpipe/bootscript.template
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cloudpipe/client.ovpn.template
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/cloudpipe/pipelib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/all.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/api_ec2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/api_metadata.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/api_os_compute.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/baremetal_deploy_helper.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/baremetal_manage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/cells.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/cert.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/compute.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/conductor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/console.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/consoleauth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/dhcpbridge.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/manage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/network.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/novnc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/novncproxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/objectstore.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/rpc_zmq_receiver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/scheduler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/spicehtml5proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/cmd/xvpvncproxy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/cells_api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/claims.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/fakevirtinstance.xml
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/flavors.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/instance_actions.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/instance_types.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/power_state.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/compute/resource_tracker.py
@@ -265,6 +378,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/conductor/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/conductor/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/conductor/rpcapi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/conductor/tasks/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/conductor/tasks/live_migrate.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/console/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/console/api.py
@@ -324,6 +439,63 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/159_sqlite_upgrade.sql
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/160_fix_system_metadata_deleted.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/162_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/163_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/164_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/165_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/166_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/167_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/168_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/169_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/170_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/171_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/172_add_instance_type_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/173_add_unique_constraint_to_key_pairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/174_add_instance_type_access_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/175_add_project_user_id_to_volume_usage_cache.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/176_add_availability_zone_to_volume_usage_cache.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/177_add_floating_ip_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/178_add_index_to_compute_node_stats.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/179_change_cells_deleted_to_int.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/180_fix_175_and_176_migration_sync_shadow_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/181_fix_179_migration_sync_shadow_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/182_fix_156_migration_sync_shadow_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/183_fix_157_migration_sync_shadow_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/184_fix_159_migration_sync_shadow_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/184_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/184_sqlite_upgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/185_rename_unique_constraints.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/187_add_instance_groups.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/188_add_reason_column_to_service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/189_add_cells_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/190_add_security_group_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/191_add_quota_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/192_change_virtual_interface_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/193_cinder_cleanup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/194_fix_152_migration_indexes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/195_add_fixed_ip_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/196_add_service_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/197_add_agent_build_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/198_add_console_pools_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/199_add_aggregate_hosts_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/200_add_transport_url_to_cell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/201_add_sqlite_indexes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/202_add_instance_type_extra_specs_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/203_make_user_quotas_key_and_value.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/204_add_indexes_to_reservations.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/205_add_locked_by_to_instance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/206_add_instance_cleaned.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/207_fix_uniqname_cells.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/208_expand_compute_node.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/209_add_missing_foreign_keys.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/210_fix_project_user_quotas_user_id_deleted_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/211_add_aggregate_metadata_uc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/212_fix_migrations_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/213_add_pci_devices.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/214_complete_194_missing_index.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/215_fix_deleted_compute_node_stats.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/migration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/models.py
@@ -331,16 +503,31 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/db/sqlalchemy/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/exception.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/filters.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/hacking/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/hacking/checks.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/hooks.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/download/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/download/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/download/file.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/glance.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/image/s3.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/ipv6/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/ipv6/account_identifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/ipv6/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/ipv6/rfc2462.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/conf_key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/key.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/mock_key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/not_implemented_key_mgr.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/keymgr/single_key_mgr.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/loadables.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ar/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/bg_BG/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/bs/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ca/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/cs/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/da/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/de/LC_MESSAGES/nova.po
@@ -348,25 +535,47 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/en_GB/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/en_US/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/es/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/es_MX/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/fi_FI/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/fil/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/fr/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/hi/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/hr/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/hu/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/id/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/it/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/it_IT/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ja/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ka_GE/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/kn/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ko/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ko_KR/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ms/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/nb/LC_MESSAGES/nova.po
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/nova.pot
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ne/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/nl_NL/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/pl_PL/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/pt/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/pt_BR/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ro/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ru/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/ru_RU/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/sk/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/sl_SI/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/sw_KE/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/tl/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/tl_PH/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/tr/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/tr_TR/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/uk/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/vi_VN/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/zh_CN/LC_MESSAGES/nova.po
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/zh_HK/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/locale/zh_TW/LC_MESSAGES/nova.po
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/netconf.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/api.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/api_deprecated.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/dns_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/floating_ips.py
@@ -376,23 +585,42 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/minidns.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/model.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/neutronv2/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/neutronv2/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/neutronv2/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/noop_dns_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/nova_ipam_lib.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/quantumv2/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/quantumv2/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/rpcapi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/security_group/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/security_group/neutron_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/security_group/openstack_driver.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/security_group/quantum_driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/security_group/security_group_base.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/network/sg.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/notifications.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/notifier.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/aggregate.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/compute_node.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/instance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/instance_action.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/instance_fault.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/instance_group.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/instance_info_cache.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/keypair.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/migration.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/pci_device.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/quotas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/security_group.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/objects/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/objectstore/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/objectstore/s3server.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/README
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/cliutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/config/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/config/generator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/context.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/db/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/db/api.py
@@ -410,6 +638,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/local.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/lockutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/log.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/log_handler.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/loopingcall.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/memorycache.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/network_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/notifier/__init__.py
@@ -420,13 +650,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/notifier/rpc_notifier.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/notifier/rpc_notifier2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/notifier/test_notifier.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/plugin/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/plugin/callbackplugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/plugin/plugin.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/plugin/pluginmanager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/periodic_task.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/policy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/processutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rootwrap/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rootwrap/cmd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rootwrap/filters.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rootwrap/wrapper.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/__init__.py
@@ -438,16 +666,30 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/impl_qpid.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/impl_zmq.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/matchmaker.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/matchmaker_redis.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/matchmaker_ring.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/proxy.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/securemessage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/serializer.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/setup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/rpc/zmq_receiver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/service.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/sslutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/strutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/threadgroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/timeutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/uuidutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/version.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/openstack/common/xmlutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/paths.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/pci_manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/pci_request.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/pci_stats.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/pci_utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/pci/pci_whitelist.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/policy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/quota.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/rpcclient.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/safe_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/baremetal_host_manager.py
@@ -470,17 +712,17 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/isolated_hosts_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/json_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/num_instances_filter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/pci_passthrough_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/ram_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/retry_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/trusted_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/filters/type_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/host_manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/manager.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/multi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/rpcapi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/scheduler_options.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/weights/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/weights/least_cost.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/scheduler/weights/ram.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/servicegroup/__init__.py
@@ -496,6 +738,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/version.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/block_device.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/configdrive.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/api.py
@@ -505,6 +748,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/mount/nbd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/vfs/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/vfs/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/vfs/guestfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/disk/vfs/localfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/event.py
@@ -512,6 +756,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/firewall.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/images.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/interfaces.template
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/libvirt/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/libvirt/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/netutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/solariszones/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/virt/solariszones/driver.py
@@ -522,6 +768,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/vnc/xvp_proxy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/cinder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/encryptors/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/encryptors/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/encryptors/cryptsetup.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/encryptors/luks.py
+file path=usr/lib/python$(PYVER)/vendor-packages/nova/volume/encryptors/nop.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/weights.py
 file path=usr/lib/python$(PYVER)/vendor-packages/nova/wsgi.py
 dir  path=var/lib/nova owner=nova group=nova mode=0700
@@ -530,34 +781,42 @@
     home-dir=/var/lib/nova uid=85
 license nova.license license="Apache v2.0"
 
+# force a group dependency on the optional anyjson; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/anyjson-26
+
+# force a group dependency on the optional coverage; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/coverage-26
+
 # force a dependency on package delivering archiveadm(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/archiveadm
 
+# force a dependency on package delivering fcinfo(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/fcinfo
+
 # force a dependency on package delivering iscsiadm(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/iscsiadm
 
+# force a dependency on package delivering suriadm(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/suriadm
+
 # force a dependency on package delivering zfs(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/zfs
 
 # force a dependency on package delivering zpool(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/zpool
 
-# force a dependency on anyjson; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/anyjson-26
+# force a dependency on babel; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/babel-26
 
 # force a dependency on boto; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/boto-26
 
-# force a dependency on cheetah; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/cheetah-26
-
 # force a dependency on cinderclient; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/cinderclient-26
 
-# force a dependency on coverage; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/coverage-26
-
 # force a dependency on glanceclient; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/glanceclient-26
@@ -568,12 +827,28 @@
 # force a dependency on iso8601; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/iso8601-26
 
+# force a dependency on jinja2; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jinja2-26
+
+# force a dependency on jsonschema; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/jsonschema-26
+
+# force a dependency on keystoneclient; used via a paste.deploy filter
+depend type=require fmri=library/python/keystoneclient-26
+
 # force a dependency on kombu; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/kombu-26
 
 # force a dependency on lxml; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/lxml-26
 
+# force a dependency on netaddr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/netaddr-26
+
+# force a dependency on neutronclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/neutronclient-26
+
 # force a dependency on paste; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/paste-26
 
@@ -581,21 +856,23 @@
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-26
+
 # force a dependency on pyasn1; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/pyasn1-26
 
 # force a dependency on python-ldap; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/python-ldap-26
 
-# force a dependency on quantumclient; pkgdepend work is needed to flush this
-# out.
-depend type=require fmri=library/python/quantumclient-26
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-26
 
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-26
 
-# force a dependency on setuptools; pkgdepend work is needed to flush this out.
-depend type=require fmri=library/python/setuptools-26
+# force a dependency on six; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/six-26
 
 # force a dependency on stevedore; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/stevedore-26
--- a/components/openstack/nova/patches/02-noamqplib.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-In-house patch to remove amqplib from Nova's requirements files as an alternate
-implementation is used on Solaris.  This patch is Solaris-specific and
-not suitable for upstream.
-
---- nova-2013.1.4/tools/pip-requires.orig       2013-11-22 08:08:07.506739321 -0700
-+++ nova-2013.1.4/tools/pip-requires        2013-11-22 08:08:13.543791383 -0700
-@@ -1,6 +1,5 @@
- SQLAlchemy>=0.7.8,<0.7.99
- Cheetah>=2.4.4
--amqplib>=0.6.1
- anyjson>=0.2.4
- argparse
- boto
-
---- nova-2013.1.4/nova.egg-info/requires.txt.orig       2013-11-22 08:09:22.863753329 -0700
-+++ nova-2013.1.4/nova.egg-info/requires.txt        2013-11-22 08:09:28.573969576 -0700
-@@ -1,6 +1,5 @@
- SQLAlchemy>=0.7.8,<0.7.99
- Cheetah>=2.4.4
--amqplib>=0.6.1
- anyjson>=0.2.4
- boto
- eventlet>=0.9.17
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/02-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,51 @@
+In-house patch to remove unnecessary dependencies from Nova's
+requirements files. The specific reasons are as follows:
+
+amqplib		No longer applicable
+		(upstream commit 48591e83894354b57f0528ea98e6efe026acac28)
+
+argparse	Not applicable to Solaris (nova-network specific)
+
+paramiko	Not applicable to Solaris (PowerVM specific)
+
+suds		Not applicable to Solaris (VMware specific)
+
+--- nova-2013.2.3/nova.egg-info/requires.txt.orig	2014-04-03 11:55:51.000000000 -0700
++++ nova-2013.2.3/nova.egg-info/requires.txt	2014-05-24 23:15:07.242887922 -0700
+@@ -1,6 +1,5 @@
+ pbr>=0.5.21,<1.0
+ SQLAlchemy>=0.7.8,<=0.7.99
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+ boto>=2.4.0,!=2.13.0
+ eventlet>=0.13.0
+@@ -14,8 +13,6 @@
+ Paste
+ sqlalchemy-migrate>=0.7.2
+ netaddr
+-suds>=0.4
+-paramiko>=1.8.0
+ pyasn1
+ Babel>=1.3
+ iso8601>=0.1.8
+
+--- nova-2013.2.3/requirements.txt.orig	2014-04-03 11:49:46.000000000 -0700
++++ nova-2013.2.3/requirements.txt	2014-05-24 23:15:23.285062223 -0700
+@@ -1,8 +1,6 @@
+ pbr>=0.5.21,<1.0
+ SQLAlchemy>=0.7.8,<=0.7.99
+-amqplib>=0.6.1
+ anyjson>=0.3.3
+-argparse
+ boto>=2.4.0,!=2.13.0
+ eventlet>=0.13.0
+ Jinja2
+@@ -15,8 +13,6 @@
+ Paste
+ sqlalchemy-migrate>=0.7.2
+ netaddr
+-suds>=0.4
+-paramiko>=1.8.0
+ pyasn1
+ Babel>=1.3
+ iso8601>=0.1.8
--- a/components/openstack/nova/patches/03-Solaris-flavors.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/patches/03-Solaris-flavors.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -1,8 +1,8 @@
 In-house patch to update the default flavors for use with Solaris.
 This patch has not yet been submitted upstream.
 
---- nova-2013.1.4/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py.orig	2014-02-11 08:54:04.148157965 -0700
-+++ nova-2013.1.4/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py	2014-02-11 08:54:09.036798132 -0700
+--- nova-2013.2.3/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py.~1~	2014-04-03 11:49:46.000000000 -0700
++++ nova-2013.2.3/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py	2014-05-19 04:14:17.367917095 -0700
 @@ -2,6 +2,8 @@
  
  # Copyright 2012 OpenStack Foundation
@@ -12,49 +12,49 @@
  #    Licensed under the Apache License, Version 2.0 (the "License"); you may
  #    not use this file except in compliance with the License. You may obtain
  #    a copy of the License at
-@@ -36,17 +38,45 @@
+@@ -37,17 +39,45 @@
      return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
  
  
 -def _populate_instance_types(instance_types_table):
 +def _populate_instance_types(instance_types_table, instance_type_extra_specs):
      default_inst_types = {
--        'm1.tiny': dict(mem=512, vcpus=1, root_gb=0, eph_gb=0, flavid=1),
+-        'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
 -        'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
 -        'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
 -        'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
 -        'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
-+        'Oracle Solaris kernel zone - tiny':
-+            dict(mem=2048, vcpus=1, root_gb=10, eph_gb=0, flavid=1,
-+                 extra='solaris-kz'),
-+        'Oracle Solaris kernel zone - small':
-+            dict(mem=4096, vcpus=4, root_gb=20, eph_gb=0, flavid=2,
-+                 extra='solaris-kz'),
-+        'Oracle Solaris kernel zone - medium':
-+            dict(mem=8192, vcpus=8, root_gb=40, eph_gb=0, flavid=3,
-+                 extra='solaris-kz'),
-+        'Oracle Solaris kernel zone - large':
-+            dict(mem=16384, vcpus=16, root_gb=40, eph_gb=0, flavid=4,
-+                 extra='solaris-kz'),
-+        'Oracle Solaris kernel zone - xlarge':
-+            dict(mem=32768, vcpus=32, root_gb=80, eph_gb=0, flavid=5,
-+                 extra='solaris-kz'),
++        'Oracle Solaris kernel zone - tiny': dict(
++            mem=2048, vcpus=1, root_gb=10, eph_gb=0, flavid=1,
++            extra='solaris-kz'),
++        'Oracle Solaris kernel zone - small': dict(
++            mem=4096, vcpus=4, root_gb=20, eph_gb=0, flavid=2,
++            extra='solaris-kz'),
++        'Oracle Solaris kernel zone - medium': dict(
++            mem=8192, vcpus=8, root_gb=40, eph_gb=0, flavid=3,
++            extra='solaris-kz'),
++        'Oracle Solaris kernel zone - large': dict(
++            mem=16384, vcpus=16, root_gb=40, eph_gb=0, flavid=4,
++            extra='solaris-kz'),
++        'Oracle Solaris kernel zone - xlarge': dict(
++            mem=32768, vcpus=32, root_gb=80, eph_gb=0, flavid=5,
++            extra='solaris-kz'),
 +
-+        'Oracle Solaris non-global zone - tiny':
-+            dict(mem=2048, vcpus=1, root_gb=10, eph_gb=0, flavid=6,
-+                 extra='solaris'),
-+        'Oracle Solaris non-global zone - small':
-+            dict(mem=3072, vcpus=4, root_gb=20, eph_gb=0, flavid=7,
-+                 extra='solaris'),
-+        'Oracle Solaris non-global zone - medium':
-+            dict(mem=4096, vcpus=8, root_gb=40, eph_gb=0, flavid=8,
-+                 extra='solaris'),
-+        'Oracle Solaris non-global zone - large':
-+            dict(mem=8192, vcpus=16, root_gb=40, eph_gb=0, flavid=9,
-+                 extra='solaris'),
-+        'Oracle Solaris non-global zone - xlarge':
-+            dict(mem=16384, vcpus=32, root_gb=80, eph_gb=0, flavid=10,
-+                 extra='solaris')
++        'Oracle Solaris non-global zone - tiny': dict(
++            mem=2048, vcpus=1, root_gb=10, eph_gb=0, flavid=6,
++            extra='solaris'),
++        'Oracle Solaris non-global zone - small': dict(
++            mem=3072, vcpus=4, root_gb=20, eph_gb=0, flavid=7,
++            extra='solaris'),
++        'Oracle Solaris non-global zone - medium': dict(
++            mem=4096, vcpus=8, root_gb=40, eph_gb=0, flavid=8,
++            extra='solaris'),
++        'Oracle Solaris non-global zone - large': dict(
++            mem=8192, vcpus=16, root_gb=40, eph_gb=0, flavid=9,
++            extra='solaris'),
++        'Oracle Solaris non-global zone - xlarge': dict(
++            mem=16384, vcpus=32, root_gb=80, eph_gb=0, flavid=10,
++            extra='solaris')
          }
  
      try:
@@ -64,7 +64,7 @@
          for name, values in default_inst_types.iteritems():
              i.execute({'name': name, 'memory_mb': values["mem"],
                          'vcpus': values["vcpus"], 'deleted': False,
-@@ -57,6 +87,10 @@
+@@ -58,6 +88,10 @@
                          'flavorid': values["flavid"],
                          'disabled': False,
                          'is_public': True})
@@ -75,7 +75,7 @@
      except Exception:
          LOG.info(repr(instance_types_table))
          LOG.exception(_('Exception while seeding instance_types table'))
-@@ -1218,7 +1252,7 @@
+@@ -1219,7 +1253,7 @@
                           name='instance_info_caches_instance_id_key').create()
  
      # populate initial instance types
--- a/components/openstack/nova/patches/04-CVE-2013-4497.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,152 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2
-
-commit df2ea2e3acdede21b40d47b7adbeac04213d031b
-Author: John Garbutt <[email protected]>
-Date:   Thu Sep 12 18:11:49 2013 +0100
-
-    xenapi: enforce filters after live-migration
-    
-    Currently and network filters, including security groups, are
-    lost after a server has been live-migrated.
-    
-    This partially fixes the issue by ensuring that security groups are
-    re-applied to the VM once it reached the destination, and been started.
-    
-    This leaves a small amount of time during the live-migrate where the VM
-    is not protected. There is a further bug raised to close the rest of
-    this whole, but this helps keep the VM protected for the majority of the
-    time.
-    
-    Fixes bug 1202266
-    
-    (Cherry picked from commit: 5cced7a6dd32d231c606e25dbf762d199bf9cca7)
-    
-    Change-Id: I66bc7af1c6da74e18dce47180af0cb6020ba2c1a
-
-diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
-index f7fb81d..d4c19a4 100644
---- a/nova/tests/test_xenapi.py
-+++ b/nova/tests/test_xenapi.py
-@@ -2723,7 +2723,27 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
-         # ensure method is present
-         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-         self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
--        self.conn.post_live_migration_at_destination(None, None, None, None)
-+
-+        fake_instance = "instance"
-+        fake_network_info = "network_info"
-+
-+        def fake_fw(instance, network_info):
-+            self.assertEquals(instance, fake_instance)
-+            self.assertEquals(network_info, fake_network_info)
-+            fake_fw.called += 1
-+
-+        fake_fw.called = 0
-+        _vmops = self.conn._vmops
-+        self.stubs.Set(_vmops.firewall_driver,
-+                       'setup_basic_filtering', fake_fw)
-+        self.stubs.Set(_vmops.firewall_driver,
-+                       'prepare_instance_filter', fake_fw)
-+        self.stubs.Set(_vmops.firewall_driver,
-+                       'apply_instance_filter', fake_fw)
-+
-+        self.conn.post_live_migration_at_destination(None, fake_instance,
-+                                                     fake_network_info, None)
-+        self.assertEqual(fake_fw.called, 3)
- 
-     def test_check_can_live_migrate_destination_with_block_migration(self):
-         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
-index 128f67f..564c587 100755
---- a/nova/virt/xenapi/driver.py
-+++ b/nova/virt/xenapi/driver.py
-@@ -1,4 +1,3 @@
--# vim: tabstop=4 shiftwidth=4 softtabstop=4
- 
- # Copyright (c) 2010 Citrix Systems, Inc.
- # Copyright 2010 OpenStack Foundation
-@@ -514,7 +513,8 @@ class XenAPIDriver(driver.ComputeDriver):
-         :params : block_migration: if true, post operation of block_migraiton.
-         """
-         # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
--        pass
-+        self._vmops.post_live_migration_at_destination(ctxt, instance_ref,
-+                network_info, block_device_info, block_device_info)
- 
-     def unfilter_instance(self, instance_ref, network_info):
-         """Removes security groups configured for an instance."""
-diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
-index eccf3e0..ae5c697 100644
---- a/nova/virt/xenapi/vmops.py
-+++ b/nova/virt/xenapi/vmops.py
-@@ -1737,6 +1737,24 @@ class VMOps(object):
-                 recover_method(context, instance, destination_hostname,
-                                block_migration)
- 
-+    def post_live_migration_at_destination(self, context, instance,
-+                                           network_info, block_migration,
-+                                           block_device_info):
-+        # FIXME(johngarbutt): we should block all traffic until we have
-+        # applied security groups, however this requires changes to XenServer
-+        try:
-+            self.firewall_driver.setup_basic_filtering(
-+                    instance, network_info)
-+        except NotImplementedError:
-+            # NOTE(salvatore-orlando): setup_basic_filtering might be
-+            # empty or not implemented at all, as basic filter could
-+            # be implemented with VIF rules created by xapi plugin
-+            pass
-+
-+        self.firewall_driver.prepare_instance_filter(instance,
-+                                                     network_info)
-+        self.firewall_driver.apply_instance_filter(instance, network_info)
-+
-     def get_per_instance_usage(self):
-         """Get usage info about each active instance."""
-         usage = {}
-commit 01de658210fd65171bfbf5450c93673b5ce0bd9e
-Author: John Garbutt <[email protected]>
-Date:   Mon Oct 21 19:34:43 2013 +0100
-
-    xenapi: apply firewall rules in finish_migrate
-    
-    When security groups were added, the rules were not re-applied to
-    servers that have been migrated to a new hypervisor.
-    
-    This change ensures the firewall rules are applied as part of creating
-    the new VM in finish_migrate. This code follows a very similar pattern
-    to the code in spawn, and that is where the cut and paste code comes
-    from. This code duplication was removed in Havana.
-    
-    Fixes bug 1073306
-    
-    Change-Id: I6295a782df328a759e358fb82b76dd3f7bd4b39e
-
-diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
-index eccf3e0..7a96ac2 100644
---- a/nova/virt/xenapi/vmops.py
-+++ b/nova/virt/xenapi/vmops.py
-@@ -277,8 +277,23 @@ class VMOps(object):
- 
-         self._attach_mapped_block_devices(instance, block_device_info)
- 
-+        try:
-+            self.firewall_driver.setup_basic_filtering(
-+                    instance, network_info)
-+        except NotImplementedError:
-+            # NOTE(salvatore-orlando): setup_basic_filtering might be
-+            # empty or not implemented at all, as basic filter could
-+            # be implemented with VIF rules created by xapi plugin
-+            pass
-+
-+        self.firewall_driver.prepare_instance_filter(instance,
-+                                                     network_info)
-+
-         # 5. Start VM
-         self._start(instance, vm_ref=vm_ref)
-+
-+        self.firewall_driver.apply_instance_filter(instance, network_info)
-+
-         self._update_instance_progress(context, instance,
-                                        step=5,
-                                        total_steps=RESIZE_TOTAL_STEPS)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/04-CVE-2014-0134-partial.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,182 @@
+This proposed upstream patch is a follow-up to the original fix for
+CVE-2014-0134 (Launchpad bug 1221190) but is tracked under the same CVE
+and Launchpad bug as the original fix. It is designated as such below
+('Partial-bug: #1221190').
+
+From 63064a709162ba1a8a9a19643bd3acdf57c0c0b4 Mon Sep 17 00:00:00 2001
+From: Nikola Dipanov <[email protected]>
+Date: Wed, 9 Apr 2014 15:50:20 +0200
+Subject: [PATCH] Avoid the possibility of truncating disk info file
+
+Commit dc8de42 makes nova persist image format to a file to avoid
+attacks based on changing it later. However the way it was implemented
+leaves a small window of opportunity for the file to be truncated before
+it gets written back to effectively making it possible for data to get
+lost leaving us with a potential problem next time it is attempted to be
+read.
+
+This patch changes the way file is updated to be atomic, thus closing
+the race window (and also removes the chown that we did not really
+need).
+
+It is worth noting that a better solution to this would be
+to allow the code calling the imagebackend to write the file (once!)
+and make it impossible to update after the boot process is done. This
+approach would require more refactoring of the libvirt driver code, and
+may be done in the future.
+
+Partial-bug: #1221190
+Change-Id: Ia1b073f38e096989f34d1774a12a1b4151773fc7
+(cherry picked from commit d416f4310bb946b4b127201ec3c37e530d988714)
+---
+ etc/nova/rootwrap.d/compute.filters          |    1 -
+ nova/tests/virt/libvirt/test_imagebackend.py |   21 ---------------------
+ nova/utils.py                                |   14 --------------
+ nova/virt/libvirt/imagebackend.py            |   25 +++++++++++++------------
+ 4 files changed, 13 insertions(+), 48 deletions(-)
+
+diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
+index e98c3f2..ac67180 100644
+--- a/etc/nova/rootwrap.d/compute.filters
++++ b/etc/nova/rootwrap.d/compute.filters
+@@ -41,7 +41,6 @@ mkdir: CommandFilter, mkdir, root
+ # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
+ # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
+ # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
+-# nova/utils.py: 'chown', owner_uid, path
+ chown: CommandFilter, chown, root
+ 
+ # nova/virt/disk/vfs/localfs.py: 'chmod'
+diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py
+index 5424f7b..80ade57 100644
+--- a/nova/tests/virt/libvirt/test_imagebackend.py
++++ b/nova/tests/virt/libvirt/test_imagebackend.py
+@@ -29,7 +29,6 @@ from nova.openstack.common import uuidutils
+ from nova import test
+ from nova.tests import fake_processutils
+ from nova.tests.virt.libvirt import fake_libvirt_utils
+-from nova import utils
+ from nova.virt.libvirt import imagebackend
+ 
+ CONF = cfg.CONF
+@@ -68,10 +67,6 @@ class _ImageTestCase(object):
+             'nova.virt.libvirt.imagebackend.libvirt_utils',
+             fake_libvirt_utils))
+ 
+-        def fake_chown(path, owner_uid=None):
+-            return None
+-        self.stubs.Set(utils, 'chown', fake_chown)
+-
+     def tearDown(self):
+         super(_ImageTestCase, self).tearDown()
+         shutil.rmtree(self.INSTANCES_PATH)
+@@ -128,10 +123,6 @@ class RawTestCase(_ImageTestCase, test.NoDBTestCase):
+         super(RawTestCase, self).setUp()
+         self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
+ 
+-        def fake_chown(path, owner_uid=None):
+-            return None
+-        self.stubs.Set(utils, 'chown', fake_chown)
+-
+     def prepare_mocks(self):
+         fn = self.mox.CreateMockAnything()
+         self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+@@ -246,10 +237,6 @@ class RawTestCase(_ImageTestCase, test.NoDBTestCase):
+         self.mox.StubOutWithMock(os.path, 'exists')
+         self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
+ 
+-        def fake_chown(path, owner_uid=None):
+-            return None
+-        self.stubs.Set(utils, 'chown', fake_chown)
+-
+         os.path.exists(self.PATH).AndReturn(True)
+         os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+         info = self.mox.CreateMockAnything()
+@@ -278,10 +265,6 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
+         self.QCOW2_BASE = (self.TEMPLATE_PATH +
+                            '_%d' % (self.SIZE / (1024 * 1024 * 1024)))
+ 
+-        def fake_chown(path, owner_uid=None):
+-            return None
+-        self.stubs.Set(utils, 'chown', fake_chown)
+-
+     def prepare_mocks(self):
+         fn = self.mox.CreateMockAnything()
+         self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+@@ -873,10 +856,6 @@ class BackendTestCase(test.NoDBTestCase):
+     def setUp(self):
+         super(BackendTestCase, self).setUp()
+ 
+-        def fake_chown(path, owner_uid=None):
+-            return None
+-        self.stubs.Set(utils, 'chown', fake_chown)
+-
+     def get_image(self, use_cow, image_type):
+         return imagebackend.Backend(use_cow).image(self.INSTANCE,
+                                                    self.NAME,
+diff --git a/nova/utils.py b/nova/utils.py
+index 4757f3a..599cb64 100755
+--- a/nova/utils.py
++++ b/nova/utils.py
+@@ -924,20 +924,6 @@ def temporary_chown(path, owner_uid=None):
+             execute('chown', orig_uid, path, run_as_root=True)
+ 
+ 
+-def chown(path, owner_uid=None):
+-    """chown a path.
+-
+-    :param owner_uid: UID of owner (defaults to current user)
+-    """
+-    if owner_uid is None:
+-        owner_uid = os.getuid()
+-
+-    orig_uid = os.stat(path).st_uid
+-
+-    if orig_uid != owner_uid:
+-        execute('chown', owner_uid, path, run_as_root=True)
+-
+-
+ @contextlib.contextmanager
+ def tempdir(**kwargs):
+     argdict = kwargs.copy()
+diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
+index ed11c90..29131d9 100644
+--- a/nova/virt/libvirt/imagebackend.py
++++ b/nova/virt/libvirt/imagebackend.py
+@@ -264,20 +264,21 @@ class Image(object):
+                             lock_path=self.lock_path)
+         def write_to_disk_info_file():
+             # Use os.open to create it without group or world write permission.
+-            fd = os.open(self.disk_info_path, os.O_RDWR | os.O_CREAT, 0o644)
+-            with os.fdopen(fd, "r+") as disk_info_file:
++            fd = os.open(self.disk_info_path, os.O_RDONLY | os.O_CREAT, 0o644)
++            with os.fdopen(fd, "r") as disk_info_file:
+                 line = disk_info_file.read().rstrip()
+                 dct = _dict_from_line(line)
+-                if self.path in dct:
+-                    msg = _("Attempted overwrite of an existing value.")
+-                    raise exception.InvalidDiskInfo(reason=msg)
+-                dct.update({self.path: driver_format})
+-                disk_info_file.seek(0)
+-                disk_info_file.truncate()
+-                disk_info_file.write('%s\n' % jsonutils.dumps(dct))
+-            # Ensure the file is always owned by the nova user so qemu can't
+-            # write it.
+-            utils.chown(self.disk_info_path, owner_uid=os.getuid())
++
++            if self.path in dct:
++                msg = _("Attempted overwrite of an existing value.")
++                raise exception.InvalidDiskInfo(reason=msg)
++            dct.update({self.path: driver_format})
++
++            tmp_path = self.disk_info_path + ".tmp"
++            fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, 0o644)
++            with os.fdopen(fd, "w") as tmp_file:
++                tmp_file.write('%s\n' % jsonutils.dumps(dct))
++            os.rename(tmp_path, self.disk_info_path)
+ 
+         try:
+             if (self.disk_info_path is not None and
+-- 
+1.7.9.2
+
--- a/components/openstack/nova/patches/05-CVE-2013-4463.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,424 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.1, Icehouse
-
-commit 135faa7b5d9855312bedc19e5e1ecebae34d3d18
-Author: Pádraig Brady <[email protected]>
-Date:   Fri Sep 27 04:07:14 2013 +0100
-
-    ensure we don't boot oversized images
-    
-    Since we can't generally shrink incoming images, add extra checks
-    to ensure oversized images are not allowed through.
-    All cases when populating the libvirt image cache are now handled,
-    including the initial download from glance, where we avoid
-    converting to raw, as that could generate non sparse images
-    much larger than the downloaded image.
-    
-    * nova/virt/libvirt/utils.py (fetch_image): Allow passing through
-    of the max_size parameter.
-    * nova/virt/images.py (fetch_to_raw): Accept the max_size parameter,
-    and use it to discard images with larger (virtual) sizes.
-    * nova/virt/libvirt/imagebackend.py (verify_base_size): A new
-    refactored function to identify and raise exception to oversized images.
-    (Raw.create_image): Pass the max_size to the fetch function.
-    Also enforce virtual image size checking for already fetched images,
-    as this class (despite the name) can be handling qcow files.
-    (Qcow2.create_image): Pass the max_size to the fetch function,
-    or verify the virtual size for the instance as done previously.
-    (Lvm.create_image): Pass the max_size to the fetch function.
-    Also check the size before transferring to the volume to improve
-    efficiency by not even attempting the transfer of oversized images.
-    (Rbd.create_image): Likewise.
-    * nova/tests/fake_libvirt_utils.py: Support max_size arg.
-    * nova/tests/test_libvirt.py (test_fetch_raw_image):
-    Add a case to check oversized images are discarded.
-    * nova/tests/test_imagebackend.py (test_create_image_too_small):
-    Adjust to avoid the fetch size check.
-    
-    Fixes bug: 1177830
-    Fixes bug: 1206081
-    
-    Conflicts:
-    
-    	nova/tests/test_imagebackend.py
-    	nova/virt/libvirt/imagebackend.py
-    
-    Change-Id: Idc35fce580be4f74e23883d1b4bea6475c3f6e30
-
-diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
-index 23b758e..ecf357a 100644
---- a/nova/tests/fake_libvirt_utils.py
-+++ b/nova/tests/fake_libvirt_utils.py
-@@ -193,7 +193,7 @@ def get_fs_info(path):
-             'free': 84 * (1024 ** 3)}
- 
- 
--def fetch_image(context, target, image_id, user_id, project_id):
-+def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
-     pass
- 
- 
-diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
-index 77446e8..93ed23d 100644
---- a/nova/tests/test_imagebackend.py
-+++ b/nova/tests/test_imagebackend.py
-@@ -189,7 +189,7 @@ class RawTestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH, image_id=None)
-+        fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
-         imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
-         self.mox.ReplayAll()
- 
-@@ -210,7 +210,7 @@ class RawTestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image_extend(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH, image_id=None)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
-         imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
-         imagebackend.disk.extend(self.PATH, self.SIZE)
-         self.mox.ReplayAll()
-@@ -260,7 +260,7 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=None, target=self.TEMPLATE_PATH)
-         imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
-                                                     self.PATH)
-         self.mox.ReplayAll()
-@@ -272,15 +272,12 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image_with_size(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
-         self.mox.StubOutWithMock(os.path, 'exists')
--        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
-         if self.OLD_STYLE_INSTANCE_PATH:
-             os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
-         os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
-         os.path.exists(self.PATH).AndReturn(False)
--        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
--                                       ).AndReturn(self.SIZE)
-         os.path.exists(self.PATH).AndReturn(False)
-         imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
-                                                     self.PATH)
-@@ -294,27 +291,24 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image_too_small(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-         self.mox.StubOutWithMock(os.path, 'exists')
-         self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
-         if self.OLD_STYLE_INSTANCE_PATH:
-             os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
--        os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
--        os.path.exists(self.PATH).AndReturn(False)
-+        os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
-         imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
-                                        ).AndReturn(self.SIZE)
-         self.mox.ReplayAll()
- 
-         image = self.image_class(self.INSTANCE, self.NAME)
--        self.assertRaises(exception.ImageTooLarge, image.create_image, fn,
--                          self.TEMPLATE_PATH, 1)
-+        self.assertRaises(exception.InstanceTypeDiskTooSmall,
-+                          image.create_image, fn, self.TEMPLATE_PATH, 1)
-         self.mox.VerifyAll()
- 
-     def test_generate_resized_backing_files(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
-         self.mox.StubOutWithMock(os.path, 'exists')
--        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
-         self.mox.StubOutWithMock(imagebackend.libvirt_utils,
-                                  'get_disk_backing_file')
-         if self.OLD_STYLE_INSTANCE_PATH:
-@@ -329,8 +323,6 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
-                                               self.QCOW2_BASE)
-         imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE)
- 
--        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
--                                       ).AndReturn(self.SIZE)
-         os.path.exists(self.PATH).AndReturn(True)
-         self.mox.ReplayAll()
- 
-@@ -341,9 +333,8 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
- 
-     def test_qcow2_exists_and_has_no_backing_file(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
-         self.mox.StubOutWithMock(os.path, 'exists')
--        self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
-         self.mox.StubOutWithMock(imagebackend.libvirt_utils,
-                                  'get_disk_backing_file')
-         if self.OLD_STYLE_INSTANCE_PATH:
-@@ -353,8 +344,6 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
- 
-         imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
-             .AndReturn(None)
--        imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
--                                       ).AndReturn(self.SIZE)
-         os.path.exists(self.PATH).AndReturn(True)
-         self.mox.ReplayAll()
- 
-@@ -391,7 +380,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
- 
-     def _create_image(self, sparse):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=None, target=self.TEMPLATE_PATH)
-         self.libvirt_utils.create_lvm_image(self.VG,
-                                             self.LV,
-                                             self.TEMPLATE_SIZE,
-@@ -423,7 +412,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
- 
-     def _create_image_resize(self, sparse):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
-         self.libvirt_utils.create_lvm_image(self.VG, self.LV,
-                                             self.SIZE, sparse=sparse)
-         self.disk.get_disk_size(self.TEMPLATE_PATH
-@@ -462,7 +451,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
- 
-     def test_create_image_negative(self):
-         fn = self.prepare_mocks()
--        fn(target=self.TEMPLATE_PATH)
-+        fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
-         self.libvirt_utils.create_lvm_image(self.VG,
-                                             self.LV,
-                                             self.SIZE,
-diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
-index d8c4cf2..e422ec7 100644
---- a/nova/tests/test_libvirt.py
-+++ b/nova/tests/test_libvirt.py
-@@ -4826,7 +4826,8 @@ disk size: 4.4M''', ''))
-         image_id = '4'
-         user_id = 'fake'
-         project_id = 'fake'
--        images.fetch_to_raw(context, image_id, target, user_id, project_id)
-+        images.fetch_to_raw(context, image_id, target, user_id, project_id,
-+                            max_size=0)
- 
-         self.mox.ReplayAll()
-         libvirt_utils.fetch_image(context, target, image_id,
-@@ -4856,20 +4857,27 @@ disk size: 4.4M''', ''))
-                 file_format = path.split('.')[-2]
-             elif file_format == 'converted':
-                 file_format = 'raw'
-+
-             if 'backing' in path:
-                 backing_file = 'backing'
-             else:
-                 backing_file = None
- 
-+            if 'big' in path:
-+                virtual_size = 2
-+            else:
-+                virtual_size = 1
-+
-             FakeImgInfo.file_format = file_format
-             FakeImgInfo.backing_file = backing_file
-+            FakeImgInfo.virtual_size = virtual_size
- 
-             return FakeImgInfo()
- 
-         self.stubs.Set(utils, 'execute', fake_execute)
-         self.stubs.Set(os, 'rename', fake_rename)
-         self.stubs.Set(os, 'unlink', fake_unlink)
--        self.stubs.Set(images, 'fetch', lambda *_: None)
-+        self.stubs.Set(images, 'fetch', lambda *_, **__: None)
-         self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
-         self.stubs.Set(utils, 'delete_if_exists', fake_rm_on_errror)
- 
-@@ -4884,7 +4892,8 @@ disk size: 4.4M''', ''))
-                               't.qcow2.part', 't.qcow2.converted'),
-                              ('rm', 't.qcow2.part'),
-                              ('mv', 't.qcow2.converted', 't.qcow2')]
--        images.fetch_to_raw(context, image_id, target, user_id, project_id)
-+        images.fetch_to_raw(context, image_id, target, user_id, project_id,
-+                            max_size=1)
-         self.assertEqual(self.executes, expected_commands)
- 
-         target = 't.raw'
-@@ -4901,6 +4910,15 @@ disk size: 4.4M''', ''))
-                           context, image_id, target, user_id, project_id)
-         self.assertEqual(self.executes, expected_commands)
- 
-+        target = 'big.qcow2'
-+        self.executes = []
-+        expected_commands = [('rm', '-f', 'big.qcow2.part')]
-+        self.assertRaises(exception.InstanceTypeDiskTooSmall,
-+                          images.fetch_to_raw,
-+                          context, image_id, target, user_id, project_id,
-+                          max_size=1)
-+        self.assertEqual(self.executes, expected_commands)
-+
-         del self.executes
- 
-     def test_get_disk_backing_file(self):
-diff --git a/nova/virt/images.py b/nova/virt/images.py
-index b40f566..541779a 100755
---- a/nova/virt/images.py
-+++ b/nova/virt/images.py
-@@ -190,7 +190,7 @@ def convert_image(source, dest, out_format, run_as_root=False):
-     utils.execute(*cmd, run_as_root=run_as_root)
- 
- 
--def fetch(context, image_href, path, _user_id, _project_id):
-+def fetch(context, image_href, path, _user_id, _project_id, max_size=0):
-     # TODO(vish): Improve context handling and add owner and auth data
-     #             when it is added to glance.  Right now there is no
-     #             auth checking in glance, so we assume that access was
-@@ -202,9 +202,10 @@ def fetch(context, image_href, path, _user_id, _project_id):
-             image_service.download(context, image_id, image_file)
- 
- 
--def fetch_to_raw(context, image_href, path, user_id, project_id):
-+def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0):
-     path_tmp = "%s.part" % path
--    fetch(context, image_href, path_tmp, user_id, project_id)
-+    fetch(context, image_href, path_tmp, user_id, project_id,
-+          max_size=max_size)
- 
-     with utils.remove_path_on_error(path_tmp):
-         data = qemu_img_info(path_tmp)
-@@ -220,6 +221,23 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
-             raise exception.ImageUnacceptable(image_id=image_href,
-                 reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals())
- 
-+        # We can't generally shrink incoming images, so disallow
-+        # images > size of the flavor we're booting.  Checking here avoids
-+        # an immediate DoS where we convert large qcow images to raw
-+        # (which may compress well but not be sparse).
-+        # TODO(p-draigbrady): loop through all flavor sizes, so that
-+        # we might continue here and not discard the download.
-+        # If we did that we'd have to do the higher level size checks
-+        # irrespective of whether the base image was prepared or not.
-+        disk_size = data.virtual_size
-+        if max_size and max_size < disk_size:
-+            msg = _('%(base)s virtual size %(disk_size)s '
-+                    'larger than flavor root disk size %(size)s')
-+            LOG.error(msg % {'base': path,
-+                             'disk_size': disk_size,
-+                             'size': max_size})
-+            raise exception.InstanceTypeDiskTooSmall()
-+
-         if fmt != "raw" and CONF.force_raw_images:
-             staged = "%s.converted" % path
-             LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
-diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
-index e2c7ccf..dc85c97 100755
---- a/nova/virt/libvirt/imagebackend.py
-+++ b/nova/virt/libvirt/imagebackend.py
-@@ -177,6 +177,36 @@ class Image(object):
-                           (CONF.preallocate_images, self.path))
-         return can_fallocate
- 
-+    @staticmethod
-+    def verify_base_size(base, size, base_size=0):
-+        """Check that the base image is not larger than size.
-+           Since images can't be generally shrunk, enforce this
-+           constraint taking account of virtual image size.
-+        """
-+
-+        # Note(pbrady): The size and min_disk parameters of a glance
-+        #  image are checked against the instance size before the image
-+        #  is even downloaded from glance, but currently min_disk is
-+        #  adjustable and doesn't currently account for virtual disk size,
-+        #  so we need this extra check here.
-+        # NOTE(cfb): Having a flavor that sets the root size to 0 and having
-+        #  nova effectively ignore that size and use the size of the
-+        #  image is considered a feature at this time, not a bug.
-+
-+        if size is None:
-+            return
-+
-+        if size and not base_size:
-+            base_size = disk.get_disk_size(base)
-+
-+        if size < base_size:
-+            msg = _('%(base)s virtual size %(base_size)s '
-+                    'larger than flavor root disk size %(size)s')
-+            LOG.error(msg % {'base': base,
-+                              'base_size': base_size,
-+                              'size': size})
-+            raise exception.InstanceTypeDiskTooSmall()
-+
-     def snapshot_create(self):
-         raise NotImplementedError
- 
-@@ -217,7 +247,8 @@ class Raw(Image):
-             #Generating image in place
-             prepare_template(target=self.path, *args, **kwargs)
-         else:
--            prepare_template(target=base, *args, **kwargs)
-+            prepare_template(target=base, max_size=size, *args, **kwargs)
-+            self.verify_base_size(base, size)
-             if not os.path.exists(self.path):
-                 with utils.remove_path_on_error(self.path):
-                     copy_raw_image(base, self.path, size)
-@@ -257,7 +288,9 @@ class Qcow2(Image):
- 
-         # Download the unmodified base image unless we already have a copy.
-         if not os.path.exists(base):
--            prepare_template(target=base, *args, **kwargs)
-+            prepare_template(target=base, max_size=size, *args, **kwargs)
-+        else:
-+            self.verify_base_size(base, size)
- 
-         legacy_backing_size = None
-         legacy_base = base
-@@ -283,13 +316,6 @@ class Qcow2(Image):
-                     libvirt_utils.copy_image(base, legacy_base)
-                     disk.extend(legacy_base, legacy_backing_size)
- 
--        # NOTE(cfb): Having a flavor that sets the root size to 0 and having
--        #            nova effectively ignore that size and use the size of the
--        #            image is considered a feature at this time, not a bug.
--        if size and size < disk.get_disk_size(base):
--            LOG.error('%s virtual size larger than flavor root disk size %s' %
--                      (base, size))
--            raise exception.ImageTooLarge()
-         if not os.path.exists(self.path):
-             with utils.remove_path_on_error(self.path):
-                 copy_qcow2_image(base, self.path, size)
-@@ -348,6 +374,7 @@ class Lvm(Image):
-                                 lock_path=self.lock_path)
-         def create_lvm_image(base, size):
-             base_size = disk.get_disk_size(base)
-+            self.verify_base_size(base, size, base_size=base_size)
-             resize = size > base_size
-             size = size if resize else base_size
-             libvirt_utils.create_lvm_image(self.vg, self.lv,
-@@ -365,7 +392,7 @@ class Lvm(Image):
-             with self.remove_volume_on_error(self.path):
-                 prepare_template(target=self.path, *args, **kwargs)
-         else:
--            prepare_template(target=base, *args, **kwargs)
-+            prepare_template(target=base, max_size=size, *args, **kwargs)
-             with self.remove_volume_on_error(self.path):
-                 create_lvm_image(base, size)
- 
-diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
-index 6972243..4c31fcb 100755
---- a/nova/virt/libvirt/utils.py
-+++ b/nova/virt/libvirt/utils.py
-@@ -592,9 +592,10 @@ def get_fs_info(path):
-             'used': used}
- 
- 
--def fetch_image(context, target, image_id, user_id, project_id):
-+def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
-     """Grab image."""
--    images.fetch_to_raw(context, image_id, target, user_id, project_id)
-+    images.fetch_to_raw(context, image_id, target, user_id, project_id,
-+                        max_size=max_size)
- 
- 
- def get_instance_path(instance, forceold=False, relative=False):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/05-CVE-2014-0167.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,146 @@
+This upstream patch addresses CVE-2014-0167 and is tracked under
+Launchpad bug 1290537. It is addressed in Icehouse 2014.1 and Havana
+2013.2.4.
+
+commit dbb7dd03fea68120ef5ac9bbb1b3f184e3f2eacc
+Author: Andrew Laski <[email protected]>
+Date:   Wed Apr 9 09:27:44 2014 -0400
+
+    Add RBAC policy for ec2 API security groups calls
+    
+    The revoke_security_group_ingress, revoke_security_group_ingress, and
+    delete_security_group calls in the ec2 API were not restricted by policy
+    checks.  This prevented a deployer from restricting their usage via
+    roles or other checks.  Checks have been added for these calls.
+    
+    Based on commit d4056f8723cc6cefb28ff6e5a7c0df5ea77f82ef but modified
+    for the backport.
+    
+    Closes-Bug: #1290537
+    Change-Id: I4bf681bedd68ed2216b429d34db735823e0a6189
+
+diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
+index 94ff160..36c2f12 100644
+--- a/nova/api/ec2/cloud.py
++++ b/nova/api/ec2/cloud.py
+@@ -30,6 +30,7 @@ from oslo.config import cfg
+ from nova.api.ec2 import ec2utils
+ from nova.api.ec2 import inst_state
+ from nova.api.metadata import password
++from nova.api.openstack import extensions
+ from nova.api import validator
+ from nova import availability_zones
+ from nova import block_device
+@@ -85,6 +86,9 @@ LOG = logging.getLogger(__name__)
+ 
+ QUOTAS = quota.QUOTAS
+ 
++security_group_authorizer = extensions.extension_authorizer('compute',
++                                                            'security_groups')
++
+ 
+ def validate_ec2_id(val):
+     if not validator.validate_str()(val):
+@@ -631,6 +635,8 @@ class CloudController(object):
+         security_group = self.security_group_api.get(context, group_name,
+                                                      group_id)
+ 
++        security_group_authorizer(context, security_group)
++
+         prevalues = kwargs.get('ip_permissions', [kwargs])
+ 
+         rule_ids = []
+@@ -665,6 +671,8 @@ class CloudController(object):
+         security_group = self.security_group_api.get(context, group_name,
+                                                      group_id)
+ 
++        security_group_authorizer(context, security_group)
++
+         prevalues = kwargs.get('ip_permissions', [kwargs])
+         postvalues = []
+         for values in prevalues:
+@@ -737,6 +745,8 @@ class CloudController(object):
+         security_group = self.security_group_api.get(context, group_name,
+                                                      group_id)
+ 
++        security_group_authorizer(context, security_group)
++
+         self.security_group_api.destroy(context, security_group)
+ 
+         return True
+diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
+index 269a738..b28d194 100644
+--- a/nova/tests/api/ec2/test_cloud.py
++++ b/nova/tests/api/ec2/test_cloud.py
+@@ -23,6 +23,7 @@ import copy
+ import datetime
+ import functools
+ import iso8601
++import mock
+ import os
+ import string
+ import tempfile
+@@ -47,6 +48,7 @@ from nova.image import s3
+ from nova.network import api as network_api
+ from nova.network import neutronv2
+ from nova.openstack.common import log as logging
++from nova.openstack.common import policy as common_policy
+ from nova.openstack.common import timeutils
+ from nova import test
+ from nova.tests.api.openstack.compute.contrib import (
+@@ -471,6 +473,34 @@ class CloudTestCase(test.TestCase):
+         delete = self.cloud.delete_security_group
+         self.assertRaises(exception.MissingParameter, delete, self.context)
+ 
++    def test_delete_security_group_policy_not_allowed(self):
++        rules = common_policy.Rules(
++                {'compute_extension:security_groups':
++                    common_policy.parse_rule('project_id:%(project_id)s')})
++        common_policy.set_rules(rules)
++
++        with mock.patch.object(self.cloud.security_group_api,
++                'get') as get:
++            get.return_value = {'project_id': 'invalid'}
++
++            self.assertRaises(exception.PolicyNotAuthorized,
++                    self.cloud.delete_security_group, self.context,
++                    'fake-name', 'fake-id')
++
++    def test_authorize_security_group_ingress_policy_not_allowed(self):
++        rules = common_policy.Rules(
++                {'compute_extension:security_groups':
++                    common_policy.parse_rule('project_id:%(project_id)s')})
++        common_policy.set_rules(rules)
++
++        with mock.patch.object(self.cloud.security_group_api,
++                'get') as get:
++            get.return_value = {'project_id': 'invalid'}
++
++            self.assertRaises(exception.PolicyNotAuthorized,
++                    self.cloud.authorize_security_group_ingress, self.context,
++                    'fake-name', 'fake-id')
++
+     def test_authorize_security_group_ingress(self):
+         kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+         sec = db.security_group_create(self.context, kwargs)
+@@ -575,6 +605,20 @@ class CloudTestCase(test.TestCase):
+         db.security_group_destroy(self.context, sec2['id'])
+         db.security_group_destroy(self.context, sec1['id'])
+ 
++    def test_revoke_security_group_ingress_policy_not_allowed(self):
++        rules = common_policy.Rules(
++                {'compute_extension:security_groups':
++                    common_policy.parse_rule('project_id:%(project_id)s')})
++        common_policy.set_rules(rules)
++
++        with mock.patch.object(self.cloud.security_group_api,
++                'get') as get:
++            get.return_value = {'project_id': 'invalid'}
++
++            self.assertRaises(exception.PolicyNotAuthorized,
++                    self.cloud.revoke_security_group_ingress, self.context,
++                    'fake-name', 'fake-id')
++
+     def test_revoke_security_group_ingress(self):
+         kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+         sec = db.security_group_create(self.context, kwargs)
--- a/components/openstack/nova/patches/06-CVE-2013-6419.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.1, Icehouse
-
-commit 07006be9165d1008ca0382b6f0ad25b13a676a55
-Author: Aaron Rosen <[email protected]>
-Date:   Mon Oct 7 13:33:31 2013 -0700
-
-    Prevent spoofing instance_id from neutron to nova
-    
-    Previously, one could update a port's device_id in neutron to be
-    that of another tenant's instance_id and then be able to retrieve
-    that instance's metadata. This patch prevents this from occurring by
-    checking that X-Tenant-ID received from the metadata request matches
-    the tenant_id in the nova database.
-    
-    DocImpact - This patch is dependent on another patch in neutron
-                which adds X-Tenant-ID to the request. Therefore to
-                minimize downtime one should upgrade Neutron first (then
-                restart neutron-metadata-agent) and lastly update nova.
-    
-    Change-Id: I93bf662797c3986324ca2099b403833c2e990fb4
-    Closes-Bug: #1235450
-
-diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
-index bbaeba5..2b7f659 100644
---- a/nova/api/metadata/handler.py
-+++ b/nova/api/metadata/handler.py
-@@ -144,6 +144,7 @@ class MetadataRequestHandler(wsgi.Application):
- 
-     def _handle_instance_id_request(self, req):
-         instance_id = req.headers.get('X-Instance-ID')
-+        tenant_id = req.headers.get('X-Tenant-ID')
-         signature = req.headers.get('X-Instance-ID-Signature')
-         remote_address = req.headers.get('X-Forwarded-For')
- 
-@@ -151,8 +152,12 @@ class MetadataRequestHandler(wsgi.Application):
- 
-         if instance_id is None:
-             msg = _('X-Instance-ID header is missing from request.')
-+        elif tenant_id is None:
-+            msg = _('X-Tenant-ID header is missing from request.')
-         elif not isinstance(instance_id, basestring):
-             msg = _('Multiple X-Instance-ID headers found within request.')
-+        elif not isinstance(tenant_id, basestring):
-+            msg = _('Multiple X-Tenant-ID headers found within request.')
-         else:
-             msg = None
- 
-@@ -188,4 +193,12 @@ class MetadataRequestHandler(wsgi.Application):
-             LOG.error(_('Failed to get metadata for instance id: %s'),
-                       instance_id)
- 
-+        if meta_data.instance['project_id'] != tenant_id:
-+            LOG.warning(_("Tenant_id %(tenant_id)s does not match tenant_id "
-+                          "of instance %(instance_id)s."),
-+                        {'tenant_id': tenant_id,
-+                         'instance_id': instance_id})
-+            # causes a 404 to be raised
-+            meta_data = None
-+
-         return meta_data
-diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
-index 01f274f..51b6f72 100644
---- a/nova/tests/test_metadata.py
-+++ b/nova/tests/test_metadata.py
-@@ -510,6 +510,7 @@ class MetadataHandlerTestCase(test.TestCase):
-             relpath="/2009-04-04/user-data",
-             address="192.192.192.2",
-             headers={'X-Instance-ID': 'a-b-c-d',
-+                     'X-Tenant-ID': 'test',
-                      'X-Instance-ID-Signature': signed})
-         self.assertEqual(response.status_int, 200)
- 
-@@ -522,6 +523,7 @@ class MetadataHandlerTestCase(test.TestCase):
-             fake_get_metadata_by_instance_id=fake_get_metadata,
-             headers={'X-Forwarded-For': '192.192.192.2',
-                      'X-Instance-ID': 'a-b-c-d',
-+                     'X-Tenant-ID': 'test',
-                      'X-Instance-ID-Signature': signed})
- 
-         self.assertEqual(response.status_int, 200)
-@@ -536,10 +538,36 @@ class MetadataHandlerTestCase(test.TestCase):
-             fake_get_metadata_by_instance_id=fake_get_metadata,
-             headers={'X-Forwarded-For': '192.192.192.2',
-                      'X-Instance-ID': 'a-b-c-d',
-+                     'X-Tenant-ID': 'test',
-                      'X-Instance-ID-Signature': ''})
- 
-         self.assertEqual(response.status_int, 403)
- 
-+        # missing X-Tenant-ID from request
-+        response = fake_request(
-+            self.stubs, self.mdinst,
-+            relpath="/2009-04-04/user-data",
-+            address="192.192.192.2",
-+            fake_get_metadata_by_instance_id=fake_get_metadata,
-+            headers={'X-Forwarded-For': '192.192.192.2',
-+                     'X-Instance-ID': 'a-b-c-d',
-+                     'X-Instance-ID-Signature': signed})
-+
-+        self.assertEqual(response.status_int, 400)
-+
-+        # mismatched X-Tenant-ID
-+        response = fake_request(
-+            self.stubs, self.mdinst,
-+            relpath="/2009-04-04/user-data",
-+            address="192.192.192.2",
-+            fake_get_metadata_by_instance_id=fake_get_metadata,
-+            headers={'X-Forwarded-For': '192.192.192.2',
-+                     'X-Instance-ID': 'a-b-c-d',
-+                     'X-Tenant-ID': 'FAKE',
-+                     'X-Instance-ID-Signature': signed})
-+
-+        self.assertEqual(response.status_int, 404)
-+
-         # without X-Forwarded-For
-         response = fake_request(
-             self.stubs, self.mdinst,
-@@ -547,6 +575,7 @@ class MetadataHandlerTestCase(test.TestCase):
-             address="192.192.192.2",
-             fake_get_metadata_by_instance_id=fake_get_metadata,
-             headers={'X-Instance-ID': 'a-b-c-d',
-+                     'X-Tenant-ID': 'test',
-                      'X-Instance-ID-Signature': signed})
- 
-         self.assertEqual(response.status_int, 500)
-@@ -564,6 +593,7 @@ class MetadataHandlerTestCase(test.TestCase):
-             fake_get_metadata_by_instance_id=fake_get_metadata,
-             headers={'X-Forwarded-For': '192.192.192.2',
-                      'X-Instance-ID': 'z-z-z-z',
-+                     'X-Tenant-ID': 'test',
-                      'X-Instance-ID-Signature': signed})
-         self.assertEqual(response.status_int, 500)
- 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/nova/patches/06-CVE-2014-2573.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,178 @@
+This upstream patch addresses CVE-2014-2573 and is tracked under
+Launchpad bug 1269418. It is addressed in Icehouse 2014.1 and Havana
+2013.2.4. It has been modified to apply cleanly to our current Havana
+implementation.
+
+This particular hypervisor driver is not currently shipped with
+Solaris.
+
+commit b3cc3f62a60662e5bb82136c0cfa464592a6afe9
+Author: Gary Kotton <[email protected]>
+Date:   Thu Mar 13 06:53:58 2014 -0700
+
+    VMware: ensure rescue instance is deleted when instance is deleted
+    
+    If the user creates a rescue instance and then proceeded to delete
+    the original instance then the rescue instance would still be up
+    and running on the backend.
+    
+    This patch ensures that the rescue instance is cleaned up if
+    necessary.
+    
+    The vmops unrescue method has a new parameter indicating if
+    the original VM should be powered on.
+    
+    Closes-bug: 1269418
+    (cherry picked from commit efb66531bc37ee416778a70d46c657608ca767af)
+    
+    Conflicts:
+    
+    	nova/tests/virt/vmwareapi/test_vmwareapi.py
+    	nova/virt/vmwareapi/vmops.py
+    
+    Change-Id: I3c1d0b1d003392b306094b80ea1ac99377441fbf
+
+--- nova-2013.2.3/nova/tests/virt/vmwareapi/test_vmwareapi.py.~1~	2014-04-03 11:49:46.000000000 -0700
++++ nova-2013.2.3/nova/tests/virt/vmwareapi/test_vmwareapi.py	2014-06-09 23:03:38.008877252 -0700
+@@ -34,6 +34,7 @@
+ from nova.compute import api as compute_api
+ from nova.compute import power_state
+ from nova.compute import task_states
++from nova.compute import vm_states
+ from nova import context
+ from nova import db
+ from nova import exception
+@@ -793,6 +794,31 @@
+                                    'node': self.instance_node})
+         self._check_vm_info(info, power_state.RUNNING)
+ 
++    def destroy_rescued(self, fake_method):
++        self._rescue()
++        with (
++            mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
++                              fake_method)
++        ):
++            self.instance['vm_state'] = vm_states.RESCUED
++            self.conn.destroy(self.instance, self.network_info)
++            inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
++            self.assertFalse(vmwareapi_fake.get_file(inst_path))
++            rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
++                                                                  self.uuid,
++                                                                  self.uuid)
++            self.assertFalse(vmwareapi_fake.get_file(rescue_file_path))
++
++    def test_destroy_rescued(self):
++        def fake_detach_disk_from_vm(*args, **kwargs):
++            pass
++        self.destroy_rescued(fake_detach_disk_from_vm)
++
++    def test_destroy_rescued_with_exception(self):
++        def fake_detach_disk_from_vm(*args, **kwargs):
++            raise exception.NovaException('Here is my fake exception')
++        self.destroy_rescued(fake_detach_disk_from_vm)
++
+     def test_destroy(self):
+         self._create_vm()
+         info = self.conn.get_info({'uuid': self.uuid,
+--- nova-2013.2.3/nova/virt/vmwareapi/vmops.py.~1~	2014-04-03 11:49:46.000000000 -0700
++++ nova-2013.2.3/nova/virt/vmwareapi/vmops.py	2014-06-09 23:09:13.557941347 -0700
+@@ -35,6 +35,7 @@
+ from nova import compute
+ from nova.compute import power_state
+ from nova.compute import task_states
++from nova.compute import vm_states
+ from nova import context as nova_context
+ from nova import exception
+ from nova.openstack.common import excutils
+@@ -904,13 +905,9 @@
+         except Exception as exc:
+             LOG.exception(exc, instance=instance)
+ 
+-    def destroy(self, instance, network_info, destroy_disks=True):
+-        """
+-        Destroy a VM instance. Steps followed are:
+-        1. Power off the VM, if it is in poweredOn state.
+-        2. Un-register a VM.
+-        3. Delete the contents of the folder holding the VM related data.
+-        """
++    def _destroy_instance(self, instance, network_info, destroy_disks=True,
++                          instance_name=None):
++        # Destroy a VM instance
+         try:
+             vm_ref = vm_util.get_vm_ref(self._session, instance)
+             lst_properties = ["config.files.vmPathName", "runtime.powerState",
+@@ -943,8 +940,9 @@
+                                            "UnregisterVM", vm_ref)
+                 LOG.debug(_("Unregistered the VM"), instance=instance)
+             except Exception as excep:
+-                LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
+-                           " while un-registering the VM: %s") % str(excep))
++                LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
++                           "exception while un-registering the VM: %s"),
++                         excep)
+ 
+             if network_info:
+                 self.unplug_vifs(instance, network_info)
+@@ -976,13 +974,37 @@
+                                {'datastore_name': datastore_name},
+                               instance=instance)
+                 except Exception as excep:
+-                    LOG.warn(_("In vmwareapi:vmops:destroy, "
+-                                 "got this exception while deleting"
+-                                 " the VM contents from the disk: %s")
+-                                 % str(excep))
++                    LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
++                                "got this exception while deleting "
++                                "the VM contents from the disk: %s"),
++                             excep)
+         except Exception as exc:
+             LOG.exception(exc, instance=instance)
+ 
++    def destroy(self, instance, network_info, destroy_disks=True):
++        """Destroy a VM instance.
++
++        Steps followed for each VM are:
++        1. Power off, if it is in poweredOn state.
++        2. Un-register.
++        3. Delete the contents of the folder holding the VM related data.
++        """
++        # If there is a rescue VM then we need to destroy that one too.
++        LOG.debug(_("Destroying instance"), instance=instance)
++        if instance['vm_state'] == vm_states.RESCUED:
++            LOG.debug(_("Rescue VM configured"), instance=instance)
++            try:
++                self.unrescue(instance, power_on=False)
++                LOG.debug(_("Rescue VM destroyed"), instance=instance)
++            except Exception:
++                rescue_name = instance['uuid'] + self._rescue_suffix
++                self._destroy_instance(instance, network_info,
++                                       destroy_disks=destroy_disks,
++                                       instance_name=rescue_name)
++        self._destroy_instance(instance, network_info,
++                               destroy_disks=destroy_disks)
++        LOG.debug(_("Instance destroyed"), instance=instance)
++
+     def pause(self, instance):
+         msg = _("pause not supported for vmwareapi")
+         raise NotImplementedError(msg)
+@@ -1066,7 +1088,7 @@
+                                 controller_key=controller_key,
+                                 unit_number=unit_number)
+ 
+-    def unrescue(self, instance):
++    def unrescue(self, instance, power_on=True):
+         """Unrescue the specified instance."""
+         # Get the original vmdk_path
+         vm_ref = vm_util.get_vm_ref(self._session, instance)
+@@ -1079,8 +1101,9 @@
+         r_instance = copy.deepcopy(instance)
+         r_instance['name'] = r_instance['name'] + self._rescue_suffix
+         r_instance['uuid'] = r_instance['uuid'] + self._rescue_suffix
+-        self.destroy(r_instance, None)
+-        self._power_on(instance)
++        self._destroy_instance(r_instance, None, instance_name=instance_name)
++        if power_on:
++            self._power_on(instance)
+ 
+     def power_off(self, instance):
+         """Power off the specified instance."""
--- a/components/openstack/nova/patches/07-CVE-2013-7048.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.2, Icehouse
-
-commit 9bd7fff8c0160057643cfc37c5e2b1cd3337d6aa
-Author: Xavier Queralt <[email protected]>
-Date:   Wed Nov 27 20:44:36 2013 +0100
-
-    Enforce permissions in snapshots temporary dir
-    
-    Live snapshots creates a temporary directory where libvirt driver
-    creates a new image from the instance's disk using blockRebase.
-    Currently this directory is created with 777 permissions making this
-    directory accessible by all the users in the system.
-    
-    This patch changes the tempdir permissions so they have the o+x
-    flag set, which is what libvirt needs to be able to write in it and
-    
-    Closes-Bug: #1227027
-    Change-Id: I767ff5247b4452821727e92b668276004fc0f84d
-    (cherry picked from commit 8a34fc3d48c467aa196f65eed444ccdc7c02f19f)
-
-diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
-index 6b977cb..4cc85f1 100755
---- a/nova/virt/libvirt/driver.py
-+++ b/nova/virt/libvirt/driver.py
-@@ -1191,9 +1191,8 @@ class LibvirtDriver(driver.ComputeDriver):
-             try:
-                 out_path = os.path.join(tmpdir, snapshot_name)
-                 if live_snapshot:
--                    # NOTE (rmk): libvirt needs to be able to write to the
--                    #             temp directory, which is owned nova.
--                    utils.execute('chmod', '777', tmpdir, run_as_root=True)
-+                    # NOTE(xqueralt): libvirt needs o+x in the temp directory
-+                    os.chmod(tmpdir, 0o701)
-                     self._live_snapshot(virt_dom, disk_path, out_path,
-                                         image_format)
-                 else:
--- a/components/openstack/nova/patches/08-CVE-2013-7130.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,158 +0,0 @@
-Upstream patch fixed in Grizzly 2013.1.5, Havana 2013.2.2, Icehouse
-
-commit cbeb5e51886b0296349fc476305bfe3d63c627c3
-Author: Nikola Dipanov <[email protected]>
-Date:   Tue Dec 10 17:43:17 2013 +0100
-
-    libvirt: Fix root disk leak in live mig
-    
-    This patch makes sure that _create_images_and_backing method of the
-    libvirt driver (called in several places, but most problematic one is
-    the call in the pre_live_migration method) creates all the files the
-    instance needs that are not present.
-    
-    Prioir to this patch - the method would only attempt to download the
-    image, and if it did so with the path of the ephemeral drives, it could
-    expose the image to other users as an ephemeral devices. See the related
-    bug for more detaiis.
-    
-    After this patch - we properly distinguish between image, ephemeral and
-    swap files, and make sure that the imagebackend does the correct thing.
-    
-    Closes-bug: #1251590
-    
-    Co-authored-by: Loganathan Parthipan <[email protected]>
-    
-    This patch also includes part of commit
-    65386c91910ee03d947c2b8bcc226a53c30e060a, not cherry-picked as a whole
-    due to the fact that it is a trivial change, and to avoud the
-    proliferation of patches needed to fix this bug.
-    
-    (cherry picked from commit c69a619668b5f44e94a8fe1a23f3d887ba2834d7)
-    
-    Conflicts:
-    	nova/tests/test_libvirt.py
-    	nova/virt/libvirt/driver.py
-    
-    Change-Id: I78aa2f4243899db4f4941e77014a7e18e27fc63e
-
-diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
-index d2ac73b..d9c7405 100644
---- a/nova/tests/test_libvirt.py
-+++ b/nova/tests/test_libvirt.py
-@@ -2346,6 +2346,69 @@ class LibvirtConnTestCase(test.TestCase):
- 
-         db.instance_destroy(self.context, instance_ref['uuid'])
- 
-+    def test_create_images_and_backing(self):
-+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-+        self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
-+        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
-+
-+        libvirt_driver.libvirt_utils.create_image(mox.IgnoreArg(),
-+                                                  mox.IgnoreArg(),
-+                                                  mox.IgnoreArg())
-+        conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
-+        self.mox.ReplayAll()
-+
-+        self.stubs.Set(os.path, 'exists', lambda *args: False)
-+        disk_info_json = jsonutils.dumps([{'path': 'foo', 'type': None,
-+                                           'disk_size': 0,
-+                                           'backing_file': None}])
-+        conn._create_images_and_backing(self.context, self.test_instance,
-+                                        "/fake/instance/dir", disk_info_json)
-+
-+    def test_create_images_and_backing_ephemeral_gets_created(self):
-+        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-+        disk_info_json = jsonutils.dumps(
-+            [{u'backing_file': u'fake_image_backing_file',
-+              u'disk_size': 10747904,
-+              u'path': u'disk_path',
-+              u'type': u'qcow2',
-+              u'virt_disk_size': 25165824},
-+             {u'backing_file': u'ephemeral_1_default',
-+              u'disk_size': 393216,
-+              u'over_committed_disk_size': 1073348608,
-+              u'path': u'disk_eph_path',
-+              u'type': u'qcow2',
-+              u'virt_disk_size': 1073741824}])
-+
-+        base_dir = os.path.join(CONF.instances_path, '_base')
-+        ephemeral_target = os.path.join(base_dir, 'ephemeral_1_default')
-+        image_target = os.path.join(base_dir, 'fake_image_backing_file')
-+        self.test_instance.update({'name': 'fake_instance',
-+                                   'user_id': 'fake-user',
-+                                   'os_type': None,
-+                                   'project_id': 'fake-project'})
-+
-+        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'fetch_image')
-+        self.mox.StubOutWithMock(conn, '_create_ephemeral')
-+        self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
-+
-+        conn._create_ephemeral(
-+                target=ephemeral_target,
-+                ephemeral_size=self.test_instance['ephemeral_gb'],
-+                max_size=mox.IgnoreArg(), os_type=mox.IgnoreArg(),
-+                fs_label=mox.IgnoreArg())
-+        libvirt_driver.libvirt_utils.fetch_image(context=self.context,
-+                image_id=mox.IgnoreArg(),
-+                user_id=mox.IgnoreArg(), project_id=mox.IgnoreArg(),
-+                max_size=mox.IgnoreArg(), target=image_target)
-+        conn._fetch_instance_kernel_ramdisk(
-+                self.context, self.test_instance).AndReturn(None)
-+
-+        self.mox.ReplayAll()
-+
-+        conn._create_images_and_backing(self.context, self.test_instance,
-+                                        "/fake/instance/dir",
-+                                        disk_info_json)
-+
-     def test_pre_live_migration_works_correctly_mocked(self):
-         # Creating testdata
-         vol = {'block_device_mapping': [
-diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
-index 0f0ea46..6c2a22c 100755
---- a/nova/virt/libvirt/driver.py
-+++ b/nova/virt/libvirt/driver.py
-@@ -3304,19 +3304,32 @@ class LibvirtDriver(driver.ComputeDriver):
-             elif info['backing_file']:
-                 # Creating backing file follows same way as spawning instances.
-                 cache_name = os.path.basename(info['backing_file'])
--                # Remove any size tags which the cache manages
--                cache_name = cache_name.split('_')[0]
- 
-                 image = self.image_backend.image(instance,
-                                                  instance_disk,
-                                                  CONF.libvirt_images_type)
--                image.cache(fetch_func=libvirt_utils.fetch_image,
--                            context=ctxt,
--                            filename=cache_name,
--                            image_id=instance['image_ref'],
--                            user_id=instance['user_id'],
--                            project_id=instance['project_id'],
--                            size=info['virt_disk_size'])
-+                if cache_name.startswith('ephemeral'):
-+                    image.cache(fetch_func=self._create_ephemeral,
-+                                fs_label=cache_name,
-+                                os_type=instance["os_type"],
-+                                filename=cache_name,
-+                                size=info['virt_disk_size'],
-+                                ephemeral_size=instance['ephemeral_gb'])
-+                elif cache_name.startswith('swap'):
-+                    inst_type = instance_types.extract_instance_type(instance)
-+                    swap_mb = inst_type['swap']
-+                    image.cache(fetch_func=self._create_swap,
-+                                filename="swap_%s" % swap_mb,
-+                                size=swap_mb * (1024 ** 2),
-+                                swap_mb=swap_mb)
-+                else:
-+                    image.cache(fetch_func=libvirt_utils.fetch_image,
-+                                context=ctxt,
-+                                filename=cache_name,
-+                                image_id=instance['image_ref'],
-+                                user_id=instance['user_id'],
-+                                project_id=instance['project_id'],
-+                                size=info['virt_disk_size'])
- 
-         # if image has kernel and ramdisk, just download
-         # following normal way.
--- a/components/openstack/nova/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/nova/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,7 +1,5 @@
 install/archive
 library/python/eventlet-26
-library/python/ipython-26
-library/python/netaddr-26
 library/python/oslo.config-26
 library/python/sqlalchemy-26
 library/python/sqlalchemy-migrate-26
@@ -10,4 +8,6 @@
 service/network/evs
 system/core-os
 system/file-system/zfs
+system/library/storage/suri
+system/storage/fc-utilities
 system/storage/iscsi/iscsi-initiator
--- a/components/openstack/swift/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,12 +25,12 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		swift
-COMPONENT_CODENAME=	grizzly
-COMPONENT_VERSION=	1.8.0
+COMPONENT_CODENAME=	havana
+COMPONENT_VERSION=	1.10.0
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:613185473e0c6f11c77bdc6b65efee195303268117afd9024fee82ebde6ed8a3
+    sha256:9a841225c3a00a93a15a160102d3f7116f2f1ba98ebffedfe641747844e14889
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUG_DB=	service/swift
--- a/components/openstack/swift/files/proxy-server.conf	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/proxy-server.conf	Wed Jun 11 17:13:12 2014 -0700
@@ -4,37 +4,59 @@
 # bind_timeout = 30
 # backlog = 4096
 # swift_dir = /etc/swift
-# workers = 1
 # user = swift
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.  Should default to the number of effective cpu
+# cores in the system.  It's worth noting that individual workers will
+# use many eventlet co-routines to service multiple concurrent requests.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
 # Set the following two lines to enable SSL. This is for testing only.
 # cert_file = /etc/swift/proxy.crt
 # key_file = /etc/swift/proxy.key
+#
 # expiring_objects_container_divisor = 86400
+#
 # You can specify default log routing here if you want:
 # log_name = swift
 # log_facility = LOG_LOCAL0
 # log_level = INFO
-# log_headers = False
+# log_headers = false
 # log_address = /dev/log
+#
+# This optional suffix (default is empty) that would be appended to the swift transaction
+# id allows one to easily figure out from which cluster that X-Trans-Id belongs to.
+# This is very useful when one is managing more than one swift cluster.
+# trans_id_suffix =
+#
 # comma separated list of functions to call to setup custom log handlers.
 # functions get passed: conf, name, log_to_console, log_route, fmt, logger,
 # adapted_logger
 # log_custom_handlers =
+#
 # If set, log_udp_host will override log_address
 # log_udp_host =
 # log_udp_port = 514
+#
 # You can enable StatsD logging here:
 # log_statsd_host = localhost
 # log_statsd_port = 8125
 # log_statsd_default_sample_rate = 1.0
 # log_statsd_sample_rate_factor = 1.0
 # log_statsd_metric_prefix =
+#
 # Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
 # cors_allow_origin =
+#
+# client_timeout = 60
 # eventlet_debug = false
 
 [pipeline:main]
-pipeline = catch_errors healthcheck proxy-logging cache slo ratelimit tempauth authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server
+pipeline = catch_errors healthcheck proxy-logging cache bulk slo ratelimit tempauth authtoken keystoneauth container-quotas account-quotas proxy-logging proxy-server
 
 [app:proxy-server]
 use = egg:swift#proxy
@@ -43,61 +65,123 @@
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
 # set log_address = /dev/log
-# log_handoffs = True
+#
+# log_handoffs = true
 # recheck_account_existence = 60
 # recheck_container_existence = 60
 # object_chunk_size = 8192
 # client_chunk_size = 8192
 # node_timeout = 10
-# client_timeout = 60
 # conn_timeout = 0.5
+#
 # How long without an error before a node's error count is reset. This will
 # also be how long before a node is reenabled after suppression is triggered.
 # error_suppression_interval = 60
+#
 # How many errors can accumulate before a node is temporarily ignored.
 # error_suppression_limit = 10
+#
 # If set to 'true' any authorized user may create and delete accounts; if
 # 'false' no one, even authorized, can.
 # allow_account_management = false
+#
 # Set object_post_as_copy = false to turn on fast posts where only the metadata
 # changes are stored anew and the original data file is kept in place. This
 # makes for quicker posts; but since the container metadata isn't updated in
 # this mode, features like container sync won't be able to sync posts.
 # object_post_as_copy = true
+#
 # If set to 'true' authorized accounts that do not yet exist within the Swift
 # cluster will be automatically created.
 account_autocreate = true
+#
 # If set to a positive value, trying to create a container when the account
 # already has at least this maximum containers will result in a 403 Forbidden.
 # Note: This is a soft limit, meaning a user might exceed the cap for
 # recheck_account_existence before the 403s kick in.
 # max_containers_per_account = 0
+#
 # This is a comma separated list of account hashes that ignore the
 # max_containers_per_account cap.
 # max_containers_whitelist =
+#
 # Comma separated list of Host headers to which the proxy will deny requests.
 # deny_host_headers =
+#
 # Prefix used when automatically creating accounts.
 # auto_create_account_prefix = .
+#
 # Depth of the proxy put queue.
 # put_queue_depth = 10
+#
 # Start rate-limiting object segment serving after the Nth segment of a
 # segmented object.
 # rate_limit_after_segment = 10
+#
 # Once segment rate-limiting kicks in for an object, limit segments served
 # to N per second.
 # rate_limit_segments_per_sec = 1
-# Storage nodes can be chosen at random (shuffle) or by using timing
-# measurements. Using timing measurements may allow for lower overall latency.
-# The valid values for sorting_method are "shuffle" and "timing"
+#
+# Storage nodes can be chosen at random (shuffle), by using timing
+# measurements (timing), or by using an explicit match (affinity).
+# Using timing measurements may allow for lower overall latency, while
+# using affinity allows for finer control. In both the timing and
+# affinity cases, equally-sorting nodes are still randomly chosen to
+# spread load.
+# The valid values for sorting_method are "affinity", "shuffle", and "timing".
 # sorting_method = shuffle
-# If the timing sorting_method is used, the timings will only be valid for
+#
+# If the "timing" sorting_method is used, the timings will only be valid for
 # the number of seconds configured by timing_expiry.
 # timing_expiry = 300
+#
 # If set to false will treat objects with X-Static-Large-Object header set
 # as a regular object on GETs, i.e. will return that object's contents. Should
 # be set to false if slo is not used in pipeline.
 # allow_static_large_object = true
+#
+# The maximum time (seconds) that a large object connection is allowed to last.
+# max_large_object_get_time = 86400
+#
+# Set to the number of nodes to contact for a normal request. You can use
+# '* replicas' at the end to have it use the number given times the number of
+# replicas for the ring being used for the request.
+# request_node_count = 2 * replicas
+#
+# Which backend servers to prefer on reads. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. The value after the equals is
+# the priority; lower numbers are higher priority.
+#
+# Example: first read from region 1 zone 1, then region 1 zone 2, then
+# anything in region 2, then everything else:
+# read_affinity = r1z1=100, r1z2=200, r2=300
+# Default is empty, meaning no preference.
+# read_affinity =
+#
+# Which backend servers to prefer on writes. Format is r<N> for region
+# N or r<N>z<M> for region N, zone M. If this is set, then when
+# handling an object PUT request, some number (see setting
+# write_affinity_node_count) of local backend servers will be tried
+# before any nonlocal ones.
+#
+# Example: try to write to regions 1 and 2 before writing to any other
+# nodes:
+# write_affinity = r1, r2
+# Default is empty, meaning no preference.
+# write_affinity =
+#
+# The number of local (as governed by the write_affinity setting)
+# nodes to attempt to contact first, before any non-local ones. You
+# can use '* replicas' at the end to have it use the number given
+# times the number of replicas for the ring being used for the
+# request.
+# write_affinity_node_count = 2 * replicas
+#
+# These are the headers whose values will only be shown to swift_owners. The
+# exact definition of a swift_owner is up to the auth system in use, but
+# usually indicates administrative responsibilities.
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2
+
 
 [filter:tempauth]
 use = egg:swift#tempauth
@@ -105,26 +189,31 @@
 # set log_name = tempauth
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # The reseller prefix will verify a token begins with this prefix before even
 # attempting to validate it. Also, with authorization, only Swift storage
 # accounts with this prefix will be authorized by this middleware. Useful if
 # multiple auth systems are in use for one Swift cluster.
 # reseller_prefix = AUTH
+#
 # The auth prefix will cause requests beginning with this prefix to be routed
 # to the auth subsystem, for granting tokens, etc.
 # auth_prefix = /auth/
 # token_life = 86400
+#
 # This allows middleware higher in the WSGI pipeline to override auth
 # processing, useful for middleware such as tempurl and formpost. If you know
 # you're not going to use such middleware and you want a bit of extra security,
 # you can set this to false.
 # allow_overrides = true
+#
 # This specifies what scheme to return with storage urls:
 # http, https, or default (chooses based on what the server is running as)
 # This can be useful with an SSL load balancer in front of a non-SSL server.
 # storage_url_scheme = default
+#
 # Lastly, you need to list all the accounts/users you want here. The format is:
 #   user_<account>_<user> = <key> [group] [group] [...] [storage_url]
 # or if you want underscores in <account> or <user>, you can base64 encode them
@@ -152,14 +241,12 @@
 #
 # You'll need to have as well the keystoneauth middleware enabled
 # and have it in your main pipeline so instead of having tempauth in
-# there you can change it to: authtoken keystone
+# there you can change it to: authtoken keystoneauth
 #
 [filter:authtoken]
 paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
 auth_uri = http://127.0.0.1:5000/
+identity_uri = http://127.0.0.1:35357
 admin_tenant_name = %SERVICE_TENANT_NAME%
 admin_user = %SERVICE_USER%
 admin_password = %SERVICE_PASSWORD%
@@ -173,6 +260,8 @@
 # Operator roles is the role which user would be allowed to manage a
 # tenant and be able to create container or give ACL to others.
 # operator_roles = admin, swiftoperator
+# The reseller admin role has the ability to create and delete accounts
+# reseller_admin_role = ResellerAdmin
 
 [filter:healthcheck]
 use = egg:swift#healthcheck
@@ -189,8 +278,9 @@
 # set log_name = cache
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # If not set here, the value for memcache_servers will be read from
 # memcache.conf (see memcache.conf-sample) or lacking that file, it will
 # default to the value below. You can specify multiple servers separated with
@@ -215,18 +305,23 @@
 # set log_name = ratelimit
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # clock_accuracy should represent how accurate the proxy servers' system clocks
 # are with each other. 1000 means that all the proxies' clock are accurate to
 # each other within 1 millisecond.  No ratelimit should be higher than the
 # clock accuracy.
 # clock_accuracy = 1000
+#
 # max_sleep_time_seconds = 60
+#
 # log_sleep_time_seconds of 0 means disabled
 # log_sleep_time_seconds = 0
+#
 # allows for slow rates (e.g. running up to 5 sec's behind) to catch up.
 # rate_buffer_seconds = 5
+#
 # account_ratelimit of 0 means disabled
 # account_ratelimit = 0
 
@@ -235,21 +330,28 @@
 # account_blacklist = c,d
 
 # with container_limit_x = r
-# for containers of size x limit requests per second to r.  The container
+# for containers of size x limit write requests per second to r.  The container
 # rate will be linearly interpolated from the values given. With the values
 # below, a container of size 5 will get a rate of 75.
 # container_ratelimit_0 = 100
 # container_ratelimit_10 = 50
 # container_ratelimit_50 = 20
 
+# Similarly to the above container-level write limits, the following will limit
+# container GET (listing) requests.
+# container_listing_ratelimit_0 = 100
+# container_listing_ratelimit_10 = 50
+# container_listing_ratelimit_50 = 20
+
 [filter:domain_remap]
 use = egg:swift#domain_remap
 # You can override the default log routing for this filter here:
 # set log_name = domain_remap
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # storage_domain = example.com
 # path_root = v1
 # reseller_prefixes = AUTH
@@ -260,7 +362,7 @@
 # set log_name = catch_errors
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
 
 [filter:cname_lookup]
@@ -270,20 +372,21 @@
 # set log_name = cname_lookup
 # set log_facility = LOG_LOCAL0
 # set log_level = INFO
-# set log_headers = False
+# set log_headers = false
 # set log_address = /dev/log
+#
 # storage_domain = example.com
 # lookup_depth = 1
 
 # Note: Put staticweb just after your auth filter(s) in the pipeline
 [filter:staticweb]
 use = egg:swift#staticweb
-# Seconds to cache container x-container-meta-web-* header values.
-# cache_timeout = 300
 
 # Note: Put tempurl just before your auth filter(s) in the pipeline
 [filter:tempurl]
 use = egg:swift#tempurl
+# The methods allowed with Temp URLs.
+# methods = GET HEAD PUT
 #
 # The headers to remove from incoming requests. Simply a whitespace delimited
 # list of header names and names can optionally end with '*' to indicate a
@@ -329,19 +432,35 @@
 # access_log_facility = LOG_LOCAL0
 # access_log_level = INFO
 # access_log_address = /dev/log
+#
 # If set, access_log_udp_host will override access_log_address
 # access_log_udp_host =
 # access_log_udp_port = 514
+#
 # You can use log_statsd_* from [DEFAULT] or override them here:
 # access_log_statsd_host = localhost
 # access_log_statsd_port = 8125
 # access_log_statsd_default_sample_rate = 1.0
 # access_log_statsd_sample_rate_factor = 1.0
 # access_log_statsd_metric_prefix =
-# access_log_headers = False
+# access_log_headers = false
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters is unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 8192
+#
 # What HTTP methods are allowed for StatsD logging (comma-sep); request methods
 # not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
 # log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
 # Note: The double proxy-logging in the pipeline is not a mistake. The
 # left-most proxy-logging is there to log requests that were handled in
 # middleware and never made it through to the right-most middleware (and
@@ -352,8 +471,9 @@
 [filter:bulk]
 use = egg:swift#bulk
 # max_containers_per_extraction = 10000
-# max_failed_files = 1000
-# max_deletes_per_request = 1000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# yield_frequency = 60
 
 # Note: Put after auth in the pipeline.
 [filter:container-quotas]
--- a/components/openstack/swift/files/swift-account-auditor.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-account-auditor.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-account-auditor %m">
       <method_context>
--- a/components/openstack/swift/files/swift-account-reaper.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-account-reaper.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-account-reaper %m">
       <method_context>
--- a/components/openstack/swift/files/swift-account-replicator.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-account-replicator.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-account-replicator %m">
       <method_context>
--- a/components/openstack/swift/files/swift-account-server.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-account-server.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -35,6 +35,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-account-server %m">
       <method_context>
--- a/components/openstack/swift/files/swift-container-auditor.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-container-auditor.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-container-auditor %m">
       <method_context>
--- a/components/openstack/swift/files/swift-container-replicator.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-container-replicator.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-container-replicator %m">
       <method_context>
--- a/components/openstack/swift/files/swift-container-server.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-container-server.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -35,6 +35,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-container-server %m">
       <method_context>
--- a/components/openstack/swift/files/swift-container-sync.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-container-sync.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-container-sync %m">
       <method_context>
--- a/components/openstack/swift/files/swift-container-updater.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-container-updater.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-container-updater %m">
       <method_context>
--- a/components/openstack/swift/files/swift-object-auditor.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-object-auditor.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-object-auditor %m">
       <method_context>
--- a/components/openstack/swift/files/swift-object-expirer.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-object-expirer.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-object-expirer %m">
       <method_context>
--- a/components/openstack/swift/files/swift-object-replicator.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-object-replicator.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-object-replicator %m">
       <method_context>
--- a/components/openstack/swift/files/swift-object-server.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-object-server.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -35,6 +35,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-object-server %m">
       <method_context>
--- a/components/openstack/swift/files/swift-object-updater.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-object-updater.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-object-updater %m">
       <method_context>
--- a/components/openstack/swift/files/swift-proxy-server.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-proxy-server.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -35,6 +35,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-proxy-server %m">
       <method_context>
--- a/components/openstack/swift/files/swift-replicator-rsync	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-replicator-rsync	Wed Jun 11 17:13:12 2014 -0700
@@ -25,8 +25,9 @@
 def start():
     cfgfile = "/etc/swift/rsyncd.conf"
     if not os.path.isfile(cfgfile):
-        smf_include.smf_method_exit(smf_include.SMF_EXIT_ERR_CONFIG,
-          "missing_config", "Missing configuration file")
+        smf_include.smf_method_exit(
+            smf_include.SMF_EXIT_ERR_CONFIG, "missing_config",
+            "Missing configuration file")
 
     # This is the default delivered in /etc/swift/rsyncd.conf
     try:
@@ -40,16 +41,18 @@
         proc = subprocess.Popen(cmdline)
     except OSError as err:
         print >> sys.stderr, "Error executing rsync: %s" % err
-        smf_include.smf_method_exit(smf_include.SMF_EXIT_ERR_FATAL,
-          "exec_error", "Error executing rsync: %s" % err)
+        smf_include.smf_method_exit(
+            smf_include.SMF_EXIT_ERR_FATAL, "exec_error",
+            "Error executing rsync: %s" % err)
 
     ret = proc.wait()
     if ret != 0:
         print >> sys.stderr, "rsync daemon failed to start (see message above)"
         print >> sys.stderr, "commandline:", " ".join(cmdline)
         print >> sys.stderr, "exit code:", ret
-        smf_include.smf_method_exit(smf_include.SMF_EXIT_ERR_FATAL,
-          "exec_fail", "rsync daemon failed to start (see service log)")
+        smf_include.smf_method_exit(
+            smf_include.SMF_EXIT_ERR_FATAL, "exec_fail",
+            "rsync daemon failed to start (see service log)")
 
     return smf_include.SMF_EXIT_OK
 
--- a/components/openstack/swift/files/swift-replicator-rsync.xml	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-replicator-rsync.xml	Wed Jun 11 17:13:12 2014 -0700
@@ -30,6 +30,13 @@
       <service_fmri value='svc:/milestone/multi-user:default' />
     </dependency>
 
+    <dependency name='ntp' grouping='optional_all' restart_on='none'
+      type='service'>
+      <service_fmri value='svc:/network/ntp'/>
+    </dependency>
+
+    <logfile_attributes permissions='600'/>
+
     <exec_method timeout_seconds="60" type="method" name="start"
       exec="/lib/svc/method/swift-replicator-rsync %m">
       <method_context>
--- a/components/openstack/swift/files/swift-smf-method	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift-smf-method	Wed Jun 11 17:13:12 2014 -0700
@@ -32,6 +32,7 @@
     "proxy-server": ["account", "container", "object"]
 }
 
+
 def start():
     # All the Swift services do essentially the same thing, so there's no need
     # to have different method executables.  Just look at the FMRI and run the
@@ -47,8 +48,9 @@
         cfgfile = "/etc/swift/%s-server.conf" % exepath.split("-")[1]
     if not os.path.isfile(cfgfile):
         print >> sys.stderr, "Missing configuration file"
-        smf_include.smf_method_exit(smf_include.SMF_EXIT_ERR_CONFIG,
-          "missing_config", "Missing configuration file")
+        smf_include.smf_method_exit(
+            smf_include.SMF_EXIT_ERR_CONFIG, "missing_config",
+            "Missing configuration file")
 
     missing_rings = []
     for ring in rings.get(exepath.split("-", 1)[1], ()):
@@ -57,9 +59,9 @@
             missing_rings.append(ringfile)
     if missing_rings:
         print >> sys.stderr, "Missing ring(s): " + ", ".join(missing_rings)
-        smf_include.smf_method_exit(smf_include.SMF_EXIT_ERR_CONFIG,
-          "missing_ring",
-          "Missing ring(s): " + ", ".join(missing_rings))
+        smf_include.smf_method_exit(
+            smf_include.SMF_EXIT_ERR_CONFIG, "missing_ring",
+            "Missing ring(s): " + ", ".join(missing_rings))
 
     # This is the default recon_cache_path (from the config files) as well as
     # the default run_dir (from the code).
--- a/components/openstack/swift/files/swift.prof_attr	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/files/swift.prof_attr	Wed Jun 11 17:13:12 2014 -0700
@@ -1,18 +1,18 @@
 OpenStack Object Storage Management:RO::\
 Manage OpenStack Swift:\
-auths=solaris.admin.edit/etc/swift/account-server.conf,
-solaris.admin.edit/etc/swift/container-server.conf,
-solaris.admin.edit/etc/swift/dispersion.conf,
-solaris.admin.edit/etc/swift/memcache.conf,
-solaris.admin.edit/etc/swift/mime.types,
-solaris.admin.edit/etc/swift/object-expirer.conf,
-solaris.admin.edit/etc/swift/object-server.conf,
-solaris.admin.edit/etc/swift/proxy-server.conf,
-solaris.admin.edit/etc/swift/rsyncd.conf,
-solaris.admin.edit/etc/swift/swift-bench.conf,
-solaris.admin.edit/etc/swift/swift.conf,
+auths=solaris.admin.edit/etc/swift/account-server.conf,\
+solaris.admin.edit/etc/swift/container-server.conf,\
+solaris.admin.edit/etc/swift/dispersion.conf,\
+solaris.admin.edit/etc/swift/memcache.conf,\
+solaris.admin.edit/etc/swift/mime.types,\
+solaris.admin.edit/etc/swift/object-expirer.conf,\
+solaris.admin.edit/etc/swift/object-server.conf,\
+solaris.admin.edit/etc/swift/proxy-server.conf,\
+solaris.admin.edit/etc/swift/rsyncd.conf,\
+solaris.admin.edit/etc/swift/swift-bench.conf,\
+solaris.admin.edit/etc/swift/swift.conf,\
 solaris.smf.manage.swift,\
-solaris.smf.value.swift
+solaris.smf.value.swift;\
+defaultpriv={file_dac_read}\:/var/svc/log/application-openstack-*
 
 OpenStack Management:RO:::profiles=OpenStack Object Storage Management
-
--- a/components/openstack/swift/patches/01-CVE-2013-4155.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,416 +0,0 @@
-commit 1f4ec235cdfd8c868f2d6458532f9dc32c00b8ca
-Author: Peter Portante <[email protected]>
-Date:   Fri Jul 26 15:03:34 2013 -0400
-
-    Fix handling of DELETE obj reqs with old timestamp
-    
-    The DELETE object REST API was creating tombstone files with old
-    timestamps, potentially filling up the disk, as well as sending
-    container updates.
-    
-    Here we now make DELETEs with a request timestamp return a 409 (HTTP
-    Conflict) if a data file exists with a newer timestamp, only creating
-    tombstones if they have a newer timestamp.
-    
-    The key fix is to actually read the timestamp metadata from an
-    existing tombstone file (thanks to Pete Zaitcev for catching this),
-    and then only create tombstone files with newer timestamps.
-    
-    We also prevent PUT and POST operations using old timestamps as well.
-    
-    Change-Id: I631957029d17c6578bca5779367df5144ba01fc9
-    Signed-off-by: Peter Portante <[email protected]>
-
---- a/swift/obj/server.py
-+++ b/swift/obj/server.py
-@@ -46,7 +46,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
-     HTTPInternalServerError, HTTPNoContent, HTTPNotFound, HTTPNotModified, \
-     HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
-     HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, UTC, \
--    HTTPInsufficientStorage, multi_range_iterator
-+    HTTPInsufficientStorage, multi_range_iterator, HTTPConflict
- 
- 
- DATADIR = 'objects'
-@@ -121,7 +121,6 @@ class DiskFile(object):
-         self.tmppath = None
-         self.logger = logger
-         self.metadata = {}
--        self.meta_file = None
-         self.data_file = None
-         self.fp = None
-         self.iter_etag = None
-@@ -133,15 +132,18 @@ class DiskFile(object):
-         if not os.path.exists(self.datadir):
-             return
-         files = sorted(os.listdir(self.datadir), reverse=True)
--        for file in files:
--            if file.endswith('.ts'):
--                self.data_file = self.meta_file = None
--                self.metadata = {'deleted': True}
--                return
--            if file.endswith('.meta') and not self.meta_file:
--                self.meta_file = os.path.join(self.datadir, file)
--            if file.endswith('.data') and not self.data_file:
--                self.data_file = os.path.join(self.datadir, file)
-+        meta_file = None
-+        for afile in files:
-+            if afile.endswith('.ts'):
-+                self.data_file = None
-+                with open(os.path.join(self.datadir, afile)) as mfp:
-+                    self.metadata = read_metadata(mfp)
-+                self.metadata['deleted'] = True
-+                break
-+            if afile.endswith('.meta') and not meta_file:
-+                meta_file = os.path.join(self.datadir, afile)
-+            if afile.endswith('.data') and not self.data_file:
-+                self.data_file = os.path.join(self.datadir, afile)
-                 break
-         if not self.data_file:
-             return
-@@ -149,8 +151,8 @@ class DiskFile(object):
-         self.metadata = read_metadata(self.fp)
-         if not keep_data_fp:
-             self.close(verify_file=False)
--        if self.meta_file:
--            with open(self.meta_file) as mfp:
-+        if meta_file:
-+            with open(meta_file) as mfp:
-                 for key in self.metadata.keys():
-                     if key.lower() not in DISALLOWED_HEADERS:
-                         del self.metadata[key]
-@@ -594,6 +596,9 @@ class ObjectController(object):
-         except (DiskFileError, DiskFileNotExist):
-             file.quarantine()
-             return HTTPNotFound(request=request)
-+        orig_timestamp = file.metadata.get('X-Timestamp', '0')
-+        if orig_timestamp >= request.headers['x-timestamp']:
-+            return HTTPConflict(request=request)
-         metadata = {'X-Timestamp': request.headers['x-timestamp']}
-         metadata.update(val for val in request.headers.iteritems()
-                         if val[0].lower().startswith('x-object-meta-'))
-@@ -639,6 +644,8 @@ class ObjectController(object):
-         file = DiskFile(self.devices, device, partition, account, container,
-                         obj, self.logger, disk_chunk_size=self.disk_chunk_size)
-         orig_timestamp = file.metadata.get('X-Timestamp')
-+        if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
-+            return HTTPConflict(request=request)
-         upload_expiration = time.time() + self.max_upload_time
-         etag = md5()
-         upload_size = 0
-@@ -863,23 +870,26 @@ class ObjectController(object):
-             return HTTPPreconditionFailed(
-                 request=request,
-                 body='X-If-Delete-At and X-Delete-At do not match')
--        orig_timestamp = file.metadata.get('X-Timestamp')
--        if file.is_deleted() or file.is_expired():
--            response_class = HTTPNotFound
--        metadata = {
--            'X-Timestamp': request.headers['X-Timestamp'], 'deleted': True,
--        }
-         old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
-         if old_delete_at:
-             self.delete_at_update('DELETE', old_delete_at, account,
-                                   container, obj, request.headers, device)
--        file.put_metadata(metadata, tombstone=True)
--        file.unlinkold(metadata['X-Timestamp'])
--        if not orig_timestamp or \
--                orig_timestamp < request.headers['x-timestamp']:
-+        orig_timestamp = file.metadata.get('X-Timestamp', 0)
-+        req_timestamp = request.headers['X-Timestamp']
-+        if file.is_deleted() or file.is_expired():
-+            response_class = HTTPNotFound
-+        else:
-+            if orig_timestamp < req_timestamp:
-+                response_class = HTTPNoContent
-+            else:
-+                response_class = HTTPConflict
-+        if orig_timestamp < req_timestamp:
-+            file.put_metadata({'X-Timestamp': req_timestamp},
-+                              tombstone=True)
-+            file.unlinkold(req_timestamp)
-             self.container_update(
-                 'DELETE', account, container, obj, request.headers,
--                {'x-timestamp': metadata['X-Timestamp'],
-+                {'x-timestamp': req_timestamp,
-                  'x-trans-id': request.headers.get('x-trans-id', '-')},
-                 device)
-         resp = response_class(request=request)
-
-
-diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
-
-index 8ee266b..b354b97 100755 (executable)
-
-
---- a/test/unit/obj/test_server.py
-+++ b/test/unit/obj/test_server.py
-@@ -509,6 +509,41 @@ class TestObjectController(unittest.TestCase):
-                      "X-Object-Meta-3" in resp.headers)
-         self.assertEquals(resp.headers['Content-Type'], 'application/x-test')
- 
-+    def test_POST_old_timestamp(self):
-+        ts = time()
-+        timestamp = normalize_timestamp(ts)
-+        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-+                            headers={'X-Timestamp': timestamp,
-+                                     'Content-Type': 'application/x-test',
-+                                     'X-Object-Meta-1': 'One',
-+                                     'X-Object-Meta-Two': 'Two'})
-+        req.body = 'VERIFY'
-+        resp = self.object_controller.PUT(req)
-+        self.assertEquals(resp.status_int, 201)
-+
-+        # Same timestamp should result in 409
-+        req = Request.blank('/sda1/p/a/c/o',
-+                            environ={'REQUEST_METHOD': 'POST'},
-+                            headers={'X-Timestamp': timestamp,
-+                                     'X-Object-Meta-3': 'Three',
-+                                     'X-Object-Meta-4': 'Four',
-+                                     'Content-Encoding': 'gzip',
-+                                     'Content-Type': 'application/x-test'})
-+        resp = self.object_controller.POST(req)
-+        self.assertEquals(resp.status_int, 409)
-+
-+        # Earlier timestamp should result in 409
-+        timestamp = normalize_timestamp(ts - 1)
-+        req = Request.blank('/sda1/p/a/c/o',
-+                            environ={'REQUEST_METHOD': 'POST'},
-+                            headers={'X-Timestamp': timestamp,
-+                                     'X-Object-Meta-5': 'Five',
-+                                     'X-Object-Meta-6': 'Six',
-+                                     'Content-Encoding': 'gzip',
-+                                     'Content-Type': 'application/x-test'})
-+        resp = self.object_controller.POST(req)
-+        self.assertEquals(resp.status_int, 409)
-+
-     def test_POST_not_exist(self):
-         timestamp = normalize_timestamp(time())
-         req = Request.blank('/sda1/p/a/c/fail',
-@@ -555,11 +590,15 @@ class TestObjectController(unittest.TestCase):
- 
-         old_http_connect = object_server.http_connect
-         try:
--            timestamp = normalize_timestamp(time())
-+            ts = time()
-+            timestamp = normalize_timestamp(ts)
-             req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD':
-                 'POST'}, headers={'X-Timestamp': timestamp, 'Content-Type':
-                 'text/plain', 'Content-Length': '0'})
-             resp = self.object_controller.PUT(req)
-+            self.assertEquals(resp.status_int, 201)
-+
-+            timestamp = normalize_timestamp(ts + 1)
-             req = Request.blank('/sda1/p/a/c/o',
-                     environ={'REQUEST_METHOD': 'POST'},
-                     headers={'X-Timestamp': timestamp,
-@@ -571,6 +610,8 @@ class TestObjectController(unittest.TestCase):
-             object_server.http_connect = mock_http_connect(202)
-             resp = self.object_controller.POST(req)
-             self.assertEquals(resp.status_int, 202)
-+
-+            timestamp = normalize_timestamp(ts + 2)
-             req = Request.blank('/sda1/p/a/c/o',
-                     environ={'REQUEST_METHOD': 'POST'},
-                     headers={'X-Timestamp': timestamp,
-@@ -582,6 +623,8 @@ class TestObjectController(unittest.TestCase):
-             object_server.http_connect = mock_http_connect(202, with_exc=True)
-             resp = self.object_controller.POST(req)
-             self.assertEquals(resp.status_int, 202)
-+
-+            timestamp = normalize_timestamp(ts + 3)
-             req = Request.blank('/sda1/p/a/c/o',
-                     environ={'REQUEST_METHOD': 'POST'},
-                     headers={'X-Timestamp': timestamp,
-@@ -718,6 +761,32 @@ class TestObjectController(unittest.TestCase):
-                            'name': '/a/c/o',
-                            'Content-Encoding': 'gzip'})
- 
-+    def test_PUT_old_timestamp(self):
-+        ts = time()
-+        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-+                headers={'X-Timestamp': normalize_timestamp(ts),
-+                         'Content-Length': '6',
-+                         'Content-Type': 'application/octet-stream'})
-+        req.body = 'VERIFY'
-+        resp = self.object_controller.PUT(req)
-+        self.assertEquals(resp.status_int, 201)
-+
-+        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-+                            headers={'X-Timestamp': normalize_timestamp(ts),
-+                                     'Content-Type': 'text/plain',
-+                                     'Content-Encoding': 'gzip'})
-+        req.body = 'VERIFY TWO'
-+        resp = self.object_controller.PUT(req)
-+        self.assertEquals(resp.status_int, 409)
-+
-+        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-+                            headers={'X-Timestamp': normalize_timestamp(ts - 1),
-+                                     'Content-Type': 'text/plain',
-+                                     'Content-Encoding': 'gzip'})
-+        req.body = 'VERIFY THREE'
-+        resp = self.object_controller.PUT(req)
-+        self.assertEquals(resp.status_int, 409)
-+
-     def test_PUT_no_etag(self):
-         req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-                            headers={'X-Timestamp': normalize_timestamp(time()),
-@@ -1306,12 +1375,32 @@ class TestObjectController(unittest.TestCase):
-         self.assertEquals(resp.status_int, 400)
-         # self.assertRaises(KeyError, self.object_controller.DELETE, req)
- 
-+        # The following should have created a tombstone file
-         timestamp = normalize_timestamp(time())
-         req = Request.blank('/sda1/p/a/c/o',
-                             environ={'REQUEST_METHOD': 'DELETE'},
-                             headers={'X-Timestamp': timestamp})
-         resp = self.object_controller.DELETE(req)
-         self.assertEquals(resp.status_int, 404)
-+        objfile = os.path.join(self.testdir, 'sda1',
-+            storage_directory(object_server.DATADIR, 'p',
-+                              hash_path('a', 'c', 'o')),
-+            timestamp + '.ts')
-+        self.assert_(os.path.isfile(objfile))
-+
-+        # The following should *not* have created a tombstone file.
-+        timestamp = normalize_timestamp(float(timestamp) - 1)
-+        req = Request.blank('/sda1/p/a/c/o',
-+                            environ={'REQUEST_METHOD': 'DELETE'},
-+                            headers={'X-Timestamp': timestamp})
-+        resp = self.object_controller.DELETE(req)
-+        self.assertEquals(resp.status_int, 404)
-+        objfile = os.path.join(self.testdir, 'sda1',
-+            storage_directory(object_server.DATADIR, 'p',
-+                              hash_path('a', 'c', 'o')),
-+            timestamp + '.ts')
-+        self.assertFalse(os.path.exists(objfile))
-+        self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
- 
-         sleep(.00001)
-         timestamp = normalize_timestamp(time())
-@@ -1325,17 +1414,19 @@ class TestObjectController(unittest.TestCase):
-         resp = self.object_controller.PUT(req)
-         self.assertEquals(resp.status_int, 201)
- 
-+        # The following should *not* have created a tombstone file.
-         timestamp = normalize_timestamp(float(timestamp) - 1)
-         req = Request.blank('/sda1/p/a/c/o',
-                             environ={'REQUEST_METHOD': 'DELETE'},
-                             headers={'X-Timestamp': timestamp})
-         resp = self.object_controller.DELETE(req)
--        self.assertEquals(resp.status_int, 204)
-+        self.assertEquals(resp.status_int, 409)
-         objfile = os.path.join(self.testdir, 'sda1',
-             storage_directory(object_server.DATADIR, 'p',
-                               hash_path('a', 'c', 'o')),
-             timestamp + '.ts')
--        self.assert_(os.path.isfile(objfile))
-+        self.assertFalse(os.path.exists(objfile))
-+        self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
- 
-         sleep(.00001)
-         timestamp = normalize_timestamp(time())
-@@ -1350,6 +1441,103 @@ class TestObjectController(unittest.TestCase):
-             timestamp + '.ts')
-         self.assert_(os.path.isfile(objfile))
- 
-+    def test_DELETE_container_updates(self):
-+        # Test swift.object_server.ObjectController.DELETE and container
-+        # updates, making sure container update is called in the correct
-+        # state.
-+        timestamp = normalize_timestamp(time())
-+        req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
-+                            headers={
-+                                'X-Timestamp': timestamp,
-+                                'Content-Type': 'application/octet-stream',
-+                                'Content-Length': '4',
-+                                })
-+        req.body = 'test'
-+        resp = self.object_controller.PUT(req)
-+        self.assertEquals(resp.status_int, 201)
-+
-+        calls_made = [0]
-+
-+        def our_container_update(*args, **kwargs):
-+            calls_made[0] += 1
-+
-+        orig_cu = self.object_controller.container_update
-+        self.object_controller.container_update = our_container_update
-+        try:
-+            # The following request should return 409 (HTTP Conflict). A
-+            # tombstone file should not have been created with this timestamp.
-+            timestamp = normalize_timestamp(float(timestamp) - 1)
-+            req = Request.blank('/sda1/p/a/c/o',
-+                                environ={'REQUEST_METHOD': 'DELETE'},
-+                                headers={'X-Timestamp': timestamp})
-+            resp = self.object_controller.DELETE(req)
-+            self.assertEquals(resp.status_int, 409)
-+            objfile = os.path.join(self.testdir, 'sda1',
-+                storage_directory(object_server.DATADIR, 'p',
-+                                  hash_path('a', 'c', 'o')),
-+                timestamp + '.ts')
-+            self.assertFalse(os.path.isfile(objfile))
-+            self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
-+            self.assertEquals(0, calls_made[0])
-+
-+            # The following request should return 204, and the object should
-+            # be truly deleted (container update is performed) because this
-+            # timestamp is newer. A tombstone file should have been created
-+            # with this timestamp.
-+            sleep(.00001)
-+            timestamp = normalize_timestamp(time())
-+            req = Request.blank('/sda1/p/a/c/o',
-+                                environ={'REQUEST_METHOD': 'DELETE'},
-+                                headers={'X-Timestamp': timestamp})
-+            resp = self.object_controller.DELETE(req)
-+            self.assertEquals(resp.status_int, 204)
-+            objfile = os.path.join(self.testdir, 'sda1',
-+                storage_directory(object_server.DATADIR, 'p',
-+                                  hash_path('a', 'c', 'o')),
-+                timestamp + '.ts')
-+            self.assert_(os.path.isfile(objfile))
-+            self.assertEquals(1, calls_made[0])
-+            self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
-+
-+            # The following request should return a 404, as the object should
-+            # already have been deleted, but it should have also performed a
-+            # container update because the timestamp is newer, and a tombstone
-+            # file should also exist with this timestamp.
-+            sleep(.00001)
-+            timestamp = normalize_timestamp(time())
-+            req = Request.blank('/sda1/p/a/c/o',
-+                                environ={'REQUEST_METHOD': 'DELETE'},
-+                                headers={'X-Timestamp': timestamp})
-+            resp = self.object_controller.DELETE(req)
-+            self.assertEquals(resp.status_int, 404)
-+            objfile = os.path.join(self.testdir, 'sda1',
-+                storage_directory(object_server.DATADIR, 'p',
-+                                  hash_path('a', 'c', 'o')),
-+                timestamp + '.ts')
-+            self.assert_(os.path.isfile(objfile))
-+            self.assertEquals(2, calls_made[0])
-+            self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
-+
-+            # The following request should return a 404, as the object should
-+            # already have been deleted, and it should not have performed a
-+            # container update because the timestamp is older, or created a
-+            # tombstone file with this timestamp.
-+            timestamp = normalize_timestamp(float(timestamp) - 1)
-+            req = Request.blank('/sda1/p/a/c/o',
-+                                environ={'REQUEST_METHOD': 'DELETE'},
-+                                headers={'X-Timestamp': timestamp})
-+            resp = self.object_controller.DELETE(req)
-+            self.assertEquals(resp.status_int, 404)
-+            objfile = os.path.join(self.testdir, 'sda1',
-+                storage_directory(object_server.DATADIR, 'p',
-+                                  hash_path('a', 'c', 'o')),
-+                timestamp + '.ts')
-+            self.assertFalse(os.path.isfile(objfile))
-+            self.assertEquals(2, calls_made[0])
-+            self.assertEquals(len(os.listdir(os.path.dirname(objfile))), 1)
-+        finally:
-+            self.object_controller.container_update = orig_cu
-+
-     def test_call(self):
-         """ Test swift.object_server.ObjectController.__call__ """
-         inbuf = StringIO()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/swift/patches/01-CVE-2014-0006.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,61 @@
+This proposed upstream patch addresses CVE-2014-0006 and is tracked
+under Launchpad bug 1265665. Although it's been addressed in 1.12.0,
+the patch below is still not yet released for 1.10.0.
+
+commit b2c61375b3255486adb2900922a894dc7dad3c6d
+Author: Samuel Merritt <[email protected]>
+Date:   Thu Jan 16 13:44:23 2014 +0100
+
+    Use constant time comparison in tempURL
+    
+    Use constant time comparison when evaluating tempURL to avoid timing
+    attacks (CVE-2014-0006). This is the havana backport of the master
+    patch.
+    
+    Fixes bug 1265665
+    
+    Change-Id: I11e4ad83cc4077e52adf54a0bd0f9749294b2a48
+
+diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py
+index ffc1431..ae2f4a1 100644
+--- a/swift/common/middleware/tempurl.py
++++ b/swift/common/middleware/tempurl.py
+@@ -98,7 +98,7 @@ from urlparse import parse_qs
+ 
+ from swift.proxy.controllers.base import get_account_info
+ from swift.common.swob import HeaderKeyDict
+-from swift.common.utils import split_path
++from swift.common.utils import split_path, streq_const_time
+ 
+ 
+ #: Default headers to remove from incoming requests. Simply a whitespace
+@@ -267,17 +267,20 @@ class TempURL(object):
+         if not keys:
+             return self._invalid(env, start_response)
+         if env['REQUEST_METHOD'] == 'HEAD':
+-            hmac_vals = self._get_hmacs(env, temp_url_expires, keys,
+-                                        request_method='GET')
+-            if temp_url_sig not in hmac_vals:
+-                hmac_vals = self._get_hmacs(env, temp_url_expires, keys,
+-                                            request_method='PUT')
+-                if temp_url_sig not in hmac_vals:
+-                    return self._invalid(env, start_response)
++            hmac_vals = (self._get_hmacs(env, temp_url_expires, keys,
++                                         request_method='GET') +
++                         self._get_hmacs(env, temp_url_expires, keys,
++                                         request_method='PUT'))
+         else:
+             hmac_vals = self._get_hmacs(env, temp_url_expires, keys)
+-            if temp_url_sig not in hmac_vals:
+-                return self._invalid(env, start_response)
++
++        # While it's true that any() will short-circuit, this doesn't affect
++        # the timing-attack resistance since the only way this will
++        # short-circuit is when a valid signature is passed in.
++        is_valid_hmac = any(streq_const_time(temp_url_sig, h)
++                            for h in hmac_vals)
++        if not is_valid_hmac:
++            return self._invalid(env, start_response)
+         self._clean_incoming_headers(env)
+         env['swift.authorize'] = lambda req: None
+         env['swift.authorize_override'] = True
--- a/components/openstack/swift/patches/02-CVE-2014-0006.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-From c0eed792a22865b280f99cbb79076fa7ad19fcbb Mon Sep 17 00:00:00 2001
-From: Samuel Merritt <[email protected]>
-Date: Thu, 16 Jan 2014 12:45:52 +0000
-Subject: Use constant time comparison in tempURL
-
-Use constant time comparison when evaluating tempURL to avoid timing
-attacks (CVE-2014-0006). This is the grizzly backport of the master
-patch.
-
-Fixes bug 1265665
-
-Change-Id: I11e4ad83cc4077e52adf54a0bd0f9749294b2a48
----
-diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py
-index 5a05de7..8a2517e 100644
---- a/swift/common/middleware/tempurl.py
-+++ b/swift/common/middleware/tempurl.py
-@@ -98,6 +98,7 @@ from urlparse import parse_qs
- 
- from swift.common.wsgi import make_pre_authed_env
- from swift.common.http import HTTP_UNAUTHORIZED
-+from swift.common.utils import streq_const_time
- 
- 
- #: Default headers to remove from incoming requests. Simply a whitespace
-@@ -248,14 +249,14 @@ class TempURL(object):
-         if env['REQUEST_METHOD'] == 'HEAD':
-             hmac_val = self._get_hmac(env, temp_url_expires, key,
-                                       request_method='GET')
--            if temp_url_sig != hmac_val:
-+            if not streq_const_time(temp_url_sig, hmac_val):
-                 hmac_val = self._get_hmac(env, temp_url_expires, key,
-                                           request_method='PUT')
--                if temp_url_sig != hmac_val:
-+                if not streq_const_time(temp_url_sig, hmac_val):
-                     return self._invalid(env, start_response)
-         else:
-             hmac_val = self._get_hmac(env, temp_url_expires, key)
--            if temp_url_sig != hmac_val:
-+            if not streq_const_time(temp_url_sig, hmac_val):
-                 return self._invalid(env, start_response)
-         self._clean_incoming_headers(env)
-         env['swift.authorize'] = lambda req: None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/swift/patches/02-requirements.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -0,0 +1,30 @@
+In-house patch to remove unnecessary dependencies from Swift's
+requirements files. The specific reasons are as follows:
+
+greenlet	No longer applicable
+
+--- swift-1.10.0/requirements.txt.orig	2013-10-17 07:07:33.000000000 -0700
++++ swift-1.10.0/requirements.txt	2014-05-24 23:17:47.564223065 -0700
+@@ -1,7 +1,6 @@
+ pbr>=0.5.21,<1.0
+ dnspython>=1.9.4
+ eventlet>=0.9.15
+-greenlet>=0.3.1
+ netifaces>=0.5
+ pastedeploy>=1.3.3
+ simplejson>=2.0.9
+
+--- swift-1.10.0/swift.egg-info/requires.txt.orig	2013-10-17 07:08:06.000000000 -0700
++++ swift-1.10.0/swift.egg-info/requires.txt	2014-05-24 23:17:53.923411767 -0700
+@@ -1,9 +1,8 @@
+ pbr>=0.5.21,<1.0
+ dnspython>=1.9.4
+ eventlet>=0.9.15
+-greenlet>=0.3.1
+ netifaces>=0.5
+ pastedeploy>=1.3.3
+ simplejson>=2.0.9
+ xattr>=0.4
+-python-swiftclient
+\ No newline at end of file
++python-swiftclient
--- a/components/openstack/swift/patches/03-CVE-2013-2161.patch	Tue Jun 10 14:07:48 2014 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-commit 6659382c4fa348e1ebbce2424968dd7267ea1db1
-Author: Alex Gaynor <[email protected]>
-Date:   Mon May 27 02:07:39 2013 +0000
-
-    Check user input in XML responses.
-    
-    Fixes bug 1183884.
-    
-    * swift/account/server.py: Escape account name in XML listings.
-    
-    Change-Id: I7ba54631ed1349516132c00a53fae74f0b84ac37
-
-diff --git a/swift/account/server.py b/swift/account/server.py
-index 81c4d90..baca5a5 100644
---- a/swift/account/server.py
-+++ b/swift/account/server.py
-@@ -241,7 +241,7 @@ class AccountController(object):
-             account_list = json.dumps(data)
-         elif out_content_type.endswith('/xml'):
-             output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
--                           '<account name="%s">' % account]
-+                           '<account name="%s">' % saxutils.escape(account)]
-             for (name, object_count, bytes_used, is_subdir) in account_list:
-                 name = saxutils.escape(name)
-                 if is_subdir:
--- a/components/openstack/swift/patches/manager.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/patches/manager.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -8,7 +8,7 @@
 diff --git a/swift/common/manager.py b/swift/common/manager.py
 --- a/swift/common/manager.py
 +++ b/swift/common/manager.py
-@@ -532,7 +532,8 @@ class Server():
+@@ -534,7 +534,8 @@ class Server():
                  re_out = subprocess.PIPE
              else:
                  re_out = open(os.devnull, 'w+b')
@@ -21,7 +21,7 @@
 diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py
 --- a/test/unit/common/test_manager.py
 +++ b/test/unit/common/test_manager.py
-@@ -816,7 +816,7 @@ class TestServer(unittest.TestCase):
+@@ -858,7 +858,7 @@ class TestServer(unittest.TestCase):
          class MockProc():
  
              def __init__(self, pid, args, stdout=MockProcess.NOTHING,
--- a/components/openstack/swift/patches/manpages.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/patches/manpages.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -20,7 +20,7 @@
  
  
  
-@@ -245,13 +245,13 @@ Connection timeout to external services.
+@@ -254,11 +254,11 @@ Connection timeout to external services.
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-account-server and
@@ -34,8 +34,6 @@
  .SH "SEE ALSO"
 -.BR swift-account-server(1),
 +.BR swift-account-server (1)
- 
- 
 diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5
 --- a/doc/manpages/container-server.conf.5
 +++ b/doc/manpages/container-server.conf.5
@@ -54,7 +52,7 @@
  
  
  
-@@ -270,13 +270,13 @@ Maximum amount of time to spend syncing 
+@@ -279,11 +279,11 @@ Maximum amount of time to spend syncing 
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-container-server and
@@ -66,10 +64,8 @@
  
  
  .SH "SEE ALSO"
--.BR swift-container-server(1),
+-.BR swift-container-server(1)
 +.BR swift-container-server (1)
- 
- 
 diff --git a/doc/manpages/dispersion.conf.5 b/doc/manpages/dispersion.conf.5
 --- a/doc/manpages/dispersion.conf.5
 +++ b/doc/manpages/dispersion.conf.5
@@ -88,16 +84,16 @@
  
  .SH SYNOPSIS
  .LP
-@@ -43,7 +43,7 @@ Authentication system account/user name
+@@ -45,7 +45,7 @@ Authentication system account/user name
  .IP "\fBauth_key\fR"
  Authentication system account/user password 
  .IP "\fBswift_dir\fR"
 -Location of openstack-swift configuration and ring files
 +Location of OpenStack Swift configuration and ring files
  .IP "\fBdispersion_coverage\fR"
- Percentage of partition coverage to use. The default is 1.
+ Percentage of partition coverage to use. The default is 1.0.
  .IP "\fBretries\fR"
-@@ -80,13 +80,13 @@ Whether to run the object report. The de
+@@ -83,13 +83,13 @@ Whether to run the object report. The de
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-dispersion utilities and
@@ -165,7 +161,7 @@
  
  
  
-@@ -262,13 +262,13 @@ The default is 50.
+@@ -273,11 +273,11 @@ The default is 50.
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-object-server and
@@ -179,8 +175,6 @@
  .SH "SEE ALSO"
 -.BR swift-object-server(1),
 +.BR swift-object-server (1)
- 
- 
 diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5
 --- a/doc/manpages/proxy-server.conf.5
 +++ b/doc/manpages/proxy-server.conf.5
@@ -199,7 +193,7 @@
  
  
  
-@@ -539,10 +539,10 @@ per second.  The default is 1.
+@@ -552,10 +552,10 @@ per second.  The default is 1.
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-proxy-server and
@@ -686,7 +680,7 @@
  
  .SH SYNOPSIS
  .LP
-@@ -88,12 +88,12 @@ Example \fI/etc/swift/dispersion.conf\fR
+@@ -95,12 +95,12 @@ Example \fI/etc/swift/dispersion.conf\fR
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-dispersion utilities and
@@ -720,7 +714,7 @@
  
  .SH SYNOPSIS
  .LP
-@@ -124,12 +124,12 @@ Example \fI/etc/swift/dispersion.conf\fR
+@@ -131,12 +131,12 @@ Example \fI/etc/swift/dispersion.conf\fR
  .SH DOCUMENTATION
  .LP
  More in depth documentation about the swift-dispersion utilities and
@@ -1167,11 +1161,11 @@
 @@ -15,12 +15,12 @@
  .\" See the License for the specific language governing permissions and
  .\" limitations under the License.
- .\"  
+ .\"
 -.TH swift-orphans 1 "3/15/2012" "Linux" "OpenStack Swift"
 +.TH swift-orphans 1 "3/15/2012" "OpenStack" "OpenStack Swift"
  
- .SH NAME 
+ .SH NAME
  .LP
  .B swift-orphans
 -\- Openstack-swift orphans tool
@@ -1180,11 +1174,11 @@
  .SH SYNOPSIS
  .LP
 @@ -65,6 +65,6 @@ The options are as follows:
-     
+ 
  .SH DOCUMENTATION
  .LP
--More documentation about Openstack-Swift can be found at 
-+More documentation about OpenStack Swift can be found at 
+-More documentation about Openstack-Swift can be found at
++More documentation about OpenStack Swift can be found at
  .BI http://swift.openstack.org/index.html
  
 diff --git a/doc/manpages/swift-proxy-server.1 b/doc/manpages/swift-proxy-server.1
@@ -1244,11 +1238,11 @@
 @@ -15,12 +15,12 @@
  .\" See the License for the specific language governing permissions and
  .\" limitations under the License.
- .\"  
+ .\"
 -.TH swift-recon 1 "8/26/2011" "Linux" "OpenStack Swift"
 +.TH swift-recon 1 "8/26/2011" "OpenStack" "OpenStack Swift"
  
- .SH NAME 
+ .SH NAME
  .LP
  .B swift-recon
 -\- Openstack-swift recon middleware cli tool
@@ -1260,10 +1254,10 @@
  
  .SH DOCUMENTATION
  .LP
--More documentation about Openstack-Swift can be found at 
-+More documentation about OpenStack Swift can be found at 
- .BI http://swift.openstack.org/index.html 
- Also more specific documentation about swift-recon can be found at 
+-More documentation about Openstack-Swift can be found at
++More documentation about OpenStack Swift can be found at
+ .BI http://swift.openstack.org/index.html
+ Also more specific documentation about swift-recon can be found at
  .BI http://swift.openstack.org/admin_guide.html#cluster-telemetry-and-monitoring
 @@ -112,6 +112,6 @@ Also more specific documentation about s
  
@@ -1291,7 +1285,7 @@
  
  .SH SYNOPSIS
  .LP
-@@ -208,7 +208,7 @@ calls when no rebalance is needed but yo
+@@ -210,7 +210,7 @@ calls when no rebalance is needed but yo
  
  .SH DOCUMENTATION
  .LP
--- a/components/openstack/swift/patches/orphans.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/patches/orphans.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -4,7 +4,7 @@
 diff --git a/bin/swift-orphans b/bin/swift-orphans
 --- a/bin/swift-orphans
 +++ b/bin/swift-orphans
-@@ -42,12 +42,12 @@ Example (sends SIGTERM to all orphaned S
+@@ -54,12 +54,12 @@ Example (sends SIGTERM to all orphaned S
              if name.endswith('.pid'):
                  pids.append(open(os.path.join(root, name)).read().strip())
                  pids.extend(subprocess.Popen(
@@ -19,7 +19,7 @@
              stdout=subprocess.PIPE).communicate()[0].split('\n'):
          if not line:
              continue
-@@ -58,6 +58,7 @@ Example (sends SIGTERM to all orphaned S
+@@ -70,6 +70,7 @@ Example (sends SIGTERM to all orphaned S
              sys.exit('Could not process ps line %r' % line)
          if pid in pids:
              continue
--- a/components/openstack/swift/patches/recon.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/patches/recon.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -5,7 +5,7 @@
 diff --git a/bin/swift-recon-cron b/bin/swift-recon-cron
 --- a/bin/swift-recon-cron
 +++ b/bin/swift-recon-cron
-@@ -34,8 +34,8 @@ def main():
+@@ -49,8 +49,8 @@ def main():
          sys.exit(1)
      conf = dict(c.items('filter:recon'))
      device_dir = conf.get('devices', '/srv/node')
@@ -19,7 +19,7 @@
 diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample
 --- a/etc/account-server.conf-sample
 +++ b/etc/account-server.conf-sample
-@@ -56,7 +56,7 @@ use = egg:swift#healthcheck
+@@ -78,7 +78,7 @@ use = egg:swift#healthcheck
  
  [filter:recon]
  use = egg:swift#recon
@@ -28,16 +28,16 @@
  
  [account-replicator]
  # You can override the default log routing for this app here (don't use set!):
-@@ -80,7 +80,7 @@ use = egg:swift#recon
- # reclaim_age = 604800
+@@ -109,7 +109,7 @@ use = egg:swift#recon
  # Time in seconds to wait between replication passes
  # run_pause = 30
+ #
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
  
  [account-auditor]
  # You can override the default log routing for this app here (don't use set!):
-@@ -93,7 +93,7 @@ use = egg:swift#recon
+@@ -124,7 +124,7 @@ use = egg:swift#recon
  # log_facility = LOG_LOCAL0
  # log_level = INFO
  # accounts_per_second = 200
@@ -49,7 +49,7 @@
 diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample
 --- a/etc/container-server.conf-sample
 +++ b/etc/container-server.conf-sample
-@@ -62,7 +62,7 @@ use = egg:swift#healthcheck
+@@ -85,7 +85,7 @@ use = egg:swift#healthcheck
  
  [filter:recon]
  use = egg:swift#recon
@@ -58,27 +58,27 @@
  
  [container-replicator]
  # You can override the default log routing for this app here (don't use set!):
-@@ -81,7 +81,7 @@ use = egg:swift#recon
- # reclaim_age = 604800
+@@ -108,7 +108,7 @@ use = egg:swift#recon
  # Time in seconds to wait between replication passes
  # run_pause = 30
+ #
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
  
  [container-updater]
  # You can override the default log routing for this app here (don't use set!):
-@@ -97,7 +97,7 @@ use = egg:swift#recon
- # slowdown = 0.01
+@@ -128,7 +128,7 @@ use = egg:swift#recon
  # Seconds to suppress updating an account that has generated an error
  # account_suppression_time = 60
+ #
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
  
  [container-auditor]
  # You can override the default log routing for this app here (don't use set!):
-@@ -108,7 +108,7 @@ use = egg:swift#recon
- # Will audit each container at most once per interval
+@@ -141,7 +141,7 @@ use = egg:swift#recon
  # interval = 1800
+ #
  # containers_per_second = 200
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
@@ -88,7 +88,7 @@
 diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
 --- a/etc/object-server.conf-sample
 +++ b/etc/object-server.conf-sample
-@@ -71,8 +71,8 @@ use = egg:swift#healthcheck
+@@ -99,8 +99,8 @@ use = egg:swift#healthcheck
  
  [filter:recon]
  use = egg:swift#recon
@@ -99,34 +99,37 @@
  
  [object-replicator]
  # You can override the default log routing for this app here (don't use set!):
-@@ -96,7 +96,7 @@ use = egg:swift#recon
- # The replicator also performs reclamation
+@@ -134,7 +134,7 @@ use = egg:swift#recon
  # reclaim_age = 604800
+ #
  # ring_check_interval = 15
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
- 
- [object-updater]
- # You can override the default log routing for this app here (don't use set!):
-@@ -110,7 +110,7 @@ use = egg:swift#recon
- # conn_timeout = 0.5
+ #
+ # limits how long rsync error log lines are
+ # 0 means to log the entire line
+@@ -155,7 +155,7 @@ use = egg:swift#recon
  # slowdown will sleep that amount between objects
  # slowdown = 0.01
+ #
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
  
  [object-auditor]
  # You can override the default log routing for this app here (don't use set!):
-@@ -122,4 +122,4 @@ use = egg:swift#recon
+@@ -168,7 +168,7 @@ use = egg:swift#recon
  # bytes_per_second = 10000000
  # log_time = 3600
  # zero_byte_files_per_second = 50
 -# recon_cache_path = /var/cache/swift
 +# recon_cache_path = /var/lib/swift/recon-cache
+ 
+ # Takes a comma separated list of ints. If set, the object auditor will
+ # increment a counter for every object whose size is <= to the given break
 diff --git a/swift/account/auditor.py b/swift/account/auditor.py
 --- a/swift/account/auditor.py
 +++ b/swift/account/auditor.py
-@@ -44,7 +44,7 @@ class AccountAuditor(Daemon):
+@@ -45,7 +45,7 @@ class AccountAuditor(Daemon):
          swift.common.db.DB_PREALLOCATION = \
              config_true_value(conf.get('db_preallocation', 'f'))
          self.recon_cache_path = conf.get('recon_cache_path',
@@ -138,7 +141,7 @@
 diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
 --- a/swift/common/db_replicator.py
 +++ b/swift/common/db_replicator.py
-@@ -166,7 +166,7 @@ class Replicator(Daemon):
+@@ -169,7 +169,7 @@ class Replicator(Daemon):
              config_true_value(conf.get('db_preallocation', 'f'))
          self._zero_stats()
          self.recon_cache_path = conf.get('recon_cache_path',
@@ -155,10 +158,10 @@
  import errno
  import os
 +import sys
+ from swift import gettext_ as _
  
- from swift.common.swob import Request, Response
- from swift.common.utils import get_logger, config_true_value, json
-@@ -43,7 +44,7 @@ class ReconMiddleware(object):
+ from swift import __version__ as swiftver
+@@ -45,7 +46,7 @@ class ReconMiddleware(object):
          swift_dir = conf.get('swift_dir', '/etc/swift')
          self.logger = get_logger(conf, log_route='recon')
          self.recon_cache_path = conf.get('recon_cache_path',
@@ -167,7 +170,7 @@
          self.object_recon_cache = os.path.join(self.recon_cache_path,
                                                 'object.recon')
          self.container_recon_cache = os.path.join(self.recon_cache_path,
-@@ -80,28 +81,43 @@ class ReconMiddleware(object):
+@@ -87,28 +88,43 @@ class ReconMiddleware(object):
      def get_mounted(self, openr=open):
          """get ALL mounted fs from /proc/mounts"""
          mounts = []
@@ -221,7 +224,7 @@
 diff --git a/swift/container/auditor.py b/swift/container/auditor.py
 --- a/swift/container/auditor.py
 +++ b/swift/container/auditor.py
-@@ -44,7 +44,7 @@ class ContainerAuditor(Daemon):
+@@ -45,7 +45,7 @@ class ContainerAuditor(Daemon):
          swift.common.db.DB_PREALLOCATION = \
              config_true_value(conf.get('db_preallocation', 'f'))
          self.recon_cache_path = conf.get('recon_cache_path',
@@ -233,55 +236,55 @@
 diff --git a/swift/container/updater.py b/swift/container/updater.py
 --- a/swift/container/updater.py
 +++ b/swift/container/updater.py
-@@ -59,7 +59,7 @@ class ContainerUpdater(Daemon):
+@@ -60,7 +60,7 @@ class ContainerUpdater(Daemon):
          swift.common.db.DB_PREALLOCATION = \
              config_true_value(conf.get('db_preallocation', 'f'))
          self.recon_cache_path = conf.get('recon_cache_path',
 -                                         '/var/cache/swift')
 +                                         '/var/lib/swift/recon-cache')
          self.rcache = os.path.join(self.recon_cache_path, "container.recon")
+         self.user_agent = 'container-updater %s' % os.getpid()
  
-     def get_account_ring(self):
 diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py
 --- a/swift/obj/auditor.py
 +++ b/swift/obj/auditor.py
-@@ -54,7 +54,7 @@ class AuditorWorker(object):
+@@ -56,7 +56,7 @@ class AuditorWorker(object):
          self.quarantines = 0
          self.errors = 0
          self.recon_cache_path = conf.get('recon_cache_path',
 -                                         '/var/cache/swift')
 +                                         '/var/lib/swift/recon-cache')
          self.rcache = os.path.join(self.recon_cache_path, "object.recon")
- 
-     def audit_all_objects(self, mode='once'):
+         self.stats_sizes = sorted(
+             [int(s) for s in list_from_csv(conf.get('object_size_stats'))])
 diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py
 --- a/swift/obj/expirer.py
 +++ b/swift/obj/expirer.py
-@@ -50,7 +50,7 @@ class ObjectExpirer(Daemon):
+@@ -54,7 +54,7 @@ class ObjectExpirer(Daemon):
          self.report_first_time = self.report_last_time = time()
          self.report_objects = 0
          self.recon_cache_path = conf.get('recon_cache_path',
 -                                         '/var/cache/swift')
 +                                         '/var/lib/swift/recon-cache')
          self.rcache = join(self.recon_cache_path, 'object.recon')
- 
-     def report(self, final=False):
+         self.concurrency = int(conf.get('concurrency', 1))
+         if self.concurrency < 1:
 diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
 --- a/swift/obj/replicator.py
 +++ b/swift/obj/replicator.py
-@@ -265,7 +265,7 @@ class ObjectReplicator(Daemon):
+@@ -76,7 +76,7 @@ class ObjectReplicator(Daemon):
          self.http_timeout = int(conf.get('http_timeout', 60))
          self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
          self.recon_cache_path = conf.get('recon_cache_path',
 -                                         '/var/cache/swift')
 +                                         '/var/lib/swift/recon-cache')
          self.rcache = os.path.join(self.recon_cache_path, "object.recon")
- 
-     def _rsync(self, args):
+         self.headers = {
+             'Content-Length': '0',
 diff --git a/swift/obj/updater.py b/swift/obj/updater.py
 --- a/swift/obj/updater.py
 +++ b/swift/obj/updater.py
-@@ -51,7 +51,7 @@ class ObjectUpdater(Daemon):
+@@ -52,7 +52,7 @@ class ObjectUpdater(Daemon):
          self.successes = 0
          self.failures = 0
          self.recon_cache_path = conf.get('recon_cache_path',
@@ -293,17 +296,18 @@
 diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py
 --- a/test/unit/common/middleware/test_recon.py
 +++ b/test/unit/common/middleware/test_recon.py
-@@ -18,6 +18,8 @@ from unittest import TestCase
+@@ -18,7 +18,9 @@ from unittest import TestCase
  from contextlib import contextmanager
  from posix import stat_result, statvfs_result
  import os
 +import sys
+ import mock
 +from nose import SkipTest
  
  import swift.common.constraints
- from swift.common.swob import Request
-@@ -272,10 +274,12 @@ class TestReconSuccess(TestCase):
-                         {'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
+ from swift import __version__ as swiftver
+@@ -277,10 +279,12 @@ class TestReconSuccess(TestCase):
+             {'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
          oart = OpenAndReadTester(mounts_content)
          rv = self.app.get_mounted(openr=oart.open)
 -        self.assertEquals(oart.open_calls, [(('/proc/mounts', 'r'), {})])
@@ -316,7 +320,7 @@
          oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
          rv = self.app.get_load(openr=oart.open)
          self.assertEquals(oart.read_calls, [((), {})])
-@@ -285,6 +289,8 @@ class TestReconSuccess(TestCase):
+@@ -290,6 +294,8 @@ class TestReconSuccess(TestCase):
                                 '1m': 0.029999999999999999})
  
      def test_get_mem(self):
@@ -325,75 +329,75 @@
          meminfo_content = ['MemTotal:         505840 kB',
                             'MemFree:           26588 kB',
                             'Buffers:           44948 kB',
-@@ -395,7 +401,7 @@ class TestReconSuccess(TestCase):
+@@ -401,7 +407,7 @@ class TestReconSuccess(TestCase):
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['replication_time', 'replication_stats',
-                                 'replication_last'],
--                                '/var/cache/swift/account.recon'), {})])
-+                                '/var/lib/swift/recon-cache/account.recon'), {})])
-         self.assertEquals(rv, {"replication_stats": {
-                                     "attempted": 1, "diff": 0,
-                                     "diff_capped": 0, "empty": 0,
-@@ -424,7 +430,7 @@ class TestReconSuccess(TestCase):
+                           [((['replication_time', 'replication_stats',
+                               'replication_last'],
+-                              '/var/cache/swift/account.recon'), {})])
++                              '/var/lib/swift/recon-cache/account.recon'), {})])
+         self.assertEquals(rv, {
+             "replication_stats": {
+                 "attempted": 1, "diff": 0,
+@@ -432,7 +438,7 @@ class TestReconSuccess(TestCase):
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['replication_time', 'replication_stats',
-                                 'replication_last'],
--                                '/var/cache/swift/container.recon'), {})])
-+                                '/var/lib/swift/recon-cache/container.recon'), {})])
-         self.assertEquals(rv, {"replication_time": 200.0,
-                                "replication_stats": {
-                                     "attempted": 179, "diff": 0,
-@@ -445,7 +451,7 @@ class TestReconSuccess(TestCase):
+                           [((['replication_time', 'replication_stats',
+                               'replication_last'],
+-                              '/var/cache/swift/container.recon'), {})])
++                              '/var/lib/swift/recon-cache/container.recon'), {})])
+         self.assertEquals(rv, {
+             "replication_time": 200.0,
+             "replication_stats": {
+@@ -454,7 +460,7 @@ class TestReconSuccess(TestCase):
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['object_replication_time',
-                                 'object_replication_last'],
--                                '/var/cache/swift/object.recon'), {})])
-+                                '/var/lib/swift/recon-cache/object.recon'), {})])
+                           [((['object_replication_time',
+                               'object_replication_last'],
+-                              '/var/cache/swift/object.recon'), {})])
++                              '/var/lib/swift/recon-cache/object.recon'), {})])
          self.assertEquals(rv, {'object_replication_time': 200.0,
                                 'object_replication_last': 1357962809.15})
  
-@@ -456,7 +462,7 @@ class TestReconSuccess(TestCase):
+@@ -465,7 +471,7 @@ class TestReconSuccess(TestCase):
          rv = self.app.get_updater_info('container')
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['container_updater_sweep'],
--                            '/var/cache/swift/container.recon'), {})])
-+                            '/var/lib/swift/recon-cache/container.recon'), {})])
+                           [((['container_updater_sweep'],
+-                             '/var/cache/swift/container.recon'), {})])
++                             '/var/lib/swift/recon-cache/container.recon'), {})])
          self.assertEquals(rv, {"container_updater_sweep": 18.476239919662476})
  
      def test_get_updater_info_object(self):
-@@ -466,7 +472,7 @@ class TestReconSuccess(TestCase):
+@@ -475,7 +481,7 @@ class TestReconSuccess(TestCase):
          rv = self.app.get_updater_info('object')
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['object_updater_sweep'],
--                            '/var/cache/swift/object.recon'), {})])
-+                            '/var/lib/swift/recon-cache/object.recon'), {})])
+                           [((['object_updater_sweep'],
+-                             '/var/cache/swift/object.recon'), {})])
++                             '/var/lib/swift/recon-cache/object.recon'), {})])
          self.assertEquals(rv, {"object_updater_sweep": 0.79848217964172363})
  
      def test_get_auditor_info_account(self):
-@@ -482,7 +488,7 @@ class TestReconSuccess(TestCase):
-                                 'account_auditor_pass_completed',
-                                 'account_audits_since',
-                                 'account_audits_failed'],
--                                '/var/cache/swift/account.recon'), {})])
-+                                '/var/lib/swift/recon-cache/account.recon'), {})])
+@@ -491,7 +497,7 @@ class TestReconSuccess(TestCase):
+                               'account_auditor_pass_completed',
+                               'account_audits_since',
+                               'account_audits_failed'],
+-                              '/var/cache/swift/account.recon'), {})])
++                              '/var/lib/swift/recon-cache/account.recon'), {})])
          self.assertEquals(rv, {"account_auditor_pass_completed": 0.24,
                                 "account_audits_failed": 0,
                                 "account_audits_passed": 6,
-@@ -501,7 +507,7 @@ class TestReconSuccess(TestCase):
-                                 'container_auditor_pass_completed',
-                                 'container_audits_since',
-                                 'container_audits_failed'],
--                                '/var/cache/swift/container.recon'), {})])
-+                                '/var/lib/swift/recon-cache/container.recon'), {})])
+@@ -510,7 +516,7 @@ class TestReconSuccess(TestCase):
+                               'container_auditor_pass_completed',
+                               'container_audits_since',
+                               'container_audits_failed'],
+-                              '/var/cache/swift/container.recon'), {})])
++                              '/var/lib/swift/recon-cache/container.recon'), {})])
          self.assertEquals(rv, {"container_auditor_pass_completed": 0.24,
                                 "container_audits_failed": 0,
                                 "container_audits_passed": 6,
-@@ -528,7 +534,7 @@ class TestReconSuccess(TestCase):
+@@ -538,7 +544,7 @@ class TestReconSuccess(TestCase):
          self.assertEquals(self.fakecache.fakeout_calls,
-                             [((['object_auditor_stats_ALL',
-                                 'object_auditor_stats_ZBF'],
--                            '/var/cache/swift/object.recon'), {})])
-+                            '/var/lib/swift/recon-cache/object.recon'), {})])
-         self.assertEquals(rv, {"object_auditor_stats_ALL": {
-                                     "audit_time": 115.14418768882751,
-                                     "bytes_processed": 234660,
+                           [((['object_auditor_stats_ALL',
+                               'object_auditor_stats_ZBF'],
+-                              '/var/cache/swift/object.recon'), {})])
++                              '/var/lib/swift/recon-cache/object.recon'), {})])
+         self.assertEquals(rv, {
+             "object_auditor_stats_ALL": {
+                 "audit_time": 115.14418768882751,
--- a/components/openstack/swift/patches/test.patch	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/patches/test.patch	Wed Jun 11 17:13:12 2014 -0700
@@ -5,21 +5,15 @@
 
   - Solaris doesn't yet support syslog logging to /dev/log.
 
-  - Solaris doesn't have TCP_KEEPIDLE.
-
-  - Three tests make connections to 127.0.0.[234], which take minutes to
-    timeout (and one test fails).  Mock the connections to make the return
-    success immediately.
-
-The last has already been fixed upstream, as of 1.9.1.  The middle two are
-Solaris-only, and not suitable for upstream.  The first, while potentially
-useful elsewhere, is really only an issue on Solaris because Linux runs
-almost exclusively 64-bit, which makes this a non-issue.
+The first, while potentially useful elsewhere, is really only an issue on
+Solaris because Linux runs almost exclusively 64-bit, which makes this a
+non-issue.  The last is Solaris-only -- though clearly a similar problem
+exists on MacOS -- and we will want to fix this in our Python.
 
 diff --git a/test/unit/__init__.py b/test/unit/__init__.py
 --- a/test/unit/__init__.py
 +++ b/test/unit/__init__.py
-@@ -315,7 +315,7 @@ def fake_http_connect(*code_iter, **kwar
+@@ -450,7 +450,7 @@ def fake_http_connect(*code_iter, **kwar
                  else:
                      etag = '"68b329da9893e34099c7d8ad5cb9c940"'
  
@@ -31,7 +25,7 @@
 diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
 --- a/test/unit/proxy/test_server.py
 +++ b/test/unit/proxy/test_server.py
-@@ -2310,6 +2310,9 @@ class TestObjectController(unittest.Test
+@@ -2876,6 +2876,9 @@ class TestObjectController(unittest.Test
  
              class LargeResponseBody(object):
  
@@ -41,7 +35,7 @@
                  def __len__(self):
                      return MAX_FILE_SIZE + 1
  
-@@ -2439,6 +2442,9 @@ class TestObjectController(unittest.Test
+@@ -3006,6 +3009,9 @@ class TestObjectController(unittest.Test
  
              class LargeResponseBody(object):
  
@@ -54,97 +48,13 @@
 diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
 --- a/test/unit/common/test_utils.py
 +++ b/test/unit/common/test_utils.py
-@@ -425,9 +425,15 @@ class TestUtils(unittest.TestCase):
-             logger = utils.get_logger({
-                 'log_facility': 'LOG_LOCAL3',
+@@ -437,7 +437,8 @@ class TestUtils(unittest.TestCase):
              }, 'server', log_route='server')
-+            if sys.platform == 'sunos5':
-+                extra = [
-+                  ((), {'facility': orig_sysloghandler.LOG_LOCAL3})
-+                ]
-+            else:
-+                extra = []
-             self.assertEquals([
-                 ((), {'address': '/dev/log',
--                      'facility': orig_sysloghandler.LOG_LOCAL3})],
-+                      'facility': orig_sysloghandler.LOG_LOCAL3})] + extra,
-                 syslog_handler_args)
- 
-             syslog_handler_args = []
-diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py
---- a/test/unit/common/test_wsgi.py
-+++ b/test/unit/common/test_wsgi.py
-@@ -116,11 +116,12 @@ class TestWSGI(unittest.TestCase):
-                 socket.SOL_SOCKET: {
-                     socket.SO_REUSEADDR: 1,
-                     socket.SO_KEEPALIVE: 1,
--                },
--                socket.IPPROTO_TCP: {
-+                }
-+            }
-+            if hasattr(socket, 'TCP_KEEPIDLE'):
-+                expected_socket_opts[socket.IPPROTO_TCP] = {
-                     socket.TCP_KEEPIDLE: 600,
--                },
--            }
-+                }
-             self.assertEquals(sock.opts, expected_socket_opts)
-             # test ssl
-             sock = wsgi.get_socket(ssl_conf)
-diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
---- a/test/unit/obj/test_replicator.py
-+++ b/test/unit/obj/test_replicator.py
-@@ -17,6 +17,7 @@ from __future__ import with_statement
- 
- import unittest
- import os
-+from mock import patch as mockpatch
- from gzip import GzipFile
- from shutil import rmtree
- import cPickle as pickle
-@@ -482,14 +483,16 @@ class TestObjectReplicator(unittest.Test
-             self.replicator.logger.log_dict['warning'])
- 
-     def test_delete_partition(self):
--        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
--        mkdirs(df.datadir)
--        ohash = hash_path('a', 'c', 'o')
--        data_dir = ohash[-3:]
--        part_path = os.path.join(self.objects, '1')
--        self.assertTrue(os.access(part_path, os.F_OK))
--        self.replicator.replicate()
--        self.assertFalse(os.access(part_path, os.F_OK))
-+        with mockpatch('swift.obj.replicator.http_connect',
-+                        mock_http_connect(200)):
-+            df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
-+            mkdirs(df.datadir)
-+            ohash = hash_path('a', 'c', 'o')
-+            data_dir = ohash[-3:]
-+            part_path = os.path.join(self.objects, '1')
-+            self.assertTrue(os.access(part_path, os.F_OK))
-+            self.replicator.replicate()
-+            self.assertFalse(os.access(part_path, os.F_OK))
- 
-     def test_delete_partition_override_params(self):
-         df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
-@@ -613,12 +616,16 @@ class TestObjectReplicator(unittest.Test
-             tpool.execute = was_execute
- 
-     def test_run(self):
--        with _mock_process([(0, '')] * 100):
--            self.replicator.replicate()
-+        with mockpatch('swift.obj.replicator.http_connect',
-+                        mock_http_connect(200)):
-+            with _mock_process([(0, '')] * 100):
-+                self.replicator.replicate()
- 
-     def test_run_withlog(self):
--        with _mock_process([(0, "stuff in log")] * 100):
--            self.replicator.replicate()
-+        with mockpatch('swift.obj.replicator.http_connect',
-+                        mock_http_connect(200)):
-+            with _mock_process([(0, "stuff in log")] * 100):
-+                self.replicator.replicate()
- 
- if __name__ == '__main__':
-     unittest.main()
+             expected_args = [((), {'address': '/dev/log',
+                                    'facility': orig_sysloghandler.LOG_LOCAL3})]
+-            if not os.path.exists('/dev/log') or \
++            if sys.platform == 'sunos5' or \
++                    not os.path.exists('/dev/log') or \
+                     os.path.isfile('/dev/log') or \
+                     os.path.isdir('/dev/log'):
+                 # Since socket on OSX is in /var/run/syslog, there will be
--- a/components/openstack/swift/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,5 +1,3 @@
-library/python-2/eventlet-26
-library/python-2/simplejson-26
 library/python/eventlet-26
 library/python/simplejson-26
 library/python/swiftclient-26
--- a/components/openstack/swift/swift.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/openstack/swift/swift.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -30,10 +30,10 @@
 set name=pkg.summary value="OpenStack Swift"
 set name=pkg.description \
     value="The OpenStack Object Store project, known as Swift, offers cloud storage software so that you can store and retrieve data in virtual containers"
-set name=pkg.human-version value="Grizzly 2013.1.4"
+set name=pkg.human-version value="Havana 2013.2.3"
 set name=com.oracle.info.description \
     value="Swift, the OpenStack object storage service"
-set name=com.oracle.info.tpno value=14503
+set name=com.oracle.info.tpno value=17868
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -52,11 +52,12 @@
 file path=etc/security/prof_attr.d/cloud:openstack:swift
 <transform file path=etc/swift/(.*) -> set action.hash etc/%<1>-sample>
 <transform file path=etc/swift/rsyncd.conf -> set action.hash files/rsyncd.conf>
+<transform file path=etc/swift/proxy-server.conf -> set action.hash files/proxy-server.conf>
 <transform dir file path=etc/swift/? -> default owner swift>
 <transform dir file path=etc/swift/? -> default group swift>
 <transform file path=etc/swift/ -> default mode 0644>
 <transform file path=etc/swift/ -> default overlay allow>
-<transform file path=etc/swift/ -> default preserve true>
+<transform file path=etc/swift/ -> default preserve renamenew>
 dir  path=etc/swift mode=0700
 file path=etc/swift/account-server.conf
 file path=etc/swift/container-server.conf
@@ -107,6 +108,7 @@
 file path=usr/bin/swift-account-audit
 file path=usr/bin/swift-bench
 file path=usr/bin/swift-bench-client
+file path=usr/bin/swift-config
 file path=usr/bin/swift-dispersion-populate
 file path=usr/bin/swift-dispersion-report
 file path=usr/bin/swift-form-signature
@@ -119,14 +121,17 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/entry_points.txt
+file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/not-zip-safe
 file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/requires.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/swift-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/auditor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/backend.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/reaper.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/replicator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/server.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/account/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/bench.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/bufferedhttp.py
@@ -147,6 +152,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/catch_errors.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/cname_lookup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/container_quotas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/crossdomain.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/domain_remap.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/formpost.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/healthcheck.py
@@ -161,6 +167,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/staticweb.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/tempauth.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/middleware/tempurl.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/request_helpers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/ring/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/ring/builder.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/ring/ring.py
@@ -170,12 +177,14 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/common/wsgi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/auditor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/backend.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/replicator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/server.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/sync.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/container/updater.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/auditor.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/diskfile.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/expirer.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/replicator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swift/obj/server.py
@@ -246,13 +255,21 @@
 #
 license LICENSE license="Apache v2.0"
 
-# force a dependency on netifaces; pkgdepend work is needed to flush this
-# out.
+# force a dependency on dnspython; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/dnspython-26
+
+# force a dependency on keystoneclient; used via a paste.deploy filter
+depend type=require fmri=library/python/keystoneclient-26
+
+# force a dependency on netifaces; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/netifaces-26
 
 # force a dependency on paste.deploy; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/paste.deploy-26
 
+# force a dependency on pbr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pbr-26
+
 # force a dependency on xattr; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/xattr-26
--- a/components/python/cinderclient/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/cinderclient/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,15 +25,18 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		python-cinderclient
-COMPONENT_VERSION=	1.0.7
+COMPONENT_VERSION=	1.0.9
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:8ad67a35e7370fe1379703738050deeae0da33ad6c1e1466dd0cf51c82d4255b
+    sha256:e146e370c9a6e4aa44e8f0662fd33aec532f3965bae9fd564360c279bee03f97
 COMPONENT_ARCHIVE_URL=	$(call pypi_url)
 COMPONENT_PROJECT_URL=	http://launchpad.net/python-cinderclient
 COMPONENT_BUGDB=	service/cinder
 
+# depends on keystoneclient which is not Python 3 ready
+PYTHON_VERSIONS=	2.7 2.6
+
 include $(WS_TOP)/make-rules/prep.mk
 include $(WS_TOP)/make-rules/setup.py.mk
 include $(WS_TOP)/make-rules/ips.mk
--- a/components/python/cinderclient/cinderclient-PYVER.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/cinderclient/cinderclient-PYVER.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -31,7 +31,7 @@
     value="A client for the OpenStack Block Storage API. There's a Python API (the 'cinderclient' module), and a command-line script ('cinder'). Each implements 100% of the OpenStack Block Storage API."
 set name=com.oracle.info.description \
     value="cinderclient, the Python bindings to the OpenStack Block Storage API"
-set name=com.oracle.info.tpno value=16347
+set name=com.oracle.info.tpno value=17869
 set name=info.classification \
     value=org.opensolaris.category.2008:Development/Python \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -45,6 +45,7 @@
     mediator-version=$(PYVER)
 file path=usr/bin/cinder-$(PYVER)
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/auth_plugin.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/base.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/exceptions.py
@@ -59,6 +60,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/apiclient/fake_client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/py3kcompat/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/py3kcompat/urlutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/openstack/common/strutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/service_catalog.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinderclient/shell.py
@@ -126,6 +129,10 @@
 depend type=require \
     fmri=library/python/cinderclient@$(IPS_COMPONENT_VERSION),$(BUILD_VERSION)
 
+# force a dependency on keystoneclient; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/keystoneclient-$(PYV)
+
 # force a dependency on pbr; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/pbr-$(PYV)
 
--- a/components/python/cinderclient/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/cinderclient/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,3 +1,2 @@
 runtime/python-26
 runtime/python-27
-runtime/python-34
--- a/components/python/keystoneclient/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/keystoneclient/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,11 +25,11 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		python-keystoneclient
-COMPONENT_VERSION=	0.4.1
+COMPONENT_VERSION=	0.8.0
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:bc9b27000e5bbfbb06030a69af81e08ced99568d97fd257972ef80637f6e0704
+    sha256:6a34e10e67f375c82fefbb787072767da976520b302021b5d7237bd78e93f8cf
 COMPONENT_ARCHIVE_URL=	$(call pypi_url)
 COMPONENT_PROJECT_URL=	http://launchpad.net/python-keystoneclient
 COMPONENT_BUGDB=	service/keystone
--- a/components/python/keystoneclient/keystoneclient-PYVER.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/keystoneclient/keystoneclient-PYVER.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -31,7 +31,7 @@
     value="A client for the OpenStack Identity API, implemented by Keystone. There's a Python API (the 'keystoneclient' module), and a command-line script ('keystone')."
 set name=com.oracle.info.description \
     value="keystoneclient, the Python bindings to the OpenStack Identity API"
-set name=com.oracle.info.tpno value=16349
+set name=com.oracle.info.tpno value=17870
 set name=info.classification \
     value=org.opensolaris.category.2008:Development/Python \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -48,7 +48,15 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/access.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/apiclient/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/apiclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/identity/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/identity/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/identity/v2.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/identity/v3.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/auth/token_endpoint.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/baseclient.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/common/cms.py
@@ -57,26 +65,37 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/contrib/bootstrap/shell.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/contrib/ec2/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/contrib/ec2/utils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/discover.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/fixture/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/fixture/exception.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/fixture/v2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/generic/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/generic/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/generic/shell.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/httpclient.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/locale/keystoneclient.pot
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/middleware/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/middleware/auth_token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/middleware/memcache_crypt.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/middleware/s3_token.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/auth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/apiclient/fake_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/fixture/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/fixture/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/importutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/jsonutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/memorycache.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/py3kcompat/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/py3kcompat/urlutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/strutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/openstack/common/timeutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/service_catalog.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/session.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/shell.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v2_0/__init__.py
@@ -92,6 +111,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/endpoint_filter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/federation/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/federation/core.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/federation/identity_providers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/federation/mappings.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/contrib/trusts.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/credentials.py
 file path=usr/lib/python$(PYVER)/vendor-packages/keystoneclient/v3/domains.py
@@ -119,6 +143,10 @@
 # flush this out.
 depend type=group fmri=library/python/eventlet-$(PYV)
 
+# force a group dependency on the optional simplejson; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
 # force a dependency on argparse; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/argparse-$(PYV)
 
@@ -149,3 +177,9 @@
 
 # force a dependency on six; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/six-$(PYV)
+
+# force a dependency on stevedore; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/stevedore-$(PYV)
+
+# force a dependency on webob; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/webob-$(PYV)
--- a/components/python/neutronclient/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/neutronclient/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,11 +25,11 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		python-neutronclient
-COMPONENT_VERSION=	2.3.1
+COMPONENT_VERSION=	2.3.4
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:9497bc036d9ac769f65b989d49b45a122b8d7674bba8cad2480c054b888f8986
+    sha256:41c8fc9dcd947fcd0250175ace419228fc9ac3820442826ba9858ab6f99066c5
 COMPONENT_ARCHIVE_URL=	$(call pypi_url)
 COMPONENT_PROJECT_URL=	http://launchpad.net/python-neutronclient
 COMPONENT_BUGDB=	service/neutron
--- a/components/python/neutronclient/neutronclient-PYVER.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/neutronclient/neutronclient-PYVER.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -31,7 +31,7 @@
     value="A client library for Neutron built on the OpenStack Virtual Network API. It provides a Python API (the 'neutronclient' module) and a command-line tool ('neutron')."
 set name=com.oracle.info.description \
     value="neutronclient, the Python bindings to the OpenStack Virtual Network API"
-set name=com.oracle.info.tpno value=16351
+set name=com.oracle.info.tpno value=17871
 set name=info.classification \
     value=org.opensolaris.category.2008:Development/Python \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -70,10 +70,12 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/lb/member.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/lb/pool.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/lb/vip.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/metering.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/network.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/networkprofile.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/nvp_qos_queue.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/nvpnetworkgateway.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/nsx/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/nsx/networkgateway.py
+file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/nsx/qos_queue.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/policyprofile.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/port.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/quota.py
@@ -89,7 +91,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/neutron/v2_0/vpn/vpnservice.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/common/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/common/exception.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/common/importutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/neutronclient/openstack/common/jsonutils.py
--- a/components/python/novaclient/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/novaclient/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,11 +25,11 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		python-novaclient
-COMPONENT_VERSION=	2.15.0
+COMPONENT_VERSION=	2.17.0
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:ec18819f1021419d7696705c8d008ca80535bd90577dc0b7708afe51dccee099
+    sha256:af6f46890715eb5f4e0d25a714effbbac8da020715f81b1572a301c5e9887197
 COMPONENT_ARCHIVE_URL=	$(call pypi_url)
 COMPONENT_PROJECT_URL=	http://launchpad.net/python-novaclient
 COMPONENT_BUGDB=	service/nova
--- a/components/python/novaclient/novaclient-PYVER.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/novaclient/novaclient-PYVER.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -17,7 +17,6 @@
 # information: Portions Copyright [yyyy] [name of copyright owner]
 #
 # CDDL HEADER END
-#
 
 #
 # Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
@@ -31,7 +30,7 @@
     value="A client for the OpenStack Compute API. There's a Python API (the 'novaclient' module) and a command-line script ('nova'). Each implements 100% of the OpenStack Compute API."
 set name=com.oracle.info.description \
     value="novaclient, the Python bindings to the OpenStack Compute API"
-set name=com.oracle.info.tpno value=16352
+set name=com.oracle.info.tpno value=17872
 set name=info.classification \
     value=org.opensolaris.category.2008:Development/Python \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -53,7 +52,16 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/extension.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/auth.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/exceptions.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/apiclient/fake_client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/cliutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/gettextutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/importutils.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/jsonutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/py3kcompat/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/py3kcompat/urlutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/openstack/common/strutils.py
@@ -80,8 +88,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/contrib/list_extensions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/contrib/metadata_extensions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/contrib/migrations.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/contrib/server_external_events.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/contrib/tenant_networks.py
-file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/coverage_ext.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/fixed_ips.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/flavor_access.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/flavors.py
@@ -109,8 +117,24 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/volume_types.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v1_1/volumes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/agents.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/aggregates.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/availability_zones.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/certs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/client.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/flavor_access.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/flavors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/hosts.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/hypervisors.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/images.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/keypairs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/quota_classes.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/quotas.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/servers.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/services.py
 file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/shell.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/usage.py
+file path=usr/lib/python$(PYVER)/vendor-packages/novaclient/v3/volumes.py
 file path=usr/lib/python$(PYVER)/vendor-packages/python_novaclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/PKG-INFO
 file path=usr/lib/python$(PYVER)/vendor-packages/python_novaclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/SOURCES.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/python_novaclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/dependency_links.txt
@@ -120,6 +144,14 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/python_novaclient-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 license novaclient.license license="Apache v2.0"
 
+# force a group dependency on the optional anyjson; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/anyjson-$(PYV)
+
+# force a group dependency on the optional simplejson; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
 # force a dependency on argparse; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/argparse-$(PYV)
 
@@ -147,3 +179,6 @@
 
 # force a dependency on six; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/six-$(PYV)
+
+# force a dependency on stevedore; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/stevedore-$(PYV)
--- a/components/python/swiftclient/Makefile	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/swiftclient/Makefile	Wed Jun 11 17:13:12 2014 -0700
@@ -25,16 +25,16 @@
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		python-swiftclient
-COMPONENT_VERSION=	2.0.2
+COMPONENT_VERSION=	2.1.0
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:bad3902187b0eeb0a7371d54d5789a8e0c745ca2acca5ca4d86961952e37888a
+    sha256:e5304cc2fc58e5f9fec87c0910109b524b3634f49a6dd82a35a28cbdf1515de8
 COMPONENT_ARCHIVE_URL=	$(call pypi_url)
 COMPONENT_PROJECT_URL=	http://launchpad.net/python-swiftclient
 COMPONENT_BUGDB=	service/swift
 
-# Syntax issues: not Python 3 ready.
+# depends on keystoneclient which is not Python 3 ready
 PYTHON_VERSIONS=	2.7 2.6
 
 include $(WS_TOP)/make-rules/prep.mk
--- a/components/python/swiftclient/resolve.deps	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/swiftclient/resolve.deps	Wed Jun 11 17:13:12 2014 -0700
@@ -1,5 +1,3 @@
-library/python-2/simplejson-26
-library/python-2/simplejson-27
 library/python/simplejson-26
 library/python/simplejson-27
 runtime/python-26
--- a/components/python/swiftclient/swiftclient-PYVER.p5m	Tue Jun 10 14:07:48 2014 -0700
+++ b/components/python/swiftclient/swiftclient-PYVER.p5m	Wed Jun 11 17:13:12 2014 -0700
@@ -31,7 +31,7 @@
     value="A python client for the OpenStack Object Storage API. There's a Python API (the 'swiftclient' module), and a command-line script ('swift')."
 set name=com.oracle.info.description \
     value="swiftclient, the Python bindings to the OpenStack Object Storage API"
-set name=com.oracle.info.tpno value=16617
+set name=com.oracle.info.tpno value=17873
 set name=info.classification \
     value=org.opensolaris.category.2008:Development/Python \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -55,6 +55,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/command_helpers.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/exceptions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/multithreading.py
+file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/shell.py \
+    pkg.depend.bypass-generate=.*/six.*
 file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/swiftclient/version.py
 license swiftclient.license license="Apache v2.0"
@@ -63,6 +65,14 @@
 # needed to flush this out.
 depend type=group fmri=library/python/keystoneclient-$(PYV)
 
+# force a group dependency on the optional pbr; pkgdepend work is needed to
+# flush this out.
+depend type=group fmri=library/python/pbr-$(PYV)
+
+# force a group dependency on the optional simplejson; pkgdepend work is
+# needed to flush this out.
+depend type=group fmri=library/python/simplejson-$(PYV)
+
 # force a dependency on requests; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/requests-$(PYV)