PSARC 2015/535 OpenStack service updates for Kilo
author Devjani Ray <devjani.ray@oracle.com>
Fri, 05 Feb 2016 17:54:17 -0500
changeset 5405 66fd59fecd68
parent 5404 55e409ba4e72
child 5406 5ac656f02914
PSARC 2015/535 OpenStack service updates for Kilo PSARC 2015/458 aioeventlet - asyncio event loop scheduling callbacks in eventlet PSARC 2015/460 msgpack - C/Python bindings for MessagePack (de)serializer data PSARC 2015/466 openstackclient - OpenStack Command-line Client PSARC 2015/467 oslo.versionedobjects - Oslo Versioned Objects library PSARC 2015/468 pint - A physical quantities module PSARC 2015/469 pysaml2 - A pure Python implementation of SAML2 PSARC 2015/471 semantic_version - A library implementing the 'SemVer' scheme PSARC 2015/472 testresources - PyUnit extension for managing expensive test resources PSARC 2015/473 testscenarios - Extensions to Python unittest to support scenarios PSARC 2015/474 trollius - Port of the Tulip project (asyncio module, PEP 3156) on Python 2 PSARC 2015/475 urllib3 - HTTP library with thread-safe connection pooling, file post, and more PSARC 2015/520 oslo.concurrency - Oslo Concurrency library PSARC 2015/521 oslo.log - Oslo Logging Configuration library PSARC 2015/529 oslo.policy - Oslo Policy library PSARC 2015/530 psutil - Python system and process utilities PSARC 2015/538 fixtures - Python module to support reusable state for writing clean tests PSARC 2015/539 sqlparse - An SQL parser module for Python PSARC 2016/017 extras - Useful extra utilities for Python PSARC 2016/018 linecache2 - Port of the standard linecache module PSARC 2016/019 python-mimeparse - Basic functions for parsing mime-types PSARC 2016/020 testtools - Extensions to the Python unit testing framework PSARC 2016/021 traceback2 - Port of the standard traceback module PSARC 2016/014 OpenStack Cinder NFS driver for Solaris 22384068 OpenStack service updates for Kilo (Umbrella) 21974208 The Python module msgpack should be added to Userland 22010630 The Python trollius module should be added to Userland 22011755 The Python module pint should be added to Userland 22012256 The Python aioeventlet module should be added to Userland 22012282 The Python 
oslo.versionedobjects module should be added to Userland 22012317 The Python semantic_version module should be added to Userland 22012321 The Python testresources module should be added to Userland 22012329 The Python testscenarios module should be added to Userland 22012336 The Python urllib3 module should be added to Userland 22012343 The Python openstackclient module should be added to Userland 22299389 The Python oslo.concurrency module should be added to Userland 22299409 The Python oslo.log module should be added to Userland 22299418 The Python oslo.policy module should be added to Userland 22299469 The Python psutil module should be added to Userland 22337793 The Python sqlparse module should be added to Userland 22338325 The Python fixtures module should be added to Userland 22535728 The Python testtools module should be added to Userland 22535739 The Python extras module should be added to Userland 22535748 The Python linecache2 module should be added to Userland 22535753 The Python traceback2 module should be added to Userland 22535760 The Python python-mimeparse module should be added to Userland 18961001 Image filtering does not function as expected 21678935 NFS for Cinder in Solaris OpenStack 22548630 derived manifest should not enforce presence of global when installing from UAR 22629795 problem in SERVICE/KEYSTONE
components/openstack/cinder/Makefile
components/openstack/cinder/cinder.license
components/openstack/cinder/cinder.p5m
components/openstack/cinder/files/cinder-upgrade
components/openstack/cinder/files/cinder.conf
components/openstack/cinder/files/cinder.exec_attr
components/openstack/cinder/files/solaris/nfs.py
components/openstack/cinder/files/solaris/solarisfc.py
components/openstack/cinder/files/solaris/solarisiscsi.py
components/openstack/cinder/files/solaris/zfs.py
components/openstack/cinder/files/zfssa/zfssaiscsi.py
components/openstack/cinder/patches/01-requirements.patch
components/openstack/cinder/patches/02-nopycrypto.patch
components/openstack/cinder/patches/03-emc_vmax_iscsi.patch
components/openstack/cinder/patches/04-volume-backup.patch
components/openstack/cinder/patches/05-keepalive.patch
components/openstack/cinder/patches/05-launchpad-1479342.patch
components/openstack/cinder/patches/06-enable-dd.patch
components/openstack/cinder/patches/07-launchpad-1460156.patch
components/openstack/cinder/patches/07-zfssa-pep-476.patch
components/openstack/cinder/patches/08-disable-sslv3.patch
components/openstack/cinder/patches/08-zfssa-target_lun.patch
components/openstack/cinder/patches/09-launchpad-1414867.patch
components/openstack/cinder/patches/09-nfs-mount.patch
components/openstack/cinder/patches/10-remotefs.patch
components/openstack/cinder/patches/10-zfssa-free-space.patch
components/openstack/cinder/patches/11-nfs-backup.patch
components/openstack/cinder/patches/11-zfssa-pep-iscsi.patch
components/openstack/cinder/patches/12-generate_sample.patch
components/openstack/common/Makefile
components/openstack/common/files/openstack_common.py
components/openstack/common/openstack-common.p5m
components/openstack/common/openstack.p5m
components/openstack/glance/Makefile
components/openstack/glance/files/glance-api.conf
components/openstack/glance/files/glance-cache.conf
components/openstack/glance/files/glance-manage.conf
components/openstack/glance/files/glance-registry.conf
components/openstack/glance/files/glance-scrubber.conf
components/openstack/glance/files/glance-upgrade
components/openstack/glance/glance.p5m
components/openstack/glance/patches/01-nopycrypto.patch
components/openstack/glance/patches/02-zfs-uar-formats.patch
components/openstack/glance/patches/03-Partial_Content.patch
components/openstack/glance/patches/04-requirements.patch
components/openstack/glance/patches/05-CVE-2015-1881.patch
components/openstack/glance/patches/05-launchpad-1496012.patch
components/openstack/glance/patches/06-CVE-2014-9684.patch
components/openstack/glance/patches/06-launchpad-1500361.patch
components/openstack/glance/patches/07-CVE-2015-5251.patch
components/openstack/glance/patches/07-profiler_opts.patch
components/openstack/glance/patches/08-CVE-2015-5286.patch
components/openstack/glance/patches/09-CVE-2015-5286.patch
components/openstack/heat/Makefile
components/openstack/heat/files/heat-upgrade
components/openstack/heat/files/heat.conf
components/openstack/heat/heat.p5m
components/openstack/heat/patches/02-nopycrypto.patch
components/openstack/heat/patches/02-requirements.patch
components/openstack/heat/patches/03-requirements.patch
components/openstack/heat/patches/04-launchpad-1496277.patch
components/openstack/heat/patches/04-nopycrypto.patch
components/openstack/heat/patches/05-neutron-names-required.patch
components/openstack/heat/patches/06-heat-keystone-setup.patch
components/openstack/heat/patches/07-heat-plugin-defaults.patch
components/openstack/heat/patches/08-disable-sslv3.patch
components/openstack/horizon/Makefile
components/openstack/horizon/angular-bootstrap.license
components/openstack/horizon/angular-lrdragndrop.license
components/openstack/horizon/files/branding/css/solaris.css
components/openstack/horizon/files/branding/css/solaris1.css
components/openstack/horizon/files/branding/css/solaris2.css
components/openstack/horizon/files/branding/img/OpenStack_banner.png
components/openstack/horizon/files/branding/img/Openstack_banner.png
components/openstack/horizon/files/branding/img/dropdown_dwn.png
components/openstack/horizon/files/branding/img/dropdown_ena.png
components/openstack/horizon/files/branding/theme/_stylesheets.html
components/openstack/horizon/files/local_settings.py
components/openstack/horizon/files/openstack-dashboard-http.conf
components/openstack/horizon/files/openstack-dashboard-tls.conf
components/openstack/horizon/files/overrides.py
components/openstack/horizon/font-awesome.license
components/openstack/horizon/horizon.license
components/openstack/horizon/horizon.p5m
components/openstack/horizon/magic-search.license
components/openstack/horizon/patches/01-osprofiler.patch
components/openstack/horizon/patches/02-change-angular-imports.patch
components/openstack/horizon/patches/02-remove_js.patch
components/openstack/horizon/patches/03-remove-qunit.patch
components/openstack/horizon/patches/04-remove-jasmine.patch
components/openstack/horizon/patches/05-disable-unsupported-bootsource.patch
components/openstack/horizon/patches/06-remove-security-groups.patch
components/openstack/horizon/patches/07-remove-image-source.patch
components/openstack/horizon/patches/08-_get_reachable_subnets.patch
components/openstack/horizon/patches/11-requirements.patch
components/openstack/horizon/patches/12-CVE-2015-3988.patch
components/openstack/horizon/patches/13-CVE-2015-3219.patch
components/openstack/horizon/patches/14-add-zfs-image-format.patch
components/openstack/horizon/patches/15-fix-static-root-setting.patch
components/openstack/horizon/patches/99-remove.xstatic.patch-proto
components/openstack/horizon/smart-table.license
components/openstack/horizon/termjs.license
components/openstack/ironic/Makefile
components/openstack/ironic/files/drivers/modules/solaris_ipmitool.py
components/openstack/ironic/files/drivers/solaris.py
components/openstack/ironic/files/ironic-api.xml
components/openstack/ironic/files/ironic-conductor.xml
components/openstack/ironic/files/ironic-db.xml
components/openstack/ironic/files/ironic-keystone-setup.sh
components/openstack/ironic/files/ironic-manifest.ksh
components/openstack/ironic/files/ironic-upgrade
components/openstack/ironic/files/ironic-upgrade.xml
components/openstack/ironic/files/ironic.conf
components/openstack/ironic/ironic.license
components/openstack/ironic/ironic.p5m
components/openstack/ironic/patches/01-requirements.patch
components/openstack/ironic/patches/02-driver-entry.patch
components/openstack/ironic/patches/03-boot-device.patch
components/openstack/keystone/Makefile
components/openstack/keystone/files/keystone
components/openstack/keystone/files/keystone-paste.ini
components/openstack/keystone/files/keystone-upgrade
components/openstack/keystone/files/keystone.conf
components/openstack/keystone/files/keystone.stencil
components/openstack/keystone/files/keystone.xml
components/openstack/keystone/keystone.license
components/openstack/keystone/keystone.p5m
components/openstack/keystone/patches/CVE-2015-3646.patch
components/openstack/keystone/patches/CVE-2015-7546.patch
components/openstack/keystone/patches/launchpad-1384775.patch
components/openstack/keystone/patches/launchpad-1404354.patch
components/openstack/keystone/patches/requirements.patch
components/openstack/keystone/patches/sample-data.sh.patch
components/openstack/neutron/Makefile
components/openstack/neutron/files/agent/evs_l3_agent.py
components/openstack/neutron/files/agent/solaris/dhcp.py
components/openstack/neutron/files/agent/solaris/interface.py
components/openstack/neutron/files/agent/solaris/net_lib.py
components/openstack/neutron/files/agent/solaris/ra.py
components/openstack/neutron/files/dhcp_agent.ini
components/openstack/neutron/files/evs/migrate/neutron-kilo-migration.py
components/openstack/neutron/files/evs/plugin.py
components/openstack/neutron/files/l3_agent.ini
components/openstack/neutron/files/metadata_agent.ini
components/openstack/neutron/files/neutron-l3-agent
components/openstack/neutron/files/neutron-l3-agent.xml
components/openstack/neutron/files/neutron-upgrade
components/openstack/neutron/files/neutron.conf
components/openstack/neutron/files/neutron.exec_attr
components/openstack/neutron/files/neutron.prof_attr
components/openstack/neutron/files/services/vpn/device_drivers/solaris_ipsec.py
components/openstack/neutron/files/services/vpn/device_drivers/template/solaris/ike.secret.template
components/openstack/neutron/files/services/vpn/device_drivers/template/solaris/ike.template
components/openstack/neutron/files/services/vpn/device_drivers/template/solaris/ikev2.secret.template
components/openstack/neutron/files/services/vpn/device_drivers/template/solaris/ikev2.template
components/openstack/neutron/files/services/vpn/device_drivers/template/solaris/ipsecinit.conf.template
components/openstack/neutron/files/vpn_agent.ini
components/openstack/neutron/neutron-vpnaas.license
components/openstack/neutron/neutron.license
components/openstack/neutron/neutron.p5m
components/openstack/neutron/patches/01-dhcp-agent-add-solaris.patch
components/openstack/neutron/patches/02-l3-agent-add-solaris.patch
components/openstack/neutron/patches/03-metadata-agent-add-solaris.patch
components/openstack/neutron/patches/03-metadata-driver-solaris.patch
components/openstack/neutron/patches/04-requirements.patch
components/openstack/neutron/patches/05-disable-sslv3.patch
components/openstack/neutron/patches/06-dhcp-port-sched.patch
components/openstack/neutron/patches/07-launchpad-1255441.patch
components/openstack/neutron/vpnaas_patches/01-vpn_db_add_solaris.patch
components/openstack/nova/Makefile
components/openstack/nova/files/nova-compute
components/openstack/nova/files/nova-upgrade
components/openstack/nova/files/nova.conf
components/openstack/nova/files/release
components/openstack/nova/files/solariszones/driver.py
components/openstack/nova/nova.license
components/openstack/nova/nova.p5m
components/openstack/nova/patches/01-Solaris-vm_mode.patch
components/openstack/nova/patches/02-requirements.patch
components/openstack/nova/patches/03-Solaris-flavors.patch
components/openstack/nova/patches/04-compute-hvtype.patch
components/openstack/nova/patches/05-force-driver-snapshot.patch
components/openstack/nova/patches/05-launchpad-1391782.patch
components/openstack/nova/patches/06-Solaris-dev-name.patch
components/openstack/nova/patches/07-CVE-2015-0259.patch
components/openstack/nova/patches/08-confirm_migration_context.patch
components/openstack/nova/patches/08-disable-sslv3.patch
components/openstack/nova/patches/09-CVE-2015-3280.patch
components/openstack/nova/patches/09-generate_sample.patch
components/openstack/nova/patches/10-launchpad-1356552.patch
components/openstack/nova/patches/10-launchpad-1486590.patch
components/openstack/nova/patches/11-launchpad-1377644.patch
components/openstack/nova/patches/12-launchpad-1397153.patch
components/openstack/swift/Makefile
components/openstack/swift/files/swift-upgrade
components/openstack/swift/patches/CVE-2015-1856.patch
components/openstack/swift/patches/CVE-2015-5223.patch
components/openstack/swift/patches/remove_PyECLib.patch
components/openstack/swift/patches/requirements.patch
components/openstack/swift/swift.license
components/openstack/swift/swift.p5m
components/python/aioeventlet/Makefile
components/python/aioeventlet/aioeventlet-PYVER.p5m
components/python/aioeventlet/aioeventlet.license
components/python/barbicanclient/Makefile
components/python/barbicanclient/barbicanclient-PYVER.p5m
components/python/barbicanclient/barbicanclient.license
components/python/ceilometerclient/Makefile
components/python/ceilometerclient/ceilometerclient-PYVER.p5m
components/python/ceilometerclient/patches/01-requirements.patch
components/python/cinderclient/Makefile
components/python/cinderclient/cinderclient-PYVER.p5m
components/python/cinderclient/patches/01-requirements.patch
components/python/cliff/Makefile
components/python/cliff/cliff-PYVER.p5m
components/python/cliff/cliff.license
components/python/cliff/patches/01-requirements.patch
components/python/django_openstack_auth/Makefile
components/python/django_openstack_auth/django_openstack_auth-PYVER.p5m
components/python/django_openstack_auth/patches/02-launchpad-1451934.patch
components/python/dnspython/Makefile
components/python/eventlet/Makefile
components/python/eventlet/eventlet-PYVER.p5m
components/python/eventlet/eventlet.license
components/python/extras/Makefile
components/python/extras/extras-PYVER.p5m
components/python/fixtures/Makefile
components/python/fixtures/fixtures-PYVER.p5m
components/python/fixtures/fixtures.license
components/python/glance_store/Makefile
components/python/glance_store/glance_store-PYVER.p5m
components/python/glance_store/glance_store.license
components/python/glance_store/patches/no-tests.patch
components/python/glanceclient/Makefile
components/python/glanceclient/glanceclient-PYVER.p5m
components/python/glanceclient/patches/01-requirements.patch
components/python/greenlet/Makefile
components/python/heatclient/Makefile
components/python/heatclient/heatclient-PYVER.p5m
components/python/heatclient/patches/01-requirements.patch
components/python/ironicclient/Makefile
components/python/ironicclient/ironicclient-PYVER.p5m
components/python/ironicclient/ironicclient.license
components/python/ironicclient/patches/01-boot-device-wanboot.patch
components/python/keystoneclient/Makefile
components/python/keystoneclient/keystoneclient-PYVER.p5m
components/python/keystoneclient/patches/01-requirements.patch
components/python/keystoneclient/patches/CVE-2015-1852.patch
components/python/keystoneclient/patches/launchpad-1455673.patch
components/python/keystoneclient/patches/launchpad-1498247.patch
components/python/keystoneclient/patches/nopycrypto.patch
components/python/keystonemiddleware/Makefile
components/python/keystonemiddleware/keystonemiddleware-PYVER.p5m
components/python/keystonemiddleware/keystonemiddleware.license
components/python/keystonemiddleware/patches/CVE-2015-1852.patch
components/python/keystonemiddleware/patches/CVE-2015-7546.patch
components/python/keystonemiddleware/patches/launchpad-1497082.patch
components/python/keystonemiddleware/patches/nopycrypto.patch
components/python/kombu/Makefile
components/python/kombu/kombu-PYVER.p5m
components/python/kombu/kombu.license
components/python/kombu/patches/01-python-2.7.11.patch
components/python/linecache2/Makefile
components/python/linecache2/linecache2-PYVER.p5m
components/python/linecache2/linecache2.license
components/python/msgpack/Makefile
components/python/msgpack/msgpack-PYVER.p5m
components/python/msgpack/msgpack.license
components/python/neutronclient/Makefile
components/python/neutronclient/neutronclient-PYVER.p5m
components/python/neutronclient/patches/01-requirements.patch
components/python/novaclient/Makefile
components/python/novaclient/novaclient-PYVER.p5m
components/python/novaclient/patches/01-novaclient-migration-list.patch
components/python/novaclient/patches/02-requirements.patch
components/python/openstackclient/Makefile
components/python/openstackclient/openstackclient-PYVER.p5m
components/python/openstackclient/openstackclient.license
components/python/openstackclient/patches/01-requirements.patch
components/python/oslo.concurrency/Makefile
components/python/oslo.concurrency/oslo.concurrency-PYVER.p5m
components/python/oslo.concurrency/oslo.concurrency.license
components/python/oslo.config/Makefile
components/python/oslo.config/oslo.config-PYVER.p5m
components/python/oslo.config/oslo.config.license
components/python/oslo.config/patches/launchpad-1438314.patch
components/python/oslo.config/patches/requirements.patch
components/python/oslo.context/Makefile
components/python/oslo.context/oslo.context-PYVER.p5m
components/python/oslo.context/oslo.context.license
components/python/oslo.db/Makefile
components/python/oslo.db/oslo.db-PYVER.p5m
components/python/oslo.db/oslo.db.license
components/python/oslo.i18n/Makefile
components/python/oslo.i18n/oslo.i18n-PYVER.p5m
components/python/oslo.i18n/oslo.i18n.license
components/python/oslo.log/Makefile
components/python/oslo.log/oslo.log-PYVER.p5m
components/python/oslo.log/oslo.log.license
components/python/oslo.log/patches/01-fix-syslog.patch
components/python/oslo.messaging/Makefile
components/python/oslo.messaging/oslo.messaging-PYVER.p5m
components/python/oslo.messaging/oslo.messaging.license
components/python/oslo.messaging/patches/01-disable-sslv3.patch
components/python/oslo.middleware/Makefile
components/python/oslo.middleware/oslo.middleware-PYVER.p5m
components/python/oslo.middleware/oslo.middleware.license
components/python/oslo.policy/Makefile
components/python/oslo.policy/oslo.policy-PYVER.p5m
components/python/oslo.policy/oslo.policy.license
components/python/oslo.serialization/Makefile
components/python/oslo.serialization/oslo.serialization-PYVER.p5m
components/python/oslo.serialization/oslo.serialization.license
components/python/oslo.utils/Makefile
components/python/oslo.utils/oslo.utils-PYVER.p5m
components/python/oslo.utils/oslo.utils.license
components/python/oslo.utils/patches/keepalive.patch
components/python/oslo.versionedobjects/Makefile
components/python/oslo.versionedobjects/oslo.versionedobjects-PYVER.p5m
components/python/oslo.versionedobjects/oslo.versionedobjects.license
components/python/oslo.vmware/Makefile
components/python/oslo.vmware/oslo.vmware-PYVER.p5m
components/python/oslo.vmware/oslo.vmware.license
components/python/pbr/Makefile
components/python/pbr/pbr-PYVER.p5m
components/python/pint/Makefile
components/python/pint/pint-PYVER.p5m
components/python/psutil/Makefile
components/python/psutil/patches/01-remove-non-solaris.patch
components/python/psutil/psutil-PYVER.p5m
components/python/pycadf/Makefile
components/python/pycadf/pycadf-PYVER.p5m
components/python/pycadf/pycadf.license
components/python/python-mimeparse/Makefile
components/python/python-mimeparse/patches/use_setuptools.patch
components/python/python-mimeparse/python-mimeparse-PYVER.p5m
components/python/saharaclient/Makefile
components/python/saharaclient/patches/01-requirements.patch
components/python/saharaclient/saharaclient-PYVER.p5m
components/python/saharaclient/saharaclient.license
components/python/semantic-version/Makefile
components/python/semantic-version/semantic-version-PYVER.p5m
components/python/sqlalchemy-migrate/Makefile
components/python/sqlalchemy-migrate/patches/01-fix-warnings.patch
components/python/sqlalchemy-migrate/sqlalchemy-migrate-PYVER.p5m
components/python/sqlalchemy-migrate/sqlalchemy-migrate.license
components/python/sqlparse/Makefile
components/python/sqlparse/sqlparse-PYVER.p5m
components/python/sqlparse/sqlparse.license
components/python/stevedore/Makefile
components/python/stevedore/patches/01-requirements.patch
components/python/stevedore/stevedore-PYVER.p5m
components/python/stevedore/stevedore.license
components/python/swiftclient/Makefile
components/python/taskflow/Makefile
components/python/taskflow/taskflow-PYVER.p5m
components/python/taskflow/taskflow.license
components/python/testresources/Makefile
components/python/testresources/testresources-PYVER.p5m
components/python/testresources/testresources.license
components/python/testscenarios/Makefile
components/python/testscenarios/testscenarios-PYVER.p5m
components/python/testscenarios/testscenarios.license
components/python/testtools/Makefile
components/python/testtools/patches/01-requirements.patch
components/python/testtools/testtools-PYVER.p5m
components/python/testtools/testtools.license
components/python/traceback2/Makefile
components/python/traceback2/traceback2-PYVER.p5m
components/python/traceback2/traceback2.license
components/python/trollius/Makefile
components/python/trollius/trollius-PYVER.p5m
components/python/troveclient/Makefile
components/python/troveclient/troveclient-PYVER.p5m
components/python/urllib3/Makefile
components/python/urllib3/patches/remove_bundled_components.patch
components/python/urllib3/urllib3-PYVER.p5m
components/python/urllib3/urllib3.license
components/rabbitmq/Makefile
components/rabbitmq/files/rabbitmq-env.conf
components/rabbitmq/files/rabbitmq-env.conf.5
components/rabbitmq/files/rabbitmq-plugins.1
components/rabbitmq/files/rabbitmq-server.1
components/rabbitmq/files/rabbitmq.config
components/rabbitmq/files/rabbitmq.xml
components/rabbitmq/files/rabbitmqctl.1
components/rabbitmq/files/rmq-mplex
components/rabbitmq/files/svc-rabbitmq
components/rabbitmq/patches/makefile.patch
components/rabbitmq/patches/sh-to-bash.patch
components/rabbitmq/rabbitmq.license
components/rabbitmq/rabbitmq.p5m
components/rabbitmq/rabbitmq.xml
components/rabbitmq/rmq-mplex
components/rabbitmq/svc-rabbitmq
transforms/python-3-groups
transforms/standard-python-libraries
--- a/components/openstack/cinder/Makefile	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/Makefile	Fri Feb 05 17:54:17 2016 -0500
@@ -20,26 +20,26 @@
 #
 
 #
-# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		cinder
-COMPONENT_CODENAME=	juno
-COMPONENT_VERSION=	2014.2.2
-COMPONENT_BE_VERSION=	2014.2
+COMPONENT_CODENAME=	kilo
+COMPONENT_VERSION=	2015.1.2
+COMPONENT_BE_VERSION=	2015.1
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:2c779bf9d208163af6c425da9043bbdcb345cebc5c118198482b94062862a117
+    sha256:8baa42a3008b0bb0870df678237c0d06ab8ee148b3bf5c00c6b95cfe05c52333
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_SIG_URL=	$(COMPONENT_ARCHIVE_URL).asc
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/cinder
 IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
 
-TPNO=			21819
+TPNO=			25784
 
 include $(WS_MAKE_RULES)/prep.mk
 include $(WS_MAKE_RULES)/setup.py.mk
@@ -55,31 +55,40 @@
 PKG_MACROS +=		PYVER=$(PYTHON_VERSIONS)
 PKG_MACROS +=		PYV=$(shell echo $(PYTHON_VERSIONS) | tr -d .)
 
+COMPONENT_POST_UNPACK_ACTION += \
+    $(MKDIR) $(SOURCE_DIR)/cinder/volume/drivers/zfssa; \
+    $(CP) files/zfssa/zfssaiscsi.py \
+	$(SOURCE_DIR)/cinder/volume/drivers/zfssa
+
 #
 # cinder-api, cinder-backup, cinder-scrubber, and cinder-volume depend
 # on cinder-db, and cinder-upgrade so copy all of the service
 # manifests into the proto directory for pkgdepend(1) to find.
 #
 COMPONENT_POST_INSTALL_ACTION += \
-	($(MKDIR) $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
-	 $(CP) \
-	     files/cinder-api.xml \
-	     files/cinder-backup.xml \
-	     files/cinder-db.xml \
-	     files/cinder-scheduler.xml \
-	     files/cinder-upgrade.xml \
-	     files/cinder-volume.xml \
-	     $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
-         $(CP) \
-	     files/solaris/solarisfc.py \
-             files/solaris/solarisiscsi.py \
-	     $(PROTO_DIR)$(PYTHON_LIB)/cinder/brick/initiator; \
-	 $(MKDIR) $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris; \
-	 $(TOUCH) $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris/__init__.py; \
-	 $(CP) files/solaris/zfs.py $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris; \
-	 $(MKDIR) $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/zfssa; \
-	 $(CP) files/zfssa/cinder.akwf $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/zfssa); \
-	 $(PYTHON) -m compileall $(PROTO_DIR)/$(PYTHON_VENDOR_PACKAGES)
+    ($(MKDIR) $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
+     $(CP) \
+         files/cinder-api.xml \
+         files/cinder-backup.xml \
+         files/cinder-db.xml \
+         files/cinder-scheduler.xml \
+         files/cinder-upgrade.xml \
+         files/cinder-volume.xml \
+         $(PROTO_DIR)/lib/svc/manifest/application/openstack; \
+     $(CP) \
+         files/solaris/solarisfc.py \
+         files/solaris/solarisiscsi.py \
+         $(PROTO_DIR)$(PYTHON_LIB)/cinder/brick/initiator; \
+     $(MKDIR) $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris; \
+     $(TOUCH) \
+         $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris/__init__.py; \
+     $(CP) \
+         files/solaris/nfs.py \
+         files/solaris/zfs.py \
+         $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/solaris; \
+     $(CP) files/zfssa/cinder.akwf \
+         $(PROTO_DIR)$(PYTHON_LIB)/cinder/volume/drivers/zfssa); \
+     $(PYTHON) -m compileall $(PROTO_DIR)/$(PYTHON_VENDOR_PACKAGES)
 
 # common targets
 build:		$(BUILD_NO_ARCH)
@@ -92,11 +101,12 @@
 
 
 REQUIRED_PACKAGES += cloud/openstack/openstack-common
+REQUIRED_PACKAGES += file/gnu-coreutils
 REQUIRED_PACKAGES += library/python/eventlet-27
 REQUIRED_PACKAGES += library/python/iniparse-27
 REQUIRED_PACKAGES += library/python/ipython-27
 REQUIRED_PACKAGES += library/python/oslo.config-27
-REQUIRED_PACKAGES += library/python/python-mysql-27
+REQUIRED_PACKAGES += library/python/oslo.utils-27
 REQUIRED_PACKAGES += library/python/sqlalchemy-27
 REQUIRED_PACKAGES += library/python/sqlalchemy-migrate-27
 REQUIRED_PACKAGES += system/core-os
--- a/components/openstack/cinder/cinder.license	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/cinder.license	Fri Feb 05 17:54:17 2016 -0500
@@ -210,33 +210,46 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 
-# Interactive shell based on Django:
+============================================================
+
+# Copyright (c) 2013, Nebula, Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
 #
-# Copyright (c) 2005, the Lawrence Journal-World
-# All rights reserved.
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     1. Redistributions of source code must retain the above copyright notice,
-#        this list of conditions and the following disclaimer.
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 #
-#     2. Redistributions in binary form must reproduce the above copyright
-#        notice, this list of conditions and the following disclaimer in the
-#        documentation and/or other materials provided with the distribution.
+# Colorizer Code is borrowed from Twisted:
+# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
 #
-#     3. Neither the name of Django nor the names of its contributors may be
-#        used to endorse or promote products derived from this software without
-#        specific prior written permission.
+#    Permission is hereby granted, free of charge, to any person obtaining
+#    a copy of this software and associated documentation files (the
+#    "Software"), to deal in the Software without restriction, including
+#    without limitation the rights to use, copy, modify, merge, publish,
+#    distribute, sublicense, and/or sell copies of the Software, and to
+#    permit persons to whom the Software is furnished to do so, subject to
+#    the following conditions:
+#
+#    The above copyright notice and this permission notice shall be
+#    included in all copies or substantial portions of the Software.
 #
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+#    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+#    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+#    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+#    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+============================================================
+
--- a/components/openstack/cinder/cinder.p5m	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/cinder.p5m	Fri Feb 05 17:54:17 2016 -0500
@@ -20,7 +20,7 @@
 #
 
 #
-# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 set name=pkg.fmri \
@@ -28,7 +28,7 @@
 set name=pkg.summary value="OpenStack Cinder (Block Storage Service)"
 set name=pkg.description \
     value="OpenStack Cinder provides an infrastructure for managing block storage volumes in OpenStack. It allows block devices to be exposed and connected to compute instances for expanded storage, better performance and integration with enterprise storage platforms."
-set name=pkg.human-version value="Juno $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Kilo $(COMPONENT_VERSION)"
 set name=com.oracle.info.description \
     value="Cinder, the OpenStack block storage service"
 set name=com.oracle.info.tpno value=$(TPNO)
@@ -42,7 +42,7 @@
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
 set name=openstack.upgrade-id reboot-needed=true value=$(COMPONENT_BE_VERSION)
 set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2014/054 \
-    value=PSARC/2014/208 value=PSARC/2015/110
+    value=PSARC/2014/208 value=PSARC/2015/110 value=PSARC/2015/535
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 set name=variant.opensolaris.zone value=global value=nonglobal
 #
@@ -75,7 +75,7 @@
 file files/cinder-upgrade path=lib/svc/method/cinder-upgrade
 file files/cinder-volume path=lib/svc/method/cinder-volume
 file files/cinder-volume-setup path=lib/svc/method/cinder-volume-setup
-file path=usr/bin/cinder-manage pkg.depend.bypass-generate=.*/bpython.*
+file path=usr/bin/cinder-manage
 file usr/bin/cinder-api path=usr/lib/cinder/cinder-api mode=0555
 file usr/bin/cinder-backup path=usr/lib/cinder/cinder-backup mode=0555
 file usr/bin/cinder-scheduler path=usr/lib/cinder/cinder-scheduler mode=0555
@@ -92,7 +92,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder-$(COMPONENT_VERSION)-py$(PYVER).egg-info/top_level.txt
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/auth.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/admin_actions.py
@@ -123,6 +122,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_replication.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_tenant_attribute.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_transfer.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_type_access.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_type_encryption.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/contrib/volume_unmanage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/extensions.py
@@ -131,9 +131,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/middleware/fault.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/middleware/sizelimit.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/openstack/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/openstack/urlmap.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/openstack/volume/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/openstack/volume/versions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/openstack/wsgi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/schemas/atom-link.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/schemas/v1.1/extension.rng
@@ -144,7 +141,6 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/schemas/v1.1/qos_associations.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/schemas/v1.1/qos_spec.rng
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/schemas/v1.1/qos_specs.rng
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/sizelimit.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/urlmap.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v1/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/v1/limits.py
@@ -179,9 +175,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/api/xmlutil.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/chunkeddriver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/ceph.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/nfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/swift.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/drivers/tsm.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/backup/manager.py
@@ -197,17 +195,36 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/linuxscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/solarisfc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/initiator/solarisiscsi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iscsi/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/iscsi/iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/local_dev/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/local_dev/lvm.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/remotefs/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/brick/remotefs/remotefs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/all.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/api.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/backup.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/manage.py \
+    pkg.depend.bypass-generate=.*/bpython.* \
+    pkg.depend.bypass-generate=.*/oslo_db.* \
+    pkg.depend.bypass-generate=.*/oslo_log.* \
+    pkg.depend.bypass-generate=.*/oslo_messaging.* \
+    pkg.depend.bypass-generate=.*/sqlalchemy.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/rtstool.py \
+    pkg.depend.bypass-generate=.*/rtslib.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/scheduler.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/volume.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
+file \
+    path=usr/lib/python$(PYVER)/vendor-packages/cinder/cmd/volume_usage_audit.py \
+    pkg.depend.bypass-generate=.*/oslo_log.*
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/config.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/common/sqlalchemyutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/aggregate_states.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/compute/nova.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/consistencygroup/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/consistencygroup/api.py
@@ -254,8 +271,26 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/024_add_replication_support.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/025_add_consistencygroup.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/026_add_consistencygroup_quota_class.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/027_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/028_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/029_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/030_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/031_placeholder.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/032_add_volume_type_projects.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/032_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/033_add_encryption_unique_key.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/034_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/034_volume_type_add_desc_column.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/035_add_provider_id_column.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/036_add_provider_id_column_to_snapshots.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/037_add_cgsnapshot_id_column_to_consistencygroups.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/038_add_driver_initiator_data_table.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/039_add_parent_id_to_backups.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/040_add_volume_attachment.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/040_sqlite_downgrade.sql
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/041_add_modified_at_column_to_service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/migration.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/db/sqlalchemy/models.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/exception.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/flow_utils.py
@@ -272,32 +307,27 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/key_mgr.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/keymgr/not_implemented_key_mgr.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/manager.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/objects/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/objects/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/objects/fields.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/objects/snapshot.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/objects/volume.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/README
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/_i18n.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/config/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/config/generator.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/context.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/eventlet_backdoor.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/excutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/fileutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/gettextutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/imageutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/importutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/jsonutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/local.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/lockutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/log.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/log_handler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/loopingcall.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/middleware/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/middleware/base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/middleware/catch_errors.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/middleware/request_id.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/network_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/periodic_task.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/policy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/processutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/request_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/base_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/base_handler.py
@@ -310,14 +340,8 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/filters/json_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/scheduler/weights/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/service.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/sslutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/strutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/systemd.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/test.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/threadgroup.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/timeutils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/units.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/uuidutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/openstack/common/versionutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/policy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/quota.py
@@ -327,10 +351,14 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/rpc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/evaluator/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/evaluator/evaluator.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filter_scheduler.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filters/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filters/affinity_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filters/capacity_filter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filters/driver_filter.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/filters/instance_locality_filter.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/flows/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/flows/create_volume.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/host_manager.py
@@ -341,6 +369,7 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/capacity.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/chance.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/goodness.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/scheduler/weights/volume_number.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/service.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/ssh_utils.py
@@ -355,26 +384,30 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/driver.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/block_device.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/coraid.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/cloudbyte/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/cloudbyte/cloudbyte.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/cloudbyte/options.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/datera.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/dell/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/dell/dell_storagecenter_api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/dell/dell_storagecenter_common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/dell/dell_storagecenter_fc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/dell/dell_storagecenter_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_cli_fc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_cli_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_fast.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_fc.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_https.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_masking.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_provision.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_provision_v3.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vmax_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/emc_vnx_cli.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/emc/xtremio.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/eqlx.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/fujitsu_eternus_dx_common.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/fujitsu_eternus_dx_fc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/fujitsu_eternus_dx_iscsi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/fusionio/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/fusionio/ioControl.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/glusterfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/hds/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/hds/hds.py
@@ -390,13 +423,11 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/hitachi/hbsd_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/hitachi/hbsd_snm2.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/huawei_dorado.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/huawei_hvs.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/huawei_t.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/huawei_18000.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/huawei_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/rest_common.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/huawei/ssh_common.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/flashsystem.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/gpfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/ibmnas.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/storwize_svc/__init__.py
@@ -406,30 +437,40 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/ibm/xiv_ds8k.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/lvm.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/block_7mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/block_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/block_cmode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/client/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/client/api.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/client/client_7mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/client/client_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/fc_7mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/fc_cmode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/nfs_7mode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/nfs_base.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/dataontap/ssc_cmode.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/eseries/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/eseries/client.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/eseries/iscsi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/iscsi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/nfs.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/eseries/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/options.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/ssc_utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/netapp/utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/iscsi.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/jsonrpc.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/nfs.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/options.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nexenta/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/nimble.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/openvstorage.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/prophetstor/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/prophetstor/dpl_fc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/prophetstor/dpl_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/prophetstor/dplcommon.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/prophetstor/options.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/pure.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/quobyte.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/rbd.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/remotefs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/__init__.py
@@ -440,34 +481,24 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_lefthand_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_lefthand_rest_proxy.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_msa_client.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_msa_common.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/hp/hp_msa_fc.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/san.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/san/solaris.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/scality.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/sheepdog.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/smbfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solaris/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solaris/nfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solaris/zfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/solidfire.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/srb.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/violin/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/violin/v6000_common.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/violin/v6000_fcp.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/violin/v6000_iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/__init__.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/api.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/datastore.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/error_util.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/io_util.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/pbm.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/read_write_util.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/vim.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/vim_util.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/exceptions.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/vmdk.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/vmware_images.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/volumeops.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/wsdl/5.5/core-types.xsd
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/wsdl/5.5/pbm-messagetypes.xsd
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/wsdl/5.5/pbm-types.xsd
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/wsdl/5.5/pbm.wsdl
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/vmware/wsdl/5.5/pbmService.wsdl
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/constants.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/remotefs.py
@@ -475,11 +506,13 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/vhdutils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/windows.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/windows/windows_utils.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zadara.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/xio.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/cinder.akwf
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/restclient.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/webdavclient.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/zfssaiscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/zfssanfs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/drivers/zfssa/zfssarest.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/api/__init__.py
@@ -488,10 +521,20 @@
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/manager/__init__.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/manager/create_volume.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/flows/manager/manage_existing.py
-file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/iscsi.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/manager.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/qos_specs.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/rpcapi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/__init__.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/cxt.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/driver.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/fake.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/iet.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/iscsi.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/iser.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/lio.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/scst.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/targets/tgt.py
+file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/throttling.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/utils.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/volume/volume_types.py
 file path=usr/lib/python$(PYVER)/vendor-packages/cinder/wsgi.py
@@ -522,30 +565,28 @@
 #
 license cinder.license license="Apache v2.0"
 
-# force a group dependency on the optional anyjson; pkgdepend work is needed to
-# flush this out.
-depend type=group fmri=library/python/anyjson-$(PYV)
+# force a group dependency on the optional oslo.vmware; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/oslo.vmware-$(PYV)
 
-# force a group dependency on the optional netaddr; pkgdepend work is needed to
-# flush this out.
-depend type=group fmri=library/python/netaddr-$(PYV)
+# force a group dependency on the optional pyopenssl; pkgdepend work is needed
+# to flush this out.
+depend type=group fmri=library/python/pyopenssl-$(PYV)
 
 # force a group dependency on the optional pywbem; pkgdepend work is needed to
 # flush this out.
 depend type=group fmri=library/python/pywbem-$(PYV)
 
-# force a group dependency on the optional requests; pkgdepend work is needed to
-# flush this out.
-depend type=group fmri=library/python/requests-$(PYV)
-
-# force a group dependency on the optional simplejson; pkgdepend work is needed
-# to flush this out.
-depend type=group fmri=library/python/simplejson-$(PYV)
-
 # force a group dependency on the optional suds; pkgdepend work is needed to
 # flush this out.
 depend type=group fmri=library/python/suds-$(PYV)
 
+# To upgrade to Kilo version, Juno version of the package must be on the system
+depend type=origin fmri=cloud/openstack/[email protected] root-image=true
+
+# force a dependency on package delivering GNU du(1M)
+depend type=require fmri=__TBD pkg.debug.depend.file=usr/bin/gdu
+
 # force a dependency on package delivering fcinfo(1M)
 depend type=require fmri=__TBD pkg.debug.depend.file=usr/sbin/fcinfo
 
@@ -587,19 +628,41 @@
 # force a dependency on lxml; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/lxml-$(PYV)
 
+# force a dependency on netaddr; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/netaddr-$(PYV)
+
 # force a dependency on novaclient; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/novaclient-$(PYV)
 
+# force a dependency on oslo.concurrency; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.concurrency-$(PYV)
+
+# force a dependency on oslo.context; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.context-$(PYV)
+
 # force a dependency on oslo.db; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/oslo.db-$(PYV)
 
 # force a dependency on oslo.i18n; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/oslo.i18n-$(PYV)
 
+# force a dependency on oslo.log; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/oslo.log-$(PYV)
+
 # force a dependency on oslo.messaging; pkgdepend work is needed to flush this
 # out.
 depend type=require fmri=library/python/oslo.messaging-$(PYV)
 
+# force a dependency on oslo.middleware; pkgdepend work is needed to flush this
+# out.
+depend type=require fmri=library/python/oslo.middleware-$(PYV)
+
+# force a dependency on oslo.serialization; pkgdepend work is needed to flush
+# this out.
+depend type=require fmri=library/python/oslo.serialization-$(PYV)
+
 # force a dependency on osprofiler; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/osprofiler-$(PYV)
 
@@ -616,6 +679,15 @@
 # force a dependency on pbr; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/pbr-$(PYV)
 
+# force a dependency on pyparsing; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/pyparsing-$(PYV)
+
+# force a dependency on requests; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/requests-$(PYV)
+
+# force a dependency on retrying; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/retrying-$(PYV)
+
 # force a dependency on routes; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/routes-$(PYV)
 
@@ -640,3 +712,6 @@
 # force a dependency on the Solaris Install library; pkgdepend work is needed to
 # flush this out.
 depend type=require fmri=system/library/install
+
+# force a dependency on rad-python; pkgdepend work is needed to flush this out.
+depend type=require fmri=system/management/rad/client/rad-python
--- a/components/openstack/cinder/files/cinder-upgrade	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/cinder-upgrade	Fri Feb 05 17:54:17 2016 -0500
@@ -1,6 +1,6 @@
 #!/usr/bin/python2.7
 
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -30,44 +30,18 @@
 
 CINDER_CONF_MAPPINGS = {
     # Deprecated group/name
-    ('DEFAULT', 'rabbit_durable_queues'): ('DEFAULT', 'amqp_durable_queues'),
-    ('rpc_notifier2', 'topics'): ('DEFAULT', 'notification_topics'),
-    ('DEFAULT', 'osapi_compute_link_prefix'):
-        ('DEFAULT', 'osapi_volume_base_URL'),
-    ('DEFAULT', 'backup_service'): ('DEFAULT', 'backup_driver'),
-    ('DEFAULT', 'pybasedir'): ('DEFAULT', 'state_path'),
-    ('DEFAULT', 'log_config'): ('DEFAULT', 'log_config_append'),
-    ('DEFAULT', 'logfile'): ('DEFAULT', 'log_file'),
-    ('DEFAULT', 'logdir'): ('DEFAULT', 'log_dir'),
-    ('DEFAULT', 'num_iscsi_scan_tries'):
-        ('DEFAULT', 'num_volume_device_scan_tries'),
-    ('DEFAULT', 'zfssa_host'): ('DEFAULT', 'san_ip'),
-    ('DEFAULT', 'zfssa_auth_user'): ('DEFAULT', 'san_login'),
-    ('DEFAULT', 'zfssa_auth_password'): ('DEFAULT', 'san_password'),
+    ('DEFAULT', 'eqlx_chap_password'): ('DEFAULT', 'chap_password'),
+    ('DEFAULT', 'eqlx_use_chap'): ('DEFAULT', 'use_chap_auth'),
+    ('DEFAULT', 'eqlx_chap_login'): ('DEFAULT', 'chap_username'),
+    ('DEFAULT', 'host'): ('DEFAULT', 'backend_host'),
     ('DEFAULT', 'db_backend'): ('database', 'backend'),
-    ('DEFAULT', 'sql_connection'): ('database', 'connection'),
-    ('DATABASE', 'sql_connection'): ('database', 'connection'),
-    ('sql', 'connection'): ('database', 'connection'),
-    ('DEFAULT', 'sql_idle_timeout'): ('database', 'idle_timeout'),
-    ('DATABASE', 'sql_idle_timeout'): ('database', 'idle_timeout'),
-    ('sql', 'idle_timeout'): ('database', 'idle_timeout'),
-    ('DEFAULT', 'sql_min_pool_size'): ('database', 'min_pool_size'),
-    ('DATABASE', 'sql_min_pool_size'): ('database', 'min_pool_size'),
-    ('DEFAULT', 'sql_max_pool_size'): ('database', 'max_pool_size'),
-    ('DATABASE', 'sql_max_pool_size'): ('database', 'max_pool_size'),
-    ('DEFAULT', 'sql_max_retries'): ('database', 'max_retries'),
-    ('DATABASE', 'sql_max_retries'): ('database', 'max_retries'),
-    ('DEFAULT', 'sql_retry_interval'): ('database', 'retry_interval'),
-    ('DATABASE', 'reconnect_interval'): ('database', 'retry_interval'),
-    ('DEFAULT', 'sql_max_overflow'): ('database', 'max_overflow'),
-    ('DATABASE', 'sqlalchemy_max_overflow'): ('database', 'max_overflow'),
-    ('DEFAULT', 'sql_connection_debug'): ('database', 'connection_debug'),
-    ('DEFAULT', 'sql_connection_trace'): ('database', 'connection_trace'),
-    ('DATABASE', 'sqlalchemy_pool_timeout'): ('database', 'pool_timeout'),
-    ('DEFAULT', 'dbapi_use_tpool'): ('database', 'use_tpool'),
-    ('DEFAULT', 'memcache_servers'):
-        ('keystone_authtoken', 'memcached_servers'),
-    ('DEFAULT', 'matchmaker_ringfile'): ('matchmaker_ring', 'ringfile'),
+    ('DEFAULT', 'osapi_max_request_body_size'):
+        ('oslo_middleware', 'max_request_body_size'),
+    ('DEFAULT', 'log-format'): (None, None),
+    ('DEFAULT', 'use-syslog'): (None, None),
+    ('DEFAULT', 'datera_api_token'): (None, None),
+    ('DEFAULT', 'enable_v1_api'): (None, None),
+    ('DEFAULT', 'log_format'): (None, None),
 }
 
 CINDER_CONF_EXCEPTIONS = [
@@ -84,18 +58,6 @@
     ('keystone_authtoken', 'signing_dir'),
 ]
 
-CINDER_MOVE_CONFIG = {
-    ('filter:authtoken', 'auth_uri'): ('keystone_authtoken', 'auth_uri'),
-    ('filter:authtoken', 'identity_uri'):
-        ('keystone_authtoken', 'identity_uri'),
-    ('filter:authtoken', 'admin_tenant_name'):
-        ('keystone_authtoken', 'admin_tenant_name'),
-    ('filter:authtoken', 'admin_user'): ('keystone_authtoken', 'admin_user'),
-    ('filter:authtoken', 'admin_password'):
-        ('keystone_authtoken', 'admin_password'),
-    ('filter:authtoken', 'signing_dir'): ('keystone_authtoken', 'signing_dir'),
-}
-
 
 def start():
     # pull out the current version of config/upgrade-id
@@ -125,14 +87,8 @@
 
         modify_conf('/etc/cinder/api-paste.ini')
 
-        # before modifying cinder.conf, move the [filter:authtoken] entries
-        # from the updated api-paste.ini to the old cinder.conf
-        move_conf('/etc/cinder/api-paste.ini', '/etc/cinder/cinder.conf',
-                  CINDER_MOVE_CONFIG)
-
         modify_conf('/etc/cinder/cinder.conf', CINDER_CONF_MAPPINGS,
                     CINDER_CONF_EXCEPTIONS)
-        modify_conf('/etc/cinder/logging.conf')
 
     config = iniparse.RawConfigParser()
     config.read('/etc/cinder/cinder.conf')
--- a/components/openstack/cinder/files/cinder.conf	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/cinder.conf	Fri Feb 05 17:54:17 2016 -0500
@@ -1,133 +1,110 @@
 [DEFAULT]
 
 #
+# Options defined in oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated.  Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in I, and will be removed in J. (boolean
+# value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+
+#
 # Options defined in oslo.messaging
 #
 
-# Use durable queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues=false
-
-# Auto-delete queues in AMQP. (boolean value)
-#amqp_auto_delete=false
-
-# Size of RPC connection pool. (integer value)
-#rpc_conn_pool_size=30
-
-# Qpid broker hostname. (string value)
-#qpid_hostname=localhost
-
-# Qpid broker port. (integer value)
-#qpid_port=5672
-
-# Qpid HA cluster host:port pairs. (list value)
-#qpid_hosts=$qpid_hostname:$qpid_port
-
-# Username for Qpid connection. (string value)
-#qpid_username=
-
-# Password for Qpid connection. (string value)
-#qpid_password=
-
-# Space separated list of SASL mechanisms to use for auth.
-# (string value)
-#qpid_sasl_mechanisms=
-
-# Seconds between connection keepalive heartbeats. (integer
-# value)
-#qpid_heartbeat=60
-
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-#qpid_protocol=tcp
-
-# Whether to disable the Nagle algorithm. (boolean value)
-#qpid_tcp_nodelay=true
-
-# The number of prefetched messages held by receiver. (integer
-# value)
-#qpid_receiver_capacity=1
-
-# The qpid topology version to use.  Version 1 is what was
-# originally used by impl_qpid.  Version 2 includes some
-# backwards-incompatible changes that allow broker federation
-# to work.  Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break.
-# (integer value)
-#qpid_topology_version=1
-
-# SSL version to use (valid only if SSL enabled). valid values
-# are TLSv1 and SSLv23. SSLv2 and SSLv3 may be available on
-# some distributions. (string value)
-#kombu_ssl_version=
-
-# SSL key file (valid only if SSL enabled). (string value)
-#kombu_ssl_keyfile=
-
-# SSL cert file (valid only if SSL enabled). (string value)
-#kombu_ssl_certfile=
-
-# SSL certification authority file (valid only if SSL
-# enabled). (string value)
-#kombu_ssl_ca_certs=
-
-# How long to wait before reconnecting in response to an AMQP
-# consumer cancel notification. (floating point value)
-#kombu_reconnect_delay=1.0
-
-# The RabbitMQ broker address where a single node is used.
-# (string value)
-#rabbit_host=localhost
-
-# The RabbitMQ broker port where a single node is used.
-# (integer value)
-#rabbit_port=5672
-
-# RabbitMQ HA cluster host:port pairs. (list value)
-#rabbit_hosts=$rabbit_host:$rabbit_port
-
-# Connect over SSL for RabbitMQ. (boolean value)
-#rabbit_use_ssl=false
-
-# The RabbitMQ userid. (string value)
-#rabbit_userid=guest
-
-# The RabbitMQ password. (string value)
-#rabbit_password=guest
-
-# The RabbitMQ login method. (string value)
-#rabbit_login_method=AMQPLAIN
-
-# The RabbitMQ virtual host. (string value)
-#rabbit_virtual_host=/
-
-# How frequently to retry connecting with RabbitMQ. (integer
-# value)
-#rabbit_retry_interval=1
-
-# How long to backoff for between retries when connecting to
-# RabbitMQ. (integer value)
-#rabbit_retry_backoff=2
-
-# Maximum number of RabbitMQ connection retries. Default is 0
-# (infinite retry count). (integer value)
-#rabbit_max_retries=0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
-# this option, you must wipe the RabbitMQ database. (boolean
-# value)
-#rabbit_ha_queues=false
-
-# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
-# (boolean value)
-#fake_rabbit=false
-
 # ZeroMQ bind address. Should be a wildcard (*), an ethernet
 # interface, or IP. The "host" option should point or resolve
 # to this address. (string value)
 #rpc_zmq_bind_address=*
 
 # MatchMaker driver. (string value)
-#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+#rpc_zmq_matchmaker=local
 
 # ZeroMQ receiver listening port. (integer value)
 #rpc_zmq_port=9501
@@ -157,7 +134,7 @@
 # Heartbeat time-to-live. (integer value)
 #matchmaker_heartbeat_ttl=600
 
-# Size of RPC greenthread pool. (integer value)
+# Size of RPC thread pool. (integer value)
 #rpc_thread_pool_size=64
 
 # Driver or drivers to handle sending notifications. (multi
@@ -289,14 +266,6 @@
 
 
 #
-# Options defined in cinder.test
-#
-
-# File name of clean sqlite db (string value)
-#sqlite_clean_db=clean.sqlite
-
-
-#
 # Options defined in cinder.wsgi
 #
 
@@ -306,17 +275,17 @@
 # with big service catalogs). (integer value)
 #max_header_line=16384
 
+# Timeout for client connections' socket operations. If an
+# incoming connection is idle for this number of seconds it
+# will be closed. A value of '0' means wait forever. (integer
+# value)
+#client_socket_timeout=900
+
 # If False, closes the client socket connection explicitly.
 # Setting it to True to maintain backward compatibility.
 # Recommended setting is set it to False. (boolean value)
 #wsgi_keep_alive=true
 
-# Timeout for client connections' socket operations. If an
-# incoming connection is idle for this number of seconds it
-# will be closed. A value of '0' means wait forever. (integer
-# value)
-#client_socket_timeout=0
-
 # Sets the value of TCP_KEEPALIVE (True/False) for each server
 # socket. (boolean value)
 #tcp_keepalive=true
@@ -378,13 +347,41 @@
 
 
 #
+# Options defined in cinder.api.views.versions
+#
+
+# Public url to use for versions endpoint. The default is
+# None, which will use the request's host_url attribute to
+# populate the URL base. If Cinder is operating behind a
+# proxy, you will want to change this to represent the proxy's
+# URL. (string value)
+#public_endpoint=<None>
+
+
+#
+# Options defined in cinder.backup.chunkeddriver
+#
+
+# Compression algorithm (None to disable) (string value)
+#backup_compression_algorithm=zlib
+
+
+#
 # Options defined in cinder.backup.driver
 #
 
 # Backup metadata version to be used when backing up volume
 # metadata. If this number is bumped, make sure the service
 # doing the restore supports the new version. (integer value)
-#backup_metadata_version=1
+backup_metadata_version=1
+
+# The number of chunks or objects, for which one Ceilometer
+# notification will be sent (integer value)
+#backup_object_number_per_notification=10
+
+# Interval, in seconds, between two progress notifications
+# reporting the backup status (integer value)
+#backup_timer_interval=120
 
 
 #
@@ -421,6 +418,44 @@
 
 
 #
+# Options defined in cinder.backup.drivers.nfs
+#
+
+# The maximum size in bytes of the files used to hold backups.
+# If the volume being backed up exceeds this size, then it
+# will be backed up into multiple files. backup_file_size must
+# be a multiple of backup_sha_block_size_bytes. (integer
+# value)
+#backup_file_size=1999994880
+
+# The size in bytes that changes are tracked for incremental
+# backups. backup_file_size has to be multiple of
+# backup_sha_block_size_bytes. (integer value)
+#backup_sha_block_size_bytes=32768
+
+# Enable or Disable the timer to send the periodic progress
+# notifications to Ceilometer when backing up the volume to
+# the backend storage. The default value is True to enable the
+# timer. (boolean value)
+#backup_enable_progress_timer=true
+
+# Base dir containing mount point for NFS share. (string
+# value)
+#backup_mount_point_base=$state_path/backup_mount
+
+# NFS share in fqdn:path, ipv4addr:path, or "[ipv6addr]:path"
+# format. (string value)
+#backup_share=<None>
+
+# Mount options passed to the NFS client. See NFS man page for
+# details. (string value)
+#backup_mount_options=<None>
+
+# Custom container to use for backups. (string value)
+#backup_container=<None>
+
+
+#
 # Options defined in cinder.backup.drivers.swift
 #
 
@@ -456,6 +491,11 @@
 # The size in bytes of Swift backup objects (integer value)
 #backup_swift_object_size=52428800
 
+# The size in bytes that changes are tracked for incremental
+# backups. backup_swift_object_size has to be multiple of
+# backup_swift_block_size. (integer value)
+#backup_swift_block_size=32768
+
 # The number of retries to make for Swift operations (integer
 # value)
 #backup_swift_retry_attempts=3
@@ -464,8 +504,11 @@
 # value)
 #backup_swift_retry_backoff=2
 
-# Compression algorithm (None to disable) (string value)
-#backup_compression_algorithm=zlib
+# Enable or Disable the timer to send the periodic progress
+# notifications to Ceilometer when backing up the volume to
+# the Swift backend storage. The default value is True to
+# enable the timer. (boolean value)
+#backup_swift_enable_progress_timer=true
 
 
 #
@@ -493,6 +536,34 @@
 
 
 #
+# Options defined in cinder.cmd.volume
+#
+
+# Backend override of host value. (string value)
+# Deprecated group/name - [DEFAULT]/host
+#backend_host=<None>
+
+
+#
+# Options defined in cinder.cmd.volume_usage_audit
+#
+
+# If this option is specified then the start time specified is
+# used instead of the start time of the last completed audit
+# period. (string value)
+#start_time=<None>
+
+# If this option is specified then the end time specified is
+# used instead of the end time of the last completed audit
+# period. (string value)
+#end_time=<None>
+
+# Send the volume and snapshot create and delete notifications
+# generated in the specified period. (boolean value)
+#send_actions=false
+
+
+#
 # Options defined in cinder.common.config
 #
 
@@ -632,7 +703,7 @@
 # value)
 #enabled_backends=<None>
 
-# Whether snapshots count against GigaByte quota (boolean
+# Whether snapshots count against gigabyte quota (boolean
 # value)
 #no_snapshot_gb_quota=false
 
@@ -648,6 +719,19 @@
 # (string value)
 #consistencygroup_api_class=cinder.consistencygroup.api.API
 
+# OpenStack privileged account username. Used for requests to
+# other services (such as Nova) that require an account with
+# special rights. (string value)
+#os_privileged_user_name=<None>
+
+# Password associated with the OpenStack privileged account.
+# (string value)
+#os_privileged_user_password=<None>
+
+# Tenant name associated with the OpenStack privileged
+# account. (string value)
+#os_privileged_user_tenant=<None>
+
 
 #
 # Options defined in cinder.compute
@@ -665,11 +749,11 @@
 # Match this value when searching for nova in the service
 # catalog. Format is: separated values of the form:
 # <service_type>:<service_name>:<endpoint_type> (string value)
-#nova_catalog_info=compute:nova:publicURL
+#nova_catalog_info=compute:Compute Service:publicURL
 
 # Same as nova_catalog_info, but for admin endpoint. (string
 # value)
-#nova_catalog_admin_info=compute:nova:adminURL
+#nova_catalog_admin_info=compute:Compute Service:adminURL
 
 # Override service catalog lookup with template for nova
 # endpoint e.g. http://localhost:8774/v2/%(project_id)s
@@ -696,9 +780,6 @@
 # Options defined in cinder.db.api
 #
 
-# The backend to use for db (string value)
-#db_backend=sqlalchemy
-
 # Services to be added to the available pool on create
 # (boolean value)
 #enable_new_services=true
@@ -762,112 +843,6 @@
 
 
 #
-# Options defined in cinder.openstack.common.lockutils
-#
-
-# Whether to disable inter-process locks (boolean value)
-#disable_process_locking=false
-
-# Directory to use for lock files. Default to a temp directory
-# (string value)
-#lock_path=<None>
-
-
-#
-# Options defined in cinder.openstack.common.log
-#
-
-# Print debugging output (set logging level to DEBUG instead
-# of default WARNING level). (boolean value)
-#debug=false
-
-# Print more verbose output (set logging level to INFO instead
-# of default WARNING level). (boolean value)
-#verbose=false
-
-# Log output to standard error. (boolean value)
-#use_stderr=true
-
-# Format string to use for log messages with context. (string
-# value)
-#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages without context.
-# (string value)
-#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Data to append to log format when level is DEBUG. (string
-# value)
-#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format.
-# (string value)
-#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
-
-# List of logger=LEVEL pairs. (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
-
-# Enables or disables publication of error events. (boolean
-# value)
-#publish_errors=false
-
-# Enables or disables fatal status of deprecations. (boolean
-# value)
-#fatal_deprecations=false
-
-# The format for an instance that is passed with the log
-# message. (string value)
-#instance_format="[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log
-# message. (string value)
-#instance_uuid_format="[instance: %(uuid)s] "
-
-# The name of a logging configuration file. This file is
-# appended to any existing logging configuration files. For
-# details about logging configuration files, see the Python
-# logging module documentation. (string value)
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append=<None>
-
-# DEPRECATED. A logging.Formatter log message format string
-# which may use any of the available logging.LogRecord
-# attributes. This option is deprecated.  Please use
-# logging_context_format_string and
-# logging_default_format_string instead. (string value)
-#log_format=<None>
-
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s . (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is
-# set, logging will go to stdout. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file=<None>
-
-# (Optional) The base directory used for relative --log-file
-# paths. (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-#log_dir=<None>
-
-# Use syslog for logging. Existing syslog format is DEPRECATED
-# during I, and will change in J to honor RFC5424. (boolean
-# value)
-#use_syslog=false
-
-# (Optional) Enables or disables syslog rfc5424 format for
-# logging. If enabled, prefixes the MSG part of the syslog
-# message with APP-NAME (RFC5424). The format without the APP-
-# NAME is deprecated in I, and will be removed in J. (boolean
-# value)
-#use_syslog_rfc_format=false
-
-# Syslog facility to receive log lines. (string value)
-#syslog_log_facility=LOG_USER
-
-
-#
 # Options defined in cinder.openstack.common.periodic_task
 #
 
@@ -887,6 +862,23 @@
 # (string value)
 #policy_default_rule=default
 
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched.  Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
+#
+# Options defined in cinder.openstack.common.versionutils
+#
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
 
 #
 # Options defined in cinder.scheduler.driver
@@ -1001,12 +993,12 @@
 # volume (integer value)
 #num_iser_scan_tries=3
 
-# The maximum number of iSER target IDs per host (integer
-# value)
-#iser_num_targets=100
+# This option is deprecated and unused. It will be removed in
+# the Liberty release. (integer value)
+#iser_num_targets=<None>
 
 # Prefix for iSER volumes (string value)
-#iser_target_prefix=iqn.2010-10.org.iser.openstack:
+#iser_target_prefix=iqn.2010-10.org.openstack:
 
 # The IP address that the iSER daemon is listening on (string
 # value)
@@ -1028,9 +1020,9 @@
 # value)
 #reserved_percentage=0
 
-# The maximum number of iSCSI target IDs per host (integer
-# value)
-#iscsi_num_targets=100
+# This option is deprecated and unused. It will be removed in
+# the Liberty release. (integer value)
+#iscsi_num_targets=<None>
 
 # Prefix for iSCSI volumes (string value)
 #iscsi_target_prefix=iqn.2010-10.org.openstack:
@@ -1039,6 +1031,10 @@
 # value)
 #iscsi_ip_address=$my_ip
 
+# The list of secondary IP addresses of the iSCSI daemon (list
+# value)
+#iscsi_secondary_ip_addresses=
+
 # The port that the iSCSI daemon is listening on (integer
 # value)
 #iscsi_port=3260
@@ -1057,8 +1053,12 @@
 # value)
 #use_multipath_for_image_xfer=false
 
-# Method used to wipe old volumes (valid options are: none,
-# zero, shred) (string value)
+# If this is set to True, attachment of volumes for image
+# transfer will be aborted when multipathd is not running.
+# Otherwise, it will fallback to single path. (boolean value)
+#enforce_multipath_for_image_xfer=false
+
+# Method used to wipe old volumes (string value)
 #volume_clear=zero
 
 # Size in MiB to wipe at start of old volumes. 0 => all
@@ -1071,8 +1071,10 @@
 #volume_clear_ionice=<None>
 
 # iSCSI target user-land tool to use. tgtadm is default, use
-# lioadm for LIO iSCSI support, iseradm for the ISER protocol,
-# or fake for testing. (string value)
+# lioadm for LIO iSCSI support, scstadmin for SCST target
+# support, iseradm for the ISER protocol, ietadm for iSCSI
+# Enterprise Target, iscsictl for Chelsio iSCSI Target or fake
+# for testing. (string value)
 #iscsi_helper=tgtadm
 
 # Volume configuration file storage directory (string value)
@@ -1081,8 +1083,12 @@
 # IET configuration file (string value)
 #iet_conf=/etc/iet/ietd.conf
 
-# Comma-separated list of initiator IQNs allowed to connect to
-# the iSCSI target. (From Nova compute nodes.) (string value)
+# Chiscsi (CXT) global defaults configuration file (string
+# value)
+#chiscsi_conf=/etc/chelsio-iscsi/chiscsi.conf
+
+# This option is deprecated and unused. It will be removed in
+# the next release. (string value)
 #lio_initiator_iqns=
 
 # Sets the behavior of the iSCSI target to either perform
@@ -1108,6 +1114,13 @@
 # value)
 #iscsi_write_cache=on
 
+# Determines the iSCSI protocol for new iSCSI volumes, created
+# with tgtadm or lioadm target helpers. In order to enable
+# RDMA, this parameter should be set with the value "iser".
+# The supported iSCSI protocol values are "iscsi" and "iser".
+# (string value)
+#iscsi_protocol=iscsi
+
 # The path to the client certificate key for verification, if
 # the driver supports it. (string value)
 #driver_client_cert_key=<None>
@@ -1116,6 +1129,57 @@
 # driver supports it. (string value)
 #driver_client_cert=<None>
 
+# Tell driver to use SSL for connection to backend storage if
+# the driver supports it. (boolean value)
+#driver_use_ssl=false
+
+# Float representation of the over subscription ratio when
+# thin provisioning is involved. Default ratio is 20.0,
+# meaning provisioned capacity can be 20 times of the total
+# physical capacity. If the ratio is 10.5, it means
+# provisioned capacity can be 10.5 times of the total physical
+# capacity. A ratio of 1.0 means provisioned capacity cannot
+# exceed the total physical capacity. A ratio lower than 1.0
+# will be ignored and the default value will be used instead.
+# (floating point value)
+#max_over_subscription_ratio=20.0
+
+# Certain ISCSI targets have predefined target names, SCST
+# target driver uses this name. (string value)
+#scst_target_iqn_name=<None>
+
+# SCST target implementation can choose from multiple SCST
+# target drivers. (string value)
+#scst_target_driver=iscsi
+
+# Option to enable/disable CHAP authentication for targets.
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/eqlx_use_chap
+#use_chap_auth=false
+
+# CHAP user name. (string value)
+# Deprecated group/name - [DEFAULT]/eqlx_chap_login
+#chap_username=
+
+# Password for specified CHAP account name. (string value)
+# Deprecated group/name - [DEFAULT]/eqlx_chap_password
+#chap_password=
+
+# Namespace for driver private data values to be saved in.
+# (string value)
+#driver_data_namespace=<None>
+
+# String representation for an equation that will be used to
+# filter hosts. Only used when the driver filter is set to be
+# used by the Cinder scheduler. (string value)
+#filter_function=<None>
+
+# String representation for an equation that will be used to
+# determine the goodness of a host. Only used when using the
+# goodness weigher is set to be used by the Cinder scheduler.
+# (string value)
+#goodness_function=<None>
+
 
 #
 # Options defined in cinder.volume.drivers.block_device
@@ -1126,31 +1190,47 @@
 
 
 #
-# Options defined in cinder.volume.drivers.coraid
+# Options defined in cinder.volume.drivers.cloudbyte.options
 #
 
-# IP address of Coraid ESM (string value)
-#coraid_esm_address=
-
-# User name to connect to Coraid ESM (string value)
-#coraid_user=admin
-
-# Name of group on Coraid ESM to which coraid_user belongs
-# (must have admin privilege) (string value)
-#coraid_group=admin
-
-# Password to connect to Coraid ESM (string value)
-#coraid_password=password
-
-# Volume Type key name to store ESM Repository Name (string
+# These values will be used for CloudByte storage's addQos API
+# call. (dict value)
+#cb_add_qosgroup=latency:15,iops:10,graceallowed:false,iopscontrol:true,memlimit:0,throughput:0,tpcontrol:false,networkspeed:0
+
+# Driver will use this API key to authenticate against the
+# CloudByte storage's management interface. (string value)
+#cb_apikey=None
+
+# CloudByte storage specific account name. This maps to a
+# project name in OpenStack. (string value)
+#cb_account_name=None
+
+# This corresponds to the name of Tenant Storage Machine (TSM)
+# in CloudByte storage. A volume will be created in this TSM.
+# (string value)
+#cb_tsm_name=None
+
+# A retry value in seconds. Will be used by the driver to
+# check if volume creation was successful in CloudByte
+# storage. (integer value)
+#cb_confirm_volume_create_retry_interval=5
+
+# Will confirm a successful volume creation in CloudByte
+# storage by making this many number of attempts. (integer
 # value)
-#coraid_repository_key=coraid_repository
+#cb_confirm_volume_create_retries=3
+
+# These values will be used for CloudByte storage's
+# createVolume API call. (dict value)
+#cb_create_volume=compression:off,deduplication:off,blocklength:512B,sync:always,protocoltype:ISCSI,recordsize:16k
 
 
 #
 # Options defined in cinder.volume.drivers.datera
 #
 
+# DEPRECATED: This will be removed in the Liberty release. Use
+# san_login and san_password instead. This directly sets the
 # Datera API token. (string value)
 #datera_api_token=<None>
 
@@ -1165,6 +1245,25 @@
 
 
 #
+# Options defined in cinder.volume.drivers.dell.dell_storagecenter_common
+#
+
+# Storage Center System Serial Number (integer value)
+#dell_sc_ssn=64702
+
+# Dell API port (integer value)
+#dell_sc_api_port=3033
+
+# Name of the server folder to use on the Storage Center
+# (string value)
+#dell_sc_server_folder=openstack
+
+# Name of the volume folder to use on the Storage Center
+# (string value)
+#dell_sc_volume_folder=openstack
+
+
+#
 # Options defined in cinder.volume.drivers.emc.emc_vmax_common
 #
 
@@ -1217,60 +1316,69 @@
 # False. (boolean value)
 #initiator_auto_registration=false
 
+# Automatically deregister initiators after the related
+# storage group is destroyed. By default, the value is False.
+# (boolean value)
+#initiator_auto_deregistration=false
+
+# Report free_capacity_gb as 0 when the limit to maximum
+# number of pool LUNs is reached. By default, the value is
+# False. (boolean value)
+#check_max_pool_luns_threshold=false
+
+# Delete a LUN even if it is in Storage Groups. (boolean
+# value)
+#force_delete_lun_in_storagegroup=false
+
+
+#
+# Options defined in cinder.volume.drivers.emc.xtremio
+#
+
+# XMS cluster id in multi-cluster environment (string value)
+#xtremio_cluster_name=
+
 
 #
 # Options defined in cinder.volume.drivers.eqlx
 #
 
-# Group name to use for creating volumes (string value)
+# Group name to use for creating volumes. Defaults to
+# "group-0". (string value)
 #eqlx_group_name=group-0
 
-# Timeout for the Group Manager cli command execution (integer
-# value)
+# Timeout for the Group Manager cli command execution. Default
+# is 30. (integer value)
 #eqlx_cli_timeout=30
 
-# Maximum retry count for reconnection (integer value)
+# Maximum retry count for reconnection. Default is 5. (integer
+# value)
 #eqlx_cli_max_retries=5
 
-# Use CHAP authentication for targets? (boolean value)
+# Use CHAP authentication for targets. Note that this option
+# is deprecated in favour of "use_chap_auth" as specified in
+# cinder/volume/driver.py and will be removed in next release.
+# (boolean value)
 #eqlx_use_chap=false
 
-# Existing CHAP account name (string value)
+# Existing CHAP account name. Note that this option is
+# deprecated in favour of "chap_username" as specified in
+# cinder/volume/driver.py and will be removed in next release.
+# (string value)
 #eqlx_chap_login=admin
 
-# Password for specified CHAP account name (string value)
+# Password for specified CHAP account name. Note that this
+# option is deprecated in favour of "chap_password" as
+# specified in cinder/volume/driver.py and will be removed in
+# the next release (string value)
 #eqlx_chap_password=password
 
-# Pool in which volumes will be created (string value)
+# Pool in which volumes will be created. Defaults to
+# "default". (string value)
 #eqlx_pool=default
 
 
 #
-# Options defined in cinder.volume.drivers.fujitsu_eternus_dx_common
-#
-
-# The configuration file for the Cinder SMI-S driver (string
-# value)
-#cinder_smis_config_file=/etc/cinder/cinder_fujitsu_eternus_dx.xml
-
-
-#
-# Options defined in cinder.volume.drivers.fusionio.ioControl
-#
-
-# amount of time wait for iSCSI target to come online (integer
-# value)
-#fusionio_iocontrol_targetdelay=5
-
-# number of retries for GET operations (integer value)
-#fusionio_iocontrol_retry=3
-
-# verify the array certificate on each transaction (boolean
-# value)
-#fusionio_iocontrol_verify_cert=true
-
-
-#
 # Options defined in cinder.volume.drivers.glusterfs
 #
 
@@ -1413,6 +1521,20 @@
 
 
 #
+# Options defined in cinder.volume.drivers.ibm.flashsystem
+#
+
+# Connection protocol should be FC. (string value)
+#flashsystem_connection_protocol=FC
+
+# Connect with multipath (FC only). (boolean value)
+#flashsystem_multipath_enabled=false
+
+# Allows vdisk to multi host mapping. (boolean value)
+#flashsystem_multihostmap_enabled=true
+
+
+#
 # Options defined in cinder.volume.drivers.ibm.gpfs
 #
 
@@ -1458,22 +1580,6 @@
 # Options defined in cinder.volume.drivers.ibm.ibmnas
 #
 
-# IP address or Hostname of NAS system. (string value)
-#nas_ip=
-
-# User name to connect to NAS system. (string value)
-#nas_login=admin
-
-# Password to connect to NAS system. (string value)
-#nas_password=
-
-# SSH port to use to connect to NAS system. (integer value)
-#nas_ssh_port=22
-
-# Filename of private key to use for SSH authentication.
-# (string value)
-#nas_private_key=
-
 # IBMNAS platform type to be used as backend storage; valid
 # values are - v7ku : for using IBM Storwize V7000 Unified,
 # sonas : for using IBM Scale Out NAS, gpfs-nas : for using
@@ -1556,8 +1662,7 @@
 # value)
 #xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy
 
-# Connection type to the IBM Storage Array
-# (fibre_channel|iscsi) (string value)
+# Connection type to the IBM Storage Array (string value)
 #xiv_ds8k_connection_type=iscsi
 
 # CHAP authentication mode, effective only for iscsi
@@ -1578,10 +1683,15 @@
 # value)
 #lvm_mirrors=0
 
-# Type of LVM volumes to deploy; (default or thin) (string
-# value)
+# Type of LVM volumes to deploy (string value)
 #lvm_type=default
 
+# LVM conf file to use for the LVM driver in Cinder; this
+# setting is ignored if the specified file does not exist (You
+# can also specify 'None' to not use a conf file even if one
+# exists). (string value)
+#lvm_conf_file=/etc/cinder/lvm.conf
+
 
 #
 # Options defined in cinder.volume.drivers.netapp.options
@@ -1595,6 +1705,13 @@
 # system. (string value)
 #netapp_vfiler=<None>
 
+# The name of the config.conf stanza for a Data ONTAP (7-mode)
+# HA partner.  This option is only used by the driver when
+# connecting to an instance with a storage family of Data
+# ONTAP operating in 7-Mode, and it is required if the storage
+# protocol selected is FC. (string value)
+#netapp_partner_backend_name=<None>
+
 # Administrative user account name used to access the storage
 # system or proxy server. (string value)
 #netapp_login=<None>
@@ -1605,14 +1722,7 @@
 
 # This option specifies the virtual storage server (Vserver)
 # name on the storage cluster on which provisioning of block
-# storage volumes should occur. If using the NFS storage
-# protocol, this parameter is mandatory for storage service
-# catalog support (utilized by Cinder volume type extra_specs
-# support). If this option is specified, the exports belonging
-# to the Vserver will only be used for provisioning in the
-# future. Block storage volumes on exports not belonging to
-# the Vserver specified by this option will continue to
-# function normally. (string value)
+# storage volumes should occur. (string value)
 #netapp_vserver=<None>
 
 # The hostname (or IP address) for the storage system or proxy
@@ -1692,11 +1802,11 @@
 #netapp_size_multiplier=1.2
 
 # This option is only utilized when the storage protocol is
-# configured to use iSCSI. This option is used to restrict
-# provisioning to the specified controller volumes. Specify
-# the value of this option to be a comma separated list of
-# NetApp controller volume names to be used for provisioning.
-# (string value)
+# configured to use iSCSI or FC. This option is used to
+# restrict provisioning to the specified controller volumes.
+# Specify the value of this option to be a comma separated
+# list of NetApp controller volume names to be used for
+# provisioning. (string value)
 #netapp_volume_list=<None>
 
 # The storage family type used on the storage system; valid
@@ -1706,89 +1816,15 @@
 #netapp_storage_family=ontap_cluster
 
 # The storage protocol to be used on the data path with the
-# storage system; valid values are iscsi or nfs. (string
-# value)
+# storage system. (string value)
 #netapp_storage_protocol=<None>
 
 # The transport protocol used when communicating with the
-# storage system or proxy server. Valid values are http or
-# https. (string value)
+# storage system or proxy server. (string value)
 #netapp_transport_type=http
 
 
 #
-# Options defined in cinder.volume.drivers.nexenta.options
-#
-
-# IP address of Nexenta SA (string value)
-#nexenta_host=
-
-# HTTP port to connect to Nexenta REST API server (integer
-# value)
-#nexenta_rest_port=2000
-
-# Use http or https for REST connection (default auto) (string
-# value)
-#nexenta_rest_protocol=auto
-
-# User name to connect to Nexenta SA (string value)
-#nexenta_user=admin
-
-# Password to connect to Nexenta SA (string value)
-#nexenta_password=nexenta
-
-# Nexenta target portal port (integer value)
-#nexenta_iscsi_target_portal_port=3260
-
-# SA Pool that holds all volumes (string value)
-#nexenta_volume=cinder
-
-# IQN prefix for iSCSI targets (string value)
-#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder-
-
-# Prefix for iSCSI target groups on SA (string value)
-#nexenta_target_group_prefix=cinder/
-
-# File with the list of available nfs shares (string value)
-#nexenta_shares_config=/etc/cinder/nfs_shares
-
-# Base directory that contains NFS share mount points (string
-# value)
-#nexenta_mount_point_base=$state_path/mnt
-
-# Enables or disables the creation of volumes as sparsed files
-# that take no space. If disabled (False), volume is created
-# as a regular file, which takes a long time. (boolean value)
-#nexenta_sparsed_volumes=true
-
-# Default compression value for new ZFS folders. (string
-# value)
-#nexenta_volume_compression=on
-
-# If set True cache NexentaStor appliance volroot option
-# value. (boolean value)
-#nexenta_nms_cache_volroot=true
-
-# Enable stream compression, level 1..9. 1 - gives best speed;
-# 9 - gives best compression. (integer value)
-#nexenta_rrmgr_compression=0
-
-# TCP Buffer size in KiloBytes. (integer value)
-#nexenta_rrmgr_tcp_buf_size=4096
-
-# Number of TCP connections. (integer value)
-#nexenta_rrmgr_connections=2
-
-# Block size for volumes (default=blank means 8KB) (string
-# value)
-#nexenta_blocksize=
-
-# Enables or disables the creation of sparse volumes (boolean
-# value)
-#nexenta_sparse=false
-
-
-#
 # Options defined in cinder.volume.drivers.nfs
 #
 
@@ -1818,6 +1854,11 @@
 # nfs man page for details. (string value)
 #nfs_mount_options=<None>
 
+# The number of attempts to mount nfs shares before raising an
+# error.  At least one attempt will be made to mount an nfs
+# share, regardless of the value specified. (integer value)
+#nfs_mount_attempts=3
+
 
 #
 # Options defined in cinder.volume.drivers.nimble
@@ -1831,6 +1872,15 @@
 
 
 #
+# Options defined in cinder.volume.drivers.openvstorage
+#
+
+# Vpool to use for volumes - backend is defined by vpool not
+# by us. (string value)
+#vpool_name=
+
+
+#
 # Options defined in cinder.volume.drivers.prophetstor.options
 #
 
@@ -1851,6 +1901,31 @@
 
 
 #
+# Options defined in cinder.volume.drivers.quobyte
+#
+
+# URL to the Quobyte volume e.g., quobyte://<DIR host>/<volume
+# name> (string value)
+#quobyte_volume_url=<None>
+
+# Path to a Quobyte Client configuration file. (string value)
+#quobyte_client_cfg=<None>
+
+# Create volumes as sparse files which take no space. If set
+# to False, volume is created as regular file. In such case
+# volume creation takes a lot of time. (boolean value)
+#quobyte_sparsed_volumes=true
+
+# Create volumes as QCOW2 files rather than raw files.
+# (boolean value)
+#quobyte_qcow2_volumes=true
+
+# Base dir containing the mount point for the Quobyte volume.
+# (string value)
+#quobyte_mount_point_base=$state_path/mnt
+
+
+#
 # Options defined in cinder.volume.drivers.rbd
 #
 
@@ -1874,7 +1949,8 @@
 
 # Directory where temporary image files are stored when the
 # volume driver does not write them directly to the volume.
-# (string value)
+# Warning: this option is now deprecated, please use
+# image_conversion_dir instead. (string value)
 #volume_tmp_dir=<None>
 
 # Maximum number of nested volume clones that are taken before
@@ -1912,6 +1988,32 @@
 # (string value)
 #nas_private_key=
 
+# Allow network-attached storage systems to operate in a
+# secure environment where root level access is not permitted.
+# If set to False, access is as the root user and insecure. If
+# set to True, access is not as root. If set to auto, a check
+# is done to determine if this is a new installation: True is
+# used if so, otherwise False. Default is auto. (string value)
+#nas_secure_file_operations=auto
+
+# Set more secure file permissions on network-attached storage
+# volume files to restrict broad other/world access. If set to
+# False, volumes are created with open permissions. If set to
+# True, volumes are created with permissions for the cinder
+# user and group (660). If set to auto, a check is done to
+# determine if this is a new installation: True is used if so,
+# otherwise False. Default is auto. (string value)
+#nas_secure_file_permissions=auto
+
+# Path to the share to use for storing Cinder volumes. For
+# example:  "/srv/export1" for an NFS server export available
+# at 10.0.5.10:/srv/export1 . (string value)
+#nas_share_path=
+
+# Options used to mount the storage backend file system where
+# Cinder volumes are stored. (string value)
+#nas_mount_options=<None>
+
 
 #
 # Options defined in cinder.volume.drivers.san.hp.hp_3par_common
@@ -1927,11 +2029,11 @@
 # 3PAR Super user password (string value)
 #hp3par_password=
 
-# The CPG to use for volume creation (string value)
+# List of the CPG(s) to use for volume creation (list value)
 #hp3par_cpg=OpenStack
 
-# The CPG to use for Snapshots for volumes. If empty
-# hp3par_cpg will be used (string value)
+# The CPG to use for Snapshots for volumes. If empty the
+# userCPG will be used. (string value)
 #hp3par_cpg_snap=
 
 # The time in hours to retain a snapshot.  You can't delete it
@@ -1979,14 +2081,6 @@
 
 
 #
-# Options defined in cinder.volume.drivers.san.hp.hp_msa_common
-#
-
-# The VDisk to use for volume creation. (string value)
-#msa_vdisk=OpenStack
-
-
-#
 # Options defined in cinder.volume.drivers.san.san
 #
 
@@ -2027,15 +2121,6 @@
 
 
 #
-# Options defined in cinder.volume.drivers.san.solaris
-#
-
-# The ZFS path under which to create zvols for volumes.
-# (string value)
-#san_zfs_volume_base=rpool/
-
-
-#
 # Options defined in cinder.volume.drivers.scality
 #
 
@@ -2058,8 +2143,7 @@
 #smbfs_shares_config=/etc/cinder/smbfs_shares
 
 # Default format that will be used when creating volumes if no
-# volume format is specified. Can be set to: raw, qcow2, vhd
-# or vhdx. (string value)
+# volume format is specified. (string value)
 #smbfs_default_volume_format=qcow2
 
 # Create volumes as sparsed files which take no space rather
@@ -2087,15 +2171,25 @@
 
 
 #
+# Options defined in cinder.volume.drivers.solaris.nfs
+#
+
+# Schedule volumes round robin across NFS shares. (boolean
+# value)
+#nfs_round_robin=true
+
+
+#
 # Options defined in cinder.volume.drivers.solaris.zfs
 #
 
-# The base dataset for ZFS cinder volumes. (string value)
+# The base dataset for ZFS volumes. (string value)
 #zfs_volume_base=rpool/cinder
 
 # iSCSI target group name. (string value)
 #zfs_target_group=tgt-grp
 
+
 #
 # Options defined in cinder.volume.drivers.solidfire
 #
@@ -2112,12 +2206,48 @@
 # default behavior).  The default is NO prefix. (string value)
 #sf_account_prefix=<None>
 
+# Account name on the SolidFire Cluster to use as owner of
+# template/cache volumes (created if does not exist). (string
+# value)
+#sf_template_account_name=openstack-vtemplate
+
+# Create an internal cache of copy of images when a bootable
+# volume is created to eliminate fetch from glance and qemu-
+# conversion on subsequent calls. (boolean value)
+#sf_allow_template_caching=true
+
 # SolidFire API port. Useful if the device api is behind a
 # proxy on a different port. (integer value)
 #sf_api_port=443
 
 
 #
+# Options defined in cinder.volume.drivers.srb
+#
+
+# Comma-separated list of REST servers IP to connect to. (eg
+# http://IP1/,http://IP2:81/path) (string value)
+#srb_base_urls=<None>
+
+
+#
+# Options defined in cinder.volume.drivers.violin.v6000_common
+#
+
+# IP address or hostname of mg-a (string value)
+#gateway_mga=<None>
+
+# IP address or hostname of mg-b (string value)
+#gateway_mgb=<None>
+
+# Use igroups to manage targets and initiators (boolean value)
+#use_igroups=false
+
+# Global backend request timeout, in seconds (integer value)
+#request_timeout=300
+
+
+#
 # Options defined in cinder.volume.drivers.vmware.vmdk
 #
 
@@ -2180,43 +2310,25 @@
 
 
 #
-# Options defined in cinder.volume.drivers.zadara
+# Options defined in cinder.volume.drivers.xio
 #
 
-# Management IP of Zadara VPSA (string value)
-#zadara_vpsa_ip=<None>
-
-# Zadara VPSA port number (string value)
-#zadara_vpsa_port=<None>
-
-# Use SSL connection (boolean value)
-#zadara_vpsa_use_ssl=false
-
-# User name for the VPSA (string value)
-#zadara_user=<None>
-
-# Password for the VPSA (string value)
-#zadara_password=<None>
-
-# Name of VPSA storage pool for volumes (string value)
-#zadara_vpsa_poolname=<None>
-
-# Default thin provisioning policy for volumes (boolean value)
-#zadara_vol_thin=true
-
-# Default encryption policy for volumes (boolean value)
-#zadara_vol_encrypt=false
-
-# Default template for VPSA volume names (string value)
-#zadara_vol_name_template=OS_%s
-
-# Automatically detach from servers on volume delete (boolean
-# value)
-#zadara_vpsa_auto_detach_on_delete=true
-
-# Don't halt on deletion of non-existing volumes (boolean
-# value)
-#zadara_vpsa_allow_nonexistent_delete=true
+# Default storage pool for volumes. (integer value)
+#ise_storage_pool=1
+
+# Raid level for ISE volumes. (integer value)
+#ise_raid=1
+
+# Number of retries (per port) when establishing connection to
+# ISE management port. (integer value)
+#ise_connection_retries=5
+
+# Interval (secs) between retries. (integer value)
+#ise_retry_interval=1
+
+# Number of retries to get completion status after issuing a
+# command to ISE. (integer value)
+#ise_completion_retries=30
 
 
 #
@@ -2229,20 +2341,18 @@
 # Project name. (string value)
 #zfssa_project=<None>
 
-# Block size: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k.
-# (string value)
+# Block size. (string value)
 #zfssa_lun_volblocksize=8k
 
 # Flag to enable sparse (thin-provisioned): True, False.
 # (boolean value)
 #zfssa_lun_sparse=false
 
-# Data compression-off, lzjb, gzip-2, gzip, gzip-9. (string
-# value)
-#zfssa_lun_compression=
-
-# Synchronous write bias-latency, throughput. (string value)
-#zfssa_lun_logbias=
+# Data compression. (string value)
+#zfssa_lun_compression=off
+
+# Synchronous write bias. (string value)
+#zfssa_lun_logbias=latency
 
 # iSCSI initiator group. (string value)
 #zfssa_initiator_group=
@@ -2256,6 +2366,9 @@
 # iSCSI initiator CHAP password. (string value)
 #zfssa_initiator_password=
 
+# iSCSI initiators configuration. (string value)
+#zfssa_initiator_config=
+
 # iSCSI target group name. (string value)
 #zfssa_target_group=tgt-grp
 
@@ -2278,17 +2391,51 @@
 
 
 #
+# Options defined in cinder.volume.drivers.zfssa.zfssanfs
+#
+
+# Data path IP address (string value)
+#zfssa_data_ip=<None>
+
+# HTTPS port number (string value)
+#zfssa_https_port=443
+
+# Options to be passed while mounting share over nfs (string
+# value)
+#zfssa_nfs_mount_options=
+
+# Storage pool name. (string value)
+#zfssa_nfs_pool=
+
+# Project name. (string value)
+#zfssa_nfs_project=NFSProject
+
+# Share name. (string value)
+#zfssa_nfs_share=nfs_share
+
+# Data compression. (string value)
+#zfssa_nfs_share_compression=off
+
+# Synchronous write bias-latency, throughput. (string value)
+#zfssa_nfs_share_logbias=latency
+
+# REST connection timeout. (seconds) (integer value)
+#zfssa_rest_timeout=<None>
+
+
+#
 # Options defined in cinder.volume.manager
 #
 
 # Driver to use for volume creation (string value)
 # The local ZFS driver provides direct access to ZFS volumes that it
 # creates. The other listed drivers provide access to ZFS volumes via
-# iSCSI or Fibre Channel and are suitable for cases where block storage
+# iSCSI, Fibre Channel, or NFS and are suitable for cases where block storage
 # for Nova compute instances is shared.
 volume_driver=cinder.volume.drivers.solaris.zfs.ZFSVolumeDriver
 #volume_driver=cinder.volume.drivers.solaris.zfs.ZFSISCSIDriver
 #volume_driver=cinder.volume.drivers.solaris.zfs.ZFSFCDriver
+#volume_driver=cinder.volume.drivers.solaris.nfs.ZfsNfsVolumeDriver
 #volume_driver=cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
 
 # Timeout for creating the volume to migrate to when
@@ -2303,7 +2450,12 @@
 #zoning_mode=none
 
 # User defined capabilities, a JSON formatted string
-# specifying key/value pairs. (string value)
+# specifying key/value pairs. The key/value pairs can be used
+# by the CapabilitiesFilter to select between backends when
+# requests specify volume types. For example, specifying a
+# service level or the geographical location of a backend,
+# then creating a volume type to allow the user to select by
+# these different properties. (string value)
 #extra_capabilities={}
 
 
@@ -2460,20 +2612,22 @@
 # connection lost. (boolean value)
 #use_db_reconnect=false
 
-# Seconds between database connection retries. (integer value)
+# Seconds between retries of a database transaction. (integer
+# value)
 #db_retry_interval=1
 
-# If True, increases the interval between database connection
-# retries up to db_max_retry_interval. (boolean value)
+# If True, increases the interval between retries of a
+# database operation up to db_max_retry_interval. (boolean
+# value)
 #db_inc_retry_interval=true
 
 # If db_inc_retry_interval is set, the maximum seconds between
-# database connection retries. (integer value)
+# retries of a database operation. (integer value)
 #db_max_retry_interval=10
 
-# Maximum database connection retries before error is raised.
-# Set to -1 to specify an infinite retry count. (integer
-# value)
+# Maximum retries in case of connection error or deadlock
+# error before error is raised. Set to -1 to specify an
+# infinite retry count. (integer value)
 #db_max_retries=20
 
 
@@ -2513,15 +2667,16 @@
 # value)
 #zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver
 
-# Zoning policy configured by user (string value)
+# Zoning policy configured by user; valid values include
+# "initiator-target" or "initiator" (string value)
 #zoning_policy=initiator-target
 
-# Comma separated list of fibre channel fabric names. This
+# Comma separated list of Fibre Channel fabric names. This
 # list of names is used to retrieve other SAN credentials for
 # connecting to each SAN fabric (string value)
 #fc_fabric_names=<None>
 
-# FC San Lookup Service (string value)
+# FC SAN Lookup Service (string value)
 #fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService
 
 
@@ -2550,7 +2705,7 @@
 #
 
 # Authentication url for encryption service. (string value)
-#encryption_auth_url=http://localhost:5000/v2.0
+encryption_auth_url=http://localhost:5000/v2.0
 
 # Url for encryption service. (string value)
 #encryption_api_url=http://localhost:9311/v1
@@ -2562,73 +2717,34 @@
 # Options defined in keystonemiddleware.auth_token
 #
 
-# Prefix to prepend at the beginning of the path. Deprecated,
-# use identity_uri. (string value)
-#auth_admin_prefix=
-
-# Host providing the admin Identity API endpoint. Deprecated,
-# use identity_uri. (string value)
-#auth_host=127.0.0.1
-
-# Port of the admin Identity API endpoint. Deprecated, use
-# identity_uri. (integer value)
-#auth_port=35357
-
-# Protocol of the admin Identity API endpoint (http or https).
-# Deprecated, use identity_uri. (string value)
-#auth_protocol=https
-
-# Complete public Identity API endpoint (string value)
+# Complete public Identity API endpoint. (string value)
 auth_uri=http://127.0.0.1:5000/v2.0/
 
-# Complete admin Identity API endpoint. This should specify
-# the unversioned root endpoint e.g. https://localhost:35357/
-# (string value)
-identity_uri=http://127.0.0.1:35357/
-
-# API version of the admin Identity API endpoint (string
+# API version of the admin Identity API endpoint. (string
 # value)
 #auth_version=<None>
 
 # Do not handle authorization requests within the middleware,
 # but delegate the authorization decision to downstream WSGI
-# components (boolean value)
+# components. (boolean value)
 #delay_auth_decision=false
 
 # Request timeout value for communicating with Identity API
-# server. (boolean value)
+# server. (integer value)
 #http_connect_timeout=<None>
 
 # How many times are we trying to reconnect when communicating
 # with Identity API Server. (integer value)
 #http_request_max_retries=3
 
-# This option is deprecated and may be removed in a future
-# release. Single shared secret with the Keystone
-# configuration used for bootstrapping a Keystone
-# installation, or otherwise bypassing the normal
-# authentication process. This option should not be used, use
-# `admin_user` and `admin_password` instead. (string value)
-#admin_token=<None>
-
-# Keystone account username (string value)
-admin_user=%SERVICE_USER%
-
-# Keystone account password (string value)
-admin_password=%SERVICE_PASSWORD%
-
-# Keystone service account tenant name to validate user tokens
-# (string value)
-admin_tenant_name=%SERVICE_TENANT_NAME%
-
-# Env key for the swift cache (string value)
+# Env key for the swift cache. (string value)
 #cache=<None>
 
-# Required if Keystone server requires client certificate
+# Required if identity server requires client certificate
 # (string value)
 #certfile=<None>
 
-# Required if Keystone server requires client certificate
+# Required if identity server requires client certificate
 # (string value)
 #keyfile=<None>
 
@@ -2639,7 +2755,7 @@
 # Verify HTTPS connections. (boolean value)
 #insecure=false
 
-# Directory used to cache files related to PKI tokens (string
+# Directory used to cache files related to PKI tokens. (string
 # value)
 signing_dir=$state_path/keystone-signing
 
@@ -2662,7 +2778,7 @@
 # value)
 #revocation_cache_time=10
 
-# (optional) if defined, indicate whether token data should be
+# (Optional) If defined, indicate whether token data should be
 # authenticated or authenticated and encrypted. Acceptable
 # values are MAC or ENCRYPT.  If MAC, token data is
 # authenticated (with HMAC) in the cache. If ENCRYPT, token
@@ -2671,38 +2787,38 @@
 # raise an exception on initialization. (string value)
 #memcache_security_strategy=<None>
 
-# (optional, mandatory if memcache_security_strategy is
-# defined) this string is used for key derivation. (string
+# (Optional, mandatory if memcache_security_strategy is
+# defined) This string is used for key derivation. (string
 # value)
 #memcache_secret_key=<None>
 
-# (optional) number of seconds memcached server is considered
+# (Optional) Number of seconds memcached server is considered
 # dead before it is tried again. (integer value)
 #memcache_pool_dead_retry=300
 
-# (optional) max total number of open connections to every
+# (Optional) Maximum total number of open connections to every
 # memcached server. (integer value)
 #memcache_pool_maxsize=10
 
-# (optional) socket timeout in seconds for communicating with
+# (Optional) Socket timeout in seconds for communicating with
 # a memcache server. (integer value)
 #memcache_pool_socket_timeout=3
 
-# (optional) number of seconds a connection to memcached is
+# (Optional) Number of seconds a connection to memcached is
 # held unused in the pool before it is closed. (integer value)
 #memcache_pool_unused_timeout=60
 
-# (optional) number of seconds that an operation will wait to
+# (Optional) Number of seconds that an operation will wait to
 # get a memcache client connection from the pool. (integer
 # value)
 #memcache_pool_conn_get_timeout=10
 
-# (optional) use the advanced (eventlet safe) memcache client
+# (Optional) Use the advanced (eventlet safe) memcache client
 # pool. The advanced pool will only work under python 2.x.
 # (boolean value)
 #memcache_use_advanced_pool=false
 
-# (optional) indicate whether to set the X-Service-Catalog
+# (Optional) Indicate whether to set the X-Service-Catalog
 # header. If False, middleware will not ask for service
 # catalog on token validation and will not set the X-Service-
 # Catalog header. (boolean value)
@@ -2721,7 +2837,7 @@
 
 # If true, the revocation list will be checked for cached
 # tokens. This requires that PKI tokens are configured on the
-# Keystone server. (boolean value)
+# identity server. (boolean value)
 #check_revocations_for_cached=false
 
 # Hash algorithms to use for hashing PKI tokens. This may be a
@@ -2736,6 +2852,51 @@
 # (list value)
 #hash_algorithms=md5
 
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+identity_uri=http://127.0.0.1:35357/
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=<None>
+
+# Service username. (string value)
+admin_user=%SERVICE_USER%
+
+# Service user password. (string value)
+admin_password=%SERVICE_PASSWORD%
+
+# Service tenant name. (string value)
+admin_tenant_name=%SERVICE_TENANT_NAME%
+
+# Name of the plugin to load (string value)
+#auth_plugin=<None>
+
+# Config Section from which to load plugin specific options
+# (string value)
+#auth_section=<None>
+
 
 [matchmaker_redis]
 
@@ -2764,12 +2925,28 @@
 #ringfile=/etc/oslo/matchmaker_ring.json
 
 
+[oslo_concurrency]
+
+#
+# Options defined in oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files.  For security, the
+# specified directory should only be writable by the user
+# running the processes that need locking. Defaults to
+# environment variable OSLO_LOCK_PATH. If external locks are
+# used, a lock path must be set. (string value)
+lock_path=$state_path/lock
+
+
 [oslo_messaging_amqp]
 
 #
 # Options defined in oslo.messaging
 #
-# NOTE: Options in this group are supported when using oslo.messaging >=1.5.0.
 
 # address prefix used when sending to a specific server
 # (string value)
@@ -2813,6 +2990,170 @@
 #allow_insecure_clients=false
 
 
+[oslo_messaging_qpid]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use.  Version 1 is what was
+# originally used by impl_qpid.  Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work.  Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+
+[oslo_messaging_rabbit]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may
+# be available on some distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# The RabbitMQ login method. (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# Number of seconds after which the Rabbit broker is
+# considered down if heartbeat's keep-alive fails (0 disables
+# the heartbeat, >0 enables it. Enabling heartbeats requires
+# kombu>=3.0.7 and amqp>=1.4.0). EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold=0
+
+# How often times during the heartbeat_timeout_threshold we
+# check the heartbeat. (integer value)
+#heartbeat_rate=2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+#fake_rabbit=false
+
+
+[oslo_middleware]
+
+#
+# Options defined in oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size=114688
+
+
 [profiler]
 
 #
@@ -2826,22 +3167,3 @@
 #trace_sqlalchemy=false
 
 
-[ssl]
-
-#
-# Options defined in cinder.openstack.common.sslutils
-#
-
-# CA certificate file to use to verify connecting clients
-# (string value)
-#ca_file=<None>
-
-# Certificate file to use when starting the server securely
-# (string value)
-#cert_file=<None>
-
-# Private key file to use when starting the server securely
-# (string value)
-#key_file=<None>
-
-
--- a/components/openstack/cinder/files/cinder.exec_attr	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/cinder.exec_attr	Fri Feb 05 17:54:17 2016 -0500
@@ -15,6 +15,10 @@
 
 cinder-volume:solaris:cmd:RO::/usr/sbin/itadm:uid=0
 
+cinder-volume:solaris:cmd:RO::/usr/sbin/mount:privs=sys_mount,net_privaddr
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/stmfadm:euid=0
 
+cinder-volume:solaris:cmd:RO::/usr/sbin/umount:privs=sys_mount,net_privaddr
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/zfs:privs=sys_config,sys_mount
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/solaris/nfs.py	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,232 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+Volume driver for Solaris ZFS NFS storage
+"""
+
+import os
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume.drivers import nfs
+
+ZFS_NFS_VERSION = '1.0.0'
+
+LOG = logging.getLogger(__name__)
+
+solaris_zfs_nfs_opts = [
+    cfg.BoolOpt('nfs_round_robin',
+                default=True,
+                help=('Schedule volumes round robin across NFS shares.')),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(solaris_zfs_nfs_opts)
+
+
+class ZfsNfsVolumeDriver(nfs.NfsDriver):
+    """Local ZFS NFS volume operations."""
+
+    driver_volume_type = 'nfs'
+    driver_prefix = 'nfs'
+    volume_backend_name = 'Solaris_NFS'
+
+    def __init__(self, *args, **kwargs):
+        super(ZfsNfsVolumeDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(solaris_zfs_nfs_opts)
+
+        self.last_rr_pos = None
+
+        if self.configuration.nfs_mount_options:
+            LOG.warning(_("Solaris NFS driver ignores mount options"))
+
+    def _update_volume_stats(self):
+        """Retrieve volume status info."""
+
+        stats = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        stats["volume_backend_name"] = backend_name or self.__class__.__name__
+        stats["driver_version"] = ZFS_NFS_VERSION
+        stats["vendor_name"] = 'Oracle'
+        stats['storage_protocol'] = self.driver_volume_type
+
+        self._ensure_shares_mounted()
+
+        global_capacity = 0
+        global_free = 0
+        for share in self._mounted_shares:
+            capacity, free, used = self._get_capacity_info(share)
+            global_capacity += capacity
+            global_free += free
+
+        stats['total_capacity_gb'] = global_capacity / float(units.Gi)
+        stats['free_capacity_gb'] = global_free / float(units.Gi)
+        stats['reserved_percentage'] = 0
+        stats['QoS_support'] = False
+        self._stats = stats
+
+    def _create_sparsed_file(self, path, size):
+        """Creates a sparse file of a given size in GiB."""
+        self._execute('/usr/bin/truncate', '-s', '%sG' % size, path)
+
+    def _create_regular_file(self, path, size):
+        """Creates a regular file of given size in GiB."""
+
+        block_size_mb = 1
+        block_count = size * units.Gi / (block_size_mb * units.Mi)
+
+        self._execute('/usr/bin/dd', 'if=/dev/zero', 'of=%s' % path,
+                      'bs=%dM' % block_size_mb,
+                      'count=%d' % block_count)
+
+    def _set_rw_permissions(self, path):
+        """Sets access permissions for given NFS path.
+
+        :param path: the volume file path.
+        """
+        os.chmod(path, 0o660)
+
+    def _set_rw_permissions_for_all(self, path):
+        """Sets 666 permissions for the path."""
+        mode = os.stat(path).st_mode
+        os.chmod(path, mode | 0o666)
+
+    def _set_rw_permissions_for_owner(self, path):
+        """Sets read-write permissions to the owner for the path."""
+        mode = os.stat(path).st_mode
+        os.chmod(path, mode | 0o600)
+
+    def _delete(self, path):
+        os.unlink(path)
+
+    def _get_capacity_info(self, nfs_share):
+        """Calculate available space on the NFS share.
+
+        :param nfs_share: example 172.18.194.100:/var/nfs
+        """
+
+        mount_point = self._get_mount_point_for_share(nfs_share)
+
+        st = os.statvfs(mount_point)
+        total_available = st.f_frsize * st.f_bavail
+        total_size = st.f_frsize * st.f_blocks
+
+        du, _ = self._execute('/usr/bin/gdu', '-sb', '--apparent-size',
+                              '--exclude', '*snapshot*', mount_point)
+        total_allocated = float(du.split()[0])
+        return total_size, total_available, total_allocated
+
+    def _round_robin(self, sharelist):
+        """
+        Implement a round robin generator for share list
+        """
+
+        mylen = len(sharelist)
+
+        if self.last_rr_pos is None:
+            start_pos = 0
+        else:
+            start_pos = (self.last_rr_pos + 1) % mylen
+
+        pos = start_pos
+        while True:
+            yield sharelist[pos], pos
+            pos = (pos + 1) % mylen
+            if pos == start_pos:
+                break
+
+    def _find_share(self, volume_size_in_gib):
+        """Choose NFS share among available ones for given volume size.
+
+        For instances with more than one share that meets the criteria, the
+        share with the least "allocated" space will be selected.
+
+        :param volume_size_in_gib: int size in GB
+        """
+
+        if not self._mounted_shares:
+            raise exception.NfsNoSharesMounted()
+
+        target_share = None
+        if self.configuration.nfs_round_robin:
+            # Round Robin volume placement on shares
+
+            LOG.debug(_("_find_share using round robin"))
+
+            for nfs_share, pos in self._round_robin(self._mounted_shares):
+                if not self._is_share_eligible(nfs_share, volume_size_in_gib):
+                    continue
+                target_share = nfs_share
+                self.last_rr_pos = pos
+                break
+        else:
+            # Place volume on share with the most free space.
+
+            LOG.debug(_("_find_share using select most free"))
+
+            target_share_reserved = 0
+
+            for nfs_share in self._mounted_shares:
+                if not self._is_share_eligible(nfs_share, volume_size_in_gib):
+                    continue
+                total_size, total_available, total_allocated = \
+                    self._get_capacity_info(nfs_share)
+                if target_share is not None:
+                    if target_share_reserved > total_allocated:
+                        target_share = nfs_share
+                        target_share_reserved = total_allocated
+                else:
+                    target_share = nfs_share
+                    target_share_reserved = total_allocated
+
+        if target_share is None:
+            raise exception.NfsNoSuitableShareFound(
+                volume_size=volume_size_in_gib)
+
+        LOG.debug('Selected %s as target nfs share.', target_share)
+
+        return target_share
+
+    def set_nas_security_options(self, is_new_cinder_install):
+        """Secure NAS options.
+
+        For Solaris we always operate in a secure mode and do not
+        rely on root or any rootwrap utilities.
+
+        With RBAC we can do what we need as the cinder user.  We
+        set the nas_secure_file.XXX to be true by default.  We ignore
+        any conf file setting for these string vars.
+
+        We don't ever use these nas_secure_file_XXX vars in this driver
+        but we still set the value to true.  This might prevent admin/users
+        from opening bugs stating we are not running in a secure mode.
+        """
+
+        self.configuration.nas_secure_file_operations = 'true'
+        self.configuration.nas_secure_file_permissions = 'true'
+        self._execute_as_root = False
+
+        LOG.debug('NAS variable secure_file_permissions setting is: %s' %
+                  self.configuration.nas_secure_file_permissions)
+
+        LOG.debug('NAS variable secure_file_operations setting is: %s' %
+                  self.configuration.nas_secure_file_operations)
--- a/components/openstack/cinder/files/solaris/solarisfc.py	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/solaris/solarisfc.py	Fri Feb 05 17:54:17 2016 -0500
@@ -20,10 +20,11 @@
 import platform
 import time
 
+from oslo_concurrency import processutils as putils
+from oslo_log import log as logging
+
 from cinder.brick import exception
 from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import processutils as putils
 
 LOG = logging.getLogger(__name__)
 
--- a/components/openstack/cinder/files/solaris/solarisiscsi.py	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/solaris/solarisiscsi.py	Fri Feb 05 17:54:17 2016 -0500
@@ -20,10 +20,11 @@
 import platform
 import time
 
+from oslo_concurrency import processutils as putils
+from oslo_log import log as logging
+
 from cinder.brick import exception
 from cinder.openstack.common.gettextutils import _
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import processutils as putils
 
 LOG = logging.getLogger(__name__)
 
--- a/components/openstack/cinder/files/solaris/zfs.py	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/files/solaris/zfs.py	Fri Feb 05 17:54:17 2016 -0500
@@ -2,7 +2,7 @@
 # Copyright (c) 2012 OpenStack LLC.
 # All Rights Reserved.
 #
-# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
@@ -22,21 +22,28 @@
 import abc
 import fcntl
 import os
-import socket
 import subprocess
 import time
 
-from oslo.config import cfg
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslo_log import log as logging
 import paramiko
 
 from cinder import exception
-from cinder.i18n import _
+from cinder.i18n import _, _LE, _LI
 from cinder.image import image_utils
-from cinder.openstack.common import log as logging
-from cinder.openstack.common import processutils
 from cinder.volume import driver
 from cinder.volume.drivers.san.san import SanDriver
 
+from eventlet.green import socket
+from eventlet.green.OpenSSL import SSL
+
+import rad.client as radc
+import rad.connect as radcon
+import rad.bindings.com.oracle.solaris.rad.zfsmgr_1 as zfsmgr
+import rad.auth as rada
+
 from solaris_install.target.size import Size
 
 FLAGS = cfg.CONF
@@ -53,8 +60,53 @@
 FLAGS.register_opts(solaris_zfs_opts)
 
 
+def connect_tls(host, port=12302, locale=None, ca_certs=None):
+    """Connect to a RAD instance over TLS.
+
+    Arguments:
+    host     string, target host
+    port     int, target port (RAD_PORT_TLS = 12302)
+    locale   string, locale
+    ca_certs string, path to file containing CA certificates
+
+    Returns:
+    RadConnection: a connection to RAD
+    """
+    # We don't want SSL 2.0, SSL 3.0 nor TLS 1.0 in RAD
+    context = SSL.Context(SSL.SSLv23_METHOD)
+    context.set_options(SSL.OP_NO_SSLv2)
+    context.set_options(SSL.OP_NO_SSLv3)
+    context.set_options(SSL.OP_NO_TLSv1)
+
+    if ca_certs is not None:
+        context.set_verify(SSL.VERIFY_PEER, _tls_verify_cb)
+        context.load_verify_locations(ca_certs)
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock = SSL.Connection(context, sock)
+    sock.connect((host, port))
+    sock.do_handshake()
+
+    return radcon.RadConnection(sock, locale=locale)
+
+
 class ZFSVolumeDriver(SanDriver):
-    """Local ZFS volume operations."""
+    """OpenStack Cinder ZFS volume driver for generic ZFS volumes.
+
+    Version history:
+        1.0.0 - Initial driver with basic functionalities in Havana
+        1.1.0 - Support SAN for the remote storage nodes access in Juno
+        1.1.1 - Add support for the volume backup
+        1.1.2 - Add support for the volume migration
+        1.2.0 - Add support for the volume management in Kilo
+        1.2.1 - Enable the connect_tls by importing eventlet.green.socket
+        1.2.2 - Introduce the ZFS RAD for volume migration enhancement
+        1.2.3 - Replace volume-specific targets with one shared target in
+                the ZFSISCSIDriver
+
+    """
+
+    version = "1.2.3"
     protocol = 'local'
 
     def __init__(self, *args, **kwargs):
@@ -202,7 +254,7 @@
         """Callback for volume attached to instance or host."""
         pass
 
-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment):
         """ Callback for volume detached."""
         pass
 
@@ -214,14 +266,6 @@
 
         return self._stats
 
-    def copy_image_to_volume(self, context, volume, image_service, image_id):
-        """Fetch the image from image_service and write it to the volume."""
-        raise NotImplementedError()
-
-    def copy_volume_to_image(self, context, volume, image_service, image_meta):
-        """Copy the volume to the specified image."""
-        raise NotImplementedError()
-
     def _get_zfs_property(self, prop, dataset):
         """Get the value of property for the dataset."""
         try:
@@ -229,8 +273,8 @@
                                         'value', prop, dataset)
             return out.rstrip()
         except processutils.ProcessExecutionError:
-            LOG.info(_("Failed to get the property '%s' of the dataset '%s'") %
-                     (prop, dataset))
+            LOG.info(_LI("Failed to get the property '%s' of the dataset '%s'")
+                     % (prop, dataset))
             return None
 
     def _get_zfs_snap_name(self, snapshot):
@@ -273,7 +317,7 @@
             p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
         except:
-            LOG.error(_("_piped_execute '%s' failed.") % (cmd1))
+            LOG.error(_LE("_piped_execute '%s' failed.") % (cmd1))
             raise
 
         # Set the pipe to be blocking because evenlet.green.subprocess uses
@@ -308,7 +352,7 @@
         cmd2 = ['/usr/sbin/zfs', 'receive', dst]
         # Due to pipe injection protection in the ssh utils method,
         # cinder.utils.check_ssh_injection(), the piped commands must be passed
-        # through via paramiko.  These commands take no user defined input
+        # through via paramiko. These commands take no user defined input
         # other than the names of the zfs datasets which are already protected
         # against the special characters of concern.
         if not self.run_local:
@@ -325,6 +369,90 @@
         cmd = ['/usr/sbin/zfs', 'destroy', dst_snapshot_name]
         self._execute(*cmd)
 
+    def _get_rc_connect(self, san_info=None):
+        """Connect the RAD server."""
+        if san_info is not None:
+            san_ip = san_info.split(';')[0]
+            san_login = san_info.split(';')[1]
+            san_password = san_info.split(';')[2]
+        else:
+            san_ip = self.configuration.san_ip
+            san_login = self.configuration.san_login
+            san_password = self.configuration.san_password
+
+        rc = connect_tls(san_ip)
+        auth = rada.RadAuth(rc)
+        auth.pam_login(san_login, san_password)
+
+        return rc
+
+    def _rad_zfs_send_recv(self, src, dst, dst_san_info=None):
+        """Replicate the ZFS dataset stream."""
+        src_snapshot = {'volume_name': src['name'],
+                        'name': 'tmp-send-snapshot-%s' % src['id']}
+        src_snapshot_name = self._get_zfs_snap_name(src_snapshot)
+        prop_type = self._get_zfs_property('type', src_snapshot_name)
+        # Delete the temporary snapshot if it already exists
+        if prop_type == 'snapshot':
+            self.delete_snapshot(src_snapshot)
+        # Create the temporary snapshot of src volume
+        self.create_snapshot(src_snapshot)
+
+        src_rc = self._get_rc_connect()
+        dst_rc = self._get_rc_connect(dst_san_info)
+
+        src_pat = self._get_zfs_volume_name(src['name'])
+        src_vol_obj = src_rc.get_object(zfsmgr.ZfsDataset(),
+                                        radc.ADRGlobPattern({"name": src_pat}))
+        dst_pat = dst.rsplit('/', 1)[0]
+        dst_vol_obj = dst_rc.get_object(zfsmgr.ZfsDataset(),
+                                        radc.ADRGlobPattern({"name": dst_pat}))
+
+        send_sock_info = src_vol_obj.get_send_socket(
+            name=src_snapshot_name, socket_type=zfsmgr.SocketType.AF_INET)
+        send_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        send_sock.connect((self.hostname, int(send_sock_info.socket)))
+
+        dst_san_ip = dst_san_info.split(';')[0]
+        remote_host, alias, addresslist = socket.gethostbyaddr(dst_san_ip)
+
+        recv_sock_info = dst_vol_obj.get_receive_socket(
+            name=dst, socket_type=zfsmgr.SocketType.AF_INET,
+            name_options=zfsmgr.ZfsRecvNameOptions.use_provided_name)
+        recv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        recv_sock.connect((remote_host, int(recv_sock_info.socket)))
+
+        # Set 4mb buffer size
+        buf_size = 4194304
+        while True:
+            # Read the data from the send stream
+            buf = send_sock.recv(buf_size)
+            if not buf:
+                break
+            # Write the data to the receive stream
+            recv_sock.send(buf)
+
+        recv_sock.close()
+        send_sock.close()
+        time.sleep(1)
+
+        # Delete the temporary dst snapshot
+        pat = radc.ADRGlobPattern({"name": dst})
+        dst_zvol_obj = dst_rc.get_object(zfsmgr.ZfsDataset(), pat)
+        snapshot_list = dst_zvol_obj.get_snapshots()
+        for snap in snapshot_list:
+            if 'tmp-send-snapshot' in snap:
+                dst_zvol_obj.destroy_snapshot(snap)
+                break
+
+        # Delete the temporary src snapshot
+        self.delete_snapshot(src_snapshot)
+        LOG.debug(("Transferred src stream '%s' to dst '%s' on the host '%s'")
+                  (src_snapshot_name, dst, self.hostname))
+
+        src_rc.close()
+        dst_rc.close()
+
     def _get_zvol_path(self, volume):
         """Get the ZFS volume path."""
         return "/dev/zvol/rdsk/%s" % self._get_zfs_volume_name(volume['name'])
@@ -337,7 +465,7 @@
         backend_name = self.configuration.safe_get('volume_backend_name')
         stats["volume_backend_name"] = backend_name or self.__class__.__name__
         stats["storage_protocol"] = self.protocol
-        stats["driver_version"] = '1.0'
+        stats["driver_version"] = self.version
         stats["vendor_name"] = 'Oracle'
         stats['QoS_support'] = False
 
@@ -348,8 +476,9 @@
             (Size(used_size) + Size(avail_size)).get(Size.gb_units)
         stats['free_capacity_gb'] = Size(avail_size).get(Size.gb_units)
         stats['reserved_percentage'] = self.configuration.reserved_percentage
+
         stats['location_info'] =\
-            ('ZFSVolumeDriver:%(hostname)s:%(zfs_volume_base)s' %
+            ('ZFSVolumeDriver:%(hostname)s:%(zfs_volume_base)s:local' %
              {'hostname': self.hostname,
               'zfs_volume_base': self.configuration.zfs_volume_base})
 
@@ -373,14 +502,67 @@
 
         LOG.debug(_("Rename the volume '%s' to '%s'") % (src, dst))
 
-    def migrate_volume(self, context, volume, host):
-        """Migrate the volume among different backends on the same server.
+    def _get_existing_volume_ref_name(self, existing_ref):
+        """Returns the volume name of an existing reference.
+        And Check if an existing volume reference has a source-name
+        """
+        if 'source-name' in existing_ref:
+            vol_name = existing_ref['source-name']
+            return vol_name
+        else:
+            reason = _("Reference must contain source-name.")
+            raise exception.ManageExistingInvalidReference(
+                existing_ref=existing_ref,
+                reason=reason)
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Return size of volume to be managed by manage_existing.
+        existing_ref is a dictionary of the form:
+        {'source-name': <name of the volume>}
+        """
+        target_vol_name = self._get_existing_volume_ref_name(existing_ref)
+        volsize = self._get_zfs_property('volsize', target_vol_name)
+
+        return Size(volsize).get(Size.gb_units)
+
+    def manage_existing(self, volume, existing_ref):
+        """Brings an existing zfs volume object under Cinder management.
 
-        The volume migration can only run locally by calling zfs send/recv
-        cmds and the specified host needs to be on the same server with the
-        host. But, one exception is when the src and dst volume are located
-        under the same zpool locally or remotely, the migration will be done
-        by just renaming the volume.
+        :param volume:       Cinder volume to manage
+        :param existing_ref: Driver-specific information used to identify a
+        volume
+        """
+        # Check the existence of the ZFS volume
+        target_vol_name = self._get_existing_volume_ref_name(existing_ref)
+        prop_type = self._get_zfs_property('type', target_vol_name)
+        if prop_type != 'volume':
+            msg = (_("Failed to identify the volume '%s'.")
+                   % target_vol_name)
+            raise exception.InvalidInput(reason=msg)
+
+        if volume['name']:
+            volume_name = volume['name']
+        else:
+            volume_name = 'new_zvol'
+
+        # rename the volume
+        dst_volume = "%s/%s" % (self.configuration.zfs_volume_base,
+                                volume_name)
+        self.rename_volume(target_vol_name, dst_volume)
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management."""
+        # Rename the volume's name to cinder-unm-* format.
+        volume_name = self._get_zfs_volume_name(volume['name'])
+        tmp_volume_name = "cinder-unm-%s" % volume['name']
+        new_volume_name = "%s/%s" % (self.configuration.zfs_volume_base,
+                                     tmp_volume_name)
+        self.rename_volume(volume_name, new_volume_name)
+
+    def migrate_volume(self, context, volume, host):
+        """Migrate the volume from one backend to another one.
+        The backends should be in the same volume type.
+
         :param context: context
         :param volume: a dictionary describing the volume to migrate
         :param host: a dictionary describing the host to migrate to
@@ -391,27 +573,28 @@
                       (volume['name'], volume['status']))
             return false_ret
 
-        if 'capabilities' not in host or \
-           'location_info' not in host['capabilities']:
-            LOG.debug(_("No location_info or capabilities are in host info"))
+        if 'capabilities' not in host:
+            LOG.debug(("No 'capabilities' is reported in the host '%s'") %
+                      host['host'])
+            return false_ret
+
+        if 'location_info' not in host['capabilities']:
+            LOG.debug(("No 'location_info' is reported in the host '%s'") %
+                      host['host'])
             return false_ret
 
         info = host['capabilities']['location_info']
-        if (self.hostname != info.split(':')[1]):
-            LOG.debug(_("Migration between two different servers '%s' and "
-                      "'%s' is not supported yet.") %
-                      (self.hostname, info.split(':')[1]))
-            return false_ret
-
-        dst_volume = "%s/%s" % (info.split(':')[-1], volume['name'])
+        dst_volume = "%s/%s" % (info.split(':')[2], volume['name'])
         src_volume = self._get_zfs_volume_name(volume['name'])
+
         # check if the src and dst volume are under the same zpool
-        if (src_volume.split('/')[0] == dst_volume.split('/')[0]):
-            self.rename_volume(src_volume, dst_volume)
+        dst_san_info = info.split(':')[3]
+        if dst_san_info == 'local':
+            self._zfs_send_recv(volume, dst_volume)
         else:
-            self._zfs_send_recv(volume, dst_volume)
-            # delete the source volume
-            self.delete_volume(volume)
+            self._rad_zfs_send_recv(volume, dst_volume, dst_san_info)
+        # delete the source volume
+        self.delete_volume(volume)
 
         provider_location = {}
         return (True, provider_location)
@@ -544,6 +727,29 @@
 
     def __init__(self, *args, **kwargs):
         super(ZFSISCSIDriver, self).__init__(*args, **kwargs)
+        if not self.configuration.san_is_local:
+            self.hostname, alias, addresslist = \
+                socket.gethostbyaddr(self.configuration.san_ip)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status."""
+        status = super(ZFSISCSIDriver, self).get_volume_stats(refresh)
+        status["storage_protocol"] = self.protocol
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        status["volume_backend_name"] = backend_name or self.__class__.__name__
+
+        if not self.configuration.san_is_local:
+            san_info = "%s;%s;%s" % (self.configuration.san_ip,
+                                     self.configuration.san_login,
+                                     self.configuration.san_password)
+            status['location_info'] = \
+                ('ZFSISCSIDriver:%(hostname)s:%(zfs_volume_base)s:'
+                 '%(san_info)s' %
+                 {'hostname': self.hostname,
+                  'zfs_volume_base': self.configuration.zfs_volume_base,
+                  'san_info': san_info})
+
+        return status
 
     def do_setup(self, context):
         """Setup the target and target group."""
@@ -735,6 +941,29 @@
 
     def __init__(self, *args, **kwargs):
         super(ZFSFCDriver, self).__init__(*args, **kwargs)
+        if not self.configuration.san_is_local:
+            self.hostname, alias, addresslist = \
+                socket.gethostbyaddr(self.configuration.san_ip)
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status."""
+        status = super(ZFSFCDriver, self).get_volume_stats(refresh)
+        status["storage_protocol"] = self.protocol
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        status["volume_backend_name"] = backend_name or self.__class__.__name__
+
+        if not self.configuration.san_is_local:
+            san_info = "%s;%s;%s" % (self.configuration.san_ip,
+                                     self.configuration.san_login,
+                                     self.configuration.san_password)
+            status['location_info'] = \
+                ('ZFSFCDriver:%(hostname)s:%(zfs_volume_base)s:'
+                 '%(san_info)s' %
+                 {'hostname': self.hostname,
+                  'zfs_volume_base': self.configuration.zfs_volume_base,
+                  'san_info': san_info})
+
+        return status
 
     def check_for_setup_error(self):
         """Check the setup error."""
@@ -847,15 +1076,15 @@
                                'wwn.%s' % wwn)
             self._stmf_execute('/usr/sbin/stmfadm', 'add-tg-member', '-g',
                                target_group, 'wwn.%s' % wwn)
-            self._stmf_execute('/usr/sbin/stmfadm', 'online-target',
-                               'wwn.%s' % wwn)
-        assert self._target_in_tg(wwn, target_group)
 
         # Add a logical unit view entry
         # TODO(Strony): replace the auto assigned LUN with '-n' option
         if luid is not None:
             self._stmf_execute('/usr/sbin/stmfadm', 'add-view', '-t',
                                target_group, luid)
+            self._stmf_execute('/usr/sbin/stmfadm', 'online-target',
+                               'wwn.%s' % wwn)
+        assert self._target_in_tg(wwn, target_group)
 
     def remove_export(self, context, volume):
         """Remove an export for a volume."""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/files/zfssa/zfssaiscsi.py	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,466 @@
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""
+ZFS Storage Appliance Cinder Volume Driver
+"""
+import ast
+import base64
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import units
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LW
+from cinder.volume import driver
+from cinder.volume.drivers.san import san
+from cinder.volume.drivers.zfssa import zfssarest
+from cinder.volume import volume_types
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+ZFSSA_OPTS = [
+    cfg.StrOpt('zfssa_pool',
+               help='Storage pool name.'),
+    cfg.StrOpt('zfssa_project',
+               help='Project name.'),
+    cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
+               choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k',
+                        '128k'],
+               help='Block size.'),
+    cfg.BoolOpt('zfssa_lun_sparse', default=False,
+                help='Flag to enable sparse (thin-provisioned): True, False.'),
+    cfg.StrOpt('zfssa_lun_compression', default='off',
+               choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
+               help='Data compression.'),
+    cfg.StrOpt('zfssa_lun_logbias', default='latency',
+               choices=['latency', 'throughput'],
+               help='Synchronous write bias.'),
+    cfg.StrOpt('zfssa_initiator_group', default='',
+               help='iSCSI initiator group.'),
+    cfg.StrOpt('zfssa_initiator', default='',
+               help='iSCSI initiator IQNs. (comma separated)'),
+    cfg.StrOpt('zfssa_initiator_user', default='',
+               help='iSCSI initiator CHAP user.'),
+    cfg.StrOpt('zfssa_initiator_password', default='',
+               help='iSCSI initiator CHAP password.', secret=True),
+    cfg.StrOpt('zfssa_initiator_config', default='',
+               help='iSCSI initiators configuration.'),
+    cfg.StrOpt('zfssa_target_group', default='tgt-grp',
+               help='iSCSI target group name.'),
+    cfg.StrOpt('zfssa_target_user', default='',
+               help='iSCSI target CHAP user.'),
+    cfg.StrOpt('zfssa_target_password', default='',
+               help='iSCSI target CHAP password.', secret=True),
+    cfg.StrOpt('zfssa_target_portal',
+               help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
+    cfg.StrOpt('zfssa_target_interfaces',
+               help='Network interfaces of iSCSI targets. (comma separated)'),
+    cfg.IntOpt('zfssa_rest_timeout',
+               help='REST connection timeout. (seconds)')
+
+]
+
+CONF.register_opts(ZFSSA_OPTS)
+
+ZFSSA_LUN_SPECS = {'zfssa:volblocksize',
+                   'zfssa:sparse',
+                   'zfssa:compression',
+                   'zfssa:logbias'}
+
+
+def factory_zfssa():
+    return zfssarest.ZFSSAApi()
+
+
+class ZFSSAISCSIDriver(driver.ISCSIDriver):
+    """ZFSSA Cinder volume driver"""
+
+    VERSION = '1.0.0'
+    protocol = 'iSCSI'
+
+    def __init__(self, *args, **kwargs):
+        super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
+        self.configuration.append_config_values(ZFSSA_OPTS)
+        self.configuration.append_config_values(san.san_opts)
+        self.zfssa = None
+        self._stats = None
+
+    def _get_target_alias(self):
+        """return target alias"""
+        return self.configuration.zfssa_target_group
+
+    def do_setup(self, context):
+        """Setup - create multiple elements.
+
+        Project, initiators, initiatorgroup, target and targetgroup.
+        """
+        lcfg = self.configuration
+        msg = (_('Connecting to host: %s.') % lcfg.san_ip)
+        LOG.info(msg)
+        self.zfssa = factory_zfssa()
+        self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
+        auth_str = base64.encodestring('%s:%s' %
+                                       (lcfg.san_login,
+                                        lcfg.san_password))[:-1]
+        self.zfssa.login(auth_str)
+        self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
+                                  compression=lcfg.zfssa_lun_compression,
+                                  logbias=lcfg.zfssa_lun_logbias)
+
+        if (lcfg.zfssa_initiator_config != ''):
+            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
+            for initiator_group in initiator_config:
+                zfssa_initiator_group = initiator_group
+                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
+                    self.zfssa.create_initiator(zfssa_initiator['iqn'],
+                                                zfssa_initiator_group + '-' +
+                                                zfssa_initiator['iqn'],
+                                                chapuser=
+                                                zfssa_initiator['user'],
+                                                chapsecret=
+                                                zfssa_initiator['password'])
+                    if (zfssa_initiator_group != 'default'):
+                        self.zfssa.add_to_initiatorgroup(
+                            zfssa_initiator['iqn'],
+                            zfssa_initiator_group)
+        else:
+            LOG.warning(_LW('zfssa_initiator_config not found. '
+                            'Using deprecated configuration options.'))
+            if (lcfg.zfssa_initiator != '' and
+                (lcfg.zfssa_initiator_group == '' or
+                 lcfg.zfssa_initiator_group == 'default')):
+                LOG.warning(_LW('zfssa_initiator: %(ini)s'
+                                ' wont be used on '
+                                'zfssa_initiator_group= %(inigrp)s.')
+                            % {'ini': lcfg.zfssa_initiator,
+                               'inigrp': lcfg.zfssa_initiator_group})
+
+            # Setup initiator and initiator group
+            if (lcfg.zfssa_initiator != '' and
+               lcfg.zfssa_initiator_group != '' and
+               lcfg.zfssa_initiator_group != 'default'):
+                for initiator in lcfg.zfssa_initiator.split(','):
+                    self.zfssa.create_initiator(
+                        initiator, lcfg.zfssa_initiator_group + '-' +
+                        initiator, chapuser=lcfg.zfssa_initiator_user,
+                        chapsecret=lcfg.zfssa_initiator_password)
+                    self.zfssa.add_to_initiatorgroup(
+                        initiator, lcfg.zfssa_initiator_group)
+
+        # Parse interfaces
+        interfaces = []
+        for interface in lcfg.zfssa_target_interfaces.split(','):
+            if interface == '':
+                continue
+            interfaces.append(interface)
+
+        # Setup target and target group
+        iqn = self.zfssa.create_target(
+            self._get_target_alias(),
+            interfaces,
+            tchapuser=lcfg.zfssa_target_user,
+            tchapsecret=lcfg.zfssa_target_password)
+
+        self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
+
+    def check_for_setup_error(self):
+        """Check that driver can login.
+
+        Check also pool, project, initiators, initiatorgroup, target and
+        targetgroup.
+        """
+        lcfg = self.configuration
+
+        self.zfssa.verify_pool(lcfg.zfssa_pool)
+        self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
+
+        if (lcfg.zfssa_initiator_config != ''):
+            initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
+            for initiator_group in initiator_config:
+                zfssa_initiator_group = initiator_group
+                for zfssa_initiator in initiator_config[zfssa_initiator_group]:
+                    self.zfssa.verify_initiator(zfssa_initiator['iqn'])
+        else:
+            if (lcfg.zfssa_initiator != '' and
+               lcfg.zfssa_initiator_group != '' and
+               lcfg.zfssa_initiator_group != 'default'):
+                for initiator in lcfg.zfssa_initiator.split(','):
+                    self.zfssa.verify_initiator(initiator)
+
+            self.zfssa.verify_target(self._get_target_alias())
+
+    def _get_provider_info(self, volume):
+        """return provider information"""
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project, volume['name'])
+        iqn = self.zfssa.get_target(self._get_target_alias())
+        loc = "%s %s %s" % (lcfg.zfssa_target_portal, iqn, lun['number'])
+        LOG.debug('_get_provider_info: provider_location: %s' % loc)
+        provider = {'provider_location': loc}
+        if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
+            provider['provider_auth'] = ('CHAP %s %s' %
+                                         (lcfg.zfssa_target_user,
+                                          lcfg.zfssa_target_password))
+
+        return provider
+
+    def create_volume(self, volume):
+        """Create a volume on ZFSSA"""
+        LOG.debug('zfssa.create_volume: volume=' + volume['name'])
+        lcfg = self.configuration
+        volsize = str(volume['size']) + 'g'
+        specs = self._get_voltype_specs(volume)
+        self.zfssa.create_lun(lcfg.zfssa_pool,
+                              lcfg.zfssa_project,
+                              volume['name'],
+                              volsize,
+                              lcfg.zfssa_target_group,
+                              specs)
+
+    def delete_volume(self, volume):
+        """Deletes a volume with the given volume['name']."""
+        LOG.debug('zfssa.delete_volume: name=' + volume['name'])
+        lcfg = self.configuration
+        lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                     lcfg.zfssa_project,
+                                     volume['name'])
+        # Delete clone temp snapshot. see create_cloned_volume()
+        if 'origin' in lun2del and 'id' in volume:
+            if lun2del['nodestroy']:
+                self.zfssa.set_lun_props(lcfg.zfssa_pool,
+                                         lcfg.zfssa_project,
+                                         volume['name'],
+                                         nodestroy=False)
+
+            tmpsnap = 'tmp-snapshot-%s' % volume['id']
+            if lun2del['origin']['snapshot'] == tmpsnap:
+                self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                           lcfg.zfssa_project,
+                                           lun2del['origin']['share'],
+                                           lun2del['origin']['snapshot'])
+                return
+
+        self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
+                              project=lcfg.zfssa_project,
+                              lun=volume['name'])
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot with the given snapshot['name'] of the
+           snapshot['volume_name']
+        """
+        LOG.debug('zfssa.create_snapshot: snapshot=' + snapshot['name'])
+        lcfg = self.configuration
+        self.zfssa.create_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        LOG.debug('zfssa.delete_snapshot: snapshot=' + snapshot['name'])
+        lcfg = self.configuration
+        has_clones = self.zfssa.has_clones(lcfg.zfssa_pool,
+                                           lcfg.zfssa_project,
+                                           snapshot['volume_name'],
+                                           snapshot['name'])
+        if has_clones:
+            LOG.error(_LE('Snapshot %s: has clones') % snapshot['name'])
+            raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
+
+        self.zfssa.delete_snapshot(lcfg.zfssa_pool,
+                                   lcfg.zfssa_project,
+                                   snapshot['volume_name'],
+                                   snapshot['name'])
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot - clone a snapshot"""
+        LOG.debug('zfssa.create_volume_from_snapshot: volume=' +
+                  volume['name'])
+        LOG.debug('zfssa.create_volume_from_snapshot: snapshot=' +
+                  snapshot['name'])
+        if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
+            exception_msg = (_('Error verifying clone size on '
+                               'Volume clone: %(clone)s '
+                               'Size: %(size)d on'
+                               'Snapshot: %(snapshot)s')
+                             % {'clone': volume['name'],
+                                'size': volume['size'],
+                                'snapshot': snapshot['name']})
+            LOG.error(exception_msg)
+            raise exception.InvalidInput(reason=exception_msg)
+
+        lcfg = self.configuration
+        self.zfssa.clone_snapshot(lcfg.zfssa_pool,
+                                  lcfg.zfssa_project,
+                                  snapshot['volume_name'],
+                                  snapshot['name'],
+                                  volume['name'])
+
+    def _update_volume_status(self):
+        """Retrieve status info from volume group."""
+        LOG.debug("Updating volume status")
+        self._stats = None
+        data = {}
+        backend_name = self.configuration.safe_get('volume_backend_name')
+        data["volume_backend_name"] = backend_name or self.__class__.__name__
+        data["vendor_name"] = 'Oracle'
+        data["driver_version"] = self.VERSION
+        data["storage_protocol"] = self.protocol
+
+        lcfg = self.configuration
+        (avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
+        if avail is None or total is None:
+            return
+
+        data['total_capacity_gb'] = int(total) / units.Gi
+        data['free_capacity_gb'] = int(avail) / units.Gi
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        self._stats = data
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume status.
+           If 'refresh' is True, run update the stats first.
+        """
+        if refresh:
+            self._update_volume_status()
+        return self._stats
+
+    def create_export(self, context, volume):
+        pass
+
+    def remove_export(self, context, volume):
+        pass
+
+    def ensure_export(self, context, volume):
+        pass
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        self.ensure_export(context, volume)
+        super(ZFSSAISCSIDriver, self).copy_image_to_volume(
+            context, volume, image_service, image_id)
+
+    def extend_volume(self, volume, new_size):
+        """Driver entry point to extent volume size."""
+        LOG.debug('extend_volume: volume name: %s' % volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_props(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 volume['name'],
+                                 volsize=new_size * units.Gi)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Create a clone of the specified volume."""
+        zfssa_snapshot = {'volume_name': src_vref['name'],
+                          'name': 'tmp-snapshot-%s' % volume['id']}
+        self.create_snapshot(zfssa_snapshot)
+        try:
+            self.create_volume_from_snapshot(volume, zfssa_snapshot)
+        except exception.VolumeBackendAPIException:
+            LOG.error(_LE('Clone Volume:'
+                          '%(volume)s failed from source volume:'
+                          '%(src_vref)s')
+                      % {'volume': volume['name'],
+                         'src_vref': src_vref['name']})
+            # Cleanup snapshot
+            self.delete_snapshot(zfssa_snapshot)
+
+    def local_path(self, volume):
+        """Not implemented"""
+        pass
+
+    def backup_volume(self, context, backup, backup_service):
+        """Not implemented"""
+        pass
+
+    def restore_backup(self, context, backup, volume, backup_service):
+        """Not implemented"""
+        pass
+
+    def _verify_clone_size(self, snapshot, size):
+        """Check whether the clone size is the same as the parent volume"""
+        lcfg = self.configuration
+        lun = self.zfssa.get_lun(lcfg.zfssa_pool,
+                                 lcfg.zfssa_project,
+                                 snapshot['volume_name'])
+        return lun['size'] == size
+
+    def initialize_connection(self, volume, connector):
+        lcfg = self.configuration
+        init_groups = self.zfssa.get_initiator_initiatorgroup(
+            connector['initiator'])
+        for initiator_group in init_groups:
+            self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                              lcfg.zfssa_project,
+                                              volume['name'],
+                                              initiator_group)
+        iscsi_properties = {}
+        provider = self._get_provider_info(volume)
+        (target_portal, iqn, lun) = provider['provider_location'].split()
+        iscsi_properties['target_discovered'] = False
+        iscsi_properties['target_portal'] = target_portal
+        iscsi_properties['target_iqn'] = iqn
+        iscsi_properties['target_lun'] = lun
+        iscsi_properties['volume_id'] = volume['id']
+
+        if 'provider_auth' in provider:
+            (auth_method, auth_username, auth_password) = provider[
+                'provider_auth'].split()
+            iscsi_properties['auth_method'] = auth_method
+            iscsi_properties['auth_username'] = auth_username
+            iscsi_properties['auth_password'] = auth_password
+
+        return {
+            'driver_volume_type': 'iscsi',
+            'data': iscsi_properties
+        }
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Driver entry point to terminate a connection for a volume."""
+        LOG.debug('terminate_connection: volume name: %s.' % volume['name'])
+        lcfg = self.configuration
+        self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
+                                          lcfg.zfssa_project,
+                                          volume['name'],
+                                          '')
+
+    def _get_voltype_specs(self, volume):
+        """Get specs suitable for volume creation."""
+        vtype = volume.get('volume_type_id', None)
+        extra_specs = None
+        if vtype:
+            extra_specs = volume_types.get_volume_type_extra_specs(vtype)
+
+        return self._get_specs(extra_specs)
+
+    def _get_specs(self, xspecs):
+        """Return a dict with extra specs and/or config values."""
+        result = {}
+        for spc in ZFSSA_LUN_SPECS:
+            val = None
+            prop = spc.split(':')[1]
+            cfg = 'zfssa_lun_' + prop
+            if xspecs:
+                val = xspecs.pop(spc, None)
+
+            if val is None:
+                val = self.configuration.safe_get(cfg)
+
+            if val is not None and val != '':
+                result.update({prop: val})
+
+        return result
--- a/components/openstack/cinder/patches/01-requirements.patch	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/patches/01-requirements.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -1,70 +1,89 @@
 In-house patch to remove unnecessary dependencies from Cinder's
 requirements files. The specific reasons are as follows:
 
-kombu           Not applicable
+anyjson		Not applicable
+
+kombu		Not applicable
 
-oslo.rootwrap   Not applicable to Solaris
+oslo.rootwrap	Not applicable to Solaris
 
-pycrypto        Not applicable to Solaris (various drivers specific)
+pycrypto	Not applicable to Solaris (various drivers specific)
 
-rtslib-fb       Not applicable to Solaris (Linux iSCSI specific)
+rtslib-fb	Not applicable to Solaris (Linux iSCSI specific)
 
---- cinder-2014.2.2/cinder.egg-info/requires.txt.orig	2015-05-27 22:55:59.906826810 -0700
-+++ cinder-2014.2.2/cinder.egg-info/requires.txt	2015-05-27 22:57:00.022877081 -0700
[email protected]@ -6,18 +6,15 @@
+--- cinder-2015.1.2/cinder.egg-info/requires.txt.~1~	2015-10-13 09:30:55.000000000 -0700
++++ cinder-2015.1.2/cinder.egg-info/requires.txt	2016-01-30 21:00:52.192406009 -0800
[email protected]@ -1,11 +1,9 @@
+ pbr!=0.7,<1.0,>=0.6
+-anyjson>=0.3.3
+ Babel>=1.3
+ eventlet!=0.17.0,>=0.16.1
  greenlet>=0.3.2
  iso8601>=0.1.9
- keystonemiddleware>=1.0.0
+ keystonemiddleware<1.6.0,>=1.5.0
 -kombu>=2.5.0
  lxml>=2.3
  netaddr>=0.7.12
- oslo.config>=1.4.0  # Apache-2.0
- oslo.db>=1.0.0,<1.1  # Apache-2.0
- oslo.messaging>=1.4.0,!=1.5.0,<1.6.0
--oslo.rootwrap>=1.3.0
- osprofiler>=0.3.0                       # Apache-2.0
+ oslo.config<1.10.0,>=1.9.3 # Apache-2.0
[email protected]@ -15,14 +13,12 @@ oslo.db<1.8.0,>=1.7.0 # Apache-2.0
+ oslo.log<1.1.0,>=1.0.0 # Apache-2.0
+ oslo.messaging<1.9.0,>=1.8.0 # Apache-2.0
+ oslo.middleware<1.1.0,>=1.0.0 # Apache-2.0
+-oslo.rootwrap<1.7.0,>=1.6.0 # Apache-2.0
+ oslo.serialization<1.5.0,>=1.4.0 # Apache-2.0
+ oslo.utils!=1.4.1,<1.5.0,>=1.4.0 # Apache-2.0
+ osprofiler>=0.3.0 # Apache-2.0
  paramiko>=1.13.0
  Paste
  PasteDeploy>=1.5.0
 -pycrypto>=2.6
- python-barbicanclient>=2.1.0,!=3.0.0,<3.0.2
- python-glanceclient>=0.14.0
- python-novaclient>=2.18.0
[email protected]@ -25,7 +22,6 @@
- requests>=1.2.1,!=2.4.0
- Routes>=1.12.3,!=2.0
- taskflow>=0.4,<0.7.0
--rtslib-fb>=2.1.39
- six>=1.7.0
- SQLAlchemy>=0.8.4,<=0.9.99,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.9.4,!=0.9.5,!=0.9.6
- sqlalchemy-migrate==0.9.1
---- cinder-2014.2.2/requirements.txt.orig       2015-05-27 22:57:45.226877884 -0700
-+++ cinder-2014.2.2/requirements.txt    2015-05-27 22:58:25.817248288 -0700
[email protected]@ -10,18 +10,15 @@
+ pyparsing>=2.0.1
+ python-barbicanclient<3.1.0,>=3.0.1
+ python-glanceclient<0.18.0,>=0.15.0
[email protected]@ -32,7 +28,6 @@ requests!=2.4.0,>=2.2.0
+ retrying!=1.3.0,>=1.2.3 # Apache-2.0
+ Routes!=2.0,>=1.12.3
+ taskflow<0.8.0,>=0.7.1
+-rtslib-fb>=2.1.41
+ six>=1.9.0
+ SQLAlchemy<=0.9.99,>=0.9.7
+ sqlalchemy-migrate!=0.9.8,<0.10.0,>=0.9.5
+--- cinder-2015.1.2/requirements.txt.~1~	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/requirements.txt	2016-01-30 21:01:27.270741292 -0800
[email protected]@ -3,13 +3,11 @@
+ # process, which may cause wedges in the gate later.
+ 
+ pbr!=0.7,<1.0,>=0.6
+-anyjson>=0.3.3
+ Babel>=1.3
+ eventlet!=0.17.0,>=0.16.1
  greenlet>=0.3.2
  iso8601>=0.1.9
- keystonemiddleware>=1.0.0
+ keystonemiddleware<1.6.0,>=1.5.0
 -kombu>=2.5.0
  lxml>=2.3
  netaddr>=0.7.12
- oslo.config>=1.4.0  # Apache-2.0
- oslo.db>=1.0.0,<1.1  # Apache-2.0
- oslo.messaging>=1.4.0,!=1.5.0,<1.6.0
--oslo.rootwrap>=1.3.0
- osprofiler>=0.3.0                       # Apache-2.0
+ oslo.config<1.10.0,>=1.9.3 # Apache-2.0
[email protected]@ -19,14 +17,12 @@ oslo.db<1.8.0,>=1.7.0 # Apache-2.0
+ oslo.log<1.1.0,>=1.0.0 # Apache-2.0
+ oslo.messaging<1.9.0,>=1.8.0 # Apache-2.0
+ oslo.middleware<1.1.0,>=1.0.0 # Apache-2.0
+-oslo.rootwrap<1.7.0,>=1.6.0 # Apache-2.0
+ oslo.serialization<1.5.0,>=1.4.0 # Apache-2.0
+ oslo.utils!=1.4.1,<1.5.0,>=1.4.0 # Apache-2.0
+ osprofiler>=0.3.0 # Apache-2.0
  paramiko>=1.13.0
  Paste
  PasteDeploy>=1.5.0
 -pycrypto>=2.6
- python-barbicanclient>=2.1.0,!=3.0.0,<3.0.2
- python-glanceclient>=0.14.0
- python-novaclient>=2.18.0
[email protected]@ -29,7 +26,6 @@
- requests>=1.2.1,!=2.4.0
- Routes>=1.12.3,!=2.0
- taskflow>=0.4,<0.7.0
--rtslib-fb>=2.1.39
- six>=1.7.0
- SQLAlchemy>=0.8.4,<=0.9.99,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.9.4,!=0.9.5,!=0.9.6
- sqlalchemy-migrate==0.9.1
-
+ pyparsing>=2.0.1
+ python-barbicanclient<3.1.0,>=3.0.1
+ python-glanceclient<0.18.0,>=0.15.0
[email protected]@ -36,7 +32,6 @@ requests!=2.4.0,>=2.2.0
+ retrying!=1.3.0,>=1.2.3 # Apache-2.0
+ Routes!=2.0,>=1.12.3
+ taskflow<0.8.0,>=0.7.1
+-rtslib-fb>=2.1.41
+ six>=1.9.0
+ SQLAlchemy<=0.9.99,>=0.9.7
+ sqlalchemy-migrate!=0.9.8,<0.10.0,>=0.9.5
--- a/components/openstack/cinder/patches/02-nopycrypto.patch	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/patches/02-nopycrypto.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -1,8 +1,8 @@
 In-house removal of PyCrypto dependency in Cinder. This patch is
 Solaris-specific and not suitable for upstream.
 
---- cinder-2014.2.2/cinder/volume/utils.py.orig	2014-10-16 06:26:26.000000000 -0700
-+++ cinder-2014.2.2/cinder/volume/utils.py	2014-11-23 15:13:26.412114890 -0800
+--- cinder-2015.1.2/cinder/volume/utils.py.orig	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/cinder/volume/utils.py	2016-01-30 23:52:30.088107306 -0800
 @@ -16,8 +16,8 @@
  
  
@@ -10,10 +10,10 @@
 +from random import SystemRandom
  
 -from Crypto.Random import random
- from oslo.config import cfg
- 
- from cinder.brick.local_dev import lvm as brick_lvm
[email protected]@ -429,6 +429,7 @@ def generate_password(length=20, symbolg
+ from oslo_concurrency import processutils
+ from oslo_config import cfg
+ from oslo_log import log as logging
[email protected]@ -419,6 +419,7 @@ def generate_password(length=16, symbolg
      # NOTE(jerdfelt): Some password policies require at least one character
      # from each group of symbols, so start off with one random character
      # from each symbol group
--- a/components/openstack/cinder/patches/03-emc_vmax_iscsi.patch	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/patches/03-emc_vmax_iscsi.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -1,29 +1,28 @@
 In-house patch to adopt EMC driver to use Solaris' iscsiadm(1M) rather
-than that from Linux.  Patch has not yet been submitted upstream.
+than that from Linux. Patch has not yet been submitted upstream.
 
---- cinder-2014.2.2/cinder/volume/drivers/emc/emc_vmax_iscsi.py.orig	2014-10-16 06:26:26.000000000 -0700
-+++ cinder-2014.2.2/cinder/volume/drivers/emc/emc_vmax_iscsi.py	2014-10-27 00:12:22.034201865 -0700
[email protected]@ -16,6 +16,8 @@
- ISCSI Drivers for EMC VMAX arrays based on SMI-S.
+--- cinder-2015.1.2/cinder/volume/drivers/emc/emc_vmax_iscsi.py.~1~	2016-01-30 23:55:52.360305136 -0800
++++ cinder-2015.1.2/cinder/volume/drivers/emc/emc_vmax_iscsi.py	2016-01-31 00:03:10.160731018 -0800
[email protected]@ -17,6 +17,7 @@ ISCSI Drivers for EMC VMAX arrays based
  
  """
+ import os
 +import sys
-+
- import six
  
- from cinder import context
[email protected]@ -155,17 +157,43 @@ class EMCVMAXISCSIDriver(driver.ISCSIDri
- 
-         LOG.info(_("ISCSI provider_location not stored, using discovery."))
+ from oslo_log import log as logging
+ import six
[email protected]@ -162,17 +163,43 @@ class EMCVMAXISCSIDriver(driver.ISCSIDri
+             LOG.error(_LE(
+                 "You must set your iscsi_ip_address in cinder.conf."))
  
 -        (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
 -                                    '-t', 'sendtargets', '-p',
 -                                    self.configuration.iscsi_ip_address,
 -                                    run_as_root=True)
 -
--        LOG.info(_(
--            "smis_do_iscsi_discovery is: %(out)s")
--            % {'out': out})
+-        LOG.info(_LI(
+-            "smis_do_iscsi_discovery is: %(out)s."),
+-            {'out': out})
          targets = []
 -        for target in out.splitlines():
 -            targets.append(target)
@@ -58,9 +57,9 @@
 +                                        self.configuration.iscsi_ip_address,
 +                                        run_as_root=True)
 +
-+            LOG.info(_(
-+                "smis_do_iscsi_discovery is: %(out)s")
-+                % {'out': out})
++            LOG.info(_LI(
++                "smis_do_iscsi_discovery is: %(out)s."),
++                {'out': out})
 +            for target in out.splitlines():
 +                targets.append(target)
  
--- a/components/openstack/cinder/patches/04-volume-backup.patch	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/patches/04-volume-backup.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -1,27 +1,20 @@
-This patch is to replace the linux-specific codes with the solaris
-codes to support the cinder backup on the Solaris.
+This patch is to replace Linux-specific code with conditional checks in
+the Cinder Brick code to support Cinder backup on Solaris. Patch has
+not yet been submitted upstream.
 
---- cinder-2014.2.2/cinder/brick/initiator/connector.py.~1~	2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/cinder/brick/initiator/connector.py	2015-04-20 21:05:25.881159722 -0700
[email protected]@ -15,6 +15,7 @@
- 
- import os
- import socket
-+import sys
- import time
- 
- from cinder.brick import exception
[email protected]@ -22,6 +23,8 @@
- from cinder.brick.initiator import host_driver
+--- cinder-2015.1.2/cinder/brick/initiator/connector.py.~1~	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/cinder/brick/initiator/connector.py	2016-01-31 00:12:30.729547660 -0800
[email protected]@ -32,6 +32,8 @@ from cinder.brick.initiator import host_
  from cinder.brick.initiator import linuxfc
  from cinder.brick.initiator import linuxscsi
+ from cinder.brick.remotefs import remotefs
 +from cinder.brick.initiator import solarisfc
 +from cinder.brick.initiator import solarisiscsi
- from cinder.brick.remotefs import remotefs
- from cinder.i18n import _
- from cinder.openstack.common import lockutils
[email protected]@ -39,7 +42,10 @@
-     """Get the connection properties for all protocols."""
+ from cinder.i18n import _, _LE, _LW
+ from cinder.openstack.common import loopingcall
+ 
[email protected]@ -72,7 +74,10 @@ def get_connector_properties(root_helper
+     """
  
      iscsi = ISCSIConnector(root_helper=root_helper)
 -    fc = linuxfc.LinuxFibreChannel(root_helper=root_helper)
@@ -32,21 +25,21 @@
  
      props = {}
      props['ip'] = my_ip
[email protected]@ -134,8 +140,11 @@
[email protected]@ -188,8 +193,11 @@ class InitiatorConnector(executor.Execut
                 'of=/dev/null', 'count=1')
          out, info = None, None
          try:
--            out, info = self._execute(*cmd, run_as_root=True,
+-            out, info = self._execute(*cmd, run_as_root=run_as_root,
 -                                      root_helper=self._root_helper)
 +            if sys.platform == 'sunos5':
 +                out, info = self._execute(*cmd)
 +            else:
-+                out, info = self._execute(*cmd, run_as_root=True,
++                out, info = self._execute(*cmd, run_as_root=run_as_root,
 +                                          root_helper=self._root_helper)
          except putils.ProcessExecutionError as e:
-             LOG.error(_("Failed to access the device on the path "
-                         "%(path)s: %(error)s %(info)s.") %
[email protected]@ -171,7 +180,10 @@
+             LOG.error(_LE("Failed to access the device on the path "
+                           "%(path)s: %(error)s %(info)s.") %
[email protected]@ -225,7 +233,10 @@ class ISCSIConnector(InitiatorConnector)
                   execute=putils.execute, use_multipath=False,
                   device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                   *args, **kwargs):
@@ -58,7 +51,7 @@
          super(ISCSIConnector, self).__init__(root_helper, driver=driver,
                                               execute=execute,
                                               device_scan_attempts=
[email protected]@ -181,6 +193,8 @@
[email protected]@ -235,6 +246,8 @@ class ISCSIConnector(InitiatorConnector)
  
      def set_execute(self, execute):
          super(ISCSIConnector, self).set_execute(execute)
@@ -66,28 +59,27 @@
 +            return
          self._linuxscsi.set_execute(execute)
  
-     @synchronized('connect_volume')
[email protected]@ -192,6 +206,9 @@
-         target_iqn - iSCSI Qualified Name
-         target_lun - LUN id of the volume
+     def _iterate_all_targets(self, connection_properties):
[email protected]@ -289,6 +302,9 @@ class ISCSIConnector(InitiatorConnector)
+         Note that plural keys may be used when use_multipath=True
          """
+ 
 +        if sys.platform == 'sunos5':
 +            return self._solarisiscsi.connect_volume(connection_properties,
 +                                                     self.device_scan_attempts)
- 
          device_info = {'type': 'block'}
  
[email protected]@ -262,6 +279,9 @@
-         target_iqn - iSCSI Qualified Name
-         target_lun - LUN id of the volume
+         if self.use_multipath:
[email protected]@ -365,6 +381,8 @@ class ISCSIConnector(InitiatorConnector)
+         target_iqn(s) - iSCSI Qualified Name
+         target_lun(s) - LUN id of the volume
          """
 +        if sys.platform == 'sunos5':
 +            return
-+
          # Moved _rescan_iscsi and _rescan_multipath
          # from _disconnect_volume_multipath_iscsi to here.
          # Otherwise, if we do rescan after _linuxscsi.remove_multipath_device
[email protected]@ -306,6 +326,9 @@
[email protected]@ -431,6 +449,9 @@ class ISCSIConnector(InitiatorConnector)
  
      def get_initiator(self):
          """Secure helper to read file as root."""
@@ -97,7 +89,7 @@
          file_path = '/etc/iscsi/initiatorname.iscsi'
          try:
              lines, _err = self._execute('cat', file_path, run_as_root=True,
[email protected]@ -555,8 +578,11 @@
[email protected]@ -674,8 +695,11 @@ class FibreChannelConnector(InitiatorCon
                   execute=putils.execute, use_multipath=False,
                   device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                   *args, **kwargs):
@@ -111,7 +103,7 @@
          super(FibreChannelConnector, self).__init__(root_helper, driver=driver,
                                                      execute=execute,
                                                      device_scan_attempts=
[email protected]@ -566,6 +592,8 @@
[email protected]@ -685,6 +709,8 @@ class FibreChannelConnector(InitiatorCon
  
      def set_execute(self, execute):
          super(FibreChannelConnector, self).set_execute(execute)
@@ -120,7 +112,7 @@
          self._linuxscsi.set_execute(execute)
          self._linuxfc.set_execute(execute)
  
[email protected]@ -578,6 +606,10 @@
[email protected]@ -697,6 +723,10 @@ class FibreChannelConnector(InitiatorCon
          target_iqn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
@@ -131,7 +123,7 @@
          LOG.debug("execute = %s" % self._execute)
          device_info = {'type': 'block'}
  
[email protected]@ -686,6 +718,13 @@
[email protected]@ -830,6 +860,13 @@ class FibreChannelConnector(InitiatorCon
          target_wwn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
@@ -145,12 +137,10 @@
          devices = device_info['devices']
  
          # If this is a multipath device, we need to search again
-
-
---- cinder-2014.2.2/cinder/utils.py.~1~ 2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/cinder/utils.py     2015-04-20 20:46:27.658908715 -0700
[email protected]@ -137,8 +137,12 @@
-
+--- cinder-2015.1.2/cinder/utils.py.~1~	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/cinder/utils.py	2016-01-31 00:12:30.730160694 -0800
[email protected]@ -138,8 +138,12 @@ def check_exclusive_options(**kwargs):
+ 
  def execute(*cmd, **kwargs):
      """Convenience wrapper around oslo's execute() method."""
 -    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
@@ -162,5 +152,5 @@
 +        if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
 +            kwargs['root_helper'] = get_root_helper()
      return processutils.execute(*cmd, **kwargs)
-
-
+ 
+ 
--- a/components/openstack/cinder/patches/05-keepalive.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-In-house patch to allow the TCP keepalive parameters from cinder.conf
-to be used on Solaris. Patch has not yet been submitted upstream.
-
---- cinder-2014.2.2/cinder/openstack/common/network_utils.py.orig	2014-12-04 21:00:20.000000000 -0800
-+++ cinder-2014.2.2/cinder/openstack/common/network_utils.py	2015-01-23 18:46:17.465276851 -0800
[email protected]@ -18,6 +18,7 @@ Network-related utilities and helper fun
- """
- 
- import socket
-+import sys
- 
- from six.moves.urllib import parse
- 
[email protected]@ -135,26 +136,44 @@ def set_tcp_keepalive(sock, tcp_keepaliv
-     if not tcp_keepalive:
-         return
- 
--    # These options aren't available in the OS X version of eventlet,
--    # Idle + Count * Interval effectively gives you the total timeout.
--    if tcp_keepidle is not None:
--        if hasattr(socket, 'TCP_KEEPIDLE'):
--            sock.setsockopt(socket.IPPROTO_TCP,
--                            socket.TCP_KEEPIDLE,
--                            tcp_keepidle)
-+    if sys.platform == 'sunos5':
-+        # Should match definitions in <netinet/tcp.h>
-+        TCP_KEEPALIVE_THRESHOLD = 0x16
-+        TCP_KEEPALIVE_ABORT_THRESHOLD = 0x17
-+
-+        if tcp_keepidle is not None:
-+            sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE_THRESHOLD,
-+                            tcp_keepidle * 1000)
-+        if tcp_keepalive_interval is None and tcp_keepalive_count is None:
-+            return
-+        if tcp_keepalive_interval is None or tcp_keepalive_count is None:
-+            LOG.warning(_LW('tcp_keepintvl and tcp_keepknt must be set '
-+                            'together'))
-         else:
--            LOG.warning(_LW('tcp_keepidle not available on your system'))
--    if tcp_keepalive_interval is not None:
--        if hasattr(socket, 'TCP_KEEPINTVL'):
--            sock.setsockopt(socket.IPPROTO_TCP,
--                            socket.TCP_KEEPINTVL,
--                            tcp_keepalive_interval)
--        else:
--            LOG.warning(_LW('tcp_keepintvl not available on your system'))
--    if tcp_keepalive_count is not None:
--        if hasattr(socket, 'TCP_KEEPCNT'):
--            sock.setsockopt(socket.IPPROTO_TCP,
--                            socket.TCP_KEEPCNT,
--                            tcp_keepalive_count)
--        else:
--            LOG.warning(_LW('tcp_keepknt not available on your system'))
-+            sock.setsockopt(
-+                socket.IPPROTO_TCP, TCP_KEEPALIVE_ABORT_THRESHOLD,
-+                tcp_keepalive_interval * tcp_keepalive_count * 1000)
-+    else:
-+        # These options aren't available in the OS X version of eventlet,
-+        # Idle + Count * Interval effectively gives you the total timeout.
-+        if tcp_keepidle is not None:
-+            if hasattr(socket, 'TCP_KEEPIDLE'):
-+                sock.setsockopt(socket.IPPROTO_TCP,
-+                                socket.TCP_KEEPIDLE,
-+                                tcp_keepidle)
-+            else:
-+                LOG.warning(_LW('tcp_keepidle not available on your system'))
-+        if tcp_keepalive_interval is not None:
-+            if hasattr(socket, 'TCP_KEEPINTVL'):
-+                sock.setsockopt(socket.IPPROTO_TCP,
-+                                socket.TCP_KEEPINTVL,
-+                                tcp_keepalive_interval)
-+            else:
-+                LOG.warning(_LW('tcp_keepintvl not available on your system'))
-+        if tcp_keepalive_count is not None:
-+            if hasattr(socket, 'TCP_KEEPCNT'):
-+                sock.setsockopt(socket.IPPROTO_TCP,
-+                                socket.TCP_KEEPCNT,
-+                                tcp_keepalive_count)
-+            else:
-+                LOG.warning(_LW('tcp_keepknt not available on your system'))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/05-launchpad-1479342.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,75 @@
+commit 075ff30d7d8bbeca1af634718f3cb19099bc44b3
+Author: Abhiram Moturi <[email protected]>
+Date:   Mon Aug 10 14:23:09 2015 +0000
+
+    ZFSSA driver to return project 'available' space
+    
+    This fix allows the iSCSI driver to return the 'available' space
+    property at project level instead of the pool level which is more
+    accurate in cases when storage is not thin provisioned.
+    
+    Change-Id: I52dec5e527eab393fd464fbc7f4f910fafb67268
+    Closes-Bug: #1479342
+
+--- cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssaiscsi.py.~1~	2016-02-01 00:58:28.817737350 -0800
++++ cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssaiscsi.py	2016-02-01 00:58:28.883666429 -0800
[email protected]@ -322,7 +322,8 @@ class ZFSSAISCSIDriver(driver.ISCSIDrive
+         data["storage_protocol"] = self.protocol
+ 
+         lcfg = self.configuration
+-        (avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
++        (avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool,
++                                                      lcfg.zfssa_project)
+         if avail is None or total is None:
+             return
+ 
+--- cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssarest.py.~1~	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssarest.py	2016-02-01 00:59:32.842959922 -0800
[email protected]@ -69,36 +69,30 @@ class ZFSSAApi(object):
+         if self.rclient and not self.rclient.islogin():
+             self.rclient.login(auth_str)
+ 
+-    def get_pool_stats(self, pool):
+-        """Get space available and total properties of a pool
++    def get_project_stats(self, pool, project):
++        """Get project stats.
++
++           Get available space and total space of a project
+            returns (avail, total).
+         """
+-        svc = '/api/storage/v1/pools/' + pool
++        svc = '/api/storage/v1/pools/%s/projects/%s' % (pool, project)
+         ret = self.rclient.get(svc)
+         if ret.status != restclient.Status.OK:
+-            exception_msg = (_('Error Getting Pool Stats: '
++            exception_msg = (_('Error Getting Project Stats: '
+                                'Pool: %(pool)s '
++                               'Project: %(project)s '
+                                'Return code: %(ret.status)d '
+                                'Message: %(ret.data)s.')
+                              % {'pool': pool,
++                                'project': project,
+                                 'ret.status': ret.status,
+                                 'ret.data': ret.data})
+             LOG.error(exception_msg)
+-            raise exception.InvalidVolume(reason=exception_msg)
++            raise exception.VolumeBackendAPIException(data=exception_msg)
+ 
+         val = json.loads(ret.data)
+-
+-        if not self._is_pool_owned(val):
+-            exception_msg = (_('Error Pool ownership: '
+-                               'Pool %(pool)s is not owned '
+-                               'by %(host)s.')
+-                             % {'pool': pool,
+-                                'host': self.host})
+-            LOG.error(exception_msg)
+-            raise exception.InvalidInput(reason=pool)
+-
+-        avail = val['pool']['usage']['available']
+-        total = val['pool']['usage']['total']
++        avail = val['project']['space_available']
++        total = avail + val['project']['space_total']
+ 
+         return avail, total
+ 
--- a/components/openstack/cinder/patches/06-enable-dd.patch	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/cinder/patches/06-enable-dd.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -1,9 +1,10 @@
-In-house supporting volume_dd_blocksize ('1M', 'G') to enable 'dd' to run on Solaris. This patch
-is Solaris-specific and not suitable for upstream.
+In-house patch supporting volume_dd_blocksize ('1M', 'G') to enable
+'dd' to run on Solaris. This patch is Solaris-specific and not
+suitable for upstream.
 
---- cinder-2014.2.2/cinder/volume/utils.py.old	2015-10-20 12:55:15.089090904 -0700
-+++ cinder-2014.2.2/cinder/volume/utils.py	2015-10-20 12:55:23.640892844 -0700
[email protected]@ -281,6 +281,7 @@
+--- cinder-2015.1.2/cinder/volume/utils.py.orig	2015-11-17 18:29:59.168650488 -0800
++++ cinder-2015.1.2/cinder/volume/utils.py	2015-11-17 18:30:21.197972544 -0800
[email protected]@ -267,6 +267,7 @@
          bs = strutils.string_to_bytes('%sB' % blocksize)
  
      count = math.ceil(size_in_m * units.Mi / bs)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/07-launchpad-1460156.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,74 @@
+This patch is to work-around the fact that Python 2.7.9 and beyond have
+implemented PEP 476 which enabled certificate verification by default
+and ZFSSAs may not be deployed with a valid, CA-signed certificate.
+
+commit 814cbb8a937e2a01f2c8814dd032c0f12baa6fd4
+Author: Diem Tran <[email protected]>
+Date:   Wed Jul 1 21:12:48 2015 +0000
+
+    Fix PEP476 & format message of Oracle ZFSSA drivers
+    
+    * Handles the PEP 476 by opting out certificate verification.
+    * Fix debug format messages in restclient.py
+    
+    Change-Id: Iaf9e546f0aed6b57fe9c2bf43aa2ce003a05ddf8
+    Closes-Bug: #1460156
+
+--- cinder-2015.1.2/cinder/volume/drivers/zfssa/restclient.py.~1~	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/cinder/volume/drivers/zfssa/restclient.py	2016-01-31 00:56:12.410126083 -0800
[email protected]@ -1,4 +1,4 @@
+-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
++# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ #
+ #    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ #    not use this file except in compliance with the License. You may obtain
[email protected]@ -17,6 +17,7 @@ ZFS Storage Appliance REST API Client Pr
+ 
+ import httplib
+ import json
++import ssl
+ import StringIO
+ import time
+ import urllib2
[email protected]@ -268,14 +269,27 @@ class RestClientURL(object):
+         retry = 0
+         response = None
+ 
+-        LOG.debug('Request: %s %s' % (request, zfssaurl))
++        LOG.debug('Request: %(request)s %(url)s',
++                  {'request': request, 'url': zfssaurl})
+         LOG.debug('Out headers: %s' % out_hdrs)
+         if body and body != '':
+             LOG.debug('Body: %s' % body)
+ 
++        context = None
++        if hasattr(ssl, '_create_unverified_context'):
++            context = ssl._create_unverified_context()
++        else:
++            context = None
++
+         while retry < maxreqretries:
+             try:
+-                response = urllib2.urlopen(req, timeout=self.timeout)
++                if context:
++                    response = urllib2.urlopen(req,
++                                               timeout=self.timeout,
++                                               context=context)
++                else:
++                    response = urllib2.urlopen(req,
++                                               timeout=self.timeout)
+             except urllib2.HTTPError as err:
+                 if err.code == httplib.NOT_FOUND:
+                     LOG.debug('REST Not Found: %s' % err.code)
[email protected]@ -315,8 +329,9 @@ class RestClientURL(object):
+ 
+             break
+ 
+-        if response and response.getcode() == httplib.SERVICE_UNAVAILABLE and \
+-           retry >= maxreqretries:
++        if (response and
++            (response.getcode() == httplib.SERVICE_UNAVAILABLE and
++                retry >= maxreqretries)):
+             raise RestClientError(response.getcode(), name="ERR_HTTPError",
+                                   message="REST Not Available: Disabled")
+ 
--- a/components/openstack/cinder/patches/07-zfssa-pep-476.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-This patch is to work-around the fact that Python 2.7.9 and beyond have
-implemented PEP 476 which enabled certificate verification by default
-and ZFSSAs may not be deployed with a valid, CA-signed certificate.
-
-This patch may be suitable for pushing upsteam.
-
---- cinder-2014.2.2/cinder/volume/drivers/zfssa/restclient.py.~1~	2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/cinder/volume/drivers/zfssa/restclient.py	2015-04-23 17:32:01.898738656 -0700
[email protected]@ -17,6 +17,7 @@ ZFS Storage Appliance REST API Client Pr
- 
- import httplib
- import json
-+import ssl
- import StringIO
- import time
- import urllib2
[email protected]@ -272,9 +273,18 @@ class RestClientURL(object):
-         if body and body != '':
-             LOG.debug('Body: %s' % body)
- 
-+        if hasattr(ssl, '_create_unverified_context'):
-+            context = ssl._create_unverified_context()
-+        else:
-+            context = None
-+
-         while retry < maxreqretries:
-             try:
--                response = urllib2.urlopen(req, timeout=self.timeout)
-+                if context:
-+                    response = urllib2.urlopen(req, timeout=self.timeout,
-+                        context=context)
-+                else:
-+                    response = urllib2.urlopen(req, timeout=self.timeout)
-             except urllib2.HTTPError as err:
-                 if err.code == httplib.NOT_FOUND:
-                     LOG.debug('REST Not Found: %s' % err.code)
--- a/components/openstack/cinder/patches/08-disable-sslv3.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-In-house patch to disable SSLv3 support. 
-(See also upstream bug #1395095)
-
---- cinder-2014.2.2/cinder/openstack/common/sslutils.py.orig	2015-02-05 11:03:26.000000000 -0500
-+++ cinder-2014.2.2/cinder/openstack/common/sslutils.py	2015-08-13 20:27:21.205921362 -0400
[email protected]@ -80,8 +80,7 @@
- 
- _SSL_PROTOCOLS = {
-     "tlsv1": ssl.PROTOCOL_TLSv1,
--    "sslv23": ssl.PROTOCOL_SSLv23,
--    "sslv3": ssl.PROTOCOL_SSLv3
-+    "sslv23": ssl.PROTOCOL_SSLv23
- }
- 
- try:
[email protected]@ -89,6 +88,11 @@
- except AttributeError:
-     pass
- 
-+try:
-+    _SSL_PROTOCOLS["sslv3"] = ssl.PROTOCOL_SSLv3
-+except AttributeError:
-+    pass
-+
- 
- def validate_ssl_version(version):
-     key = version.lower()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/08-zfssa-target_lun.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,20 @@
+This patch addresses 22554075 which has also been filed in Launchpad
+under 1538582.
+
+--- cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssaiscsi.py.~2~	2016-01-31 17:10:27.739412163 -0800
++++ cinder-2015.1.2/cinder/volume/drivers/zfssa/zfssaiscsi.py	2016-01-31 17:11:03.123083242 -0800
[email protected]@ -1,4 +1,4 @@
+-# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
++# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ #
+ #    Licensed under the Apache License, Version 2.0 (the "License"); you may
+ #    not use this file except in compliance with the License. You may obtain
[email protected]@ -415,7 +415,7 @@ class ZFSSAISCSIDriver(driver.ISCSIDrive
+         iscsi_properties['target_discovered'] = False
+         iscsi_properties['target_portal'] = target_portal
+         iscsi_properties['target_iqn'] = iqn
+-        iscsi_properties['target_lun'] = lun
++        iscsi_properties['target_lun'] = int(lun)
+         iscsi_properties['volume_id'] = volume['id']
+ 
+         if 'provider_auth' in provider:
--- a/components/openstack/cinder/patches/09-launchpad-1414867.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-This upstream patch addresses Launchpad bug 1414867. Although it's been
-addressed in Kilo, the patch below is still not yet released for Juno.
-
-commit 549144f754a8b3adb9d7fdfa8f8f9ee186a52f1e
-Author: wuyuting <email address hidden>
-Date: Tue Jan 27 02:50:28 2015 +0800
-
-    Fetch_to_volume_format calls copy_volume using wrong parameter
-
-    When creating a volume from an image, if qemu-img is not installed,
-    fetch_to_volume_format will call volume_utils.copy_volume to copy
-    image to volume. Copy_volume need the size of image in megabyte,
-    but fetch_to_volume_format call it using size in bytes.
-
-    Change-Id: Ia3b0f9168235a977a12232e27a5755ad11ec18f5
-    Closes-Bug: #1414867
-
---- cinder-2014.2.2/cinder/image/image_utils.py.orig	2015-10-19 15:59:34.730112261 -0700
-+++ cinder-2014.2.2/cinder/image/image_utils.py	2015-10-19 16:02:05.982843565 -0700
[email protected]@ -25,6 +25,7 @@
- 
- 
- import contextlib
-+import math
- import os
- import tempfile
- 
[email protected]@ -238,7 +239,8 @@
-             LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
-                       'size: %(size)s' % {'tmp': tmp, 'dest': dest,
-                                           'size': image_meta['size']})
--            volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
-+            image_size_m = math.ceil(image_meta['size'] / units.Mi)
-+            volume_utils.copy_volume(tmp, dest, image_size_m, blocksize)
-             return
- 
-         data = qemu_img_info(tmp)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/09-nfs-mount.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,59 @@
+In-house patch to adapt Linux specific rootwrap setup to Solaris.
+Remove check for Linux specific mount.nfs command.
+
+Patch may be suitable for pushing upstream.
+
+--- cinder-2014.2.2/cinder/volume/drivers/nfs.py.orig	2015-10-12 16:43:35.188157478 -0700
++++ cinder-2014.2.2/cinder/volume/drivers/nfs.py	2015-10-13 09:35:07.871595794 -0700
[email protected]@ -15,6 +15,7 @@
+ 
+ import errno
+ import os
++import platform
+ import time
+ 
+ from oslo_concurrency import processutils as putils
[email protected]@ -86,7 +87,10 @@ class NfsDriver(remotefs.RemoteFSDriver)
+         self._remotefsclient = None
+         super(NfsDriver, self).__init__(*args, **kwargs)
+         self.configuration.append_config_values(nfs_opts)
+-        root_helper = utils.get_root_helper()
++        if platform.system() == "SunOS":
++            root_helper = None
++        else:
++            root_helper = utils.get_root_helper()
+         # base bound to instance is used in RemoteFsConnector.
+         self.base = getattr(self.configuration,
+                             'nfs_mount_point_base',
[email protected]@ -144,18 +148,19 @@ class NfsDriver(remotefs.RemoteFSDriver)
+ 
+         self.shares = {}  # address : options
+ 
+-        # Check if mount.nfs is installed on this system; note that we don't
+-        # need to be root to see if the package is installed.
+-        package = 'mount.nfs'
+-        try:
+-            self._execute(package, check_exit_code=False,
+-                          run_as_root=False)
+-        except OSError as exc:
+-            if exc.errno == errno.ENOENT:
+-                msg = _('%s is not installed') % package
+-                raise exception.NfsException(msg)
+-            else:
+-                raise exc
++        if platform.system() != "SunOS":
++            # Check if mount.nfs is installed on this system; note that we don't
++            # need to be root to see if the package is installed.
++            package = 'mount.nfs'
++            try:
++                self._execute(package, check_exit_code=False,
++                              run_as_root=False)
++            except OSError as exc:
++                if exc.errno == errno.ENOENT:
++                    msg = _('%s is not installed') % package
++                    raise exception.NfsException(msg)
++                else:
++                    raise exc
+ 
+         # Now that all configuration data has been loaded (shares),
+         # we can "set" our final NAS file security options.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/10-remotefs.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,81 @@
+In-house patch to adapt Linux specific commands and command output
+parsing to Solaris.
+
+Patch may be suitable for pushing upstream.
+
+--- cinder-2015.1.0/cinder/brick/remotefs/remotefs.py.orig	2015-10-13 13:33:37.708165135 -0700
++++ cinder-2015.1.0/cinder/brick/remotefs/remotefs.py	2015-10-13 13:42:40.978353719 -0700
[email protected]@ -17,6 +17,7 @@
+ 
+ import hashlib
+ import os
++import platform
+ import re
+ 
+ from oslo_concurrency import processutils as putils
[email protected]@ -77,14 +78,21 @@ class RemoteFsClient(object):
+                             self._get_hash_str(device_name))
+ 
+     def _read_mounts(self):
+-        (out, _err) = self._execute('mount', check_exit_code=0)
++        if platform.system() == "SunOS":
++            (out, _err) = self._execute('/usr/sbin/mount', check_exit_code=0)
++        else:
++            (out, _err) = self._execute('mount', check_exit_code=0)
+         lines = out.split('\n')
+         mounts = {}
+         for line in lines:
+             tokens = line.split()
+             if 2 < len(tokens):
+-                device = tokens[0]
+-                mnt_point = tokens[2]
++                if platform.system() == "SunOS":
++                    device = tokens[2]
++                    mnt_point = tokens[0]
++                else:
++                    device = tokens[0]
++                    mnt_point = tokens[2]
+                 mounts[mnt_point] = device
+         return mounts
+ 
[email protected]@ -96,8 +104,12 @@ class RemoteFsClient(object):
+             LOG.info(_LI('Already mounted: %s') % mount_path)
+             return
+ 
+-        self._execute('mkdir', '-p', mount_path, check_exit_code=0)
+-        if self._mount_type == 'nfs':
++        if platform.system() == "SunOS":
++            self._execute('/usr/bin/mkdir', '-p', mount_path,
++                          check_exit_code=0)
++        else:
++            self._execute('mkdir', '-p', mount_path, check_exit_code=0)
++        if self._mount_type == 'nfs' and platform.system() != "SunOS":
+             self._mount_nfs(share, mount_path, flags)
+         else:
+             self._do_mount(self._mount_type, share, mount_path,
[email protected]@ -106,15 +118,21 @@ class RemoteFsClient(object):
+     def _do_mount(self, mount_type, share, mount_path, mount_options=None,
+                   flags=None):
+         """Mounts share based on the specified params."""
+-        mnt_cmd = ['mount', '-t', mount_type]
++        if platform.system() == "SunOS":
++            mnt_cmd = ['/usr/sbin/mount', '-F', mount_type]
++        else:
++            mnt_cmd = ['mount', '-t', mount_type]
+         if mount_options is not None:
+             mnt_cmd.extend(['-o', mount_options])
+         if flags is not None:
+             mnt_cmd.extend(flags)
+         mnt_cmd.extend([share, mount_path])
+ 
+-        self._execute(*mnt_cmd, root_helper=self.root_helper,
+-                      run_as_root=True, check_exit_code=0)
++        if platform.system() == "SunOS":
++            self._execute(*mnt_cmd, check_exit_code=0)
++        else:
++            self._execute(*mnt_cmd, root_helper=self.root_helper,
++                          run_as_root=True, check_exit_code=0)
+ 
+     def _mount_nfs(self, nfs_share, mount_path, flags=None):
+         """Mount nfs share using present mount types."""
+
--- a/components/openstack/cinder/patches/10-zfssa-free-space.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,57 +0,0 @@
-From 075ff30d7d8bbeca1af634718f3cb19099bc44b3 Mon Sep 17 00:00:00 2001
-From: Abhiram Moturi <[email protected]>
-Date: Mon, 10 Aug 2015 14:23:09 +0000
-Subject: [PATCH] ZFSSA driver to return project 'available' space
-
-This fix allows the iSCSI driver to return the 'available' space
-property at project level instead of the pool level which is more
-accurate in cases when storage is not thin provisioned.
-
-Change-Id: I52dec5e527eab393fd464fbc7f4f910fafb67268
-Closes-Bug: #1479342
----
---- cinder-2014.2.2/cinder/volume/drivers/zfssa/zfssarest.py.~1~	2015-11-08 22:01:48.358042338 -0800
-+++ cinder-2014.2.2/cinder/volume/drivers/zfssa/zfssarest.py	2015-11-08 22:02:05.691920138 -0800
[email protected]@ -82,7 +82,7 @@
-                                 'ret.status': ret.status,
-                                 'ret.data': ret.data})
-             LOG.error(exception_msg)
--            raise exception.InvalidVolume(reason=exception_msg)
-+            raise exception.VolumeBackendAPIException(data=exception_msg)
- 
-         val = json.loads(ret.data)
- 
[email protected]@ -95,10 +95,31 @@
-             LOG.error(exception_msg)
-             raise exception.InvalidInput(reason=pool)
- 
--        avail = val['pool']['usage']['available']
-         total = val['pool']['usage']['total']
- 
--        return avail, total
-+        return total
-+
-+    def get_project_stats(self, pool, project):
-+        """Get available space of a project."""
-+        svc = '/api/storage/v1/pools/%s/projects/%s' % (pool, project)
-+        ret = self.rclient.get(svc)
-+        if ret.status != restclient.Status.OK:
-+            exception_msg = (_('Error Getting Project Stats: '
-+                               'Pool: %(pool)s '
-+                               'Project: %(project)s '
-+                               'Return code: %(ret.status)d '
-+                               'Message: %(ret.data)s.')
-+                             % {'pool': pool,
-+                                'project': project,
-+                                'ret.status': ret.status,
-+                                'ret.data': ret.data})
-+            LOG.error(exception_msg)
-+            raise exception.VolumeBackendAPIException(data=exception_msg)
-+ 
-+        val = json.loads(ret.data)
-+        avail = val['project']['space_available']
-+ 
-+        return avail
- 
-     def create_project(self, pool, project, compression=None, logbias=None):
-         """Create a project on a pool
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/11-nfs-backup.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,31 @@
+In-house patch to clean up backup container directories when empty.
+
+Patch may be suitable for pushing upstream.
+
+--- cinder-2015.1.0/cinder/backup/drivers/nfs.py.orig	2015-11-24 11:49:37.670852276 -0700
++++ cinder-2015.1.0/cinder/backup/drivers/nfs.py	2015-11-24 13:31:57.282595085 -0700
[email protected]@ -137,9 +137,21 @@ class NFSBackupDriver(chunkeddriver.Chun
+         return open(path, 'r')
+ 
+     def delete_object(self, container, object_name):
+-        # TODO(tbarron):  clean up the container path if it is empty
+-        path = os.path.join(self.backup_path, container, object_name)
+-        os.remove(path)
++        obj_path = os.path.join(self.backup_path, container, object_name)
++        cpath = os.path.join(self.backup_path, container)
++        os.remove(obj_path)
++
++        # Clean up the container path if it is empty.
++        if not os.listdir(cpath):
++            start = os.getcwd()
++            os.chdir(self.backup_path)
++            try:
++                # Try to remove all the component dirs
++                # in container path.  os.removedirs will
++                # stop if the directory is not empty.
++                os.removedirs(container)
++            finally:
++                os.chdir(start)
+ 
+     def _generate_object_name_prefix(self, backup):
+         return 'backup'
--- a/components/openstack/cinder/patches/11-zfssa-pep-iscsi.patch	Fri Feb 05 11:09:10 2016 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
---- cinder-2014.2.2/cinder/volume/drivers/zfssa/zfssaiscsi.py.~1~	2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/cinder/volume/drivers/zfssa/zfssaiscsi.py	2015-11-08 21:59:29.027424535 -0800
[email protected]@ -290,7 +290,9 @@
-         data["storage_protocol"] = self.protocol
- 
-         lcfg = self.configuration
--        (avail, total) = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
-+        total = self.zfssa.get_pool_stats(lcfg.zfssa_pool)
-+        avail = self.zfssa.get_project_stats(lcfg.zfssa_pool,
-+                                             lcfg.zfssa_project)
-         if avail is None or total is None:
-             return
- 
[email protected]@ -334,9 +336,8 @@
-                                           '')
- 
-     def ensure_export(self, context, volume):
--        """Driver entry point to get the export info for an existing volume."""
--        LOG.debug('ensure_export: volume name: %s' % volume['name'])
--        return self._export_volume(volume)
-+        """Synchronously recreates an export for a volume."""
-+        pass
- 
-     def copy_image_to_volume(self, context, volume, image_service, image_id):
-         self.ensure_export(context, volume)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/cinder/patches/12-generate_sample.patch	Fri Feb 05 17:54:17 2016 -0500
@@ -0,0 +1,45 @@
+In-house patch to update the generate_sample script and related data
+files for use with Solaris.
+
+--- cinder-2015.1.2/tools/config/generate_sample.sh.orig	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/tools/config/generate_sample.sh	2016-01-31 22:28:46.003791619 -0800
[email protected]@ -1,5 +1,7 @@
+ #!/usr/bin/env bash
+ 
++PATH=/usr/gnu/bin:/usr/bin
++
+ # Generate sample configuration for your project.
+ #
+ # Aside from the command line flags, it also respects a config file which
[email protected]@ -97,6 +99,7 @@ fi
+ BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
+ find $TARGETDIR -type f -name "*.pyc" -delete
+ FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
++        ! -name test.py \
+         -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
+ 
+ RC_FILE="`dirname $0`/oslo.config.generator.rc"
[email protected]@ -136,9 +139,3 @@ CONCAT_FILES=$(ls $BASEDIR/tools/config/
+ for CONCAT_FILE in $CONCAT_FILES; do
+     cat $CONCAT_FILE >> $OUTPUTFILE
+ done
+-
+-# Now we need to get externals
+-oslo-config-generator \
+---namespace oslo_concurrency --namespace oslo_db \
+---namespace oslo_messaging --namespace policy \
+---namespace keystonemiddleware.auth_token  >> $OUTPUTFILE
+--- cinder-2015.1.2/tools/config/oslo.config.generator.rc.orig	2015-10-13 09:27:35.000000000 -0700
++++ cinder-2015.1.2/tools/config/oslo.config.generator.rc	2016-01-31 22:29:40.025594671 -0800
[email protected]@ -1,2 +1,9 @@
+-export CINDER_CONFIG_GENERATOR_EXTRA_MODULES="keystonemiddleware.auth_token"
+-export CINDER_CONFIG_GENERATOR_EXTRA_LIBRARIES="oslo_concurrency oslo.messaging oslo_db oslo.db.concurrency"
++export CINDER_CONFIG_GENERATOR_EXTRA_LIBRARIES=" \
++    keystonemiddleware.auth_token \
++    oslo.concurrency \
++    oslo.db \
++    oslo.db.concurrency \
++    oslo.log \
++    oslo.messaging \
++    oslo.middleware \
++"
--- a/components/openstack/common/Makefile	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/common/Makefile	Fri Feb 05 17:54:17 2016 -0500
@@ -20,13 +20,13 @@
 #
 
 #
-# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		openstack
-COMPONENT_VERSION=	2014.2.2
+COMPONENT_VERSION=	2015.1.2
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/nova
 IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
--- a/components/openstack/common/files/openstack_common.py	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/common/files/openstack_common.py	Fri Feb 05 17:54:17 2016 -0500
@@ -153,10 +153,10 @@
                 new_section, new_key = old_section, old_key
 
             # Look for exceptions
-            if exception_list is not None:
+            if exception_list:
                 if (new_section, new_key) in exception_list:
                     if (new_section != 'DEFAULT' and
-                        not new.has_section(new_section)):
+                            not new.has_section(new_section)):
                         new.add_section(new_section)
                     print "Preserving [%s] %s = %s" % \
                         (new_section, new_key, value)
--- a/components/openstack/common/openstack-common.p5m	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/common/openstack-common.p5m	Fri Feb 05 17:54:17 2016 -0500
@@ -20,7 +20,7 @@
 #
 
 #
-# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 set name=pkg.fmri \
@@ -28,7 +28,7 @@
 set name=pkg.summary value="OpenStack Common Package"
 set name=pkg.description \
     value="Library package for common data structures and functions used by other OpenStack projects within Solaris."
-set name=pkg.human-version value="Juno $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Kilo $(COMPONENT_VERSION)"
 set name=info.classification \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
     value="org.opensolaris.category.2008:System/Enterprise Management" \
@@ -37,7 +37,7 @@
 set name=info.upstream value="OpenStack <[email protected]>"
 set name=info.upstream-url value=$(COMPONENT_PROJECT_URL)
 set name=org.opensolaris.arc-caseid value=PSARC/2013/350 value=PSARC/2015/110 \
-    value=PSARC/2015/233
+    value=PSARC/2015/233 value=PSARC/2015/535
 set name=org.opensolaris.consolidation value=$(CONSOLIDATION)
 #
 file path=usr/lib/python$(PYVER)/vendor-packages/openstack_common.py
--- a/components/openstack/common/openstack.p5m	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/common/openstack.p5m	Fri Feb 05 17:54:17 2016 -0500
@@ -20,7 +20,7 @@
 #
 
 #
-# Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 set name=pkg.fmri \
@@ -28,7 +28,7 @@
 set name=pkg.summary value=OpenStack
 set name=pkg.description \
     value="OpenStack is a cloud operating system that controls large pools of compute, storage, and networking resources throughout a data center, all managed through a dashboard that gives administrators control while empowering their users to provision resources through a web interface."
-set name=pkg.human-version value="Juno $(COMPONENT_VERSION)"
+set name=pkg.human-version value="Kilo $(COMPONENT_VERSION)"
 set name=info.classification \
     value="org.opensolaris.category.2008:Meta Packages/Group Packages" \
     value="org.opensolaris.category.2008:System/Administration and Configuration" \
@@ -59,9 +59,13 @@
 depend type=group fmri=library/python/keystoneclient
 depend type=group fmri=library/python/neutronclient
 depend type=group fmri=library/python/novaclient
+depend type=group fmri=library/python/openstackclient
 depend type=group fmri=library/python/python-mysql
 depend type=group fmri=library/python/saharaclient
 depend type=group fmri=library/python/swiftclient
 depend type=group fmri=library/python/troveclient
 depend type=group fmri=network/amqp/rabbitmq
 depend type=group fmri=system/management/rad/module/rad-evs-controller
+
+# To upgrade to Kilo version, Juno version of the package must be on the system
+depend type=origin fmri=pkg:/cloud/[email protected] root-image=true
--- a/components/openstack/glance/Makefile	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/glance/Makefile	Fri Feb 05 17:54:17 2016 -0500
@@ -20,26 +20,26 @@
 #
 
 #
-# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
 #
 
 include ../../../make-rules/shared-macros.mk
 
 COMPONENT_NAME=		glance
-COMPONENT_CODENAME=	juno
-COMPONENT_VERSION=	2014.2.2
-COMPONENT_BE_VERSION=	2014.2
+COMPONENT_CODENAME=	kilo
+COMPONENT_VERSION=	2015.1.2
+COMPONENT_BE_VERSION=	2015.1
 COMPONENT_SRC=		$(COMPONENT_NAME)-$(COMPONENT_VERSION)
 COMPONENT_ARCHIVE=	$(COMPONENT_SRC).tar.gz
 COMPONENT_ARCHIVE_HASH=	\
-    sha256:bf7273ff9e89e9a7edda76e7a2235989a25109fb728edc4afa956e74ef54a08c
+    sha256:e46bc6648a74a643cef3825dcd44c7e275ed3385b96cab83ef50c4c514932541
 COMPONENT_ARCHIVE_URL=	http://launchpad.net/$(COMPONENT_NAME)/$(COMPONENT_CODENAME)/$(COMPONENT_VERSION)/+download/$(COMPONENT_ARCHIVE)
 COMPONENT_SIG_URL=	$(COMPONENT_ARCHIVE_URL).asc
 COMPONENT_PROJECT_URL=	http://www.openstack.org/
 COMPONENT_BUGDB=	service/glance
 IPS_COMPONENT_VERSION=	0.$(COMPONENT_VERSION)
 
-TPNO=			21820
+TPNO=			25785
 
 include $(WS_MAKE_RULES)/prep.mk
 include $(WS_MAKE_RULES)/setup.py.mk
@@ -77,15 +77,21 @@
 
 test:		$(NO_TESTS)
 
-system-test:    $(NO_TESTS)
+system-test:	$(NO_TESTS)
 
 
 REQUIRED_PACKAGES += cloud/openstack/openstack-common
 REQUIRED_PACKAGES += library/python/eventlet-27
+REQUIRED_PACKAGES += library/python/glance_store-27
 REQUIRED_PACKAGES += library/python/iniparse-27
 REQUIRED_PACKAGES += library/python/m2crypto-27
 REQUIRED_PACKAGES += library/python/oslo.config-27
-REQUIRED_PACKAGES += library/python/python-mysql-27
+REQUIRED_PACKAGES += library/python/oslo.messaging-27
+REQUIRED_PACKAGES += library/python/oslo.serialization-27
+REQUIRED_PACKAGES += library/python/oslo.utils-27
+REQUIRED_PACKAGES += library/python/osprofiler-27
+REQUIRED_PACKAGES += library/python/six-27
 REQUIRED_PACKAGES += library/python/sqlalchemy-27
 REQUIRED_PACKAGES += library/python/sqlalchemy-migrate-27
+REQUIRED_PACKAGES += library/python/webob-27
 REQUIRED_PACKAGES += system/core-os
--- a/components/openstack/glance/files/glance-api.conf	Fri Feb 05 11:09:10 2016 -0800
+++ b/components/openstack/glance/files/glance-api.conf	Fri Feb 05 17:54:17 2016 -0500
@@ -1,650 +1,702 @@
 [DEFAULT]
-# Show more verbose log output (sets INFO log level output)
-#verbose = False
-
-# Show debugging output in logs (sets DEBUG log level output)
-#debug = False
-
-# Which backend scheme should Glance use by default is not specified
-# in a request to add a new image to Glance? Known schemes are determined
-# by the known_stores option below.
-# Default: 'file'
-default_store = file
 
-# Maximum image size (in bytes) that may be uploaded through the
-# Glance API server. Defaults to 1 TB.
-# WARNING: this value should only be increased after careful consideration
-# and must be set to a value under 8 EB (9223372036854775808).
-#image_size_cap = 1099511627776
-
-# Address to bind the API server
-bind_host = 0.0.0.0
-
-# Port the bind the API server to
-bind_port = 9292
-
-# Log to this file. Make sure you do not set the same log file for both the API
-# and registry servers!
+#
+# From glance.api
 #
-# If `log_file` is omitted and `use_syslog` is false, then log messages are
-# sent to stdout as a fallback.
-log_file = /var/log/glance/api.log
-
-# Backlog requests when creating socket
-backlog = 4096
-
-# TCP_KEEPIDLE value in seconds when creating socket.
-# Not supported on OS X.
-#tcp_keepidle = 600
 
-# API to use for accessing data. Default value points to sqlalchemy
-# package, it is also possible to use: glance.db.registry.api
-# data_api = glance.db.sqlalchemy.api
+# When true, this option sets the owner of an image to be the tenant.
+# Otherwise, the owner of the  image will be the authenticated user
+# issuing the request. (boolean value)
+#owner_is_tenant = true
 
-# The number of child process workers that will be
-# created to service API requests. The default will be
-# equal to the number of CPUs available. (integer value)
-workers = 1
-
-# Maximum line size of message headers to be accepted.
-# max_header_line may need to be increased when using large tokens
-# (typically those generated by the Keystone v3 API with big service
-# catalogs)
-# max_header_line = 16384
-
-# Role used to identify an authenticated user as administrator
+# Role used to identify an authenticated user as administrator.
+# (string value)
 #admin_role = admin
 
 # Allow unauthenticated users to access the API with read-only
-# privileges. This only applies when using ContextMiddleware.
-#allow_anonymous_access = False
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+#allow_anonymous_access = false
+
+# Public url to use for versions endpoint. The default is None, which
+# will use the request's host_url attribute to populate the URL base.
+# If Glance is operating behind a proxy, you will want to change this
+# to represent the proxy's URL. (string value)
+#public_endpoint = <None>
 
-# Allow access to version 1 of glance api
-#enable_v1_api = True
+# Whether to allow users to specify image properties beyond what the
+# image schema provides (boolean value)
+#allow_additional_image_properties = true
+
+# Maximum number of image members per image. Negative values evaluate
+# to unlimited. (integer value)
+#image_member_quota = 128
 
-# Allow access to version 2 of glance api
-#enable_v2_api = True
+# Maximum number of properties allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_property_quota = 128
+
+# Maximum number of tags allowed on an image. Negative values evaluate
+# to unlimited. (integer value)
+#image_tag_quota = 128
 
-# Return the URL that references where the data is stored on
-# the backend storage system.  For example, if using the
-# file system store a URL of 'file:///path/to/image' will
-# be returned to the user in the 'direct_url' meta-data field.
-# The default value is false.
-#show_image_direct_url = False
+# Maximum number of locations allowed on an image. Negative values
+# evaluate to unlimited. (integer value)
+#image_location_quota = 10
+
+# Python module path of data access API (string value)
+#data_api = glance.db.sqlalchemy.api
+
+# Default value for the number of items returned by a request if not
+# specified explicitly in the request (integer value)
+#limit_param_default = 25
 
-# Send headers containing user and tenant information when making requests to
-# the v1 glance registry. This allows the registry to function as if a user is
-# authenticated without the need to authenticate a user itself using the
-# auth_token middleware.
-# The default value is false.
-#send_identity_headers = False
+# Maximum permissible number of items that could be returned by a
+# request (integer value)
+#api_limit_max = 1000
 
-# Supported values for the 'container_format' image attribute
-container_formats=ami,ari,aki,bare,ovf,ova,uar
+# Whether to include the backend image storage location in image
+# properties. Revealing storage location can be a security risk, so
+# use this setting with caution! (boolean value)
+#show_image_direct_url = false
+
+# Whether to include the backend image locations in image properties.
+# For example, if using the file system store a URL of
+# "file:///path/to/image" will be returned to the user in the
+# 'direct_url' meta-data field. Revealing storage location can be a
+# security risk, so use this setting with caution!  The overrides
+# show_image_direct_url. (boolean value)
+#show_multiple_locations = false
 
-# Supported values for the 'disk_format' image attribute
-disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,zfs
+# Maximum size of image a user can upload in bytes. Defaults to
+# 1099511627776 bytes (1 TB).WARNING: this value should only be
+# increased after careful consideration and must be set to a value
+# under 8 EB (9223372036854775808). (integer value)
+#image_size_cap = 1099511627776
 
-# Directory to use for lock files. Default to a temp directory
-# (string value). This setting needs to be the same for both
-# glance-scrubber and glance-api.
-#lock_path=<None>
+# Set a system wide quota for every user. This value is the total
+# capacity that a user can use across all storage systems. A value of
+# 0 means unlimited.Optional unit can be specified for the value.
+# Accepted units are B, KB, MB, GB and TB representing Bytes,
+# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no
+# unit is specified then Bytes is assumed. Note that there should not
+# be any space between value and unit and units are case sensitive.
+# (string value)
+#user_storage_quota = 0
+
+# Deploy the v1 OpenStack Images API. (boolean value)
+#enable_v1_api = true
+
+# Deploy the v2 OpenStack Images API. (boolean value)
+#enable_v2_api = true
+
+# Deploy the v1 OpenStack Registry API. (boolean value)
+#enable_v1_registry = true
 
-# Property Protections config file
-# This file contains the rules for property protections and the roles/policies
-# associated with it.
-# If this config value is not specified, by default, property protections
-# won't be enforced.
-# If a value is specified and the file is not found, then the glance-api
-# service will not start.
-#property_protection_file =
+# Deploy the v2 OpenStack Registry API. (boolean value)
+#enable_v2_registry = true
+
+# The hostname/IP of the pydev process listening for debug connections
+# (string value)
+#pydev_worker_debug_host = <None>
+
+# The port on which a pydev process is listening for connections.
+# (integer value)
+#pydev_worker_debug_port = 5678
 
-# Specify whether 'roles' or 'policies' are used in the
-# property_protection_file.
-# The default value for property_protection_rule_format is 'roles'.
-#property_protection_rule_format = roles
+# AES key for encrypting store 'location' metadata. This includes, if
+# used, Swift or S3 credentials. Should be set to a random string of
+# length 16, 24 or 32 bytes (string value)
+#metadata_encryption_key = <None>
 
-# This value sets what strategy will be used to determine the image location
-# order. Currently two strategies are packaged with Glance 'location_order'
-# and 'store_type'.
+# Digest algorithm which will be used for digital signature; the
+# default is sha1 the default in Kilo for a smooth upgrade process,
+# and it will be updated with sha256 in next release(L). Use the
+# command "openssl list-message-digest-algorithms" to get the
+# available algorithms supported by the version of OpenSSL on the
+# platform. Examples are "sha1", "sha256", "sha512", etc. (string
+# value)
+#digest_algorithm = sha1
+
+# This value sets what strategy will be used to determine the image
+# location order. Currently two strategies are packaged with Glance
+# 'location_order' and 'store_type'. (string value)
+# Allowed values: location_order, store_type
 #location_strategy = location_order
 
-# ================= Syslog Options ============================
-
-# Send logs to syslog (/dev/log) instead of to file specified
-# by `log_file`
-#use_syslog = False
+# The location of the property protection file.This file contains the
+# rules for property protections and the roles/policies associated
+# with it. If this config value is not specified, by default, property
+# protections won't be enforced. If a value is specified and the file
+# is not found, then the glance-api service will not start. (string
+# value)
+#property_protection_file = <None>
 
-# Facility to use. If unset defaults to LOG_USER.
-#syslog_log_facility = LOG_LOCAL0
-
-# ================= SSL Options ===============================
+# This config value indicates whether "roles" or "policies" are used
+# in the property protection file. (string value)
+# Allowed values: roles, policies
+#property_protection_rule_format = roles
 
-# Certificate file to use when starting API server securely
-#cert_file = /path/to/certfile
+# Modules of exceptions that are permitted to be recreated upon
+# receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules = openstack.common.exception,glance.common.exception,exceptions
 
-# Private key file to use when starting API server securely
-#key_file = /path/to/keyfile
+# Address to bind the server.  Useful when selecting a particular
+# network interface. (string value)
+#bind_host = 0.0.0.0
+
+# The port on which the server will listen. (integer value)
+#bind_port = <None>
 
-# CA certificate file to use to verify connecting clients
-#ca_file = /path/to/cafile
+# The number of child process workers that will be created to service
+# requests. The default will be equal to the number of CPUs available.
+# (integer value)
+workers = 1
 
-# ================= Security Options ==========================
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated by the Keystone v3 API with big service catalogs (integer
+# value)
+#max_header_line = 16384
 
-# AES key for encrypting store 'location' metadata, including
-# -- if used -- Swift or S3 credentials
-# Should be set to a random string of length 16, 24 or 32 bytes
-#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+# If False, server will return the header "Connection: close", If
+# True, server will return "Connection: Keep-Alive" in its responses.
+# In order to close the client socket connection explicitly after the
+# response is sent and read successfully by the client, you simply
+# have to set this option to False when you create a wsgi server.
+# (boolean value)
+#http_keepalive = true
 
-# ============ Registry Options ===============================
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 0
 
-# Address to find the registry server
-registry_host = 0.0.0.0
-
-# Port the registry server is listening on
-registry_port = 9191
+# The backlog value that will be used when creating the TCP listener
+# socket. (integer value)
+#backlog = 4096
 
-# What protocol to use when connecting to the registry server?
-# Set to https for secure HTTP communication
-registry_client_protocol = http
+# The value for the socket option TCP_KEEPIDLE.  This is the time in
+# seconds that the connection must be idle before TCP starts sending
+# keepalive probes. (integer value)
+#tcp_keepidle = 600
 
-# The path to the key file to use in SSL connections to the
-# registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
-#registry_client_key_file = /path/to/key/file
+# CA certificate file to use to verify connecting clients. (string
+# value)
+#ca_file = <None>
 
-# The path to the cert file to use in SSL connections to the
-# registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
-#registry_client_cert_file = /path/to/cert/file
+# Certificate file to use when starting API server securely. (string
+# value)
+#cert_file = <None>
 
-# The path to the certifying authority cert file to use in SSL connections
-# to the registry server, if any. Alternately, you may set the
-# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
-#registry_client_ca_file = /path/to/ca/file
+# Private key file to use when starting API server securely. (string
+# value)
+#key_file = <None>
+
+# The path to the sqlite file database that will be used for image
+# cache management. (string value)
+#image_cache_sqlite_db = cache.db
+
+# The driver to use for image cache management. (string value)
+#image_cache_driver = sqlite
 
-# When using SSL in connections to the registry server, do not require
-# validation via a certifying authority. This is the registry's equivalent of
-# specifying --insecure on the command line using glanceclient for the API
-# Default: False
-#registry_client_insecure = False
+# The upper limit (the maximum size of accumulated cache in bytes)
+# beyond which pruner, if running, starts cleaning the images cache.
+# (integer value)
+#image_cache_max_size = 10737418240
 
-# The period of time, in seconds, that the API server will wait for a registry
-# request to complete. A value of '0' implies no timeout.
-# Default: 600
-#registry_client_timeout = 600
+# The amount of time to let an image remain in the cache without being
+# accessed. (integer value)
+#image_cache_stall_time = 86400
+
+# Base directory that the Image Cache uses. (string value)
+#image_cache_dir = <None>
+
+# Default publisher_id for outgoing notifications. (string value)
+#default_publisher_id = image.localhost
 
-# Whether to automatically create the database tables.
-# Default: False
-#db_auto_create = False
+# List of disabled notifications. A notification can be given either
+# as a notification type to disable a single event, or as a
+# notification group prefix to disable all events within a group.
+# Example: if this config option is set to ["image.create",
+# "metadef_namespace"], then "image.create" notification will not be
+# sent after image is created and none of the notifications for
+# metadefinition namespaces will be sent. (list value)
+#disabled_notifications =
 
-# Enable DEBUG log messages from sqlalchemy which prints every database
-# query and response.
-# Default: False
-#sqlalchemy_debug = True
+# Address to find the registry server. (string value)
+#registry_host = 0.0.0.0
 
-# Pass the user's token through for API requests to the registry.
+# Port the registry server is listening on. (integer value)
+#registry_port = 9191
+
+# Whether to pass through the user token when making requests to the
+# registry. (boolean value)
 # WARNING: DO NOT CHANGE THIS VALUE.  Setting use_user_token to False
 # allows for unintended privilege escalation within the Glance API server.
 # See https://wiki.openstack.org/wiki/OSSN/OSSN-0060
-# Default: True
-#use_user_token = True
+#use_user_token = true
+
+# The administrators user name. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+#admin_user = <None>
+
+# The administrators password. If "use_user_token" is not in effect,
+# then admin credentials can be specified. (string value)
+#admin_password = <None>
+
+# The tenant name of the administrative user. If "use_user_token" is
+# not in effect, then admin tenant name can be specified. (string
+# value)
+#admin_tenant_name = <None>
+
+# The URL to the keystone service. If "use_user_token" is not in
+# effect and using keystone auth, then URL of keystone can be
+# specified. (string value)
+#auth_url = <None>
+
+# The strategy to use for authentication. If "use_user_token" is not
+# in effect, then auth strategy can be specified. (string value)
+#auth_strategy = noauth
 
-# If 'use_user_token' is not in effect then admin credentials
-# can be specified. Requests to the registry on behalf of
-# the API will use these credentials.
-# Admin user name
-#admin_user = None
-# Admin password
-#admin_password = None
-# Admin tenant name
-#admin_tenant_name = None
-# Keystone endpoint
-#auth_url = None
-# Keystone region
-#auth_region = None
-# Auth strategy
-#auth_strategy = keystone
+# The region for the authentication service. If "use_user_token" is
+# not in effect and using keystone auth, then region name can be
+# specified. (string value)
+#auth_region = <None>
+
+# The protocol to use for communication with the registry server.
+# Either http or https. (string value)
+#registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE
+# environment variable to a filepath of the key file (string value)
+#registry_client_key_file = <None>
 
-# ============ Notification System Options =====================
+# The path to the cert file to use in SSL connections to the registry
+# server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE
+# environment variable to a filepath of the CA cert file (string
+# value)
+#registry_client_cert_file = <None>
+
+# The path to the certifying authority cert file to use in SSL
+# connections to the registry server, if any. Alternately, you may set
+# the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the
+# CA cert file. (string value)
+#registry_client_ca_file = <None>
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's
+# equivalent of specifying --insecure on the command line using
+# glanceclient for the API. (boolean value)
+#registry_client_insecure = false
 
-# Driver or drivers to handle sending notifications. Set to
-# 'messaging' to send notifications to a message queue.
-# notification_driver = noop
+# The period of time, in seconds, that the API server will wait for a
+# registry request to complete. A value of 0 implies no timeout.
+# (integer value)
+#registry_client_timeout = 600
+
+# Whether to pass through headers containing user and tenant
+# information when making requests to the registry. This allows the
+# registry to use the context middleware without keystonemiddleware's
+# auth_token middleware, removing calls to the keystone auth service.
+# It is recommended that when using this option, secure communication
+# between glance api and glance registry is ensured by means other
+# than auth_token middleware. (boolean value)
+#send_identity_headers = false
 
-# Default publisher_id for outgoing notifications.
-# default_publisher_id = image.localhost
+# Directory that the scrubber will use to track information about what
+# to delete. Make sure this is set in glance-api.conf and glance-
+# scrubber.conf. (string value)
+#scrubber_datadir = /var/lib/glance/scrubber
 
-# Messaging driver used for 'messaging' notifications driver
-# rpc_backend = 'rabbit'
+# The amount of time in seconds to delay before performing a delete.
+# (integer value)
+#scrub_time = 0
+
+# A boolean that determines if the scrubber should clean up the files
+# it uses for tracking data. Only one server in your deployment should
+# be designated the cleanup host. (boolean value)
+#cleanup_scrubber = false
+
+# Turn on/off delayed delete. (boolean value)
+#delayed_delete = false
 
-# Configuration options if sending notifications via rabbitmq (these are
-# the defaults)
-rabbit_host = localhost
-rabbit_port = 5672
-rabbit_use_ssl = false
-rabbit_userid = guest
-rabbit_password = guest
-rabbit_virtual_host = /
-rabbit_notification_exchange = glance
-rabbit_notification_topic = notifications
-rabbit_durable_queues = False
+# Items must have a modified time that is older than this value in
+# order to be candidates for cleanup. (integer value)
+#cleanup_scrubber_time = 86400
+
+#
+# From oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead of
+# default WARNING level). (boolean value)
+#debug = false
+
+# Print more verbose output (set logging level to INFO instead of
+# default WARNING level). (boolean value)
+#verbose = false
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# DEPRECATED. A logging.Formatter log message format string which may
+# use any of the available logging.LogRecord attributes. This option
+# is deprecated.  Please use logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format = <None>
+
+# Format string for %%(asctime)s in log records. Default: %(default)s
+# . (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
 
-# Configuration options if sending notifications via Qpid (these are
-# the defaults)
-qpid_notification_exchange = glance
-qpid_notification_topic = notifications
-qpid_hostname = localhost
-qpid_port = 5672
-qpid_username =
-qpid_password =
-qpid_sasl_mechanisms =
-qpid_reconnect_timeout = 0
-qpid_reconnect_limit = 0
-qpid_reconnect_interval_min = 0
-qpid_reconnect_interval_max = 0
-qpid_reconnect_interval = 0
-qpid_heartbeat = 5
-# Set to 'ssl' to enable SSL
-qpid_protocol = tcp
-qpid_tcp_nodelay = True
+# (Optional) Name of log file to output to. If no default is set,
+# logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative --log-file paths.
+# (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Use syslog for logging. Existing syslog format is DEPRECATED during
+# I, and will change in J to honor RFC5424. (boolean value)
+#use_syslog = false
+
+# (Optional) Enables or disables syslog rfc5424 format for logging. If
+# enabled, prefixes the MSG part of the syslog message with APP-NAME
+# (RFC5424). The format without the APP-NAME is deprecated in I, and
+# will be removed in J. (boolean value)
+#use_syslog_rfc_format = false
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility = LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
 
-# ============ Delayed Delete Options =============================
+# Format string to use for log messages without context. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string
+# value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
 
-# Turn on/off delayed delete
-delayed_delete = False
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
 
-# Delayed delete time in seconds
-scrub_time = 43200
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+#
+# From oslo.messaging
+#
 
-# Directory that the scrubber will use to remind itself of what to delete
-# Make sure this is also set in glance-scrubber.conf
-scrubber_datadir = /var/lib/glance/scrubber
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve to this
+# address. (string value)
+#rpc_zmq_bind_address = *
 
-# =============== Quota Options ==================================
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker = local
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port = 9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
 
-# The maximum number of image members allowed per image
-#image_member_quota = 128
+# Maximum number of ingress messages to locally buffer per topic.
+# Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
 
-# The maximum number of image properties allowed per image
-#image_property_quota = 128
+# Name of this node. Must be a valid hostname, FQDN, or IP address.
+# Must match "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
 
-# The maximum number of tags allowed per image
-#image_tag_quota = 128
+# Seconds to wait before a cast expires (TTL). Only supported by
+# impl_zmq. (integer value)
+#rpc_cast_timeout = 30
 
-# The maximum number of locations allowed per image
-#image_location_quota = 10
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq = 300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl = 600
+
+# Size of RPC thread pool. (integer value)
+#rpc_thread_pool_size = 64
+
+# Driver or drivers to handle sending notifications. (multi valued)
+#notification_driver =
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics = notifications
 
-# Set a system wide quota for every user.  This value is the total number
-# of bytes that a user can use across all storage systems.  A value of
-# 0 means unlimited.
-#user_storage_quota = 0
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend option
+# and driver specific configuration. (string value)
+#transport_url = <None>
 
-# =============== Image Cache Options =============================
+# The messaging driver to use, defaults to rabbit. Other drivers
+# include qpid and zmq. (string value)
+#rpc_backend = rabbit
 
-# Base directory that the Image Cache uses
-image_cache_dir = /var/lib/glance/image-cache/
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
 
-# =============== Database Options =================================
 
 [database]
-# The file name to use with SQLite (string value)
+
+#
+# From oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+# Deprecated group/name - [DEFAULT]/sqlite_db
 #sqlite_db = oslo.sqlite
 
-# If True, SQLite uses synchronous mode (boolean value)
-#sqlite_synchronous = True
+# If True, SQLite uses synchronous mode. (boolean value)
+# Deprecated group/name - [DEFAULT]/sqlite_synchronous
+#sqlite_synchronous = true
 
-# The backend to use for db (string value)
+# The back end to use for the database. (string value)
 # Deprecated group/name - [DEFAULT]/db_backend
 #backend = sqlalchemy
 
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
+# The SQLAlchemy connection string to use to connect to the database.
+# (string value)
 # Deprecated group/name - [DEFAULT]/sql_connection
 # Deprecated group/name - [DATABASE]/sql_connection
 # Deprecated group/name - [sql]/connection
 connection = mysql://%SERVICE_USER%:%SERVICE_PASSWORD%@localhost/glance
 
-# The SQL mode to be used for MySQL sessions. This option,
-# including the default, overrides any server-set SQL mode. To
-# use whatever SQL mode is set by the server configuration,
-# set this to no value. Example: mysql_sql_mode= (string
-# value)
+# The SQLAlchemy connection string to use to connect to the slave
+# database. (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including
+# the default, overrides any server-set SQL mode. To use whatever SQL
+# mode is set by the server configuration, set this to no value.
+# Example: mysql_sql_mode= (string value)
 #mysql_sql_mode = TRADITIONAL
 
-# Timeout before idle sql connections are reaped (integer
-# value)
+# Timeout before idle SQL connections are reaped. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_idle_timeout
 # Deprecated group/name - [DATABASE]/sql_idle_timeout
 # Deprecated group/name - [sql]/idle_timeout
 #idle_timeout = 3600
 
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
+# Minimum number of SQL connections to keep open in a pool. (integer
+# value)
 # Deprecated group/name - [DEFAULT]/sql_min_pool_size
 # Deprecated group/name - [DATABASE]/sql_min_pool_size
 #min_pool_size = 1
 
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
+# Maximum number of SQL connections to keep open in a pool. (integer
+# value)
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size = <None>
 
-# Maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
 # Deprecated group/name - [DEFAULT]/sql_max_retries
 # Deprecated group/name - [DATABASE]/sql_max_retries
 #max_retries = 10
 
-# Interval between retries of opening a sql connection
-# (integer value)
+# Interval between retries of opening a SQL connection. (integer
+# value)
 # Deprecated group/name - [DEFAULT]/sql_retry_interval
 # Deprecated group/name - [DATABASE]/reconnect_interval
 #retry_interval = 10
 
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
+# If set, use this value for max_overflow with SQLAlchemy. (integer
+# value)
 # Deprecated group/name - [DEFAULT]/sql_max_overflow
 # Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
 #max_overflow = <None>
 
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
+# Verbosity of SQL debugging information: 0=None, 100=Everything.
+# (integer value)
 # Deprecated group/name - [DEFAULT]/sql_connection_debug
 #connection_debug = 0
 
-# Add python stack traces to SQL as comment strings (boolean
-# value)
+# Add Python stack traces to SQL as comment strings. (boolean value)
 # Deprecated group/name - [DEFAULT]/sql_connection_trace
-#connection_trace = False
+#connection_trace = false
 
-# If set, use this value for pool_timeout with sqlalchemy
-# (integer value)
+# If set, use this value for pool_timeout with SQLAlchemy. (integer
+# value)
 # Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
 #pool_timeout = <None>
 
-# Enable the experimental use of database reconnect on
-# connection lost (boolean value)
-#use_db_reconnect = False
+# Enable the experimental use of database reconnect on connection
+# lost. (boolean value)
+#use_db_reconnect = false
 
-# seconds between db connection retries (integer value)
+# Seconds between retries of a database transaction. (integer value)
 #db_retry_interval = 1
 
-# Whether to increase interval between db connection retries,
-# up to db_max_retry_interval (boolean value)
-#db_inc_retry_interval = True
+# If True, increases the interval between retries of a database
+# operation up to db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
 
-# max seconds between db connection retries, if
-# db_inc_retry_interval is enabled (integer value)
+# If db_inc_retry_interval is set, the maximum seconds between retries
+# of a database operation. (integer value)
 #db_max_retry_interval = 10
 
-# maximum db connection retries before error is raised.
-# (setting -1 implies an infinite retry count) (integer value)
+# Maximum retries in case of connection error or deadlock error before
+# error is raised. Set to -1 to specify an infinite retry count.
+# (integer value)
 #db_max_retries = 20
 
-[keystone_authtoken]
-auth_uri = http://127.0.0.1:5000/v2.0/
-identity_uri = http://127.0.0.1:35357/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
-revocation_cache_time = 10
-signing_dir = /var/lib/glance/keystone-signing
-
-[paste_deploy]
-# Name of the paste configuration file that defines the available pipelines
-#config_file = glance-api-paste.ini
-
-# Partial name of a pipeline in your paste configuration file with the
-# service name removed. For example, if your paste section name is
-# [pipeline:glance-api-keystone], you would configure the flavor below
-# as 'keystone'.
-flavor=keystone
+#
+# From oslo.db.concurrency
+#
 
-[store_type_location_strategy]
-# The scheme list to use to get store preference order. The scheme must be
-# registered by one of the stores defined by the 'known_stores' config option.
-# This option will be applied when you using 'store_type' option as image
-# location strategy defined by the 'location_strategy' config option.
-#store_type_preference =
-
-[profiler]
-# If False fully disable profiling feature.
-#enabled = False
-
-# If False doesn't trace SQL requests.
-#trace_sqlalchemy = False
+# Enable the experimental use of thread pooling for all DB API calls
+# (boolean value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
 
-[task]
-# ================= Glance Tasks Options ============================
-
-# Specifies how long (in hours) a task is supposed to live in the tasks DB
-# after succeeding or failing before getting soft-deleted.
-# The default value for task_time_to_live is 48 hours.
-# task_time_to_live = 48
-
-# Specifies which task executor to be used to run the task scripts.
-# The default value for task_executor is eventlet.
-# task_executor = eventlet
-
-# Specifies the maximum number of eventlet threads which can be spun up by
-# the eventlet based task executor to perform execution of Glance tasks.
-# eventlet_executor_pool_size = 1000
 
 [glance_store]
-# List of which store classes and store class locations are
-# currently known to glance at startup.
-# Existing but disabled stores:
-#      glance.store.rbd.Store,
-#      glance.store.s3.Store,
-#      glance.store.swift.Store,
-#      glance.store.sheepdog.Store,
-#      glance.store.cinder.Store,
-#      glance.store.gridfs.Store,
-#      glance.store.vmware_datastore.Store,
-#stores = glance.store.filesystem.Store,
-#         glance.store.http.Store
 
-# ============ Filesystem Store Options ========================
-
-# Directory that the Filesystem backend store
-# writes image data to
-filesystem_store_datadir = /var/lib/glance/images/
+#
+# From glance.store
+#
 
-# A list of directories where image data can be stored.
-# This option may be specified multiple times for specifying multiple store
-# directories. Either one of filesystem_store_datadirs or
-# filesystem_store_datadir option is required. A priority number may be given
-# after each directory entry, separated by a ":".
-# When adding an image, the highest priority directory will be selected, unless
-# there is not enough space available in cases where the image size is already
-# known. If no priority is given, it is assumed to be zero and the directory
-# will be considered for selection last. If multiple directories have the same
-# priority, then the one with the most free space available is selected.
-# If same store is specified multiple times then BadStoreConfiguration
-# exception will be raised.
-#filesystem_store_datadirs = /var/lib/glance/images/:1
+# List of stores enabled (list value)
+#stores = file,http
 
-# A path to a JSON file that contains metadata describing the storage
-# system.  When show_multiple_locations is True the information in this
-# file will be returned with any location that is contained in this
-# store.
-#filesystem_store_metadata_file = None
-
-# ============ Swift Store Options =============================
+# Default scheme to use to store image data. The scheme must be
+# registered by one of the stores defined by the 'stores' config
+# option. (string value)
+#default_store = file
 
-# Version of the authentication service to use
-# Valid versions are '2' for keystone and '1' for swauth and rackspace
-swift_store_auth_version = 2
-
-# Address where the Swift authentication service lives
-# Valid schemes are 'http://' and 'https://'
-# If no scheme specified,  default to 'https://'
-# For swauth, use something like '127.0.0.1:8080/v1.0/'
-swift_store_auth_address = 127.0.0.1:5000/v2.0/
-
-# User to authenticate against the Swift authentication service
-# If you use Swift authentication service, set it to 'account':'user'
-# where 'account' is a Swift storage account and 'user'
-# is a user in that account
-swift_store_user = jdoe:jdoe
-
-# Auth key for the user authenticating against the
-# Swift authentication service
-swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+# Minimum interval, in seconds, between updates of the dynamic storage
+# capabilities based on backend status. This is not a periodic
+# routine: the update logic runs only after the interval has elapsed
+# and a store operation has been triggered. The feature is enabled
+# only when the option value is greater than zero.
+# (integer value)
+#store_capabilities_update_min_interval = 0
 
-# Container within the account that the account should use
-# for storing images in Swift
-swift_store_container = glance
-
-# Do we create the container if it does not exist?
-swift_store_create_container_on_put = False
+#
+# From glance.store
+#
 
-# What size, in MB, should Glance start chunking image files
-# and do a large object manifest in Swift? By default, this is
-# the maximum object size in Swift, which is 5GB
-swift_store_large_object_size = 5120
+# Images will be chunked into objects of this size (in megabytes). For
+# best performance, this should be a power of two. (integer value)
+#sheepdog_store_chunk_size = 64
 
-# swift_store_config_file = glance-swift.conf
-# This file contains references for each of the configured
-# Swift accounts/backing stores. If used, this option can prevent
-# credentials being stored in the database. Using Swift references
-# is disabled if this config is left blank.
+# Port of sheep daemon. (integer value)
+#sheepdog_store_port = 7000
 
-# The reference to the default Swift parameters to use for adding new images.
-# default_swift_reference = 'ref1'
+# IP address of sheep daemon. (string value)
+#sheepdog_store_address = localhost
 
-# When doing a large object manifest, what size, in MB, should
-# Glance write chunks to Swift? This amount of data is written
-# to a temporary disk buffer during the process of chunking
-# the image file, and the default is 200MB
-swift_store_large_object_chunk_size = 200
+# RADOS images will be chunked into objects of this size (in
+# megabytes). For best performance, this should be a power of two.
+# (integer value)
+#rbd_store_chunk_size = 8
 
-# Whether to use ServiceNET to communicate with the Swift storage servers.
-# (If you aren't RACKSPACE, leave this False!)
-#
-# To use ServiceNET for authentication, prefix hostname of
-# `swift_store_auth_address` with 'snet-'.
-# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
-swift_enable_snet = False
-
-# If set to True enables multi-tenant storage mode which causes Glance images
-# to be stored in tenant specific Swift accounts.
-#swift_store_multi_tenant = False
+# RADOS pool in which images are stored. (string value)
+#rbd_store_pool = images
 
-# A list of swift ACL strings that will be applied as both read and
-# write ACLs to the containers created by Glance in multi-tenant
-# mode. This grants the specified tenants/users read and write access
-# to all newly created image objects. The standard swift ACL string
-# formats are allowed, including:
-# <tenant_id>:<username>
-# <tenant_name>:<username>
-# *:<username>
-# Multiple ACLs can be combined using a comma separated list, for
-# example: swift_store_admin_tenants = service:glance,*:admin
-#swift_store_admin_tenants =
+# RADOS user to authenticate as (only applicable if using Cephx). If
+# <None>, a default will be chosen based on the client.<USER> section
+# in rbd_store_ceph_conf. (string value)
+#rbd_store_user = <None>
 
-# The region of the swift endpoint to be used for single tenant. This setting
-# is only necessary if the tenant has multiple swift endpoints.
-#swift_store_region =
-
-# If set to False, disables SSL layer compression of https swift requests.
-# Setting to 'False' may improve performance for images which are already
-# in a compressed format, eg qcow2. If set to True, enables SSL layer
-# compression (provided it is supported by the target swift proxy).
-#swift_store_ssl_compression = True
+# Ceph configuration file path. If <None>, librados will locate the
+# default config. If using cephx authentication, this file should
+# include a reference to the right keyring in a client.<USER> section
+# (string value)
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
 
-# The number of times a Swift download will be retried before the
-# request fails
-#swift_store_retry_get_count = 0
+# The host where the S3 server is listening. (string value)
+#s3_store_host = <None>
 
-# Bypass SSL verification for Swift
-#swift_store_auth_insecure = False
-
-# ============ S3 Store Options =============================
+# The S3 query token access key. (string value)
+#s3_store_access_key = <None>
 
-# Address where the S3 authentication service lives
-# Valid schemes are 'http://' and 'https://'
-# If no scheme specified,  default to 'http://'
-s3_store_host = 127.0.0.1:8080/v1.0/
+# The S3 query token secret key. (string value)
+#s3_store_secret_key = <None>
 
-# User to authenticate against the S3 authentication service
-s3_store_access_key = <20-char AWS access key>
-
-# Auth key for the user authenticating against the
-# S3 authentication service
-s3_store_secret_key = <40-char AWS secret key>
+# The S3 bucket to be used to store the Glance data. (string value)
+#s3_store_bucket = <None>
 
-# Container within the account that the account should use
-# for storing images in S3. Note that S3 has a flat namespace,
-# so you need a unique bucket name for your glance images. An
-# easy way to do this is append your AWS access key to "glance".
-# S3 buckets in AWS *must* be lowercased, so remember to lowercase
-# your AWS access key if you use it in your bucket name below!
-s3_store_bucket = <lowercased 20-char aws access key>glance
+# The local directory where uploads will be staged before they are
+# transferred into S3. (string value)
+#s3_store_object_buffer_dir = <None>
 
-# Do we create the bucket if it does not exist?
-s3_store_create_bucket_on_put = False
+# A boolean to determine if the S3 bucket should be created on upload
+# if it does not exist or if an error should be returned to the user.
+# (boolean value)
+#s3_store_create_bucket_on_put = false
 
-# When sending images to S3, the data will first be written to a
-# temporary buffer on disk. By default the platform's temporary directory
-# will be used. If required, an alternative directory can be specified here.
-#s3_store_object_buffer_dir = /path/to/dir
-
-# When forming a bucket url, boto will either set the bucket name as the
-# subdomain or as the first token of the path. Amazon's S3 service will
-# accept it as the subdomain, but Swift's S3 middleware requires it be
-# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+# The S3 calling format used to determine the bucket. Either subdomain
+# or path can be used. (string value)
 #s3_store_bucket_url_format = subdomain
 
-# Size, in MB, should S3 start chunking image files
-# and do a multipart upload in S3. The default is 100MB.
+# What size, in MB, should S3 start chunking image files and do a
+# multipart upload in S3. (integer value)
 #s3_store_large_object_size = 100
 
-# Multipart upload part size, in MB, should S3 use when uploading
-# parts. The size must be greater than or equal to
-# 5MB. The default is 10MB.
+# What multipart upload part size, in MB, should S3 use when uploading
+# parts. The size must be greater than or equal to 5M. (integer value)
 #s3_store_large_object_chunk_size = 10
 
-# The number of thread pools to perform a multipart upload
-# in S3. The default is 10.
+# The number of thread pools to perform a multipart upload in S3.
+# (integer value)
 #s3_store_thread_pools = 10
 
-# ============ RBD Store Options =============================
-
-# Ceph configuration file path
-# If using cephx authentication, this file should
-# include a reference to the right keyring
-# in a client.<USER> section
-#rbd_store_ceph_conf = /etc/ceph/ceph.conf
-
-# RADOS user to authenticate as (only applicable if using cephx)
-# If <None>, a default will be chosen based on the client. section
-# in rbd_store_ceph_conf
-#rbd_store_user = <None>
-
-# RADOS pool in which images are stored
-#rbd_store_pool = images
+# Hostname or IP address of the instance to connect to, or a mongodb
+# URI, or a list of hostnames / mongodb URIs. If host is an IPv6
+# literal it must be enclosed in '[' and ']' characters following the
+# RFC2732 URL syntax (e.g. '[::1]' for localhost) (string value)
+#mongodb_store_uri = <None>
 
-# RADOS images will be chunked into objects of this size (in megabytes).
-# For best performance, this should be a power of two
-#rbd_store_chunk_size = 8
-
-# ============ Sheepdog Store Options =============================
-
-sheepdog_store_address = localhost
+# Database to use (string value)
+#mongodb_store_db = <None>
 
-sheepdog_store_port = 7000
-
-# Images will be chunked into objects of this size (in megabytes).
-# For best performance, this should be a power of two
-sheepdog_store_chunk_size = 64
-
-# ============ Cinder Store Options ===============================
-
-# Info to match when looking for cinder in the service catalog
-# Format is : separated values of the form:
+# Info to match when looking for cinder in the service catalog. Format
+# is : separated values of the form:
 # <service_type>:<service_name>:<endpoint_type> (string value)
 #cinder_catalog_info = volume:cinder:publicURL
 
@@ -655,7 +707,7 @@
 # Region name of this node (string value)
 #os_region_name = <None>
 
-# Location of ca certicates file to use for cinder client requests
+# Location of ca certificates file to use for cinder client requests.
 # (string value)
 #cinder_ca_certificates_file = <None>
 
@@ -663,40 +715,766 @@