PSARC 2015/289 OpenStack Cinder Enhancements s11u3-sru
author Qiang Strony Zhang <strony.zhang@oracle.com>
Fri, 09 Oct 2015 11:54:18 -0700
branch s11u3-sru
changeset 4937 8f0976d7e40e
parent 4936 79af241b4834
child 4939 f77b7f85081b
PSARC 2015/289 OpenStack Cinder Enhancements 20938366 cinder backup and volume-migration need to be supported in openstack 21215160 cinder SAN needs Paramiko 21365620 cinder volume backups sometimes fail on SPARC
components/openstack/cinder/cinder.p5m
components/openstack/cinder/files/cinder-volume-setup
components/openstack/cinder/files/cinder.exec_attr
components/openstack/cinder/files/solaris/solarisfc.py
components/openstack/cinder/files/solaris/solarisiscsi.py
components/openstack/cinder/files/solaris/zfs.py
components/openstack/cinder/patches/01-requirements.patch
components/openstack/cinder/patches/04-volume-backup.patch
components/openstack/cinder/patches/06-no-san-remote.patch
--- a/components/openstack/cinder/cinder.p5m	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/cinder.p5m	Fri Oct 09 11:54:18 2015 -0700
@@ -609,6 +609,9 @@
 # force a dependency on osprofiler; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/osprofiler-$(PYV)
 
+# force a dependency on paramiko; pkgdepend work is needed to flush this out.
+depend type=require fmri=library/python/paramiko-$(PYV)
+
 # force a dependency on paste; pkgdepend work is needed to flush this out.
 depend type=require fmri=library/python/paste-$(PYV)
 
--- a/components/openstack/cinder/files/cinder-volume-setup	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/files/cinder-volume-setup	Fri Oct 09 11:54:18 2015 -0700
@@ -36,6 +36,17 @@
     parser = ConfigParser.ConfigParser()
     parser.read(cinder_conf)
 
+    # check if the SAN storage is used.
+    try:
+        local = parser.getboolean("DEFAULT", "san_is_local")
+    except (ConfigParser.NoOptionError, ValueError):
+        local = True
+
+    # The script only handles the setup for local access as it needs to
+    # run in the remote node in the SAN environment.
+    if not local:
+        return smf_include.SMF_EXIT_OK
+
     # retrieve the top-level dataset or just get the default (rpool/cinder)
     try:
         top_ds = parser.get("DEFAULT", "zfs_volume_base")
--- a/components/openstack/cinder/files/cinder.exec_attr	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/files/cinder.exec_attr	Fri Oct 09 11:54:18 2015 -0700
@@ -1,8 +1,18 @@
 OpenStack Block Storage Management:solaris:cmd:RO::/usr/bin/cinder-manage:\
 uid=cinder;gid=cinder
 
+cinder-volume:solaris:cmd:RO::/usr/bin/chown:euid=0
+
+cinder-volume:solaris:cmd:RO::/usr/bin/dd:privs=file_dac_read
+
+cinder-volume:solaris:cmd:RO::/usr/sbin/fcadm:privs=file_dac_read,sys_devices
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/fcinfo:privs=file_dac_read,sys_devices
 
+cinder-volume:solaris:cmd:RO::/usr/sbin/format:euid=0
+
+cinder-volume:solaris:cmd:RO::/usr/sbin/iscsiadm:euid=0
+
 cinder-volume:solaris:cmd:RO::/usr/sbin/itadm:uid=0
 
 cinder-volume:solaris:cmd:RO::/usr/sbin/stmfadm:euid=0
--- a/components/openstack/cinder/files/solaris/solarisfc.py	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/files/solaris/solarisfc.py	Fri Oct 09 11:54:18 2015 -0700
@@ -17,6 +17,7 @@
 """Generic Solaris Fibre Channel utilities."""
 
 import os
+import platform
 import time
 
 from cinder.brick import exception
@@ -107,7 +108,7 @@
         for wwpn in wwpns:
             self.execute('/usr/sbin/fcadm', 'force-lip', wwpn)
 
-    def get_device_path(self, wwn):
+    def _get_device_path(self, wwn):
         """Get the Device Name of the WWN"""
         try:
             out, err = self.execute('/usr/sbin/fcinfo', 'logical-unit', '-v')
@@ -146,7 +147,7 @@
         # a refresh.
         for i in range(1, scan_tries):
             LOG.debug("Looking for Fibre Channel device")
-            host_dev = self.get_device_path(wwn)
+            host_dev = self._get_device_path(wwn)
 
             if host_dev is not None and os.path.exists(host_dev):
                 break
@@ -158,5 +159,15 @@
             LOG.error(msg)
             raise exception.NoFibreChannelVolumeDeviceFound()
 
+        # Set the label EFI to the disk on SPARC before it is accessed and
+        # make sure the correct device path with slice 0
+        # (like '/dev/rdsk/c0t600xxxd0s0').
+        if platform.processor() == 'sparc':
+            tmp_dev_name = host_dev.rsplit('s', 1)
+            disk_name = tmp_dev_name[0].split('/')[-1]
+            (out, _err) = self.execute('/usr/sbin/format', '-L', 'efi', '-d',
+                                       disk_name)
+            host_dev = '%ss0' % tmp_dev_name[0]
+
         device_info['path'] = host_dev
         return device_info
--- a/components/openstack/cinder/files/solaris/solarisiscsi.py	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/files/solaris/solarisiscsi.py	Fri Oct 09 11:54:18 2015 -0700
@@ -17,6 +17,7 @@
 """Generic Solaris iSCSI utilities."""
 
 import os
+import platform
 import time
 
 from cinder.brick import exception
@@ -31,13 +32,6 @@
     def __init__(self, *args, **kwargs):
         self.execute = putils.execute
 
-    def disconnect_iscsi(self):
-        """Disable the iSCSI discovery method to detach the volume
-        from instance_name.
-        """
-        self.execute('/usr/sbin/iscsiadm', 'modify', 'discovery',
-                     '--sendtargets', 'disable')
-
     def _get_device_path(self, connection_properties):
         """Get the device path from the target info."""
         (out, _err) = self.execute('/usr/sbin/iscsiadm', 'list',
@@ -114,5 +108,15 @@
         else:
             raise exception.VolumeDeviceNotFound(device=host_device)
 
+        # Set the label EFI to the disk on SPARC before it is accessed and
+        # make sure the correct device path with slice 0
+        # (like '/dev/rdsk/c0t600xxxd0s0').
+        if platform.processor() == 'sparc':
+            tmp_dev_name = host_device.rsplit('s', 1)
+            disk_name = tmp_dev_name[0].split('/')[-1]
+            (out, _err) = self.execute('/usr/sbin/format', '-L', 'efi', '-d',
+                                       disk_name)
+            host_device = '%ss0' % tmp_dev_name[0]
+
         device_info['path'] = host_device
         return device_info
--- a/components/openstack/cinder/files/solaris/zfs.py	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/files/solaris/zfs.py	Fri Oct 09 11:54:18 2015 -0700
@@ -20,16 +20,21 @@
 """
 
 import abc
+import fcntl
+import os
+import socket
+import subprocess
 import time
 
 from oslo.config import cfg
 
 from cinder import exception
+from cinder.i18n import _
 from cinder.image import image_utils
-from cinder.i18n import _
 from cinder.openstack.common import log as logging
 from cinder.openstack.common import processutils
 from cinder.volume import driver
+from cinder.volume.drivers.san.san import SanDriver
 
 from solaris_install.target.size import Size
 
@@ -44,13 +49,24 @@
 FLAGS.register_opts(solaris_zfs_opts)
 
 
-class ZFSVolumeDriver(driver.VolumeDriver):
+class ZFSVolumeDriver(SanDriver):
     """Local ZFS volume operations."""
     protocol = 'local'
 
     def __init__(self, *args, **kwargs):
-        super(ZFSVolumeDriver, self).__init__(*args, **kwargs)
+        super(ZFSVolumeDriver, self).__init__(execute=self.solaris_execute,
+                                              *args, **kwargs)
         self.configuration.append_config_values(solaris_zfs_opts)
+        self.run_local = self.configuration.san_is_local
+        self.hostname = socket.gethostname()
+
+    def solaris_execute(self, *cmd, **kwargs):
+        """Execute the command locally or remotely."""
+        if self.run_local:
+            return processutils.execute(*cmd, **kwargs)
+        else:
+            return super(ZFSVolumeDriver, self)._run_ssh(cmd,
+                                                         check_exit_code=True)
 
     def check_for_setup_error(self):
         """Check the setup error."""
@@ -59,7 +75,7 @@
     def create_volume(self, volume):
         """Create a volume."""
         size = '%sG' % volume['size']
-        zfs_volume = self._get_zfs_volume_name(volume)
+        zfs_volume = self._get_zfs_volume_name(volume['name'])
 
         # Create a ZFS volume
         cmd = ['/usr/sbin/zfs', 'create', '-V', size, zfs_volume]
@@ -78,7 +94,7 @@
 
         # Create a ZFS clone
         zfs_snapshot = self._get_zfs_snap_name(snapshot)
-        zfs_volume = self._get_zfs_volume_name(volume)
+        zfs_volume = self._get_zfs_volume_name(volume['name'])
         cmd = ['/usr/sbin/zfs', 'clone', zfs_snapshot, zfs_volume]
         self._execute(*cmd)
 
@@ -102,7 +118,7 @@
 
         # Create a ZFS clone
         zfs_snapshot = self._get_zfs_snap_name(tmp_snapshot)
-        zfs_volume = self._get_zfs_volume_name(volume)
+        zfs_volume = self._get_zfs_volume_name(volume['name'])
         cmd = ['/usr/sbin/zfs', 'clone', zfs_snapshot, zfs_volume]
         self._execute(*cmd)
 
@@ -123,7 +139,7 @@
             LOG.debug(_("The volume path '%s' doesn't exist") % zvol)
             return
 
-        zfs_volume = self._get_zfs_volume_name(volume)
+        zfs_volume = self._get_zfs_volume_name(volume['name'])
         origin_snapshot = self._get_zfs_property('origin', zfs_volume)
         tmp_cloned_vol = False
 
@@ -205,37 +221,86 @@
 
     def copy_image_to_volume(self, context, volume, image_service, image_id):
         """Fetch the image from image_service and write it to the volume."""
-        image_utils.fetch_to_raw(context,
-                                 image_service,
-                                 image_id,
-                                 self.local_path(volume))
+        raise NotImplementedError()
 
     def copy_volume_to_image(self, context, volume, image_service, image_meta):
         """Copy the volume to the specified image."""
-        image_utils.upload_volume(context,
-                                  image_service,
-                                  image_meta,
-                                  self.local_path(volume))
+        raise NotImplementedError()
 
     def _get_zfs_property(self, prop, dataset):
         """Get the value of property for the dataset."""
-        (out, _err) = self._execute('/usr/sbin/zfs', 'get', '-H', '-o',
-                                    'value', prop, dataset)
-        return out.rstrip()
+        try:
+            (out, _err) = self._execute('/usr/sbin/zfs', 'get', '-H', '-o',
+                                        'value', prop, dataset)
+            return out.rstrip()
+        except processutils.ProcessExecutionError:
+            LOG.info(_("Failed to get the property '%s' of the dataset '%s'") %
+                     (prop, dataset))
+            return None
 
     def _get_zfs_snap_name(self, snapshot):
         """Get the snapshot path."""
         return "%s/%s@%s" % (self.configuration.zfs_volume_base,
                              snapshot['volume_name'], snapshot['name'])
 
-    def _get_zfs_volume_name(self, volume):
+    def _get_zfs_volume_name(self, volume_name):
         """Add the pool name to get the ZFS volume."""
         return "%s/%s" % (self.configuration.zfs_volume_base,
-                          volume['name'])
+                          volume_name)
+
+    def _piped_execute(self, cmd1, cmd2):
+        """Pipe output of cmd1 into cmd2."""
+        LOG.debug(_("Piping cmd1='%s' into cmd2='%s'") %
+                  (' '.join(cmd1), ' '.join(cmd2)))
+
+        try:
+            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE)
+        except:
+            LOG.error(_("_piped_execute '%s' failed.") % (cmd1))
+            raise
+
+        # Set the pipe to be blocking because eventlet.green.subprocess uses
+        # the non-blocking pipe.
+        flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
+        fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)
+
+        p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+        p1.stdout.close()
+        stdout, stderr = p2.communicate()
+        if p2.returncode:
+            msg = (_("_piped_execute failed with the info '%s' and '%s'.") %
+                   (stdout, stderr))
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def _zfs_send_recv(self, src, dst, remote=False):
+        """Replicate the ZFS dataset by calling zfs send/recv cmd"""
+        src_snapshot = {'volume_name': src['name'],
+                        'name': 'tmp-snapshot-%s' % src['id']}
+        src_snapshot_name = self._get_zfs_snap_name(src_snapshot)
+        prop_type = self._get_zfs_property('type', src_snapshot_name)
+        # Delete the temporary snapshot if it already exists
+        if prop_type == 'snapshot':
+            self.delete_snapshot(src_snapshot)
+        # Create a temporary snapshot of volume
+        self.create_snapshot(src_snapshot)
+        src_snapshot_name = self._get_zfs_snap_name(src_snapshot)
+
+        cmd1 = ['/usr/sbin/zfs', 'send', src_snapshot_name]
+        cmd2 = ['/usr/sbin/zfs', 'receive', dst]
+        self._piped_execute(cmd1, cmd2)
+
+        # Delete the temporary src snapshot and dst snapshot
+        self.delete_snapshot(src_snapshot)
+        dst_snapshot_name = "%s@tmp-snapshot-%s" % (dst, src['id'])
+        cmd = ['/usr/sbin/zfs', 'destroy', dst_snapshot_name]
+        self._execute(*cmd)
 
     def _get_zvol_path(self, volume):
         """Get the ZFS volume path."""
-        return "/dev/zvol/rdsk/%s" % self._get_zfs_volume_name(volume)
+        return "/dev/zvol/rdsk/%s" % self._get_zfs_volume_name(volume['name'])
 
     def _update_volume_stats(self):
         """Retrieve volume status info."""
@@ -256,13 +321,17 @@
             (Size(used_size) + Size(avail_size)).get(Size.gb_units)
         stats['free_capacity_gb'] = Size(avail_size).get(Size.gb_units)
         stats['reserved_percentage'] = self.configuration.reserved_percentage
+        stats['location_info'] =\
+            ('ZFSVolumeDriver:%(hostname)s:%(zfs_volume_base)s' %
+             {'hostname': self.hostname,
+              'zfs_volume_base': self.configuration.zfs_volume_base})
 
         self._stats = stats
 
     def extend_volume(self, volume, new_size):
         """Extend an existing volume's size."""
         volsize_str = 'volsize=%sg' % new_size
-        zfs_volume = self._get_zfs_volume_name(volume)
+        zfs_volume = self._get_zfs_volume_name(volume['name'])
         try:
             self._execute('/usr/sbin/zfs', 'set', volsize_str, zfs_volume)
         except Exception:
@@ -270,6 +339,56 @@
                    % {'new_size': new_size})
             raise exception.VolumeBackendAPIException(data=msg)
 
+    def rename_volume(self, src, dst):
+        """Rename the volume from src to dst in the same zpool."""
+        cmd = ['/usr/sbin/zfs', 'rename', src, dst]
+        self._execute(*cmd)
+
+        LOG.debug(_("Rename the volume '%s' to '%s'") % (src, dst))
+
+    def migrate_volume(self, context, volume, host):
+        """Migrate the volume among different backends on the same server.
+
+        The volume migration can only run locally by calling zfs send/recv
+        cmds and the specified host needs to be on the same server with the
+        host. But, one exception is when the src and dst volume are located
+        under the same zpool locally or remotely, the migration will be done
+        by just renaming the volume.
+        :param context: context
+        :param volume: a dictionary describing the volume to migrate
+        :param host: a dictionary describing the host to migrate to
+        """
+        false_ret = (False, None)
+        if volume['status'] != 'available':
+            LOG.debug(_("Status of volume '%s' is '%s', not 'available'.") %
+                      (volume['name'], volume['status']))
+            return false_ret
+
+        if 'capabilities' not in host or \
+           'location_info' not in host['capabilities']:
+            LOG.debug(_("No location_info or capabilities are in host info"))
+            return false_ret
+
+        info = host['capabilities']['location_info']
+        if (self.hostname != info.split(':')[1]):
+            LOG.debug(_("Migration between two different servers '%s' and "
+                      "'%s' is not supported yet.") %
+                      (self.hostname, info.split(':')[1]))
+            return false_ret
+
+        dst_volume = "%s/%s" % (info.split(':')[-1], volume['name'])
+        src_volume = self._get_zfs_volume_name(volume['name'])
+        # check if the src and dst volume are under the same zpool
+        if (src_volume.split('/')[0] == dst_volume.split('/')[0]):
+            self.rename_volume(src_volume, dst_volume)
+        else:
+            self._zfs_send_recv(volume, dst_volume)
+            # delete the source volume
+            self.delete_volume(volume)
+
+        provider_location = {}
+        return (True, provider_location)
+
 
 class STMFDriver(ZFSVolumeDriver):
     """Abstract base class for common COMSTAR operations."""
@@ -492,8 +611,14 @@
         properties['target_discovered'] = True
         properties['target_iqn'] = target_name
 
+        # Here the san_is_local means that the cinder-volume runs in the
+        # iSCSI target with iscsi_ip_address.
+        if self.configuration.san_is_local:
+            target_ip = self.configuration.iscsi_ip_address
+        else:
+            target_ip = self.configuration.san_ip
         properties['target_portal'] = ('%s:%d' %
-                                       (self.configuration.iscsi_ip_address,
+                                       (target_ip,
                                         self.configuration.iscsi_port))
         view_lun = self._get_view_and_lun(luid)
         if view_lun['lun'] is not None:
--- a/components/openstack/cinder/patches/01-requirements.patch	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/patches/01-requirements.patch	Fri Oct 09 11:54:18 2015 -0700
@@ -1,19 +1,17 @@
 In-house patch to remove unnecessary dependencies from Cinder's
 requirements files. The specific reasons are as follows:
 
-kombu		Not applicable
+kombu           Not applicable
 
-oslo.rootwrap	Not applicable to Solaris
+oslo.rootwrap   Not applicable to Solaris
 
-paramiko	Not applicable to Solaris (various drivers specific)
+pycrypto        Not applicable to Solaris (various drivers specific)
 
-pycrypto	Not applicable to Solaris (various drivers specific)
-
-rtslib-fb	Not applicable to Solaris (Linux iSCSI specific)
+rtslib-fb       Not applicable to Solaris (Linux iSCSI specific)
 
---- cinder-2014.2.2/cinder.egg-info/requires.txt.~1~	2015-02-05 08:05:50.000000000 -0800
-+++ cinder-2014.2.2/cinder.egg-info/requires.txt	2015-02-23 13:49:52.736413251 -0800
-@@ -6,18 +6,14 @@ eventlet>=0.15.1,<0.16.0
+--- cinder-2014.2.2/cinder.egg-info/requires.txt.orig	2015-05-27 22:55:59.906826810 -0700
++++ cinder-2014.2.2/cinder.egg-info/requires.txt	2015-05-27 22:57:00.022877081 -0700
+@@ -6,18 +6,15 @@
  greenlet>=0.3.2
  iso8601>=0.1.9
  keystonemiddleware>=1.0.0
@@ -25,14 +23,14 @@
  oslo.messaging>=1.4.0,!=1.5.0,<1.6.0
 -oslo.rootwrap>=1.3.0
  osprofiler>=0.3.0                       # Apache-2.0
--paramiko>=1.13.0
+ paramiko>=1.13.0
  Paste
  PasteDeploy>=1.5.0
 -pycrypto>=2.6
  python-barbicanclient>=2.1.0,!=3.0.0,<3.0.2
  python-glanceclient>=0.14.0
  python-novaclient>=2.18.0
-@@ -25,7 +21,6 @@ python-swiftclient>=2.2.0
+@@ -25,7 +22,6 @@
  requests>=1.2.1,!=2.4.0
  Routes>=1.12.3,!=2.0
  taskflow>=0.4,<0.7.0
@@ -40,9 +38,9 @@
  six>=1.7.0
  SQLAlchemy>=0.8.4,<=0.9.99,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.9.4,!=0.9.5,!=0.9.6
  sqlalchemy-migrate==0.9.1
---- cinder-2014.2.2/requirements.txt.~1~	2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/requirements.txt	2015-02-23 13:52:20.137709426 -0800
-@@ -10,18 +10,14 @@ eventlet>=0.15.1,<0.16.0
+--- cinder-2014.2.2/requirements.txt.orig       2015-05-27 22:57:45.226877884 -0700
++++ cinder-2014.2.2/requirements.txt    2015-05-27 22:58:25.817248288 -0700
+@@ -10,18 +10,15 @@
  greenlet>=0.3.2
  iso8601>=0.1.9
  keystonemiddleware>=1.0.0
@@ -54,14 +52,14 @@
  oslo.messaging>=1.4.0,!=1.5.0,<1.6.0
 -oslo.rootwrap>=1.3.0
  osprofiler>=0.3.0                       # Apache-2.0
--paramiko>=1.13.0
+ paramiko>=1.13.0
  Paste
  PasteDeploy>=1.5.0
 -pycrypto>=2.6
  python-barbicanclient>=2.1.0,!=3.0.0,<3.0.2
  python-glanceclient>=0.14.0
  python-novaclient>=2.18.0
-@@ -29,7 +25,6 @@ python-swiftclient>=2.2.0
+@@ -29,7 +26,6 @@
  requests>=1.2.1,!=2.4.0
  Routes>=1.12.3,!=2.0
  taskflow>=0.4,<0.7.0
@@ -69,3 +67,4 @@
  six>=1.7.0
  SQLAlchemy>=0.8.4,<=0.9.99,!=0.9.0,!=0.9.1,!=0.9.2,!=0.9.3,!=0.9.4,!=0.9.5,!=0.9.6
  sqlalchemy-migrate==0.9.1
+
--- a/components/openstack/cinder/patches/04-volume-backup.patch	Fri Oct 09 09:55:13 2015 -0700
+++ b/components/openstack/cinder/patches/04-volume-backup.patch	Fri Oct 09 11:54:18 2015 -0700
@@ -1,8 +1,8 @@
 This patch is to replace the linux-specific codes with the solaris
 codes to support the cinder backup on the Solaris.
 
---- cinder-2014.2.2/cinder/brick/initiator/connector.py.~1~	2014-10-16 06:26:26.000000000 -0700
-+++ cinder-2014.2.2/cinder/brick/initiator/connector.py	2015-01-04 23:12:23.661116812 -0800
+--- cinder-2014.2.2/cinder/brick/initiator/connector.py.~1~	2015-02-05 08:03:26.000000000 -0800
++++ cinder-2014.2.2/cinder/brick/initiator/connector.py	2015-04-20 21:05:25.881159722 -0700
 @@ -15,6 +15,7 @@
  
  import os
@@ -58,7 +58,16 @@
          super(ISCSIConnector, self).__init__(root_helper, driver=driver,
                                               execute=execute,
                                               device_scan_attempts=
-@@ -192,6 +204,9 @@
+@@ -181,6 +193,8 @@
+ 
+     def set_execute(self, execute):
+         super(ISCSIConnector, self).set_execute(execute)
++        if sys.platform == 'sunos5':
++            return
+         self._linuxscsi.set_execute(execute)
+ 
+     @synchronized('connect_volume')
+@@ -192,6 +206,9 @@
          target_iqn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
@@ -68,18 +77,17 @@
  
          device_info = {'type': 'block'}
  
-@@ -262,6 +277,10 @@
+@@ -262,6 +279,9 @@
          target_iqn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
 +        if sys.platform == 'sunos5':
-+            self._solarisiscsi.disconnect_iscsi()
 +            return
 +
          # Moved _rescan_iscsi and _rescan_multipath
          # from _disconnect_volume_multipath_iscsi to here.
          # Otherwise, if we do rescan after _linuxscsi.remove_multipath_device
-@@ -306,6 +325,9 @@
+@@ -306,6 +326,9 @@
  
      def get_initiator(self):
          """Secure helper to read file as root."""
@@ -89,7 +97,7 @@
          file_path = '/etc/iscsi/initiatorname.iscsi'
          try:
              lines, _err = self._execute('cat', file_path, run_as_root=True,
-@@ -555,8 +577,11 @@
+@@ -555,8 +578,11 @@
                   execute=putils.execute, use_multipath=False,
                   device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                   *args, **kwargs):
@@ -103,7 +111,16 @@
          super(FibreChannelConnector, self).__init__(root_helper, driver=driver,
                                                      execute=execute,
                                                      device_scan_attempts=
-@@ -578,6 +603,10 @@
+@@ -566,6 +592,8 @@
+ 
+     def set_execute(self, execute):
+         super(FibreChannelConnector, self).set_execute(execute)
++        if sys.platform == 'sunos5':
++            return
+         self._linuxscsi.set_execute(execute)
+         self._linuxfc.set_execute(execute)
+ 
+@@ -578,6 +606,10 @@
          target_iqn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
@@ -114,7 +131,7 @@
          LOG.debug("execute = %s" % self._execute)
          device_info = {'type': 'block'}
  
-@@ -686,6 +715,13 @@
+@@ -686,6 +718,13 @@
          target_wwn - iSCSI Qualified Name
          target_lun - LUN id of the volume
          """
@@ -130,15 +147,18 @@
          # If this is a multipath device, we need to search again
 
 
---- cinder-2014.2.2/cinder/utils.py.~1~   2014-10-16 06:26:26.000000000 -0700
-+++ cinder-2014.2.2/cinder/utils.py       2015-01-04 23:26:04.305688145 -0800
-@@ -137,8 +137,9 @@
+--- cinder-2014.2.2/cinder/utils.py.~1~ 2015-02-05 08:03:26.000000000 -0800
++++ cinder-2014.2.2/cinder/utils.py     2015-04-20 20:46:27.658908715 -0700
+@@ -137,8 +137,12 @@
 
  def execute(*cmd, **kwargs):
      """Convenience wrapper around oslo's execute() method."""
 -    if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
 -        kwargs['root_helper'] = get_root_helper()
-+    if sys.platform != 'sunos5':
++    if sys.platform == 'sunos5':
++        if 'run_as_root' in kwargs:
++            kwargs['run_as_root'] = False
++    else:
 +        if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
 +            kwargs['root_helper'] = get_root_helper()
      return processutils.execute(*cmd, **kwargs)
--- a/components/openstack/cinder/patches/06-no-san-remote.patch	Fri Oct 09 09:55:13 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-This internal-only patch is to prevent Cinder's SanDriver from
-attempting to use the Paramiko-based routines until the latter is
-integrated into Solaris. It also provides a specific error in the case
-where san_is_local=false is defined in the configuration and a driver
-is specified that attempts to use that configuration (which implies,
-again, an attempt to use Paramiko).
-
---- cinder-2014.2.2/cinder/volume/drivers/san/san.py.~1~	2015-02-05 08:03:26.000000000 -0800
-+++ cinder-2014.2.2/cinder/volume/drivers/san/san.py	2015-04-22 00:54:38.481125902 -0700
-@@ -29,7 +29,6 @@ from cinder.i18n import _
- from cinder.openstack.common import excutils
- from cinder.openstack.common import log as logging
- from cinder.openstack.common import processutils
--from cinder import ssh_utils
- from cinder import utils
- from cinder.volume import driver
- 
-@@ -165,10 +164,8 @@ class SanDriver(driver.VolumeDriver):
-     def check_for_setup_error(self):
-         """Returns an error if prerequisites aren't met."""
-         if not self.run_local:
--            if not (self.configuration.san_password or
--                    self.configuration.san_private_key):
--                raise exception.InvalidInput(
--                    reason=_('Specify san_password or san_private_key'))
-+            raise exception.InvalidInput(
-+                reason=_("san_is_local=false is not currently supported."))
- 
-         # The san_ip must always be set, because we use it for the target
-         if not self.configuration.san_ip: