os-brick 1.3.0 release

meta:version: 1.3.0
 meta:series: newton
 meta:release-type: release
 meta:announce: openstack-dev@lists.openstack.org
 meta:pypi: yes
 meta:first: yes
 -----BEGIN PGP SIGNATURE-----
 
 iEYEABECAAYFAlcLnjgACgkQgNg6eWEDv1l4+ACfZdEIXlNiwPCG+6XgoioEUFwT
 8NsAoO7Sd4dhWLVICfCsV/v3b9Fb5K8R
 =P6rT
 -----END PGP SIGNATURE-----

Merge tag '1.3.0' into debian/newton

os-brick 1.3.0 release

meta:version: 1.3.0
meta:series: newton
meta:release-type: release
meta:announce: openstack-dev@lists.openstack.org
meta:pypi: yes
meta:first: yes
This commit is contained in:
Thomas Goirand 2016-06-06 14:38:03 +00:00
commit e573e9d92f
19 changed files with 877 additions and 99 deletions

2
.gitignore vendored
View File

@ -27,6 +27,8 @@ pip-log.txt
nosetests.xml
.testrepository
.venv
tools/lintstack.head.py
tools/pylint_exceptions
# Translations
*.mo

View File

@ -1,6 +0,0 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
# The base module to hold the copy of openstack.common
base=brick

View File

@ -132,3 +132,12 @@ class VolumeGroupCreationFailed(BrickException):
class CommandExecutionFailed(BrickException):
message = _("Failed to execute command %(cmd)s")
class VolumeDriverException(BrickException):
    # Raised when an IO operation against a volume's handle fails
    # (e.g. a sheepdog VDI read/write executed via the dog CLI).
    message = _('An error occurred while IO to volume %(name)s.')
class InvalidIOHandleObject(BrickException):
    # Raised when a caller-supplied IO handle is not an instance of the
    # wrapper type the connector expects (see check_IO_handle_valid).
    message = _('IO handle of %(protocol)s has wrong object '
                'type %(actual_type)s.')

View File

@ -53,6 +53,7 @@ from os_brick.initiator import host_driver
from os_brick.initiator import linuxfc
from os_brick.initiator import linuxrbd
from os_brick.initiator import linuxscsi
from os_brick.initiator import linuxsheepdog
from os_brick.remotefs import remotefs
from os_brick.i18n import _, _LE, _LI, _LW
@ -80,6 +81,7 @@ SCALITY = "SCALITY"
QUOBYTE = "QUOBYTE"
DISCO = "DISCO"
VZSTORAGE = "VZSTORAGE"
SHEEPDOG = "SHEEPDOG"
def _check_multipathd_running(root_helper, enforce_multipath):
@ -265,6 +267,12 @@ class InitiatorConnector(executor.Executor):
device_scan_attempts=device_scan_attempts,
*args, **kwargs
)
elif protocol == SHEEPDOG:
return SheepdogConnector(root_helper=root_helper,
driver=driver,
execute=execute,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
else:
msg = (_("Invalid InitiatorConnector protocol "
"specified %(protocol)s") %
@ -456,15 +464,23 @@ class InitiatorConnector(executor.Executor):
of the target volume attributes.
:type connection_properties: dict
"""
volumes = []
path = self.get_search_path()
if path:
# now find all entries in the search path
if os.path.isdir(path):
path_items = [path, '/*']
file_filter = ''.join(path_items)
return glob.glob(file_filter)
else:
return []
volumes = glob.glob(file_filter)
return volumes
def check_IO_handle_valid(self, handle, data_type, protocol):
    """Verify that an IO handle is an instance of the expected type.

    A falsy handle (e.g. ``None``) is accepted silently; only a present
    handle of the wrong type is rejected.

    :param handle: the IO handle object to validate (may be None)
    :param data_type: the expected wrapper class
    :param protocol: protocol name, used only in the error report
    :raises: exception.InvalidIOHandleObject
    """
    if not handle:
        return
    if not isinstance(handle, data_type):
        raise exception.InvalidIOHandleObject(protocol=protocol,
                                              actual_type=type(handle))
class FakeConnector(InitiatorConnector):
@ -920,7 +936,10 @@ class ISCSIConnector(InitiatorConnector):
for dev in host_devices:
if os.path.exists(dev):
host_device = dev
multipath_device = self._get_multipath_device_name(dev)
device_wwn = self._linuxscsi.get_scsi_wwn(dev)
(multipath_device, multipath_id) = (super(
ISCSIConnector, self)._discover_mpath_device(
device_wwn, connection_properties, dev))
if multipath_device:
break
if not host_device:
@ -1193,18 +1212,6 @@ class ISCSIConnector(InitiatorConnector):
attempts=5,
delay_on_retry=True)
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if not re.match(MULTIPATH_ERROR_REGEX, line)]
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
@ -1762,9 +1769,9 @@ class RemoteFsConnector(InitiatorConnector):
*args, **kwargs):
kwargs = kwargs or {}
conn = kwargs.get('conn')
mount_type_lower = mount_type.lower()
if conn:
mount_point_base = conn.get('mount_point_base')
mount_type_lower = mount_type.lower()
if mount_type_lower in ('nfs', 'glusterfs', 'scality',
'quobyte', 'vzstorage'):
kwargs[mount_type_lower + '_mount_point_base'] = (
@ -1773,9 +1780,14 @@ class RemoteFsConnector(InitiatorConnector):
else:
LOG.warning(_LW("Connection details not present."
" RemoteFsClient may not initialize properly."))
self._remotefsclient = remotefs.RemoteFsClient(mount_type, root_helper,
execute=execute,
*args, **kwargs)
if mount_type_lower == 'scality':
cls = remotefs.ScalityRemoteFsClient
else:
cls = remotefs.RemoteFsClient
self._remotefsclient = cls(mount_type, root_helper, execute=execute,
*args, **kwargs)
super(RemoteFsConnector, self).__init__(
root_helper, driver=driver,
execute=execute,
@ -2989,3 +3001,99 @@ class DISCOConnector(InitiatorConnector):
def extend_volume(self, connection_properties):
raise NotImplementedError
class SheepdogConnector(InitiatorConnector):
    """Connector class to attach/detach sheepdog volumes.

    Unlike block-device based connectors, sheepdog volumes are exposed to
    callers as a file-like IO handle (SheepdogVolumeIOWrapper) under the
    'path' key of the returned device info dict.
    """

    def __init__(self, root_helper, driver=None,
                 execute=putils.execute, use_multipath=False,
                 device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        # NOTE(review): use_multipath is accepted only for signature parity
        # with the other connectors; it is unused for sheepdog.
        super(SheepdogConnector, self).__init__(root_helper, driver=driver,
                                                execute=execute,
                                                device_scan_attempts=
                                                device_scan_attempts,
                                                *args, **kwargs)

    def get_volume_paths(self, connection_properties):
        # TODO(lixiaoy1): don't know where the connector
        # looks for sheepdog volumes.
        return []

    def get_search_path(self):
        # TODO(lixiaoy1): don't know where the connector
        # looks for sheepdog volumes.
        return None

    def get_all_available_volumes(self, connection_properties=None):
        # TODO(lixiaoy1): not sure what to return here for sheepdog
        return []

    def _get_sheepdog_handle(self, connection_properties):
        """Build a SheepdogVolumeIOWrapper from connection properties.

        :param connection_properties: dict with 'hosts', 'ports' (lists)
                                      and 'name' (the VDI name)
        :raises: exception.BrickException when 'hosts' or 'ports' is empty
        """
        try:
            host = connection_properties['hosts'][0]
            name = connection_properties['name']
            port = connection_properties['ports'][0]
        except IndexError:
            # NOTE(review): only empty host/port lists are converted to
            # BrickException here; a missing dict key raises KeyError and
            # propagates unchanged -- confirm whether that is intended.
            msg = _("Connect volume failed, malformed connection properties")
            raise exception.BrickException(msg=msg)
        sheepdog_handle = linuxsheepdog.SheepdogVolumeIOWrapper(
            host, port, name)
        return sheepdog_handle

    def connect_volume(self, connection_properties):
        """Connect to a volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :returns: dict
        """
        sheepdog_handle = self._get_sheepdog_handle(connection_properties)
        return {'path': sheepdog_handle}

    def disconnect_volume(self, connection_properties, device_info):
        """Disconnect a volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        """
        if device_info:
            sheepdog_handle = device_info.get('path', None)
            # Reject handles that are not SheepdogVolumeIOWrapper objects.
            self.check_IO_handle_valid(sheepdog_handle,
                                       linuxsheepdog.SheepdogVolumeIOWrapper,
                                       'Sheepdog')
            if sheepdog_handle is not None:
                sheepdog_handle.close()

    def check_valid_device(self, path, run_as_root=True):
        """Verify an existing sheepdog handle is connected and valid."""
        sheepdog_handle = path
        if sheepdog_handle is None:
            return False
        # Probe with a small read, then restore the caller's position
        # whether or not the read succeeded.
        original_offset = sheepdog_handle.tell()
        try:
            sheepdog_handle.read(4096)
        except Exception as e:
            LOG.error(_LE("Failed to access sheepdog device "
                          "handle: %(error)s"),
                      {"error": e})
            return False
        finally:
            sheepdog_handle.seek(original_offset, 0)
        return True

    def extend_volume(self, connection_properties):
        # TODO(lixiaoy1): is this possible?
        raise NotImplementedError

View File

@ -0,0 +1,118 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic SheepDog Connection Utilities.
"""
import eventlet
import io
from oslo_concurrency import processutils
from oslo_log import log as logging
from os_brick import exception
from os_brick.i18n import _
LOG = logging.getLogger(__name__)
class SheepdogVolumeIOWrapper(io.RawIOBase):
    """File-like object backed by a Sheepdog VDI.

    Reads and writes are delegated to the ``dog vdi`` command line tool;
    the wrapper tracks the current stream offset itself and marks the
    instance invalid after any failed execution.
    """

    def __init__(self, addr, port, volume, snapshot_name=None):
        self._addr = addr
        self._port = port
        self._vdiname = volume
        self._snapshot_name = snapshot_name
        self._offset = 0
        # SheepdogVolumeIOWrapper instance becomes invalid
        # if a write error occurs.
        self._valid = True

    def _execute(self, cmd, data=None):
        """Run *cmd*, feeding *data* on stdin, and return its stdout.

        :raises: exception.VolumeDriverException on any execution failure;
                 the wrapper is also marked invalid in that case.
        """
        # Bind the original subprocess module *before* the try block so
        # the finally clause can never reference an unbound local if the
        # body raises early.
        _processutils_subprocess = processutils.subprocess
        try:
            # NOTE(yamada-h): processutils.execute causes busy waiting
            # under eventlet.
            # To avoid wasting CPU resources, it should not be used for
            # the command which takes long time to execute.
            # For workaround, we replace a subprocess module with
            # the original one while only executing a read/write command.
            processutils.subprocess = eventlet.patcher.original('subprocess')
            return processutils.execute(*cmd, process_input=data)[0]
        except (processutils.ProcessExecutionError, OSError):
            self._valid = False
            raise exception.VolumeDriverException(name=self._vdiname)
        finally:
            processutils.subprocess = _processutils_subprocess

    def read(self, length=None):
        """Read *length* bytes (or to the end of the VDI) from the offset."""
        if not self._valid:
            raise exception.VolumeDriverException(name=self._vdiname)
        cmd = ['dog', 'vdi', 'read', '-a', self._addr, '-p', self._port]
        if self._snapshot_name:
            cmd.extend(('-s', self._snapshot_name))
        cmd.extend((self._vdiname, self._offset))
        # NOTE(review): a length of 0 is treated like None (read all) by
        # this truthiness test -- confirm whether read(0) should be empty.
        if length:
            cmd.append(length)
        data = self._execute(cmd)
        self._offset += len(data)
        return data

    def write(self, data):
        """Write *data* at the current offset and return the byte count."""
        if not self._valid:
            raise exception.VolumeDriverException(name=self._vdiname)
        length = len(data)
        cmd = ('dog', 'vdi', 'write', '-a', self._addr, '-p', self._port,
               self._vdiname, self._offset, length)
        self._execute(cmd, data)
        self._offset += length
        return length

    def seek(self, offset, whence=0):
        """Move the stream position and return the new absolute offset.

        Returning the new offset matches the ``io.IOBase.seek`` contract
        (the previous implementation returned None).
        """
        if not self._valid:
            raise exception.VolumeDriverException(name=self._vdiname)
        if whence == 0:
            # SEEK_SET or 0 - start of the stream (the default);
            # offset should be zero or positive
            new_offset = offset
        elif whence == 1:
            # SEEK_CUR or 1 - current stream position; offset may be negative
            new_offset = self._offset + offset
        else:
            # SEEK_END or 2 - end of the stream; offset is usually negative
            # TODO(yamada-h): Support SEEK_END
            raise IOError(_("Invalid argument - whence=%s not supported.") %
                          whence)
        if new_offset < 0:
            raise IOError(_("Invalid argument - negative seek offset."))
        self._offset = new_offset
        return self._offset

    def tell(self):
        """Return the current stream offset."""
        return self._offset

    def flush(self):
        # Writes go straight through the dog CLI; nothing is buffered here.
        pass

    def fileno(self):
        """Sheepdog does not have support for fileno so we raise IOError.

        Raising IOError is recommended way to notify caller that interface is
        not supported - see http://docs.python.org/2/library/io.html#io.IOBase
        """
        raise IOError(_("fileno is not supported by SheepdogVolumeIOWrapper"))

View File

@ -54,7 +54,8 @@ class LVM(executor.Executor):
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
:param executor: Execute method to use, None uses
oslo_concurrency.processutils
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
@ -492,7 +493,7 @@ class LVM(executor.Executor):
"""
if not self.supports_thin_provisioning(self._root_helper):
if not LVM.supports_thin_provisioning(self._root_helper):
LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))

View File

@ -40,7 +40,6 @@ class RemoteFsClient(object):
'cifs': 'smbfs',
'glusterfs': 'glusterfs',
'vzstorage': 'vzstorage',
'scality': 'scality_sofs',
'quobyte': 'quobyte'
}
@ -230,3 +229,36 @@ class RemoteFsClient(object):
opt = '%s=%s' % (option, value) if value else option
opts.append(opt)
return ",".join(opts) if len(opts) > 1 else opts[0]
class ScalityRemoteFsClient(RemoteFsClient):
    """RemoteFsClient specialization for Scality SOFS mounts."""

    def __init__(self, mount_type, root_helper,
                 execute=None, *args, **kwargs):
        # NOTE(review): deliberately does not call RemoteFsClient.__init__;
        # it sets the few attributes the base-class methods rely on itself.
        self._mount_type = mount_type
        self._mount_base = kwargs.get(
            'scality_mount_point_base', "").rstrip('/')
        if not self._mount_base:
            raise exception.InvalidParameterValue(
                err=_('scality_mount_point_base required'))
        self.root_helper = root_helper
        self.set_execute(execute or putils.execute)
        self._mount_options = None

    def get_mount_point(self, device_name):
        # SOFS exposes each device under <mount_base>/<device_name>/00.
        return os.path.join(self._mount_base,
                            device_name,
                            "00")

    def mount(self, share, flags=None):
        """Mount the Scality ScaleOut FS.

        The `share` argument is ignored because you can't mount several
        SOFS at the same time on a single server. But we want to keep the
        same method signature for class inheritance purpose.
        """
        if self._mount_base in self._read_mounts():
            LOG.info(_LI('Already mounted: %s'), self._mount_base)
            return
        # Ensure the mount point exists before handing off to _do_mount.
        self._execute('mkdir', '-p', self._mount_base, check_exit_code=0)
        super(ScalityRemoteFsClient, self)._do_mount(
            'sofs', '/etc/sfused.conf', self._mount_base)

View File

@ -35,6 +35,7 @@ from os_brick.initiator import host_driver
from os_brick.initiator import linuxfc
from os_brick.initiator import linuxrbd
from os_brick.initiator import linuxscsi
from os_brick.initiator import linuxsheepdog
from os_brick.remotefs import remotefs
from os_brick.tests import base
@ -159,7 +160,7 @@ class ConnectorTestCase(base.TestCase):
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
obj = connector.InitiatorConnector.factory(
'scality', None, scality_sofs_mount_point_base='/mnt/test')
'scality', None, scality_mount_point_base='/mnt/test')
self.assertEqual(obj.__class__.__name__, "RemoteFsConnector")
obj = connector.InitiatorConnector.factory('local', None)
@ -538,11 +539,10 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
@mock.patch.object(connector.ISCSIConnector, '_connect_to_iscsi_portal')
@mock.patch.object(connector.ISCSIConnector, '_rescan_iscsi')
@mock.patch.object(connector.ISCSIConnector, '_rescan_multipath')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(connector.InitiatorConnector, '_discover_mpath_device')
def test_connect_volume_with_multipath(
self, mock_discover_mpath_device, exists_mock, get_device_mock,
self, mock_discover_mpath_device, exists_mock,
rescan_multipath_mock, rescan_iscsi_mock, connect_to_mock,
portals_mock, iscsiadm_mock, mock_iscsi_wwn):
mock_iscsi_wwn.return_value = FAKE_SCSI_WWN
@ -558,7 +558,6 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
connector.ISCSIConnector(None, use_multipath=True)
iscsiadm_mock.return_value = "%s %s" % (location, iqn)
portals_mock.return_value = [[location, iqn]]
get_device_mock.return_value = 'iqn.2010-10.org.openstack:%s' % name
result = self.connector_with_multipath.connect_volume(
connection_properties['data'])
@ -621,15 +620,13 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
return_value={})
@mock.patch.object(connector.ISCSIConnector, '_get_iscsi_devices')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_iqns')
@mock.patch.object(connector.InitiatorConnector, '_discover_mpath_device')
@mock.patch.object(linuxscsi.LinuxSCSI, 'process_lun_id')
def test_connect_volume_with_multiple_portals(
self, mock_process_lun_id, mock_discover_mpath_device,
mock_get_iqn, mock_device_name, mock_run_multipath,
mock_iscsi_devices, mock_get_device_map, mock_devices,
mock_exists, mock_scsi_wwn):
mock_get_iqn, mock_run_multipath, mock_iscsi_devices,
mock_get_device_map, mock_devices, mock_exists, mock_scsi_wwn):
mock_scsi_wwn.return_value = FAKE_SCSI_WWN
location1 = '10.0.2.15:3260'
location2 = '[2001:db8::1]:3260'
@ -679,14 +676,13 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
@mock.patch.object(connector.ISCSIConnector, '_get_iscsi_devices')
@mock.patch.object(connector.ISCSIConnector, '_rescan_multipath')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_iqns')
@mock.patch.object(connector.ISCSIConnector, '_run_iscsiadm')
@mock.patch.object(connector.InitiatorConnector, '_discover_mpath_device')
@mock.patch.object(linuxscsi.LinuxSCSI, 'process_lun_id')
def test_connect_volume_with_multiple_portals_primary_error(
self, mock_process_lun_id, mock_discover_mpath_device,
mock_iscsiadm, mock_get_iqn, mock_device_name, mock_run_multipath,
mock_iscsiadm, mock_get_iqn, mock_run_multipath,
mock_rescan_multipath, mock_iscsi_devices,
mock_get_multipath_device_map, mock_devices, mock_exists,
mock_scsi_wwn):
@ -714,7 +710,6 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
mock_exists.side_effect = lambda x: x != dev1
mock_devices.return_value = [dev2]
mock_iscsi_devices.return_value = [dev2]
mock_device_name.return_value = fake_multipath_dev
mock_get_iqn.return_value = [iqn2]
mock_iscsiadm.side_effect = fake_run_iscsiadm
@ -762,13 +757,11 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
@mock.patch.object(connector.ISCSIConnector, '_get_iscsi_devices')
@mock.patch.object(connector.ISCSIConnector, '_rescan_multipath')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name')
@mock.patch.object(connector.InitiatorConnector, '_discover_mpath_device')
def test_connect_volume_with_multipath_connecting(
self, mock_discover_mpath_device, mock_device_name,
mock_run_multipath, mock_rescan_multipath, mock_iscsi_devices,
mock_devices, mock_connect, mock_portals, mock_exists,
mock_scsi_wwn):
self, mock_discover_mpath_device, mock_run_multipath,
mock_rescan_multipath, mock_iscsi_devices, mock_devices,
mock_connect, mock_portals, mock_exists, mock_scsi_wwn):
mock_scsi_wwn.return_value = FAKE_SCSI_WWN
location1 = '10.0.2.15:3260'
location2 = '[2001:db8::1]:3260'
@ -784,7 +777,6 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
'/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (dev_loc2, iqn2)]
mock_devices.return_value = devs
mock_iscsi_devices.return_value = devs
mock_device_name.return_value = fake_multipath_dev
mock_portals.return_value = [[location1, iqn1], [location2, iqn1],
[location2, iqn2]]
mock_discover_mpath_device.return_value = (fake_multipath_dev,
@ -812,10 +804,9 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
@mock.patch.object(connector.ISCSIConnector, '_get_iscsi_devices')
@mock.patch.object(connector.ISCSIConnector, '_rescan_multipath')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name')
def test_connect_volume_multipath_failed_iscsi_login(
self, mock_device_name, mock_run_multipath,
mock_rescan_multipath, mock_iscsi_devices, mock_devices,
self, mock_run_multipath, mock_rescan_multipath,
mock_iscsi_devices, mock_devices,
mock_connect, mock_portals, mock_exists):
location1 = '10.0.2.15:3260'
location2 = '10.0.3.15:3260'
@ -823,14 +814,12 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
name2 = 'volume-00000001-2'
iqn1 = 'iqn.2010-10.org.openstack:%s' % name1
iqn2 = 'iqn.2010-10.org.openstack:%s' % name2
fake_multipath_dev = '/dev/mapper/fake-multipath-dev'
vol = {'id': 1, 'name': name1}
connection_properties = self.iscsi_connection(vol, location1, iqn1)
devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location1, iqn1),
'/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (location2, iqn2)]
mock_devices.return_value = devs
mock_iscsi_devices.return_value = devs
mock_device_name.return_value = fake_multipath_dev
mock_portals.return_value = [[location1, iqn1], [location2, iqn1],
[location2, iqn2]]
@ -875,33 +864,6 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
expected = [ip_iqn1, ip_iqn2]
self.assertEqual(expected, res)
@mock.patch.object(os.path, 'realpath')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
def test_get_multipath_device_name(self, multipath_mock, realpath_mock):
multipath_mock.return_value = ['mpath2 (20017380006c00036) '
'dm-7 IBM,2810XIV']
expected = '/dev/mapper/mpath2'
self.assertEqual(expected,
self.connector.
_get_multipath_device_name('/dev/md-1'))
@mock.patch.object(os.path, 'realpath')
@mock.patch.object(connector.ISCSIConnector, '_run_multipath')
def test_get_multipath_device_name_with_error(self, multipath_mock,
realpath_mock):
multipath_mock.return_value = [
"Mar 17 14:32:37 | sda: No fc_host device for 'host-1'\n"
"mpathb (36e00000000010001) dm-4 IET ,VIRTUAL-DISK\n"
"size=1.0G features='0' hwhandler='0' wp=rw\n"
"|-+- policy='service-time 0' prio=0 status=active\n"
"| `- 2:0:0:1 sda 8:0 active undef running\n"
"`-+- policy='service-time 0' prio=0 status=enabled\n"
" `- 3:0:0:1 sdb 8:16 active undef running\n"]
expected = '/dev/mapper/mpathb'
self.assertEqual(expected,
self.connector.
_get_multipath_device_name('/dev/sda'))
@mock.patch.object(os, 'walk')
def test_get_iscsi_devices(self, walk_mock):
paths = [('ip-10.0.0.1:3260-iscsi-iqn.2013-01.ro.'
@ -949,13 +911,11 @@ class ISCSIConnectorTestCase(ConnectorTestCase):
@mock.patch.object(host_driver.HostDriver, 'get_all_block_devices')
@mock.patch.object(connector.ISCSIConnector,
'_disconnect_from_iscsi_portal')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_device_name',
return_value='/dev/mapper/md-3')
@mock.patch.object(connector.ISCSIConnector, '_get_multipath_iqns')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_disconnect_volume_multipath_iscsi(
self, exists_mock, multipath_iqn_mock, get_multipath_name_mock,
disconnect_mock, get_all_devices_mock, get_iscsi_devices_mock,
self, exists_mock, multipath_iqn_mock, disconnect_mock,
get_all_devices_mock, get_iscsi_devices_mock,
rescan_multipath_mock, rescan_iscsi_mock, get_portals_mock,
get_multipath_device_map_mock):
iqn1 = 'iqn.2013-01.ro.com.netapp:node.netapp01'
@ -1142,6 +1102,13 @@ Setting up iSCSI targets: unused
new_size = self.connector.extend_volume(connection_info['data'])
self.assertEqual(fake_new_size, new_size)
@mock.patch.object(os.path, 'isdir')
def test_get_all_available_volumes_path_not_dir(self, mock_isdir):
mock_isdir.return_value = False
expected = []
actual = self.connector.get_all_available_volumes()
self.assertItemsEqual(expected, actual)
class FibreChannelConnectorTestCase(ConnectorTestCase):
def setUp(self):
@ -1493,6 +1460,13 @@ class FibreChannelConnectorTestCase(ConnectorTestCase):
new_size = self.connector.extend_volume(connection_info['data'])
self.assertEqual(fake_new_size, new_size)
@mock.patch.object(os.path, 'isdir')
def test_get_all_available_volumes_path_not_dir(self, mock_isdir):
mock_isdir.return_value = False
expected = []
actual = self.connector.get_all_available_volumes()
self.assertItemsEqual(expected, actual)
class FibreChannelConnectorS390XTestCase(ConnectorTestCase):
@ -1665,6 +1639,11 @@ class RemoteFsConnectorTestCase(ConnectorTestCase):
nfs_mount_point_base=self.TEST_BASE,
nfs_mount_options='vers=3')
@mock.patch('os_brick.remotefs.remotefs.ScalityRemoteFsClient')
def test_init_with_scality(self, mock_scality_remotefs_client):
connector.RemoteFsConnector('scality', root_helper='sudo')
self.assertEqual(1, mock_scality_remotefs_client.call_count)
def test_get_search_path(self):
expected = self.TEST_BASE
actual = self.connector.get_search_path()
@ -2474,12 +2453,14 @@ class ScaleIOConnectorTestCase(ConnectorTestCase):
self.assertRaises(exception.BrickException, self.test_connect_volume)
def test_error_path_not_found(self):
@mock.patch('time.sleep')
def test_error_path_not_found(self, sleep_mock):
"""Timeout waiting for volume to map to local file system"""
mock.patch.object(
os, 'listdir', return_value=["emc-vol-no-volume"]
).start()
self.assertRaises(exception.BrickException, self.test_connect_volume)
self.assertTrue(sleep_mock.called)
def test_map_volume_already_mapped(self):
"""Ignore REST API failure for volume already mapped"""
@ -2642,3 +2623,63 @@ class DISCOConnectorTestCase(ConnectorTestCase):
self.assertRaises(NotImplementedError,
self.connector.extend_volume,
self.fake_connection_properties)
class SheepdogConnectorTestCase(ConnectorTestCase):
    """Tests for connector.SheepdogConnector."""

    def setUp(self):
        super(SheepdogConnectorTestCase, self).setUp()
        self.hosts = ['fake_hosts']
        self.ports = ['fake_ports']
        self.volume = 'fake_volume'
        self.connection_properties = {
            'hosts': self.hosts,
            'name': self.volume,
            'ports': self.ports,
        }

    def test_get_search_path(self):
        sheepdog = connector.SheepdogConnector(None)
        path = sheepdog.get_search_path()
        self.assertIsNone(path)

    def test_get_volume_paths(self):
        sheepdog = connector.SheepdogConnector(None)
        expected = []
        actual = sheepdog.get_volume_paths(self.connection_properties)
        self.assertEqual(expected, actual)

    def test_connect_volume(self):
        """Test the connect volume case."""
        sheepdog = connector.SheepdogConnector(None)
        device_info = sheepdog.connect_volume(self.connection_properties)
        # Ensure expected object is returned correctly.
        # assertIsInstance gives a useful failure message, unlike
        # assertTrue(isinstance(...)).
        self.assertIsInstance(device_info['path'],
                              linuxsheepdog.SheepdogVolumeIOWrapper)

    @mock.patch.object(linuxsheepdog.SheepdogVolumeIOWrapper, 'close')
    def test_disconnect_volume(self, volume_close):
        """Test the disconnect volume case."""
        sheepdog = connector.SheepdogConnector(None)
        device_info = sheepdog.connect_volume(self.connection_properties)
        sheepdog.disconnect_volume(self.connection_properties, device_info)
        self.assertEqual(1, volume_close.call_count)

    def test_disconnect_volume_with_invalid_handle(self):
        """Test the disconnect volume case with invalid handle."""
        sheepdog = connector.SheepdogConnector(None)
        device_info = {'path': 'fake_handle'}
        self.assertRaises(exception.InvalidIOHandleObject,
                          sheepdog.disconnect_volume,
                          self.connection_properties,
                          device_info)

    def test_extend_volume(self):
        sheepdog = connector.SheepdogConnector(None)
        self.assertRaises(NotImplementedError,
                          sheepdog.extend_volume,
                          self.connection_properties)

View File

@ -73,7 +73,8 @@ class LinuxSCSITestCase(base.TestCase):
('tee -a /sys/block/sdc/device/delete')]
self.assertEqual(expected_commands, self.cmds)
def test_wait_for_volume_removal(self):
@mock.patch('time.sleep')
def test_wait_for_volume_removal(self, sleep_mock):
fake_path = '/dev/disk/by-path/fake-iscsi-iqn-lun-0'
exists_mock = mock.Mock()
exists_mock.return_value = True
@ -88,6 +89,7 @@ class LinuxSCSITestCase(base.TestCase):
self.linuxscsi.wait_for_volume_removal(fake_path)
expected_commands = []
self.assertEqual(expected_commands, self.cmds)
self.assertTrue(sleep_mock.called)
def test_flush_multipath_device(self):
self.linuxscsi.flush_multipath_device('/dev/dm-9')
@ -117,8 +119,9 @@ class LinuxSCSITestCase(base.TestCase):
expected_path = '/dev/disk/by-id/dm-uuid-mpath-%s' % fake_wwn
self.assertEqual(expected_path, found_path)
@mock.patch('time.sleep')
@mock.patch.object(os.path, 'exists')
def test_find_multipath_device_path_mapper(self, exists_mock):
def test_find_multipath_device_path_mapper(self, exists_mock, sleep_mock):
# the wait loop tries 3 times before it gives up
# we want to test failing to find the
# /dev/disk/by-id/dm-uuid-mpath-<WWN> path
@ -129,6 +132,7 @@ class LinuxSCSITestCase(base.TestCase):
found_path = self.linuxscsi.find_multipath_device_path(fake_wwn)
expected_path = '/dev/mapper/%s' % fake_wwn
self.assertEqual(expected_path, found_path)
self.assertTrue(sleep_mock.called)
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(time, 'sleep')

View File

@ -0,0 +1,121 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick import exception
from os_brick.initiator import linuxsheepdog
from os_brick.tests import base
from oslo_concurrency import processutils
SHEEP_ADDR = '127.0.0.1'
SHEEP_PORT = 7000
class SheepdogVolumeIOWrapperTestCase(base.TestCase):
    """Unit tests for linuxsheepdog.SheepdogVolumeIOWrapper.

    setUp replaces processutils.execute with a MagicMock, so no 'dog'
    process is ever spawned; the tests assert on the exact command line
    each wrapper operation builds.
    """

    def setUp(self):
        super(SheepdogVolumeIOWrapperTestCase, self).setUp()
        self.volume = 'volume-2f9b2ff5-987b-4412-a91c-23caaf0d5aff'
        self.snapshot_name = 'snapshot-bf452d80-068a-43d7-ba9f-196cf47bd0be'
        # A plain VDI wrapper and a snapshot-bound one for the read tests.
        self.vdi_wrapper = linuxsheepdog.SheepdogVolumeIOWrapper(
            SHEEP_ADDR, SHEEP_PORT, self.volume)
        self.snapshot_wrapper = linuxsheepdog.SheepdogVolumeIOWrapper(
            SHEEP_ADDR, SHEEP_PORT, self.volume, self.snapshot_name)
        self.execute = mock.MagicMock()
        # mock_object undoes the patch automatically at test teardown.
        self.mock_object(processutils, 'execute', self.execute)

    def test_init(self):
        # Constructor stores the VDI name, snapshot name and zero offset.
        self.assertEqual(self.volume, self.vdi_wrapper._vdiname)
        self.assertIsNone(self.vdi_wrapper._snapshot_name)
        self.assertEqual(0, self.vdi_wrapper._offset)
        self.assertEqual(self.snapshot_name,
                         self.snapshot_wrapper._snapshot_name)

    def test_execute(self):
        # _execute forwards the command and stdin data to processutils.
        cmd = ('cmd1', 'arg1')
        data = 'data1'
        self.vdi_wrapper._execute(cmd, data)
        self.execute.assert_called_once_with(*cmd, process_input=data)

    def test_execute_error(self):
        # An OSError from processutils becomes VolumeDriverException.
        cmd = ('cmd1', 'arg1')
        data = 'data1'
        # Re-patch execute so this call raises instead of succeeding.
        self.mock_object(processutils, 'execute',
                         mock.MagicMock(side_effect=OSError))
        args = (cmd, data)
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper._execute,
                          *args)

    def test_read_vdi(self):
        self.vdi_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            self.volume, 0, process_input=None)

    def test_read_vdi_invalid(self):
        # Once marked invalid, every read must fail fast.
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.read)

    def test_write_vdi(self):
        data = 'data1'
        self.vdi_wrapper.write(data)
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'write', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            self.volume, 0, len(data),
            process_input=data)
        # A successful write advances the stream offset.
        self.assertEqual(len(data), self.vdi_wrapper.tell())

    def test_write_vdi_invalid(self):
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.write, 'dummy_data')

    def test_read_snapshot(self):
        # Snapshot reads add '-s <snapshot>' to the dog command line.
        self.snapshot_wrapper.read()
        self.execute.assert_called_once_with(
            'dog', 'vdi', 'read', '-a', SHEEP_ADDR, '-p', SHEEP_PORT,
            '-s', self.snapshot_name, self.volume, 0,
            process_input=None)

    def test_seek(self):
        self.vdi_wrapper.seek(12345)
        self.assertEqual(12345, self.vdi_wrapper.tell())
        self.vdi_wrapper.seek(-2345, whence=1)
        self.assertEqual(10000, self.vdi_wrapper.tell())
        # This results in negative offset.
        self.assertRaises(IOError, self.vdi_wrapper.seek, -20000, whence=1)

    def test_seek_invalid(self):
        seek_num = 12345
        self.vdi_wrapper._valid = False
        self.assertRaises(exception.VolumeDriverException,
                          self.vdi_wrapper.seek, seek_num)

    def test_flush(self):
        # flush does nothing.
        self.vdi_wrapper.flush()
        self.assertFalse(self.execute.called)

    def test_fileno(self):
        self.assertRaises(IOError, self.vdi_wrapper.fileno)

View File

View File

@ -174,9 +174,6 @@ class RemoteFsClientTestCase(base.TestCase):
def test_no_mount_point_vzstorage(self):
self._test_no_mount_point('vzstorage')
def test_no_mount_point_scality(self):
self._test_no_mount_point('scality')
def test_no_mount_point_quobyte(self):
self._test_no_mount_point('quobyte')
@ -193,10 +190,37 @@ class RemoteFsClientTestCase(base.TestCase):
# starts with "smbfs_"
self.assertEqual('/fake', client._mount_base)
def test_init_nfs_calls_check_nfs_options(self):
to_patch = remotefs.RemoteFsClient._check_nfs_options
with mock.patch.object(to_patch) as mock_check_nfs_options:
remotefs.RemoteFsClient("nfs", root_helper='true',
nfs_mount_point_base='/fake')
@mock.patch('os_brick.remotefs.remotefs.RemoteFsClient._check_nfs_options')
def test_init_nfs_calls_check_nfs_options(self, mock_check_nfs_options):
remotefs.RemoteFsClient("nfs", root_helper='true',
nfs_mount_point_base='/fake')
mock_check_nfs_options.assert_called_once_with()
class ScalityRemoteFsClientTestCase(base.TestCase):
    """Tests for the Scality SOFS remote-filesystem client."""

    def setUp(self):
        super(ScalityRemoteFsClientTestCase, self).setUp()

    def test_no_mount_point_scality(self):
        # Omitting scality_mount_point_base must be rejected at init time.
        self.assertRaises(exception.InvalidParameterValue,
                          remotefs.ScalityRemoteFsClient,
                          'scality', root_helper='true')

    def test_get_mount_point(self):
        client = remotefs.ScalityRemoteFsClient(
            'scality', root_helper='true', scality_mount_point_base='/fake')
        # Scality layout appends the share name plus a fixed '00' leaf.
        self.assertEqual('/fake/path/00', client.get_mount_point('path'))

    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('os_brick.remotefs.remotefs.RemoteFsClient._do_mount')
    def test_mount(self, mock_do_mount, mock_execute):
        client = remotefs.ScalityRemoteFsClient(
            'scality', root_helper='true', scality_mount_point_base='/fake')
        with mock.patch.object(client, '_read_mounts', return_value={}):
            client.mount('fake')
        mock_execute.assert_called_once_with(
            'mkdir', '-p', '/fake', check_exit_code=0)
        mock_do_mount.assert_called_once_with(
            'sofs', '/etc/sfused.conf', '/fake')

38
pylintrc Normal file
View File

@ -0,0 +1,38 @@
# The format of this file isn't really documented; just use --generate-rcfile
[Messages Control]
# NOTE(justinsb): We might want to have a 2nd strict pylintrc in future
# C0111: Don't require docstrings on every method
# W0511: TODOs in code comments are fine.
# W0142: *args and **kwargs are fine.
# W0622: Redefining id is fine.
disable=C0111,W0511,W0142,W0622
[Basic]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[Design]
max-public-methods=100
min-public-methods=0
max-args=6
[Variables]
dummy-variables-rgx=_
[Typecheck]
# Disable warnings on the HTTPSConnection classes because pylint doesn't
# support importing from six.moves yet, see:
# https://bitbucket.org/logilab/pylint/issue/550/
ignored-classes=HTTPSConnection

View File

@ -1,5 +1,9 @@
=============
Release Notes
=============
========================
os-brick Release Notes
========================
.. release-notes::
.. toctree::
:maxdepth: 1
unreleased
mitaka

View File

@ -0,0 +1,6 @@
===================================
Mitaka Series Release Notes
===================================
.. release-notes::
:branch: origin/stable/mitaka

View File

@ -0,0 +1,5 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

207
tools/lintstack.py Executable file
View File

@ -0,0 +1,207 @@
#!/usr/bin/env python
# Copyright (c) 2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from pylint.reporters import text
from six.moves import cStringIO as StringIO
ignore_codes = [
# Note(maoy): E1103 is error code related to partial type inference
"E1103"
]
ignore_messages = [
# Note(fengqian): this message is the pattern of [E0611].
# It should be ignored because use six module to keep py3.X compatibility.
"No name 'urllib' in module '_MovedItems'",
# Note(xyang): these error messages are for the code [E1101].
# They should be ignored because 'sha256' and 'sha224' are functions in
# 'hashlib'.
"Module 'hashlib' has no 'sha256' member",
"Module 'hashlib' has no 'sha224' member",
]
ignore_modules = ["os_brick/tests/",
"tools/lintstack.head.py"]
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
    """One parsed diagnostic line of pylint's parseable-text output.

    The most recently read source file is cached on the class so that
    consecutive messages for the same file do not reopen it.
    """

    _cached_filename = None
    _cached_content = None

    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput

    @classmethod
    def from_line(cls, line):
        """Parse one pylint output line of the form "path:N: [CODE] msg".

        Returns a LintOutput, or None when the line is not a diagnostic
        (pylint also emits report headers and separators, which previously
        caused an AttributeError on ``m.groups()``).
        """
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        if m is None:
            return None
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
            cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())

    @classmethod
    def from_msg_to_dict(cls, msg):
        """From the output of pylint msg, to a dict, where each key
        is a unique error identifier, value is a list of LintOutput
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            # Skip non-diagnostic lines and globally ignored messages.
            if obj is None or obj.is_ignored():
                continue
            result.setdefault(obj.key(), []).append(obj)
        return result

    def is_ignored(self):
        """True when this message matches an ignored code, message or module."""
        if self.code in ignore_codes:
            return True
        # NOTE: ignore_messages was defined at module level but never
        # consulted here; honor it, as the upstream lintstack tools do.
        if any(self.message.startswith(ignore_message)
               for ignore_message in ignore_messages):
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        return False

    def key(self):
        """Return the (message, source) tuple identifying this error."""
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
            return self.message, ""
        return self.message, self.line_content.strip()

    def json(self):
        """Serialize all attributes of this diagnostic as a JSON object."""
        return json.dumps(self.__dict__)

    def review_str(self):
        """Render a human-readable, multi-line review message."""
        return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
                "%(code)s: %(message)s" % self.__dict__)  # noqa:H501
class ErrorKeys(object):
    """Serialize and deserialize the set of known pylint error keys."""

    @classmethod
    def print_json(cls, errors, output=sys.stdout):
        """Write one JSON-encoded key per line, preceded by a '#' header."""
        print("# automatically generated by tools/lintstack.py", file=output)
        for i in sorted(errors.keys()):
            print(json.dumps(i), file=output)

    @classmethod
    def from_file(cls, filename):
        """Load keys written by print_json back into a set of tuples.

        Uses a context manager so the file handle is closed even on error
        (the previous version iterated a bare ``open()`` and leaked it).
        """
        keys = set()
        with open(filename) as f:
            for line in f:
                if line and line[0] != "#":
                    d = json.loads(line)
                    keys.add(tuple(d))
        return keys
def run_pylint():
    """Run pylint -E over the os_brick tree and return its parseable output."""
    output_buffer = StringIO()
    reporter = text.ParseableTextReporter(output=output_buffer)
    lint.Run(["--include-ids=y", "-E", "os_brick"],
             reporter=reporter, exit=False)
    result = output_buffer.getvalue()
    output_buffer.close()
    return result
def generate_error_keys(msg=None):
    """Record the current pylint errors as the known-exceptions baseline.

    When *msg* is None, pylint is run first to produce the message text.
    """
    print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
    if msg is None:
        msg = run_pylint()
    # Parse before opening so a parse failure leaves the old file intact.
    errors = LintOutput.from_msg_to_dict(msg)
    with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f:
        ErrorKeys.print_json(errors, output=f)
def validate(newmsg=None):
    """Compare current pylint output against the recorded exceptions.

    Prints every error that is not part of the recorded baseline and
    exits the process with status 1 when any such error exists.
    """
    print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
    known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
    if newmsg is None:
        print("Running pylint. Be patient...")
        newmsg = run_pylint()
    errors = LintOutput.from_msg_to_dict(newmsg)
    print("Unique errors reported by pylint: was %d, now %d."
          % (len(known), len(errors)))

    # Report every error whose key is absent from the baseline.
    passed = True
    for err_key, err_list in errors.items():
        if err_key in known:
            continue
        for err in err_list:
            print(err.lintoutput)
            print()
        passed = False

    if not passed:
        print("Please fix the errors above. If you believe they are false "
              "positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)

    print("Congrats! pylint check passed.")
    redundant = known - set(errors.keys())
    if redundant:
        print("Extra credit: some known pylint exceptions disappeared.")
        for i in sorted(redundant):
            print(json.dumps(i))
        print("Consider regenerating the exception file if you will.")
def usage():
print("""Usage: tools/lintstack.py [generate|validate]
To generate pylint_exceptions file: tools/lintstack.py generate
To validate the current commit: tools/lintstack.py
""")
def main():
    """Dispatch on the first CLI argument: 'generate', 'validate', or help."""
    option = sys.argv[1] if len(sys.argv) > 1 else "validate"
    actions = {
        "generate": generate_error_keys,
        "validate": validate,
    }
    # Unknown options fall through to the usage message.
    actions.get(option, usage)()


if __name__ == "__main__":
    main()

59
tools/lintstack.sh Executable file
View File

@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Copyright (c) 2012-2013, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Use lintstack.py to compare pylint errors.
# We run pylint twice, once on HEAD, once on the code before the latest
# commit for review.
#
# All variable expansions are quoted so the script works from a checkout
# path containing spaces; $(...) replaces legacy backticks.
set -e

TOOLS_DIR=$(cd "$(dirname "$0")" && pwd)

# Get the current branch name.
GITHEAD=$(git rev-parse --abbrev-ref HEAD)
if [[ "$GITHEAD" == "HEAD" ]]; then
    # In detached head mode, get revision number instead
    GITHEAD=$(git rev-parse HEAD)
    echo "Currently we are at commit $GITHEAD"
else
    echo "Currently we are at branch $GITHEAD"
fi

# Snapshot the checker itself so checking out older commits cannot change it.
cp -f "$TOOLS_DIR/lintstack.py" "$TOOLS_DIR/lintstack.head.py"

if git rev-parse HEAD^2 2>/dev/null; then
    # The HEAD is a Merge commit. Here, the patch to review is
    # HEAD^2, the master branch is at HEAD^1, and the patch was
    # written based on HEAD^2~1.
    PREV_COMMIT=$(git rev-parse HEAD^2~1)
    git checkout HEAD~1
    # The git merge is necessary for reviews with a series of patches.
    # If not, this is a no-op so won't hurt either.
    git merge "$PREV_COMMIT"
else
    # The HEAD is not a merge commit. This won't happen on gerrit.
    # Most likely you are running against your own patch locally.
    # We assume the patch to examine is HEAD, and we compare it against
    # HEAD~1
    git checkout HEAD~1
fi

# First generate tools/pylint_exceptions from HEAD~1
"$TOOLS_DIR/lintstack.head.py" generate
# Then use that as a reference to compare against HEAD
git checkout "$GITHEAD"
"$TOOLS_DIR/lintstack.head.py"
echo "Check passed. FYI: the pylint exceptions are:"
cat "$TOOLS_DIR/pylint_exceptions"

View File

@ -24,6 +24,11 @@ passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
[testenv:pep8]
commands = flake8
[testenv:pylint]
deps = -r{toxinidir}/requirements.txt
pylint==0.26.0
commands = bash tools/lintstack.sh
[testenv:venv]
commands = {posargs}
@ -63,7 +68,7 @@ commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasen
show-source = True
ignore = E123,E125,E251,E265,H302,H402,H405,H803,H904
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
max-complexity=30
[hacking]