Fix multi-line docstrings to meet hacking rules

According to hacking rule H405 (derived from PEP 257), in a multi-line
docstring the summary line should be separated from the rest of the
docstring by an empty line.

Change-Id: I5cd8a9064dcefc504e85946ecdf1f56f10145d35
Closes-bug: #1407162
This commit is contained in:
Cindy Pallares 2015-05-21 10:54:34 -05:00
parent 70b6ee3ab3
commit cafb5d449f
80 changed files with 465 additions and 356 deletions

View File

@ -387,8 +387,7 @@ class VolumeActionsController(wsgi.Controller):
class Volume_actions(extensions.ExtensionDescriptor): class Volume_actions(extensions.ExtensionDescriptor):
"""Enable volume actions """Enable volume actions."""
"""
name = "VolumeActions" name = "VolumeActions"
alias = "os-volume-actions" alias = "os-volume-actions"

View File

@ -134,7 +134,9 @@ class Request(webob.Request):
return resources.get(resource_id) return resources.get(resource_id)
def cache_db_items(self, key, items, item_key='id'): def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be """Get cached database items.
Allow API methods to store objects from a DB query to be
used by API extensions within the same API request. used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a An instance of this class only lives for the lifetime of a
@ -144,7 +146,9 @@ class Request(webob.Request):
self.cache_resource(items, item_key, key) self.cache_resource(items, item_key, key)
def get_db_items(self, key): def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within """Get database items.
Allow an API extension to get previously stored objects within
the same API request. the same API request.
Note that the object data will be slightly stale. Note that the object data will be slightly stale.
@ -152,7 +156,9 @@ class Request(webob.Request):
return self.cached_resource(key) return self.cached_resource(key)
def get_db_item(self, key, item_key): def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object """Get database item.
Allow an API extension to get a previously stored object
within the same API request. within the same API request.
Note that the object data will be slightly stale. Note that the object data will be slightly stale.
@ -238,7 +244,6 @@ class Request(webob.Request):
"""Determine content type of the request body. """Determine content type of the request body.
Does not do any body introspection, only checks header Does not do any body introspection, only checks header
""" """
if "Content-Type" not in self.headers: if "Content-Type" not in self.headers:
return None return None
@ -325,7 +330,6 @@ class XMLDeserializer(TextDeserializer):
:param listnames: list of XML node names whose subnodes should :param listnames: list of XML node names whose subnodes should
be considered list items. be considered list items.
""" """
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue return node.childNodes[0].nodeValue
@ -768,7 +772,6 @@ class Resource(wsgi.Application):
Exceptions derived from webob.exc.HTTPException will be automatically Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses. wrapped in Fault() to provide API friendly error responses.
""" """
def __init__(self, controller, action_peek=None, **deserializers): def __init__(self, controller, action_peek=None, **deserializers):

View File

@ -36,6 +36,7 @@ LOG = logging.getLogger(__name__)
def unquote_header_value(value): def unquote_header_value(value):
"""Unquotes a header value. """Unquotes a header value.
This does not use the real unquoting but what browsers are actually This does not use the real unquoting but what browsers are actually
using for quoting. using for quoting.
@ -75,7 +76,9 @@ def parse_list_header(value):
def parse_options_header(value): def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content """Parse 'Content-Type'-like header into a tuple.
Parse a ``Content-Type`` like header into a tuple with the content
type and the options: type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html') >>> parse_options_header('Content-Type: text/html; mimetype=text/html')

View File

@ -217,12 +217,11 @@ class RateLimitingMiddleware(base_wsgi.Middleware):
""" """
def __init__(self, application, limits=None, limiter=None, **kwargs): def __init__(self, application, limits=None, limiter=None, **kwargs):
"""Initialize new `RateLimitingMiddleware`, which wraps the given WSGI """Initialize class, wrap WSGI app, and set up given limits.
application and sets up the given limits.
@param application: WSGI application to wrap :param application: WSGI application to wrap
@param limits: String describing limits :param limits: String describing limits
@param limiter: String identifying class for representing limits :param limiter: String identifying class for representing limits
Other parameters are passed to the constructor for the limiter. Other parameters are passed to the constructor for the limiter.
""" """

View File

@ -138,18 +138,22 @@ class ChunkedBackupDriver(driver.BackupDriver):
@abc.abstractmethod @abc.abstractmethod
def update_container_name(self, backup, container): def update_container_name(self, backup, container):
"""This method exists so that sub-classes can override the container name """Allow sub-classes to override container name.
as it comes in to the driver in the backup object. Implementations
should return None if no change to the container name is desired. This method exists so that sub-classes can override the container name
as it comes in to the driver in the backup object. Implementations
should return None if no change to the container name is desired.
""" """
return return
@abc.abstractmethod @abc.abstractmethod
def get_extra_metadata(self, backup, volume): def get_extra_metadata(self, backup, volume):
"""This method allows for collection of extra metadata in prepare_backup() """Return extra metadata to use in prepare_backup.
which will be passed to get_object_reader() and get_object_writer().
Subclass extensions can use this extra information to optimize This method allows for collection of extra metadata in prepare_backup()
data transfers. Return a json serializable object. which will be passed to get_object_reader() and get_object_writer().
Subclass extensions can use this extra information to optimize
data transfers. Return a json serializable object.
""" """
return return

View File

@ -251,14 +251,18 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
return swift_object_names return swift_object_names
def get_object_writer(self, container, object_name, extra_metadata=None): def get_object_writer(self, container, object_name, extra_metadata=None):
"""Returns a writer object that stores a chunk of volume data in a """Return a writer object.
Swift object store.
Returns a writer object that stores a chunk of volume data in a
Swift object store.
""" """
return self.SwiftObjectWriter(container, object_name, self.conn) return self.SwiftObjectWriter(container, object_name, self.conn)
def get_object_reader(self, container, object_name, extra_metadata=None): def get_object_reader(self, container, object_name, extra_metadata=None):
"""Returns a reader object that retrieves a chunk of backed-up volume data """Return a reader object.
from a Swift object store.
Returns a reader object that retrieves a chunk of backed-up volume data
from a Swift object store.
""" """
return self.SwiftObjectReader(container, object_name, self.conn) return self.SwiftObjectReader(container, object_name, self.conn)

View File

@ -190,9 +190,7 @@ class BackupManager(manager.SchedulerDependentManager):
backup.save() backup.save()
def init_host(self): def init_host(self):
"""Do any initialization that needs to be run if this is a """Run initialization needed for a standalone service."""
standalone service.
"""
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
for mgr in self.volume_managers.values(): for mgr in self.volume_managers.values():

View File

@ -97,7 +97,8 @@ def args(*args, **kwargs):
def param2id(object_id): def param2id(object_id):
"""Helper function to convert various id types to internal id. """Helper function to convert various id types to internal id.
args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
:param object_id: e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
""" """
if uuidutils.is_uuid_like(object_id): if uuidutils.is_uuid_like(object_id):
return object_id return object_id
@ -180,9 +181,7 @@ class ShellCommands(object):
@args('--path', required=True, help='Script path') @args('--path', required=True, help='Script path')
def script(self, path): def script(self, path):
"""Runs the script from the specified path with flags set properly. """Runs the script from the specified path with flags set properly."""
arguments: path
"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals()) exec(compile(open(path).read(), path, 'exec'), locals(), globals())
@ -200,7 +199,9 @@ class HostCommands(object):
@args('zone', nargs='?', default=None, @args('zone', nargs='?', default=None,
help='Availability Zone (default: %(default)s)') help='Availability Zone (default: %(default)s)')
def list(self, zone=None): def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone. """Show a list of all physical hosts.
Can be filtered by zone.
args: [zone] args: [zone]
""" """
print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'}) print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'})
@ -281,9 +282,7 @@ class VolumeCommands(object):
@args('volume_id', @args('volume_id',
help='Volume ID to be deleted') help='Volume ID to be deleted')
def delete(self, volume_id): def delete(self, volume_id):
"""Delete a volume, bypassing the check that it """Delete a volume, bypassing the check that it must be available."""
must be available.
"""
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
volume = db.volume_get(ctxt, param2id(volume_id)) volume = db.volume_get(ctxt, param2id(volume_id))
host = vutils.extract_host(volume['host']) if volume['host'] else None host = vutils.extract_host(volume['host']) if volume['host'] else None
@ -399,7 +398,9 @@ class BackupCommands(object):
"""Methods for managing backups.""" """Methods for managing backups."""
def list(self): def list(self):
"""List all backups (including ones in progress) and the host """List all backups.
List all backups (including ones in progress) and the host
on which the backup operation is running. on which the backup operation is running.
""" """
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
@ -470,8 +471,10 @@ CATEGORIES = {
def methods_of(obj): def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore """Return non-private methods from an object.
returns a list of tuples of the form (method_name, method)
Get all callable methods of an object that don't start with underscore
:return: a list of tuples of the form (method_name, method)
""" """
result = [] result = []
for i in dir(obj): for i in dir(obj):

View File

@ -528,8 +528,10 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
def volume_type_extra_specs_update_or_create(context, def volume_type_extra_specs_update_or_create(context,
volume_type_id, volume_type_id,
extra_specs): extra_specs):
"""Create or update volume type extra specs. This adds or modifies the """Create or update volume type extra specs.
key/value pairs specified in the extra specs dict argument
This adds or modifies the key/value pairs specified in the extra specs dict
argument.
""" """
return IMPL.volume_type_extra_specs_update_or_create(context, return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id, volume_type_id,
@ -694,7 +696,9 @@ def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
def volume_glance_metadata_copy_from_volume_to_volume(context, def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id, src_volume_id,
volume_id): volume_id):
"""Update the Glance metadata for a volume by copying all of the key:value """Update the Glance metadata for a volume.
Update the Glance metadata for a volume by copying all of the key:value
pairs from the originating volume. pairs from the originating volume.
This is so that a volume created from the volume (clone) will retain the This is so that a volume created from the volume (clone) will retain the

View File

@ -3234,7 +3234,9 @@ def volume_glance_metadata_copy_from_volume_to_volume(context,
@require_context @require_context
@require_volume_exists @require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot) by """Update Glance metadata from a volume.
Update the Glance metadata from a volume (created from a snapshot) by
copying all of the key:value pairs from the originating snapshot. copying all of the key:value pairs from the originating snapshot.
This is so that the Glance metadata from the original volume is retained. This is so that the Glance metadata from the original volume is retained.

View File

@ -45,7 +45,9 @@ def upgrade(migrate_engine):
def downgrade(migrate_engine): def downgrade(migrate_engine):
"""Don't delete the 'default' entries at downgrade time. """Downgrade.
Don't delete the 'default' entries at downgrade time.
We don't know if the user had default entries when we started. We don't know if the user had default entries when we started.
If they did, we wouldn't want to remove them. So, the safest If they did, we wouldn't want to remove them. So, the safest
thing to do is just leave the 'default' entries at downgrade time. thing to do is just leave the 'default' entries at downgrade time.

View File

@ -245,9 +245,11 @@ class GlanceImageService(object):
return base_image_meta return base_image_meta
def get_location(self, context, image_id): def get_location(self, context, image_id):
"""Returns a tuple of the direct url and locations representing the """Get backend storage location url.
backend storage location, or (None, None) if these attributes are not
shown by Glance. Returns a tuple containing the direct url and locations representing
the backend storage location, or (None, None) if these attributes are
not shown by Glance.
""" """
if CONF.glance_api_version == 1: if CONF.glance_api_version == 1:
# image location not available in v1 # image location not available in v1

View File

@ -21,8 +21,7 @@ from cinder.keymgr import key_mgr
class NotImplementedKeyManager(key_mgr.KeyManager): class NotImplementedKeyManager(key_mgr.KeyManager):
"""Key Manager Interface that raises NotImplementedError for all operations """Key Manager interface that raises NotImplementedError."""
"""
def create_key(self, ctxt, algorithm='AES', length=256, expiration=None, def create_key(self, ctxt, algorithm='AES', length=256, expiration=None,
**kwargs): **kwargs):

View File

@ -103,6 +103,7 @@ class CinderObjectDictCompat(base.VersionedObjectDictCompat):
class CinderPersistentObject(object): class CinderPersistentObject(object):
"""Mixin class for Persistent objects. """Mixin class for Persistent objects.
This adds the fields that we use in common for all persistent objects. This adds the fields that we use in common for all persistent objects.
""" """
fields = { fields = {

View File

@ -170,7 +170,9 @@ class DbQuotaDriver(object):
def get_project_quotas(self, context, resources, project_id, def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, quota_class=None, defaults=True,
usages=True, parent_project_id=None): usages=True, parent_project_id=None):
"""Given a list of resources, retrieve the quotas for the given """Retrieve quotas for a project.
Given a list of resources, retrieve the quotas for the given
project. project.
:param context: The request context, for access checks. :param context: The request context, for access checks.

View File

@ -14,8 +14,8 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """The FilterScheduler is for creating volumes.
The FilterScheduler is for creating volumes.
You can customize this scheduler by specifying your own volume Filters and You can customize this scheduler by specifying your own volume Filters and
Weighing Functions. Weighing Functions.
""" """
@ -42,9 +42,7 @@ class FilterScheduler(driver.Scheduler):
self.max_attempts = self._max_attempts() self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs): def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one """Schedule contract that returns best-suited host for this request."""
best-suited host for this request.
"""
self._schedule(context, topic, *args, **kwargs) self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self): def _get_configuration_options(self):
@ -52,8 +50,9 @@ class FilterScheduler(driver.Scheduler):
return self.options.get_configuration() return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties): def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties. Can be overridden in a """Stuff things into filter_properties.
subclass to add more data.
Can be overridden in a subclass to add more data.
""" """
vol = request_spec['volume_properties'] vol = request_spec['volume_properties']
filter_properties['size'] = vol['size'] filter_properties['size'] = vol['size']
@ -172,16 +171,19 @@ class FilterScheduler(driver.Scheduler):
def _post_select_populate_filter_properties(self, filter_properties, def _post_select_populate_filter_properties(self, filter_properties,
host_state): host_state):
"""Add additional information to the filter properties after a host has """Populate filter properties with additional information.
Add additional information to the filter properties after a host has
been selected by the scheduling process. been selected by the scheduling process.
""" """
# Add a retry entry for the selected volume backend: # Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host) self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host): def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend. In the event that """Add a retry entry for the selected volume backend.
the request gets re-scheduled, this entry will signal that the given
backend has already been tried. In the event that the request gets re-scheduled, this entry will signal
that the given backend has already been tried.
""" """
retry = filter_properties.get('retry', None) retry = filter_properties.get('retry', None)
if not retry: if not retry:
@ -198,9 +200,7 @@ class FilterScheduler(driver.Scheduler):
return max_attempts return max_attempts
def _log_volume_error(self, volume_id, retry): def _log_volume_error(self, volume_id, retry):
"""If the request contained an exception from a previous volume """Log requests with exceptions from previous volume operations."""
create operation, log it to aid debugging
"""
exc = retry.pop('exc', None) # string-ified exception from volume exc = retry.pop('exc', None) # string-ified exception from volume
if not exc: if not exc:
return # no exception info from a previous attempt, skip return # no exception info from a previous attempt, skip
@ -217,8 +217,9 @@ class FilterScheduler(driver.Scheduler):
'exc': exc}) 'exc': exc})
def _populate_retry(self, filter_properties, properties): def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for this """Populate filter properties with history of retries for request.
request. If maximum retries is exceeded, raise NoValidHost.
If maximum retries is exceeded, raise NoValidHost.
""" """
max_attempts = self.max_attempts max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {}) retry = filter_properties.pop('retry', {})
@ -249,8 +250,9 @@ class FilterScheduler(driver.Scheduler):
def _get_weighted_candidates(self, context, request_spec, def _get_weighted_candidates(self, context, request_spec,
filter_properties=None): filter_properties=None):
"""Returns a list of hosts that meet the required specs, """Return a list of hosts that meet required specs.
ordered by their fitness.
Returned list is ordered by their fitness.
""" """
elevated = context.elevated() elevated = context.elevated()

View File

@ -12,8 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Weighers that weigh hosts by volume number in backends:
Weighers that weigh hosts by volume number in backends:
1. Volume Number Weigher. Weigh hosts by their volume number. 1. Volume Number Weigher. Weigh hosts by their volume number.
@ -51,6 +50,7 @@ class VolumeNumberWeigher(weights.BaseHostWeigher):
def _weigh_object(self, host_state, weight_properties): def _weigh_object(self, host_state, weight_properties):
"""Less volume number weights win. """Less volume number weights win.
We want spreading to be the default. We want spreading to be the default.
""" """
context = weight_properties['context'] context = weight_properties['context']

View File

@ -286,7 +286,9 @@ class XMLDeserializerTest(test.TestCase):
class MetadataXMLDeserializerTest(test.TestCase): class MetadataXMLDeserializerTest(test.TestCase):
def test_xml_meta_parsing_special_character(self): def test_xml_meta_parsing_special_character(self):
"""Test that when a SaxParser splits a string containing special """Test XML meta parsing with special characters.
Test that when a SaxParser splits a string containing special
characters into multiple childNodes there are no issues extracting characters into multiple childNodes there are no issues extracting
the text. the text.
""" """

View File

@ -679,7 +679,9 @@ class FakeHttplibConnection(object):
def wire_HTTPConnection_to_WSGI(host, app): def wire_HTTPConnection_to_WSGI(host, app):
"""Monkeypatches HTTPConnection so that if you try to connect to host, you """Monkeypatches HTTPConnection.
Monkeypatches HTTPConnection so that if you try to connect to host, you
are instead routed straight to the given WSGI app. are instead routed straight to the given WSGI app.
After calling this method, when any code calls After calling this method, when any code calls
@ -698,7 +700,9 @@ def wire_HTTPConnection_to_WSGI(host, app):
can restore the default HTTPConnection interface (for all hosts). can restore the default HTTPConnection interface (for all hosts).
""" """
class HTTPConnectionDecorator(object): class HTTPConnectionDecorator(object):
"""Wraps the real HTTPConnection class so that when you instantiate """Decorator to mock the HTTPConnection class.
Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance. the class you might instead get a fake instance.
""" """

View File

@ -578,7 +578,9 @@ class WsgiLimiterTest(BaseLimitTestSuite):
return jsonutils.dumps({"verb": verb, "path": path}) return jsonutils.dumps({"verb": verb, "path": path})
def _request(self, verb, url, username=None): def _request(self, verb, url, username=None):
"""Make sure that POSTing to the given url causes the given username """POST request to given url by given username.
Make sure that POSTing to the given url causes the given username
to perform the given action. Make the internal rate limiter return to perform the given action. Make the internal rate limiter return
delay and make sure that the WSGI app returns the correct response. delay and make sure that the WSGI app returns the correct response.
""" """
@ -683,7 +685,9 @@ class FakeHttplibConnection(object):
def wire_HTTPConnection_to_WSGI(host, app): def wire_HTTPConnection_to_WSGI(host, app):
"""Monkeypatches HTTPConnection so that if you try to connect to host, you """Monkeypatches HTTPConnection.
Monkeypatches HTTPConnection so that if you try to connect to host, you
are instead routed straight to the given WSGI app. are instead routed straight to the given WSGI app.
After calling this method, when any code calls After calling this method, when any code calls
@ -702,7 +706,9 @@ def wire_HTTPConnection_to_WSGI(host, app):
can restore the default HTTPConnection interface (for all hosts). can restore the default HTTPConnection interface (for all hosts).
""" """
class HTTPConnectionDecorator(object): class HTTPConnectionDecorator(object):
"""Wraps the real HTTPConnection class so that when you instantiate """Decorator to mock the HTTPConecction class.
Wraps the real HTTPConnection class so that when you instantiate
the class you might instead get a fake instance. the class you might instead get a fake instance.
""" """

View File

@ -294,8 +294,10 @@ class SchedulerTestCase(test.TestCase):
class SchedulerDriverBaseTestCase(SchedulerTestCase): class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods """Test schedule driver class.
that can't will fail if the driver is changed.
Test cases for base scheduler driver class methods
that will fail if the driver is changed.
""" """
def test_unimplemented_schedule(self): def test_unimplemented_schedule(self):

View File

@ -20,8 +20,7 @@ from cinder.volume.targets import tgt
class TestIserAdmDriver(tf.TargetDriverFixture): class TestIserAdmDriver(tf.TargetDriverFixture):
"""Unit tests for the deprecated ISERTgtAdm flow """Unit tests for the deprecated ISERTgtAdm flow."""
"""
def setUp(self): def setUp(self):
super(TestIserAdmDriver, self).setUp() super(TestIserAdmDriver, self).setUp()
@ -46,8 +45,7 @@ class TestIserAdmDriver(tf.TargetDriverFixture):
class TestIserTgtDriver(tf.TargetDriverFixture): class TestIserTgtDriver(tf.TargetDriverFixture):
"""Unit tests for the iSER TGT flow """Unit tests for the iSER TGT flow."""
"""
def setUp(self): def setUp(self):
super(TestIserTgtDriver, self).setUp() super(TestIserTgtDriver, self).setUp()
@ -72,8 +70,7 @@ class TestIserTgtDriver(tf.TargetDriverFixture):
class TestIserLioAdmDriver(tf.TargetDriverFixture): class TestIserLioAdmDriver(tf.TargetDriverFixture):
"""Unit tests for the iSER LIO flow """Unit tests for the iSER LIO flow."""
"""
def setUp(self): def setUp(self):
super(TestIserLioAdmDriver, self).setUp() super(TestIserLioAdmDriver, self).setUp()
self.configuration.iscsi_protocol = 'iser' self.configuration.iscsi_protocol = 'iser'

View File

@ -12,10 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Tests for Backup code."""
Tests for Backup code.
"""
import ddt import ddt
import tempfile import tempfile
@ -178,7 +175,9 @@ class BackupTestCase(BaseBackupTest):
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot') @mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
@mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume') @mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
def test_init_host(self, mock_delete_volume, mock_delete_snapshot): def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
"""Make sure stuck volumes and backups are reset to correct """Test stuck volumes and backups.
Make sure stuck volumes and backups are reset to correct
states when backup_manager.init_host() is called states when backup_manager.init_host() is called
""" """
vol1_id = self._create_volume_db_entry() vol1_id = self._create_volume_db_entry()
@ -308,7 +307,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(2, notify.call_count) self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self): def test_restore_backup_with_bad_volume_status(self):
"""Test error handling when restoring a backup to a volume """Test error handling.
Test error handling when restoring a backup to a volume
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(status='available', size=1) vol_id = self._create_volume_db_entry(status='available', size=1)
@ -322,7 +323,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
def test_restore_backup_with_bad_backup_status(self): def test_restore_backup_with_bad_backup_status(self):
"""Test error handling when restoring a backup with a backup """Test error handling.
Test error handling when restoring a backup with a backup
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
@ -360,7 +363,9 @@ class BackupTestCase(BaseBackupTest):
self.assertTrue(_mock_volume_restore.called) self.assertTrue(_mock_volume_restore.called)
def test_restore_backup_with_bad_service(self): def test_restore_backup_with_bad_service(self):
"""Test error handling when attempting a restore of a backup """Test error handling.
Test error handling when attempting a restore of a backup
with a different service to that used to create the backup. with a different service to that used to create the backup.
""" """
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
@ -410,7 +415,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(2, notify.call_count) self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self): def test_delete_backup_with_bad_backup_status(self):
"""Test error handling when deleting a backup with a backup """Test error handling.
Test error handling when deleting a backup with a backup
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
@ -437,7 +444,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_bad_service(self): def test_delete_backup_with_bad_service(self):
"""Test error handling when attempting a delete of a backup """Test error handling.
Test error handling when attempting a delete of a backup
with a different service to that used to create the backup. with a different service to that used to create the backup.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
@ -453,7 +462,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_no_service(self): def test_delete_backup_with_no_service(self):
"""Test error handling when attempting a delete of a backup """Test error handling.
Test error handling when attempting a delete of a backup
with no service defined for that backup, relates to bug #1162908 with no service defined for that backup, relates to bug #1162908
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
@ -500,8 +511,10 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(backups[0].id, b2.id) self.assertEqual(backups[0].id, b2.id)
def test_backup_get_all_by_project_with_deleted(self): def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project. """Test deleted backups.
Unless context.read_deleted is 'yes'.
Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'.
""" """
backups = db.backup_get_all_by_project(self.ctxt, 'fake') backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 0) self.assertEqual(len(backups), 0)
@ -519,8 +532,10 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(len(backups), 2) self.assertEqual(len(backups), 2)
def test_backup_get_all_by_host_with_deleted(self): def test_backup_get_all_by_host_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project. """Test deleted backups.
Unless context.read_deleted is 'yes'
Test deleted backups don't show up in backup_get_all_by_project.
Unless context.read_deleted is 'yes'
""" """
backups = db.backup_get_all_by_host(self.ctxt, 'testhost') backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 0) self.assertEqual(len(backups), 0)
@ -546,7 +561,9 @@ class BackupTestCase(BaseBackupTest):
backup_mgr.driver_name) backup_mgr.driver_name)
def test_export_record_with_bad_service(self): def test_export_record_with_bad_service(self):
"""Test error handling when attempting an export of a backup """Test error handling.
Test error handling when attempting an export of a backup
record with a different service to that used to create the backup. record with a different service to that used to create the backup.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
@ -561,7 +578,9 @@ class BackupTestCase(BaseBackupTest):
backup) backup)
def test_export_record_with_bad_backup_status(self): def test_export_record_with_bad_backup_status(self):
"""Test error handling when exporting a backup record with a backup """Test error handling.
Test error handling when exporting a backup record with a backup
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(status='available', vol_id = self._create_volume_db_entry(status='available',
@ -605,7 +624,9 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(backup['size'], vol_size) self.assertEqual(backup['size'], vol_size)
def test_import_record_with_bad_service(self): def test_import_record_with_bad_service(self):
"""Test error handling when attempting an import of a backup """Test error handling.
Test error handling when attempting an import of a backup
record with a different service to that used to create the backup. record with a different service to that used to create the backup.
""" """
export = self._create_exported_record_entry() export = self._create_exported_record_entry()
@ -644,7 +665,9 @@ class BackupTestCase(BaseBackupTest):
backup_hosts_expect) backup_hosts_expect)
def test_import_record_with_invalid_backup(self): def test_import_record_with_invalid_backup(self):
"""Test error handling when attempting an import of a backup """Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception. record where the backup driver returns an exception.
""" """
export = self._create_exported_record_entry() export = self._create_exported_record_entry()
@ -723,7 +746,9 @@ class BackupTestCaseWithVerify(BaseBackupTest):
self.assertEqual(backup['size'], vol_size) self.assertEqual(backup['size'], vol_size)
def test_import_record_with_verify_invalid_backup(self): def test_import_record_with_verify_invalid_backup(self):
"""Test error handling when attempting an import of a backup """Test error handling.
Test error handling when attempting an import of a backup
record where the backup driver returns an exception. record where the backup driver returns an exception.
""" """
vol_size = 1 vol_size = 1

View File

@ -4686,8 +4686,7 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase):
'get_volume_type_extra_specs', 'get_volume_type_extra_specs',
return_value={'volume_backend_name': 'FCFAST'}) return_value={'volume_backend_name': 'FCFAST'})
def test_delete_volume_fast_notfound(self, _mock_volume_type): def test_delete_volume_fast_notfound(self, _mock_volume_type):
"""We do not set the provider location. """"Test delete volume with volume not found."""
"""
notfound_delete_vol = {} notfound_delete_vol = {}
notfound_delete_vol['name'] = 'notfound_delete_vol' notfound_delete_vol['name'] = 'notfound_delete_vol'
notfound_delete_vol['id'] = '10' notfound_delete_vol['id'] = '10'

View File

@ -2009,7 +2009,9 @@ Time Remaining: 0 second(s)
@mock.patch('random.randint', @mock.patch('random.randint',
mock.Mock(return_value=0)) mock.Mock(return_value=0))
def test_initialize_connection_exist(self): def test_initialize_connection_exist(self):
"""A LUN is added to the SG right before the attach, """Test if initialize connection exists.
A LUN is added to the SG right before the attach,
it may not exists in the first SG query it may not exists in the first SG query
""" """
# Test for auto registration # Test for auto registration
@ -2125,7 +2127,9 @@ Time Remaining: 0 second(s)
@mock.patch('random.randint', @mock.patch('random.randint',
mock.Mock(return_value=0)) mock.Mock(return_value=0))
def test_initialize_connection_no_hlu_left_1(self): def test_initialize_connection_no_hlu_left_1(self):
"""There is no hlu per the first SG query """Test initialize connection with no hlu per first SG query.
There is no hlu per the first SG query
But there are hlu left after the full poll But there are hlu left after the full poll
""" """
# Test for auto registration # Test for auto registration
@ -2172,8 +2176,7 @@ Time Remaining: 0 second(s)
@mock.patch('random.randint', @mock.patch('random.randint',
mock.Mock(return_value=0)) mock.Mock(return_value=0))
def test_initialize_connection_no_hlu_left_2(self): def test_initialize_connection_no_hlu_left_2(self):
"""There is no usable hlu for the SG """Test initialize connection with no hlu left."""
"""
# Test for auto registration # Test for auto registration
self.configuration.initiator_auto_registration = True self.configuration.initiator_auto_registration = True
self.configuration.max_luns_per_storage_group = 2 self.configuration.max_luns_per_storage_group = 2
@ -2887,7 +2890,9 @@ Time Remaining: 0 second(s)
mock.Mock(return_value={'storagetype:provisioning': 'deduplicated', mock.Mock(return_value={'storagetype:provisioning': 'deduplicated',
'storagetype:pool': 'unit_test_pool'})) 'storagetype:pool': 'unit_test_pool'}))
def test_retype_pool_changed_dedup_to_compressed_auto(self): def test_retype_pool_changed_dedup_to_compressed_auto(self):
"""Unit test for retype dedup to compressed and auto tiering """Test retype from dedup to compressed and auto tiering.
Unit test for retype dedup to compressed and auto tiering
and pool changed and pool changed
""" """
diff_data = {'encryption': {}, 'qos_specs': {}, diff_data = {'encryption': {}, 'qos_specs': {},
@ -3229,7 +3234,7 @@ Time Remaining: 0 second(s)
"get_volume_type_extra_specs", "get_volume_type_extra_specs",
mock.Mock(return_value={'fast_cache_enabled': 'True'})) mock.Mock(return_value={'fast_cache_enabled': 'True'}))
def test_create_volume_with_fastcache(self): def test_create_volume_with_fastcache(self):
"""Enable fastcache when creating volume.""" """Test creating volume with fastcache enabled."""
commands = [self.testData.NDU_LIST_CMD, commands = [self.testData.NDU_LIST_CMD,
self.testData.POOL_PROPERTY_W_FASTCACHE_CMD, self.testData.POOL_PROPERTY_W_FASTCACHE_CMD,
self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'), self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),

View File

@ -160,8 +160,7 @@ class GlusterFsDriverTestCase(test.TestCase):
mock_mount.assert_called_once_with(self.TEST_EXPORT1, []) mock_mount.assert_called_once_with(self.TEST_EXPORT1, [])
def test_mount_glusterfs_should_reraise_exception_on_failure(self): def test_mount_glusterfs_should_reraise_exception_on_failure(self):
"""_mount_glusterfs should reraise exception if mount fails. """_mount_glusterfs should reraise exception if mount fails."""
"""
drv = self._driver drv = self._driver
with mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient, with mock.patch.object(os_brick.remotefs.remotefs.RemoteFsClient,

View File

@ -495,7 +495,9 @@ class HPXPFCDriverTest(test.TestCase):
self.assertTrue(has_volume) self.assertTrue(has_volume)
def test_create_volume_from_snapshot_error_on_non_existing_snapshot(self): def test_create_volume_from_snapshot_error_on_non_existing_snapshot(self):
"""Test create_volume_from_snapshot is error on non existing snapshot. """Test create_volume_from_snapshot.
Test create_volume_from_snapshot is error on non existing snapshot.
""" """
volume2 = fake_volume.fake_db_volume(**self._VOLUME2) volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT) snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)

View File

@ -15,9 +15,7 @@
# under the License. # under the License.
# #
""" """Tests for the IBM FlashSystem volume driver."""
Tests for the IBM FlashSystem volume driver.
"""
import mock import mock
from oslo_concurrency import processutils from oslo_concurrency import processutils
@ -138,7 +136,9 @@ class FlashSystemManagementSimulator(object):
return six.text_type(num) return six.text_type(num)
def _cmd_lshost(self, **kwargs): def _cmd_lshost(self, **kwargs):
"""svcinfo lshost -delim ! """lshost command.
svcinfo lshost -delim !
svcinfo lshost -delim ! <host> svcinfo lshost -delim ! <host>
""" """
if 'obj' not in kwargs: if 'obj' not in kwargs:
@ -238,7 +238,9 @@ class FlashSystemManagementSimulator(object):
return ('%s' % '\n'.join(objrows), '') return ('%s' % '\n'.join(objrows), '')
def _cmd_lsnode(self, **kwargs): def _cmd_lsnode(self, **kwargs):
"""svcinfo lsnode -delim ! """lsnode command.
svcinfo lsnode -delim !
svcinfo lsnode -delim ! <node> svcinfo lsnode -delim ! <node>
""" """
@ -448,7 +450,9 @@ class FlashSystemManagementSimulator(object):
return ('', '') return ('', '')
def _cmd_mkvdisk(self, **kwargs): def _cmd_mkvdisk(self, **kwargs):
"""svctask mkvdisk -name <name> -mdiskgrp <mdiskgrp> -iogrp <iogrp> """mkvdisk command.
svctask mkvdisk -name <name> -mdiskgrp <mdiskgrp> -iogrp <iogrp>
-size <size> -unit <unit> -size <size> -unit <unit>
""" """
@ -507,7 +511,9 @@ class FlashSystemManagementSimulator(object):
return ('', '') return ('', '')
def _cmd_mkhost(self, **kwargs): def _cmd_mkhost(self, **kwargs):
"""svctask mkhost -force -hbawwpn <wwpn> -name <host_name> """mkhost command.
svctask mkhost -force -hbawwpn <wwpn> -name <host_name>
svctask mkhost -force -iscsiname <initiator> -name <host_name> svctask mkhost -force -iscsiname <initiator> -name <host_name>
""" """
@ -535,7 +541,9 @@ class FlashSystemManagementSimulator(object):
return (out, err) return (out, err)
def _cmd_addhostport(self, **kwargs): def _cmd_addhostport(self, **kwargs):
"""svctask addhostport -force -hbawwpn <wwpn> <host> """addhostport command.
svctask addhostport -force -hbawwpn <wwpn> <host>
svctask addhostport -force -iscsiname <initiator> <host> svctask addhostport -force -iscsiname <initiator> <host>
""" """

View File

@ -670,9 +670,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase):
"Consistency Group created failed") "Consistency Group created failed")
def test_create_consistencygroup_fail_on_cg_not_empty(self): def test_create_consistencygroup_fail_on_cg_not_empty(self):
"""Test that create_consistencygroup fail """Test create_consistencygroup with empty consistency group."""
when consistency group is not empty.
"""
self.driver.do_setup(None) self.driver.do_setup(None)
@ -713,9 +711,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase):
'Consistency Group deleted failed') 'Consistency Group deleted failed')
def test_delete_consistencygroup_fail_on_volume_not_delete(self): def test_delete_consistencygroup_fail_on_volume_not_delete(self):
"""Test that delete_consistencygroup return fail """Test delete_consistencygroup with volume delete failure."""
when the volume can not be deleted.
"""
self.driver.do_setup(None) self.driver.do_setup(None)
@ -821,9 +817,7 @@ class XIVDS8KVolumeDriverTest(test.TestCase):
self.driver.delete_consistencygroup(ctxt, CONSISTGROUP) self.driver.delete_consistencygroup(ctxt, CONSISTGROUP)
def test_delete_cgsnapshot_fail_on_snapshot_not_delete(self): def test_delete_cgsnapshot_fail_on_snapshot_not_delete(self):
"""Test that delete_cgsnapshot return fail """Test delete_cgsnapshot when the snapshot cannot be deleted."""
when the snapshot can not be deleted.
"""
self.driver.do_setup(None) self.driver.do_setup(None)

View File

@ -12,10 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Tests for NetApp volume driver."""
Tests for NetApp volume driver
"""
from lxml import etree from lxml import etree
import mock import mock
@ -1162,9 +1159,7 @@ class FakeDirect7modeHTTPConnection(object):
class NetAppDirect7modeISCSIDriverTestCase_NV( class NetAppDirect7modeISCSIDriverTestCase_NV(
NetAppDirectCmodeISCSIDriverTestCase): NetAppDirectCmodeISCSIDriverTestCase):
"""Test case for NetAppISCSIDriver """Test case for NetAppISCSIDriver without vfiler"""
No vfiler
"""
def setUp(self): def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp() super(NetAppDirect7modeISCSIDriverTestCase_NV, self).setUp()
@ -1218,9 +1213,7 @@ class NetAppDirect7modeISCSIDriverTestCase_NV(
class NetAppDirect7modeISCSIDriverTestCase_WV( class NetAppDirect7modeISCSIDriverTestCase_WV(
NetAppDirect7modeISCSIDriverTestCase_NV): NetAppDirect7modeISCSIDriverTestCase_NV):
"""Test case for NetAppISCSIDriver """Test case for NetAppISCSIDriver with vfiler"""
With vfiler
"""
def setUp(self): def setUp(self):
super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp() super(NetAppDirect7modeISCSIDriverTestCase_WV, self).setUp()

View File

@ -14,9 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Tests for NetApp e-series iscsi volume driver."""
Tests for NetApp e-series iscsi volume driver.
"""
import copy import copy
import json import json
@ -594,8 +592,7 @@ class FakeEseriesServerHandler(object):
class FakeEseriesHTTPSession(object): class FakeEseriesHTTPSession(object):
"""A fake requests.Session for netapp tests. """A fake requests.Session for netapp tests."""
"""
def __init__(self): def __init__(self):
self.handler = FakeEseriesServerHandler() self.handler = FakeEseriesServerHandler()

View File

@ -1055,8 +1055,7 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
def _create_volume_from_image(self, expected_status, raw=False, def _create_volume_from_image(self, expected_status, raw=False,
clone_error=False): clone_error=False):
"""Try to clone a volume from an image, and check the status """Try to clone a volume from an image, and check status afterwards.
afterwards.
NOTE: if clone_error is True we force the image type to raw otherwise NOTE: if clone_error is True we force the image type to raw otherwise
clone_image is not called clone_image is not called

View File

@ -12,9 +12,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Unit tests for the Scality Rest Block Volume Driver."""
Unit tests for the Scality Rest Block Volume Driver.
"""
import mock import mock
from oslo_concurrency import processutils from oslo_concurrency import processutils
@ -760,8 +758,9 @@ class SRBDriverTestCase(test.TestCase):
self._driver.do_setup, None) self._driver.do_setup, None)
def test_volume_create(self): def test_volume_create(self):
"""The volume shall be added in the internal """"Test volume create.
state through fake_execute
The volume will be added in the internal state through fake_execute.
""" """
volume = {'name': 'volume-test', 'id': 'test', 'size': 4 * units.Gi} volume = {'name': 'volume-test', 'id': 'test', 'size': 4 * units.Gi}
old_vols = self._volumes old_vols = self._volumes

View File

@ -1243,9 +1243,7 @@ class SSHPoolTestCase(test.TestCase):
class BrickUtils(test.TestCase): class BrickUtils(test.TestCase):
"""Unit test to test the brick utility """Unit test to test the brick utility wrapper functions."""
wrapper functions.
"""
@mock.patch('cinder.utils.CONF') @mock.patch('cinder.utils.CONF')
@mock.patch('os_brick.initiator.connector.get_connector_properties') @mock.patch('os_brick.initiator.connector.get_connector_properties')

View File

@ -333,8 +333,10 @@ class V7000FCPDriverTestCase(test.TestCase):
@mock.patch('socket.gethostbyaddr') @mock.patch('socket.gethostbyaddr')
def test_update_volume_stats(self, mock_gethost): def test_update_volume_stats(self, mock_gethost):
"""Makes a mock query to the backend to collect """Test Update Volume Stats.
stats on all physical devices.
Makes a mock query to the backend to collect stats on all physical
devices.
""" """
def gethostbyaddr(addr): def gethostbyaddr(addr):
@ -369,8 +371,10 @@ class V7000FCPDriverTestCase(test.TestCase):
self.assertIsNone(result) self.assertIsNone(result)
def test_get_active_fc_targets(self): def test_get_active_fc_targets(self):
"""Makes a mock query to the backend to collect """Test Get Active FC Targets.
all the physical adapters and extract the WWNs
Makes a mock query to the backend to collect all the physical
adapters and extract the WWNs.
""" """
conf = { conf = {

View File

@ -13,10 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Tests for Volume Code."""
Tests for Volume Code.
"""
import datetime import datetime
import os import os
@ -1751,9 +1748,7 @@ class VolumeTestCase(BaseVolumeTestCase):
@mock.patch.object(keymgr, 'API', fake_keymgr.fake_api) @mock.patch.object(keymgr, 'API', fake_keymgr.fake_api)
def test_create_volume_from_snapshot_with_encryption(self): def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of """Test volume can be created from a snapshot of an encrypted volume"""
an encrypted volume.
"""
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
db.volume_type_create(ctxt, db.volume_type_create(ctxt,
@ -3455,7 +3450,9 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_volume(self.context, volume['id']) self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_exception(self): def test_create_volume_from_image_exception(self):
"""Verify that create volume from a non-existing image, the volume """Test create volume from a non-existing image.
Verify that create volume from a non-existing image, the volume
status is 'error' and is not bootable. status is 'error' and is not bootable.
""" """
dst_fd, dst_path = tempfile.mkstemp() dst_fd, dst_path = tempfile.mkstemp()
@ -3505,7 +3502,9 @@ class VolumeTestCase(BaseVolumeTestCase):
{'_pool0': {'allocated_capacity_gb': 1}}) {'_pool0': {'allocated_capacity_gb': 1}})
def test_create_volume_from_exact_sized_image(self): def test_create_volume_from_exact_sized_image(self):
"""Verify that an image which is exactly the same size as the """Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
volume, will work correctly. volume, will work correctly.
""" """
try: try:

View File

@ -11,9 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Unit Tests for volume types code."""
Unit Tests for volume types code
"""
import datetime import datetime
@ -110,7 +108,9 @@ class VolumeTypeTestCase(test.TestCase):
conf_fixture.def_vol_type) conf_fixture.def_vol_type)
def test_default_volume_type_missing_in_db(self): def test_default_volume_type_missing_in_db(self):
"""Ensures proper exception raised if default volume type """Test default volume type is missing in database.
Ensures proper exception raised if default volume type
is not in database. is not in database.
""" """
default_vol_type = volume_types.get_default_volume_type() default_vol_type = volume_types.get_default_volume_type()

View File

@ -11,9 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Unit tests for Oracle's ZFSSA Cinder volume driver."""
Unit tests for Oracle's ZFSSA Cinder volume driver
"""
import json import json
@ -34,7 +32,7 @@ nfs_compression = 'off'
class FakeZFSSA(object): class FakeZFSSA(object):
"""Fake ZFS SA""" """Fake ZFS SA."""
def __init__(self): def __init__(self):
self.user = None self.user = None
self.host = None self.host = None
@ -221,8 +219,7 @@ class FakeZFSSA(object):
class FakeNFSZFSSA(FakeZFSSA): class FakeNFSZFSSA(FakeZFSSA):
"""Fake ZFS SA for the NFS Driver """Fake ZFS SA for the NFS Driver."""
"""
def set_webdav(self, https_path, auth_str): def set_webdav(self, https_path, auth_str):
self.webdavclient = https_path self.webdavclient = https_path

View File

@ -441,7 +441,9 @@ def deepcopy_return_value_method_decorator(fn):
def deepcopy_return_value_class_decorator(cls): def deepcopy_return_value_class_decorator(cls):
"""Wraps all 'non-protected' methods of a class with the """Wraps 'non-protected' methods of a class with decorator.
Wraps all 'non-protected' methods of a class with the
deepcopy_return_value_method_decorator decorator. deepcopy_return_value_method_decorator decorator.
""" """
class NewClass(cls): class NewClass(cls):

View File

@ -12,9 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Mock unit tests for the NetApp E-series iscsi driver."""
Mock unit tests for the NetApp E-series iscsi driver
"""
import copy import copy
@ -101,7 +99,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_unmap_volume_from_host_volume_mapped_to_host_group_but_not_host( def test_unmap_volume_from_host_volume_mapped_to_host_group_but_not_host(
self): self):
"""Ensure an error is raised if the specified host is not in the """Test volume mapped to host not in specified host group.
Ensure an error is raised if the specified host is not in the
host group the volume is mapped to. host group the volume is mapped to.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
@ -162,7 +162,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
self.assertFalse(self.client.delete_volume_mapping.called) self.assertFalse(self.client.delete_volume_mapping.called)
def test_unmap_volume_from_host_volume_mapped_to_outside_host_group(self): def test_unmap_volume_from_host_volume_mapped_to_outside_host_group(self):
"""Ensure we raise error when we find a volume is mapped to an unknown """Test volume mapped to host group without host.
Ensure we raise error when we find a volume is mapped to an unknown
host group that does not have the host. host group that does not have the host.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
@ -189,7 +191,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_unmap_volume_from_host_volume_mapped_to_outside_host_group_w_host( def test_unmap_volume_from_host_volume_mapped_to_outside_host_group_w_host(
self): self):
"""Ensure we raise error when we find a volume is mapped to an unknown """Test volume mapped to host in unknown host group.
Ensure we raise error when we find a volume is mapped to an unknown
host group that has the host. host group that has the host.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
@ -246,7 +250,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group( def test_map_volume_to_single_host_volume_mapped_to_multiattach_host_group(
self): self):
"""Should move mapping to target host if volume is not migrating or """Test map volume to a single host.
Should move mapping to target host if volume is not migrating or
attached(in-use). If volume is not in use then it should not require a attached(in-use). If volume is not in use then it should not require a
mapping making it ok to sever the mapping to the host group. mapping making it ok to sever the mapping to the host group.
""" """
@ -385,7 +391,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
fake_mapping_to_host_group) fake_mapping_to_host_group)
def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host(self): def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host(self):
"""Should ensure both existing host and destination host are in """Test that mapping moves to another host group.
Should ensure both existing host and destination host are in
multiattach host group and move the mapping to the host group. multiattach host group and move the mapping to the host group.
""" """
@ -418,7 +426,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_source_host( # noqa def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_source_host( # noqa
self): self):
"""Should fail attempting to move source host to multiattach host """Test moving source host to multiattach host group.
Should fail attempting to move source host to multiattach host
group and raise an error. group and raise an error.
""" """
@ -451,7 +461,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_dest_host( # noqa def test_map_volume_to_multiple_hosts_volume_mapped_to_another_host_with_lun_collision_with_dest_host( # noqa
self): self):
"""Should fail attempting to move destination host to multiattach host """Test moving destination host to multiattach host group.
Should fail attempting to move destination host to multiattach host
group and raise an error. group and raise an error.
""" """
@ -484,7 +496,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_multiple_hosts_volume_mapped_to_foreign_host_group( def test_map_volume_to_multiple_hosts_volume_mapped_to_foreign_host_group(
self): self):
"""Should raise an error stating the volume is mapped to an """Test a target when the host is in a foreign host group.
Should raise an error stating the volume is mapped to an
unsupported host group. unsupported host group.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
@ -510,7 +524,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_multiple_hosts_volume_mapped_to_host_in_foreign_host_group( # noqa def test_map_volume_to_multiple_hosts_volume_mapped_to_host_in_foreign_host_group( # noqa
self): self):
"""Should raise an error stating the volume is mapped to a """Test a target when the host is in a foreign host group.
Should raise an error stating the volume is mapped to a
host that is in an unsupported host group. host that is in an unsupported host group.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)
@ -540,7 +556,9 @@ class NetAppEseriesHostMapperTestCase(test.TestCase):
def test_map_volume_to_multiple_hosts_volume_target_host_in_foreign_host_group( # noqa def test_map_volume_to_multiple_hosts_volume_target_host_in_foreign_host_group( # noqa
self): self):
"""Should raise an error stating the target host is in an """Test a target when the host is in a foreign host group.
Should raise an error stating the target host is in an
unsupported host group. unsupported host group.
""" """
fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME) fake_eseries_volume = copy.deepcopy(eseries_fakes.VOLUME)

View File

@ -611,8 +611,10 @@ class NetAppEseriesLibraryTestCase(test.TestCase):
class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase): class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase):
"""Test driver behavior when the netapp_enable_multiattach """Test driver when netapp_enable_multiattach is enabled.
configuration option is True.
Test driver behavior when the netapp_enable_multiattach configuration
option is True.
""" """
def setUp(self): def setUp(self):

View File

@ -327,9 +327,7 @@ def safe_minidom_parse_string(xml_string):
def xhtml_escape(value): def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML. """Escapes a string so it is valid within XML or XHTML."""
"""
return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'}) return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
@ -402,7 +400,9 @@ def is_none_string(val):
def monkey_patch(): def monkey_patch():
"""If the CONF.monkey_patch set as True, """Patches decorators for all functions in a specified module.
If the CONF.monkey_patch set as True,
this function patches a decorator this function patches a decorator
for all functions in specified modules. for all functions in specified modules.
@ -415,8 +415,8 @@ def monkey_patch():
Parameters of the decorator is as follows. Parameters of the decorator is as follows.
(See cinder.openstack.common.notifier.api.notify_decorator) (See cinder.openstack.common.notifier.api.notify_decorator)
name - name of the function :param name: name of the function
function - object of the function :param function: object of the function
""" """
# If CONF.monkey_patch is not True, this function do nothing. # If CONF.monkey_patch is not True, this function do nothing.
if not CONF.monkey_patch: if not CONF.monkey_patch:
@ -551,11 +551,10 @@ def get_root_helper():
def brick_get_connector_properties(multipath=False, enforce_multipath=False): def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""wrapper for the brick calls to automatically set """Wrapper to automatically set root_helper in brick calls.
the root_helper needed for cinder.
:param multipath: A boolean indicating whether the connector can :param multipath: A boolean indicating whether the connector can
support multipath. support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True :param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running. is specified but multipathd is not running.
If False, it falls back to multipath=False If False, it falls back to multipath=False
@ -575,6 +574,7 @@ def brick_get_connector(protocol, driver=None,
device_scan_attempts=3, device_scan_attempts=3,
*args, **kwargs): *args, **kwargs):
"""Wrapper to get a brick connector object. """Wrapper to get a brick connector object.
This automatically populates the required protocol as well This automatically populates the required protocol as well
as the root_helper needed to execute commands. as the root_helper needed to execute commands.
""" """
@ -620,7 +620,9 @@ def get_file_size(path):
def _get_disk_of_partition(devpath, st=None): def _get_disk_of_partition(devpath, st=None):
"""Returns a disk device path from a partition device path, and stat for """Gets a disk device path and status from partition path.
Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is. the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
@ -641,7 +643,9 @@ def _get_disk_of_partition(devpath, st=None):
def get_blkdev_major_minor(path, lookup_for_file=True): def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get the device's "major:minor" number of a block device to control """Get 'major:minor' number of block device.
Get the device's 'major:minor' number of a block device to control
I/O ratelimit of the specified path. I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device. device which the file lies on and returns the result for the device.
@ -667,7 +671,8 @@ def get_blkdev_major_minor(path, lookup_for_file=True):
def check_string_length(value, name, min_length=0, max_length=None): def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string """Check the length of specified string.
:param value: the value of the string :param value: the value of the string
:param name: the name of the string :param name: the name of the string
:param min_length: the min_length of the string :param min_length: the min_length of the string
@ -733,9 +738,8 @@ def add_visible_admin_metadata(volume):
def remove_invalid_filter_options(context, filters, def remove_invalid_filter_options(context, filters,
allowed_search_options): allowed_search_options):
"""Remove search options that are not valid """Remove search options that are not valid for non-admin API/context."""
for non-admin API/context.
"""
if context.is_admin: if context.is_admin:
# Allow all options # Allow all options
return return

View File

@ -14,9 +14,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Handles all requests relating to volumes."""
Handles all requests relating to volumes.
"""
import collections import collections
@ -1515,7 +1513,8 @@ class HostAPI(base.Base):
raise NotImplementedError() raise NotImplementedError()
def set_host_maintenance(self, context, host, mode): def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers """Start/Stop host maintenance window.
volume evacuation.
On start, it triggers volume evacuation.
""" """
raise NotImplementedError() raise NotImplementedError()

View File

@ -13,8 +13,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Configuration support for all drivers.
Configuration support for all drivers.
This module allows support for setting configurations either from default This module allows support for setting configurations either from default
or from a particular FLAGS group, to be able to set multiple configurations or from a particular FLAGS group, to be able to set multiple configurations
@ -51,8 +50,10 @@ LOG = logging.getLogger(__name__)
class Configuration(object): class Configuration(object):
def __init__(self, volume_opts, config_group=None): def __init__(self, volume_opts, config_group=None):
"""This takes care of grafting the implementation's config """Initialize configuration.
values into the config group
This takes care of grafting the implementation's config
values into the config group
""" """
self.config_group = config_group self.config_group = config_group

View File

@ -13,9 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Drivers for volumes."""
Drivers for volumes.
"""
import abc import abc
import time import time
@ -420,8 +418,10 @@ class BaseVD(object):
@abc.abstractmethod @abc.abstractmethod
def create_volume(self, volume): def create_volume(self, volume):
"""Creates a volume. Can optionally return a Dictionary of """Creates a volume.
changes to the volume object to be persisted.
Can optionally return a Dictionary of changes to the volume object to
be persisted.
If volume_type extra specs includes If volume_type extra specs includes
'capabilities:replication <is> True' the driver 'capabilities:replication <is> True' the driver
@ -453,11 +453,12 @@ class BaseVD(object):
return False return False
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is """Return the current state of the volume service.
True, run the update first.
For replication the following state should be reported: If 'refresh' is True, run the update first.
replication = True (None or false disables replication)
For replication the following state should be reported:
replication = True (None or false disables replication)
""" """
return None return None
@ -1279,8 +1280,9 @@ class ReplicaVD(object):
class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD, class VolumeDriver(ConsistencyGroupVD, TransferVD, ManageableVD, ExtendVD,
CloneableVD, CloneableImageVD, SnapshotVD, ReplicaVD, CloneableVD, CloneableImageVD, SnapshotVD, ReplicaVD,
RetypeVD, LocalVD, MigrateVD, BaseVD): RetypeVD, LocalVD, MigrateVD, BaseVD):
"""This class will be deprecated soon. Please us the abstract classes """This class will be deprecated soon.
above for new drivers.
Please use the abstract classes above for new drivers.
""" """
def check_for_setup_error(self): def check_for_setup_error(self):
raise NotImplementedError() raise NotImplementedError()
@ -1470,7 +1472,9 @@ class ProxyVD(object):
class can help marking them and retrieve the actual used driver object. class can help marking them and retrieve the actual used driver object.
""" """
def _get_driver(self): def _get_driver(self):
"""Returns the actual driver object. Can be overloaded by the proxy. """Returns the actual driver object.
Can be overloaded by the proxy.
""" """
return getattr(self, "driver", None) return getattr(self, "driver", None)
@ -1806,8 +1810,10 @@ class FakeISCSIDriver(ISCSIDriver):
pass pass
def create_export(self, context, volume): def create_export(self, context, volume):
"""Exports the volume. Can optionally return a Dictionary of changes """Exports the volume.
to the volume object to be persisted.
Can optionally return a Dictionary of changes to the volume object to
be persisted.
""" """
pass pass

View File

@ -181,8 +181,7 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
return data return data
def _override_params(self, default_dict, filtered_user_dict): def _override_params(self, default_dict, filtered_user_dict):
"""Override the default config values with user provided values. """Override the default config values with user provided values."""
"""
if filtered_user_dict is None: if filtered_user_dict is None:
# Nothing to override # Nothing to override

View File

@ -62,10 +62,11 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
def _bytes_to_gb(self, spacestring): def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ... """Space is returned in a string like ...
7.38197504E8 Bytes 7.38197504E8 Bytes
Need to split that apart and convert to GB. Need to split that apart and convert to GB.
returns gbs in int form :returns: gbs in int form
""" """
try: try:
n = spacestring.split(' ', 1) n = spacestring.split(' ', 1)

View File

@ -187,8 +187,8 @@ class DotHillCommon(object):
def _assert_enough_space_for_copy(self, volume_size): def _assert_enough_space_for_copy(self, volume_size):
"""The DotHill creates a snap pool before trying to copy the volume. """The DotHill creates a snap pool before trying to copy the volume.
The pool is 5.27GB or 20% of the volume size, whichever is larger.
The pool is 5.27GB or 20% of the volume size, whichever is larger.
Verify that we have enough space for the pool and then copy Verify that we have enough space for the pool and then copy
""" """
pool_size = max(volume_size * 0.2, 5.27) pool_size = max(volume_size * 0.2, 5.27)

View File

@ -12,10 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """Fibre Channel Driver for EMC VNX array based on CLI."""
Fibre Channel Driver for EMC VNX array based on CLI.
"""
from oslo_log import log as logging from oslo_log import log as logging
@ -223,8 +220,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
self.cli.manage_existing(volume, existing_ref) self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref): def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing. """Return size of volume to be managed by manage_existing."""
"""
return self.cli.manage_existing_get_size(volume, existing_ref) return self.cli.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group): def create_consistencygroup(self, context, group):

View File

@ -12,10 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """iSCSI Drivers for EMC VNX array based on CLI."""
iSCSI Drivers for EMC VNX array based on CLI.
"""
from oslo_log import log as logging from oslo_log import log as logging
@ -202,8 +199,7 @@ class EMCCLIISCSIDriver(driver.ISCSIDriver):
self.cli.manage_existing(volume, existing_ref) self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref): def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing. """Return size of volume to be managed by manage_existing."""
"""
return self.cli.manage_existing_get_size(volume, existing_ref) return self.cli.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group): def create_consistencygroup(self, context, group):

View File

@ -117,8 +117,7 @@ class EMCVMAXCommon(object):
self._gather_info() self._gather_info()
def _gather_info(self): def _gather_info(self):
"""Gather the relevant information for update_volume_stats. """Gather the relevant information for update_volume_stats."""
"""
if hasattr(self.configuration, 'cinder_emc_config_file'): if hasattr(self.configuration, 'cinder_emc_config_file'):
self.pool_info['config_file'] = ( self.pool_info['config_file'] = (
self.configuration.cinder_emc_config_file) self.configuration.cinder_emc_config_file)
@ -564,8 +563,7 @@ class EMCVMAXCommon(object):
return modifiedVolumeDict return modifiedVolumeDict
def update_volume_stats(self): def update_volume_stats(self):
"""Retrieve stats info. """Retrieve stats info."""
"""
if self.pool_info['is_v3']: if self.pool_info['is_v3']:
location_info, total_capacity_gb, free_capacity_gb = ( location_info, total_capacity_gb, free_capacity_gb = (

View File

@ -349,9 +349,7 @@ class EMCVMAXISCSIDriver(driver.ISCSIDriver):
return self.common.manage_existing_get_size(volume, external_ref) return self.common.manage_existing_get_size(volume, external_ref)
def unmanage(self, volume): def unmanage(self, volume):
"""Export VMAX volume from Cinder, leave the volume intact on the """Export VMAX volume and leave volume intact on the backend array."""
backend array.
"""
return self.common.unmanage(volume) return self.common.unmanage(volume)
def update_consistencygroup(self, context, group, def update_consistencygroup(self, context, group,

View File

@ -69,8 +69,7 @@ class EMCVMAXUtils(object):
self.protocol = prtcl self.protocol = prtcl
def find_storage_configuration_service(self, conn, storageSystemName): def find_storage_configuration_service(self, conn, storageSystemName):
"""Given the storage system name, get the storage configuration """Get storage configuration service with given storage system name.
service.
:param conn: connection to the ecom server :param conn: connection to the ecom server
:param storageSystemName: the storage system name :param storageSystemName: the storage system name
@ -2099,7 +2098,9 @@ class EMCVMAXUtils(object):
def get_associated_replication_from_source_volume( def get_associated_replication_from_source_volume(
self, conn, storageSystem, sourceDeviceId): self, conn, storageSystem, sourceDeviceId):
"""Given the source volume device ID, find associated replication """Get associated replication from source volume.
Given the source volume device ID, find associated replication
storage synchronized instance names. storage synchronized instance names.
:param conn: connection to the ecom server :param conn: connection to the ecom server

View File

@ -1697,7 +1697,8 @@ class CommandLineHelper(object):
return out, rc return out, rc
def _toggle_sp(self): def _toggle_sp(self):
"""This function toggles the storage IP """Toggle the storage IP.
Address between primary IP and secondary IP, if no SP IP address has Address between primary IP and secondary IP, if no SP IP address has
exchanged, return False, otherwise True will be returned. exchanged, return False, otherwise True will be returned.
""" """
@ -1715,9 +1716,7 @@ class CommandLineHelper(object):
return True return True
def get_enablers_on_array(self, poll=False): def get_enablers_on_array(self, poll=False):
"""The function would get all the enabler installed """The function would get all the enablers installed on array."""
on array.
"""
enablers = [] enablers = []
cmd_list = ('ndu', '-list') cmd_list = ('ndu', '-list')
out, rc = self.command_execute(*cmd_list, poll=poll) out, rc = self.command_execute(*cmd_list, poll=poll)
@ -1733,9 +1732,7 @@ class CommandLineHelper(object):
return enablers return enablers
def enable_or_disable_compression_on_lun(self, volumename, compression): def enable_or_disable_compression_on_lun(self, volumename, compression):
"""The function will enable or disable the compression """The function will enable or disable the compression on lun."""
on lun
"""
lun_data = self.get_lun_by_name(volumename) lun_data = self.get_lun_by_name(volumename)
command_compression_cmd = ('compression', '-' + compression, command_compression_cmd = ('compression', '-' + compression,

View File

@ -431,6 +431,7 @@ class XtremIOVolumeDriver(san.SanDriver):
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
"""Get volume stats. """Get volume stats.
If 'refresh' is True, run update the stats first. If 'refresh' is True, run update the stats first.
""" """
if refresh: if refresh:
@ -765,7 +766,8 @@ class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
} }
def _get_iscsi_properties(self, lunmap): def _get_iscsi_properties(self, lunmap):
"""Gets iscsi configuration """Gets iscsi configuration.
:target_discovered: boolean indicating whether discovery was used :target_discovered: boolean indicating whether discovery was used
:target_iqn: the IQN of the iSCSI target :target_iqn: the IQN of the iSCSI target
:target_portal: the portal of the iSCSI target :target_portal: the portal of the iSCSI target

View File

@ -56,8 +56,9 @@ CONF.register_opts(volume_opts)
class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD, class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
driver.ExtendVD): driver.ExtendVD):
"""Gluster based cinder driver. Creates file on Gluster share for using it """Gluster based cinder driver.
as block device on hypervisor.
Creates file on Gluster share for using it as block device on hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains per-process basis to prevent multiple threads from modifying qcow2 chains
@ -376,6 +377,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
def _ensure_share_mounted(self, glusterfs_share): def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share. """Mount GlusterFS share.
:param glusterfs_share: string :param glusterfs_share: string
""" """
mount_path = self._get_mount_point_for_share(glusterfs_share) mount_path = self._get_mount_point_for_share(glusterfs_share)
@ -398,6 +400,7 @@ class GlusterfsDriver(remotefs_drv.RemoteFSSnapDriver, driver.CloneableVD,
def _find_share(self, volume_size_for): def _find_share(self, volume_size_for):
"""Choose GlusterFS share among available ones for given volume size. """Choose GlusterFS share among available ones for given volume size.
Current implementation looks for greatest capacity. Current implementation looks for greatest capacity.
:param volume_size_for: int size in GB :param volume_size_for: int size in GB
""" """

View File

@ -605,6 +605,7 @@ class HnasBackend(object):
def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret): def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret):
"""Sets the chap secret for the specified target. """Sets the chap secret for the specified target.
:param ip0: string IP address of controller :param ip0: string IP address of controller
:param user: string user authentication for array :param user: string user authentication for array
:param pw: string password authentication for array :param pw: string password authentication for array
@ -637,6 +638,7 @@ class HnasBackend(object):
def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp): def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp):
"""Returns the chap secret for the specified target. """Returns the chap secret for the specified target.
:param ip0: string IP address of controller :param ip0: string IP address of controller
:param user: string user authentication for array :param user: string user authentication for array
:param pw: string password authentication for array :param pw: string password authentication for array

View File

@ -211,9 +211,10 @@ class HDSISCSIDriver(driver.ISCSIDriver):
return conf return conf
def _get_service(self, volume): def _get_service(self, volume):
"""Get the available service parameters for a given volume using """Get available service parameters.
its type.
:param volume: dictionary volume reference Get the available service parameters for a given volume using its type.
:param volume: dictionary volume reference
""" """
label = utils.extract_host(volume['host'], level='pool') label = utils.extract_host(volume['host'], level='pool')
@ -360,7 +361,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def _id_to_vol(self, volume_id): def _id_to_vol(self, volume_id):
"""Given the volume id, retrieve the volume object from database. """Given the volume id, retrieve the volume object from database.
:param volume_id: volume id string
:param volume_id: volume id string
""" """
vol = self.db.volume_get(self.context, volume_id) vol = self.db.volume_get(self.context, volume_id)
@ -369,8 +371,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def _update_vol_location(self, volume_id, loc): def _update_vol_location(self, volume_id, loc):
"""Update the provider location. """Update the provider location.
:param volume_id: volume id string
:param loc: string provider location value :param volume_id: volume id string
:param loc: string provider location value
""" """
update = {'provider_location': loc} update = {'provider_location': loc}
@ -421,8 +424,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def create_export(self, context, volume): def create_export(self, context, volume):
"""Create an export. Moved to initialize_connection. """Create an export. Moved to initialize_connection.
:param context:
:param volume: volume reference :param context:
:param volume: volume reference
""" """
name = volume['name'] name = volume['name']
@ -432,8 +436,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def remove_export(self, context, volume): def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance. """Disconnect a volume from an attached instance.
:param context: context
:param volume: dictionary volume reference :param context: context
:param volume: dictionary volume reference
""" """
provider = volume['provider_location'] provider = volume['provider_location']
@ -445,7 +450,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def create_volume(self, volume): def create_volume(self, volume):
"""Create a LU on HNAS. """Create a LU on HNAS.
:param volume: ditctionary volume reference
:param volume: dictionary volume reference
""" """
service = self._get_service(volume) service = self._get_service(volume)
@ -470,8 +476,9 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def create_cloned_volume(self, dst, src): def create_cloned_volume(self, dst, src):
"""Create a clone of a volume. """Create a clone of a volume.
:param dst: ditctionary destination volume reference
:param src: ditctionary source volume reference :param dst: ditctionary destination volume reference
:param src: ditctionary source volume reference
""" """
if src['size'] != dst['size']: if src['size'] != dst['size']:
@ -519,6 +526,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def delete_volume(self, volume): def delete_volume(self, volume):
"""Delete an LU on HNAS. """Delete an LU on HNAS.
:param volume: dictionary volume reference :param volume: dictionary volume reference
""" """
@ -653,6 +661,7 @@ class HDSISCSIDriver(driver.ISCSIDriver):
def create_snapshot(self, snapshot): def create_snapshot(self, snapshot):
"""Create a snapshot. """Create a snapshot.
:param snapshot: dictionary snapshot reference :param snapshot: dictionary snapshot reference
""" """

View File

@ -146,6 +146,7 @@ def factory_bend(drv_config):
class HDSNFSDriver(nfs.NfsDriver): class HDSNFSDriver(nfs.NfsDriver):
"""Base class for Hitachi NFS driver. """Base class for Hitachi NFS driver.
Executes commands relating to Volumes. Executes commands relating to Volumes.
Version 1.0.0: Initial driver version Version 1.0.0: Initial driver version
@ -190,8 +191,10 @@ class HDSNFSDriver(nfs.NfsDriver):
return vol return vol
def _get_service(self, volume): def _get_service(self, volume):
"""Get the available service parameters for a given volume using """Get service parameters.
its type.
Get the available service parameters for a given volume using
its type.
:param volume: dictionary volume reference :param volume: dictionary volume reference
""" """
@ -373,8 +376,7 @@ class HDSNFSDriver(nfs.NfsDriver):
time.sleep(tries ** 2) time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name): def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given volume name on given nfs """Get volume path (local fs path) for given name on given nfs share.
share.
:param nfs_share string, example 172.18.194.100:/var/nfs :param nfs_share string, example 172.18.194.100:/var/nfs
:param volume_name string, :param volume_name string,

View File

@ -375,8 +375,8 @@ class FlashSystemDriver(san.SanDriver):
def _get_hdr_dic(self, header, row, delim): def _get_hdr_dic(self, header, row, delim):
"""Return CLI row data as a dictionary indexed by names from header. """Return CLI row data as a dictionary indexed by names from header.
string. The strings are converted to columns using the delimiter in
delim. The strings are converted to columns using the delimiter in delim.
""" """
attributes = header.split(delim) attributes = header.split(delim)

View File

@ -345,8 +345,7 @@ class StorwizeSVCDriver(san.SanDriver,
@fczm_utils.AddFCZone @fczm_utils.AddFCZone
@utils.synchronized('storwize-host', external=True) @utils.synchronized('storwize-host', external=True)
def initialize_connection(self, volume, connector): def initialize_connection(self, volume, connector):
"""Perform the necessary work so that an iSCSI/FC connection can """Perform necessary work to make an iSCSI/FC connection.
be made.
To be able to create an iSCSI/FC connection from a given host to a To be able to create an iSCSI/FC connection from a given host to a
volume, we must: volume, we must:

View File

@ -277,11 +277,12 @@ class NetAppNfsDriver(nfs.NfsDriver):
time.sleep(tries ** 2) time.sleep(tries ** 2)
def _get_volume_path(self, nfs_share, volume_name): def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given volume name on given nfs """Get volume path.
share.
@param nfs_share string, example 172.18.194.100:/var/nfs Get volume path (local fs path) for given volume name on given nfs
@param volume_name string, share.
:param nfs_share: string, example 172.18.194.100:/var/nfs
:param volume_name: string,
example volume-91ee65ec-c473-4391-8c09-162b00c68a8c example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
""" """

View File

@ -74,8 +74,9 @@ CONF.register_opts(nfs_opts)
class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver): class NfsDriver(driver.ExtendVD, remotefs.RemoteFSDriver):
"""NFS based cinder driver. Creates file on NFS share for using it """NFS based cinder driver.
as block device on hypervisor.
Creates file on NFS share for using it as block device on hypervisor.
""" """
driver_volume_type = 'nfs' driver_volume_type = 'nfs'

View File

@ -191,8 +191,9 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
return ''.join(random.sample(char_set, length)) return ''.join(random.sample(char_set, length))
def _clone_volume_from_snapshot(self, volume, snapshot): def _clone_volume_from_snapshot(self, volume, snapshot):
"""Clonevolume from snapshot. Extend the volume if the """Clone volume from snapshot.
size of the volume is more than the snapshot
Extend the volume if the size of the volume is more than the snapshot.
""" """
reserve = not self.configuration.san_thin_provision reserve = not self.configuration.san_thin_provision
self.APIExecutor.clone_vol(volume, snapshot, reserve) self.APIExecutor.clone_vol(volume, snapshot, reserve)
@ -371,9 +372,7 @@ class NimbleISCSIDriver(san.SanISCSIDriver):
def _response_checker(func): def _response_checker(func):
"""Decorator function to check if the response """Decorator function to check if the response of an API is positive."""
of an API is positive
"""
@functools.wraps(func) @functools.wraps(func)
def inner_response_checker(self, *args, **kwargs): def inner_response_checker(self, *args, **kwargs):
response = func(self, *args, **kwargs) response = func(self, *args, **kwargs)
@ -391,9 +390,7 @@ def _response_checker(func):
def _connection_checker(func): def _connection_checker(func):
"""Decorator to re-establish and """Decorator to re-establish and re-run the api if session has expired."""
re-run the api if session has expired.
"""
@functools.wraps(func) @functools.wraps(func)
def inner_connection_checker(self, *args, **kwargs): def inner_connection_checker(self, *args, **kwargs):
for attempts in range(2): for attempts in range(2):

View File

@ -32,14 +32,15 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
super(DPLFCDriver, self).__init__(*args, **kwargs) super(DPLFCDriver, self).__init__(*args, **kwargs)
def _get_fc_channel(self): def _get_fc_channel(self):
"""return : """Get FibreChannel info.
fcInfos[uuid]
fcInfo[uuid]['display_name'] :returns: fcInfos[uuid]
fcInfo[uuid]['display_description'] fcInfo[uuid]['display_name']
fcInfo[uuid]['hardware_address'] fcInfo[uuid]['display_description']
fcInfo[uuid]['type'] fcInfo[uuid]['hardware_address']
fcInfo[uuid]['speed'] fcInfo[uuid]['type']
fcInfo[uuid]['state'] fcInfo[uuid]['speed']
fcInfo[uuid]['state']
""" """
output = None output = None
fcInfos = {} fcInfos = {}
@ -60,11 +61,12 @@ class DPLFCDriver(dplcommon.DPLCOMMONDriver,
return fcInfos return fcInfos
def _get_targets(self): def _get_targets(self):
"""return:: """Get targets.
targetInfos[uuid] = targetInfo
targetInfo['targetUuid'] :returns: targetInfos[uuid] = targetInfo
targetInfo['targetName'] targetInfo['targetUuid']
targetInfo['targetAddr'] targetInfo['targetName']
targetInfo['targetAddr']
""" """
output = None output = None
targetInfos = {} targetInfos = {}

View File

@ -1432,8 +1432,9 @@ class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
return pools return pools
def _update_volume_stats(self, refresh=False): def _update_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is """Return the current state of the volume service.
True, run the update first.
If 'refresh' is True, run the update first.
""" """
data = {} data = {}
pools = self._get_pools() pools = self._get_pools()

View File

@ -421,8 +421,7 @@ class RBDDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD,
return self._stats return self._stats
def _get_clone_depth(self, client, volume_name, depth=0): def _get_clone_depth(self, client, volume_name, depth=0):
"""Returns the number of ancestral clones (if any) of the given volume. """Returns the number of ancestral clones of the given volume."""
"""
parent_volume = self.rbd.Image(client.ioctx, volume_name) parent_volume = self.rbd.Image(client.ioctx, volume_name)
try: try:
_pool, parent, _snap = self._get_clone_info(parent_volume, _pool, parent, _snap = self._get_clone_info(parent_volume,

View File

@ -242,9 +242,7 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
self._set_rw_permissions(volume_path) self._set_rw_permissions(volume_path)
def _ensure_shares_mounted(self): def _ensure_shares_mounted(self):
"""Look for remote shares in the flags and tries to mount them """Look for remote shares in the flags and mount them locally."""
locally.
"""
mounted_shares = [] mounted_shares = []
self._load_shares_config(getattr(self.configuration, self._load_shares_config(getattr(self.configuration,
@ -296,8 +294,10 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
pass pass
def delete_snapshot(self, snapshot): def delete_snapshot(self, snapshot):
"""Do nothing for this driver, but allow manager to handle deletion """Delete snapshot.
of snapshot in error state.
Do nothing for this driver, but allow manager to handle deletion
of snapshot in error state.
""" """
pass pass
@ -363,7 +363,8 @@ class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
run_as_root=self._execute_as_root) run_as_root=self._execute_as_root)
def local_path(self, volume): def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume """Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference :param volume: volume reference
""" """
remotefs_share = volume['provider_location'] remotefs_share = volume['provider_location']
@ -746,19 +747,22 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
return output return output
def _get_hash_str(self, base_str): def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str """Return a string that represents hash of base_str.
(in a hex format).
Returns string in a hex format.
""" """
return hashlib.md5(base_str).hexdigest() return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share): def _get_mount_point_for_share(self, share):
"""Return mount point for share. """Return mount point for share.
:param share: example 172.18.194.100:/var/fs :param share: example 172.18.194.100:/var/fs
""" """
return self._remotefsclient.get_mount_point(share) return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share): def _get_available_capacity(self, share):
"""Calculate available space on the share. """Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs :param share: example 172.18.194.100:/var/fs
""" """
mount_point = self._get_mount_point_for_share(share) mount_point = self._get_mount_point_for_share(share)

View File

@ -70,8 +70,7 @@ CONF.register_opts(volume_opts)
class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver): class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
"""SMBFS based cinder volume driver. """SMBFS based cinder volume driver."""
"""
driver_volume_type = 'smbfs' driver_volume_type = 'smbfs'
driver_prefix = 'smbfs' driver_prefix = 'smbfs'
@ -167,6 +166,7 @@ class SmbfsDriver(remotefs_drv.RemoteFSSnapDriver):
def local_path(self, volume): def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume. """Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference :param volume: volume reference
""" """
volume_path_template = self._get_local_volume_path_template(volume) volume_path_template = self._get_local_volume_path_template(volume)

View File

@ -460,7 +460,9 @@ class SRBDriver(driver.VolumeDriver):
@staticmethod @staticmethod
def _activate_lv(orig, *args, **kwargs): def _activate_lv(orig, *args, **kwargs):
"""Use with `patched` to patch `lvm.LVM.activate_lv` to ignore `EEXIST` """Activate lv.
Use with `patched` to patch `lvm.LVM.activate_lv` to ignore `EEXIST`
""" """
try: try:
orig(*args, **kwargs) orig(*args, **kwargs)

View File

@ -53,8 +53,7 @@ LOG = logging.getLogger(__name__)
class V6000FCDriver(driver.FibreChannelDriver): class V6000FCDriver(driver.FibreChannelDriver):
"""Executes commands relating to fibre channel based Violin Memory """Executes commands relating to fibre channel based Violin Memory Arrays.
Arrays.
Version history: Version history:
1.0 - Initial driver 1.0 - Initial driver
@ -422,7 +421,9 @@ class V6000FCDriver(driver.FibreChannelDriver):
return False return False
def _update_stats(self): def _update_stats(self):
"""Gathers array stats from the backend and converts them to GB values. """Update array stats.
Gathers array stats from the backend and converts them to GB values.
""" """
data = {} data = {}
total_gb = 0 total_gb = 0

View File

@ -440,7 +440,9 @@ class V6000ISCSIDriver(driver.ISCSIDriver):
_('Failed to add igroup member: %(code)d, %(message)s') % resp) _('Failed to add igroup member: %(code)d, %(message)s') % resp)
def _update_stats(self): def _update_stats(self):
"""Gathers array stats from the backend and converts them to GB values. """Update array stats.
Gathers array stats from the backend and converts them to GB values.
""" """
data = {} data = {}
total_gb = 0 total_gb = 0

View File

@ -402,7 +402,9 @@ class VHDUtils(object):
return vhd_info return vhd_info
def get_vhd_size(self, vhd_path): def get_vhd_size(self, vhd_path):
"""Returns a dict containing the virtual size, physical size, """Return vhd size.
Returns a dict containing the virtual size, physical size,
block size and sector size of the vhd. block size and sector size of the vhd.
""" """
size = self.get_vhd_info(vhd_path, size = self.get_vhd_info(vhd_path,

View File

@ -145,8 +145,7 @@ class WindowsDriver(driver.ISCSIDriver):
return {'provider_location': target_name} return {'provider_location': target_name}
def remove_export(self, context, volume): def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume. """Driver entry point to remove an export for a volume."""
"""
target_name = "%s%s" % (self.configuration.iscsi_target_prefix, target_name = "%s%s" % (self.configuration.iscsi_target_prefix,
volume['name']) volume['name'])

View File

@ -49,6 +49,7 @@ class WindowsUtils(object):
def check_for_setup_error(self): def check_for_setup_error(self):
"""Check that the driver is working and can communicate. """Check that the driver is working and can communicate.
Invokes the portal and checks that is listening ISCSI traffic. Invokes the portal and checks that is listening ISCSI traffic.
""" """
try: try:

View File

@ -74,7 +74,8 @@ class Status(object):
class RestResult(object): class RestResult(object):
"""Result from a REST API operation""" """Result from a REST API operation"""
def __init__(self, response=None, err=None): def __init__(self, response=None, err=None):
"""Initialize a RestResult containing the results from a REST call """Initialize a RestResult containing the results from a REST call.
:param response: HTTP response :param response: HTTP response
""" """
self.response = response self.response = response

View File

@ -39,9 +39,7 @@ class ZFSSAApi(object):
self.rclient.logout() self.rclient.logout()
def _is_pool_owned(self, pdata): def _is_pool_owned(self, pdata):
"""returns True if the pool's owner is the """Returns True if the pool's owner is the same as the host."""
same as the host.
"""
svc = '/api/system/v1/version' svc = '/api/system/v1/version'
ret = self.rclient.get(svc) ret = self.rclient.get(svc)
if ret.status != restclient.Status.OK: if ret.status != restclient.Status.OK:
@ -70,8 +68,10 @@ class ZFSSAApi(object):
self.rclient.login(auth_str) self.rclient.login(auth_str)
def get_pool_stats(self, pool): def get_pool_stats(self, pool):
"""Get space available and total properties of a pool """Get pool stats.
returns (avail, total).
Get space available and total properties of a pool
returns (avail, total).
""" """
svc = '/api/storage/v1/pools/' + pool svc = '/api/storage/v1/pools/' + pool
ret = self.rclient.get(svc) ret = self.rclient.get(svc)
@ -100,8 +100,9 @@ class ZFSSAApi(object):
return avail, total return avail, total
def create_project(self, pool, project, compression=None, logbias=None): def create_project(self, pool, project, compression=None, logbias=None):
"""Create a project on a pool """Create a project on a pool.
Check first whether the pool exists.
Check first whether the pool exists.
""" """
self.verify_pool(pool) self.verify_pool(pool)
svc = '/api/storage/v1/pools/' + pool + '/projects/' + project svc = '/api/storage/v1/pools/' + pool + '/projects/' + project
@ -222,9 +223,10 @@ class ZFSSAApi(object):
def create_target(self, alias, interfaces=None, tchapuser=None, def create_target(self, alias, interfaces=None, tchapuser=None,
tchapsecret=None): tchapsecret=None):
"""Create an iSCSI target. """Create an iSCSI target.
interfaces: an array with network interfaces
tchapuser, tchapsecret: target's chapuser and chapsecret :param interfaces: an array with network interfaces
returns target iqn :param tchapuser, tchapsecret: target's chapuser and chapsecret
:returns: target iqn
""" """
svc = '/api/san/v1/iscsi/targets/alias=' + alias svc = '/api/san/v1/iscsi/targets/alias=' + alias
ret = self.rclient.get(svc) ret = self.rclient.get(svc)
@ -384,9 +386,9 @@ class ZFSSAApi(object):
raise exception.VolumeBackendAPIException(data=exception_msg) raise exception.VolumeBackendAPIException(data=exception_msg)
def create_lun(self, pool, project, lun, volsize, targetgroup, specs): def create_lun(self, pool, project, lun, volsize, targetgroup, specs):
"""Create a LUN. """Create a LUN.
specs - contains volume properties (e.g blocksize, compression).
specs - contains volume properties (e.g blocksize, compression).
""" """
svc = '/api/storage/v1/pools/' + pool + '/projects/' + \ svc = '/api/storage/v1/pools/' + pool + '/projects/' + \
project + '/luns' project + '/luns'

View File

@ -32,6 +32,7 @@ LOG = logging.getLogger(__name__)
class CxtAdm(iscsi.ISCSITarget): class CxtAdm(iscsi.ISCSITarget):
"""Chiscsi target configuration for block storage devices. """Chiscsi target configuration for block storage devices.
This includes things like create targets, attach, detach This includes things like create targets, attach, detach
etc. etc.
""" """

View File

@ -47,7 +47,9 @@ class Throttle(object):
@contextlib.contextmanager @contextlib.contextmanager
def subcommand(self, srcpath, dstpath): def subcommand(self, srcpath, dstpath):
"""Throttle disk I/O bandwidth used by a sub-command, such as 'dd', """Sub-command that reads from srcpath and writes to dstpath.
Throttle disk I/O bandwidth used by a sub-command, such as 'dd',
that reads from srcpath and writes to dstpath. The sub-command that reads from srcpath and writes to dstpath. The sub-command
must be executed with the generated prefix command. must be executed with the generated prefix command.
""" """

View File

@ -144,7 +144,7 @@ commands = python {toxinidir}/tools/generate_driver_list.py
# H105 Don't use author tags # H105 Don't use author tags
# #
ignore = E251,H405,H105 ignore = E251,H105
exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build
max-complexity=30 max-complexity=30