Import only modules: H302

The H302 PEP8 check should be enabled to make the code cleaner, more
readable, and consistent with the OpenStack Hacking rules.

Change-Id: Ie189f2418d12800a46664705eacfc127e7269f45
Partial-Bug: #1407162
This commit is contained in:
Anton Arefiev 2015-01-08 15:20:13 +02:00
parent d39c1351cf
commit 753f83cd17
111 changed files with 1580 additions and 1386 deletions

View File

@ -54,7 +54,7 @@ from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import units
from cinder.backup.driver import BackupDriver
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
@ -154,7 +154,7 @@ class VolumeMetadataBackup(object):
LOG.debug(msg)
class CephBackupDriver(BackupDriver):
class CephBackupDriver(driver.BackupDriver):
"""Backup Cinder volumes to Ceph Object Store.
This class enables backing up Cinder volumes to a Ceph object store.

View File

@ -44,7 +44,7 @@ from oslo_utils import units
import six
from swiftclient import client as swift
from cinder.backup.driver import BackupDriver
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
@ -108,7 +108,7 @@ CONF = cfg.CONF
CONF.register_opts(swiftbackup_service_opts)
class SwiftBackupDriver(BackupDriver):
class SwiftBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of backup objects within Swift."""
DRIVER_VERSION = '1.0.0'

View File

@ -32,7 +32,7 @@ import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from cinder.backup.driver import BackupDriver
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _LE, _
from cinder.openstack.common import log as logging
@ -260,7 +260,7 @@ def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
LOG.error(err)
class TSMBackupDriver(BackupDriver):
class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'

View File

@ -34,7 +34,7 @@
from __future__ import print_function
from datetime import datetime
import datetime
import sys
import traceback
import warnings
@ -84,9 +84,11 @@ def main():
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
if CONF.start_time:
begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S")
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,

View File

@ -19,8 +19,7 @@
"""Implementation of SQLAlchemy backend."""
from datetime import datetime
from datetime import timedelta
import datetime as dt
import functools
import sys
import threading
@ -3310,7 +3309,7 @@ def purge_deleted_rows(context, age_in_days):
LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s'), {'age': age_in_days,
'table': table})
deleted_age = datetime.now() - timedelta(days=age_in_days)
deleted_age = dt.datetime.now() - dt.timedelta(days=age_in_days)
try:
with session.begin():
result = session.execute(

View File

@ -14,7 +14,7 @@
# under the License.
from datetime import datetime
import datetime
from oslo_utils import timeutils
import webob.exc
@ -29,39 +29,39 @@ from cinder import test
from cinder.tests.api import fakes
fake_services_list = [{'binary': 'cinder-scheduler',
'host': 'host1',
'availability_zone': 'cinder',
'id': 1,
'disabled': True,
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'availability_zone': 'cinder',
'id': 2,
'disabled': True,
'updated_at': datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'availability_zone': 'cinder',
'id': 3,
'disabled': False,
'updated_at': datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 4,
'disabled': True,
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test4'},
]
fake_services_list = [
{'binary': 'cinder-scheduler',
'host': 'host1',
'availability_zone': 'cinder',
'id': 1,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'availability_zone': 'cinder',
'id': 2,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'availability_zone': 'cinder',
'id': 3,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'availability_zone': 'cinder',
'id': 4,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
'disabled_reason': 'test4'}, ]
class FakeRequest(object):
@ -130,7 +130,7 @@ def fake_policy_enforce(context, action, target):
def fake_utcnow():
return datetime(2012, 10, 29, 13, 42, 11)
return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTest(test.TestCase):
@ -157,24 +157,24 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
@ -187,27 +187,27 @@ class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
'updated_at': datetime(
'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
@ -216,18 +216,19 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10,
29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host(self):
@ -236,40 +237,42 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
response = {'services': [
{'binary': 'cinder-scheduler',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10,
29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38)}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_service(self):
@ -278,42 +281,44 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38)}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_binary(self):
@ -322,35 +327,37 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18,
8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_service(self):
@ -359,27 +366,29 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5)}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_binary(self):
@ -388,14 +397,15 @@ class ServicesTest(test.TestCase):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
response = {'services': [
{'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29,
13, 42, 5),
'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_enable_with_service_key(self):

View File

@ -24,7 +24,7 @@ import webob
from cinder.api.contrib import volume_actions
from cinder import exception
from cinder.image.glance import GlanceImageService
from cinder.image import glance
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
@ -672,7 +672,7 @@ class VolumeImageActionsTest(test.TestCase):
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata
with mock.patch.object(GlanceImageService, "create") \
with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
@ -728,7 +728,7 @@ class VolumeImageActionsTest(test.TestCase):
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata_raise
with mock.patch.object(GlanceImageService, "create") \
with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
@ -780,7 +780,7 @@ class VolumeImageActionsTest(test.TestCase):
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata
with mock.patch.object(GlanceImageService, "create") \
with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
@ -824,7 +824,7 @@ class VolumeImageActionsTest(test.TestCase):
id = 1
# Need to mock create, update, copy_volume_to_image
with mock.patch.object(GlanceImageService, "create") \
with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create

View File

@ -20,8 +20,7 @@ from xml.dom import minidom
import webob
from cinder.api import common
from cinder.api.openstack.wsgi import MetadataXMLDeserializer
from cinder.api.openstack.wsgi import XMLDeserializer
from cinder.api.openstack import wsgi
from cinder import db
from cinder import test
from cinder.tests.api import fakes
@ -115,15 +114,15 @@ class VolumeImageMetadataXMLTest(VolumeImageMetadataTest):
content_type = 'application/xml'
def _get_image_metadata(self, body):
deserializer = XMLDeserializer()
deserializer = wsgi.XMLDeserializer()
volume = deserializer.find_first_child_named(
minidom.parseString(body), 'volume')
image_metadata = deserializer.find_first_child_named(
volume, 'volume_image_metadata')
return MetadataXMLDeserializer().extract_metadata(image_metadata)
return wsgi.MetadataXMLDeserializer().extract_metadata(image_metadata)
def _get_image_metadata_list(self, body):
deserializer = XMLDeserializer()
deserializer = wsgi.XMLDeserializer()
volumes = deserializer.find_first_child_named(
minidom.parseString(body), 'volumes')
volume_list = deserializer.find_children_named(volumes, 'volume')
@ -132,5 +131,5 @@ class VolumeImageMetadataXMLTest(VolumeImageMetadataTest):
volume, 'volume_image_metadata'
)
for volume in volume_list]
return map(MetadataXMLDeserializer().extract_metadata,
return map(wsgi.MetadataXMLDeserializer().extract_metadata,
image_metadata_list)

View File

@ -29,7 +29,7 @@ from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
from cinder.transfer import API
from cinder import transfer
import cinder.volume
@ -41,7 +41,7 @@ class VolumeTransferAPITestCase(test.TestCase):
def setUp(self):
super(VolumeTransferAPITestCase, self).setUp()
self.volume_transfer_api = API()
self.volume_transfer_api = transfer.API()
self.controller = volume_transfer.VolumeTransferController()
def _create_transfer(self, volume_id=1,

View File

@ -13,13 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
from cinder.backup.driver import BackupDriver
from cinder.backup import driver
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class FakeBackupService(BackupDriver):
class FakeBackupService(driver.BackupDriver):
def __init__(self, context, db_driver=None):
super(FakeBackupService, self).__init__(context, db_driver)

View File

@ -15,8 +15,7 @@
"""Tests for db purge."""
from datetime import datetime
from datetime import timedelta
import datetime
import uuid
from cinder import context
@ -55,8 +54,8 @@ class PurgeDeletedTest(test.TestCase):
ins_stmt = self.vm.insert().values(volume_id=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
old = datetime.now() - timedelta(days=20)
older = datetime.now() - timedelta(days=60)
old = datetime.datetime.now() - datetime.timedelta(days=20)
older = datetime.datetime.now() - datetime.timedelta(days=60)
make_old = self.volumes.update().\
where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
.values(deleted_at=old)

View File

@ -14,7 +14,7 @@
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.tests.brick import fake_lvm
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.zonemanager import utils as fczm_utils
@ -28,9 +28,9 @@ class FakeISCSIDriver(lvm.LVMISCSIDriver):
def __init__(self, *args, **kwargs):
super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
*args, **kwargs)
self.vg = FakeBrickLVM('cinder-volumes', False,
None, 'default',
self.fake_execute)
self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
None, 'default',
self.fake_execute)
def check_for_setup_error(self):
"""No setup necessary in fake mode."""

View File

@ -21,8 +21,8 @@ import mock
from oslo_config import cfg
from cinder import context
from cinder.openstack.common.scheduler.weights import HostWeightHandler
from cinder.scheduler.weights.capacity import AllocatedCapacityWeigher as ACW
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
@ -34,13 +34,15 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(AllocatedCapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
self.weight_handler = weights.HostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {}
return self.weight_handler.get_weighed_objects([ACW], hosts,
weight_properties)[0]
return self.weight_handler.get_weighed_objects(
[capacity.AllocatedCapacityWeigher], hosts,
weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):

View File

@ -20,8 +20,8 @@ import mock
from oslo_config import cfg
from cinder import context
from cinder.openstack.common.scheduler.weights import HostWeightHandler
from cinder.scheduler.weights.capacity import CapacityWeigher
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
@ -33,14 +33,16 @@ class CapacityWeigherTestCase(test.TestCase):
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
self.weight_handler = weights.HostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
return self.weight_handler.get_weighed_objects([CapacityWeigher],
hosts,
weight_properties)[0]
return self.weight_handler.get_weighed_objects(
[capacity.CapacityWeigher],
hosts,
weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):

View File

@ -18,7 +18,7 @@ Tests For Chance Weigher.
import mock
from cinder.scheduler import host_manager
from cinder.scheduler.weights.chance import ChanceWeigher
from cinder.scheduler.weights import chance
from cinder import test
@ -38,7 +38,7 @@ class ChanceWeigherTestCase(test.TestCase):
# stub random.random() to verify the ChanceWeigher
# is using random.random() (repeated calls to weigh should
# return incrementing weights)
weigher = ChanceWeigher()
weigher = chance.ChanceWeigher()
_mock_random.side_effect = self.fake_random
self.fake_random(reset=True)
host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999}
@ -55,7 +55,7 @@ class ChanceWeigherTestCase(test.TestCase):
hm = host_manager.HostManager()
weighers = hm._choose_host_weighers('ChanceWeigher')
self.assertEqual(1, len(weighers))
self.assertEqual(weighers[0], ChanceWeigher)
self.assertEqual(weighers[0], chance.ChanceWeigher)
def test_use_of_chance_weigher_via_host_manager(self):
# ensure we don't lose any hosts when weighing with

View File

@ -15,7 +15,7 @@
Tests For Goodness Weigher.
"""
from cinder.scheduler.weights.goodness import GoodnessWeigher
from cinder.scheduler.weights import goodness
from cinder import test
from cinder.tests.scheduler import fakes
@ -25,7 +25,7 @@ class GoodnessWeigherTestCase(test.TestCase):
super(GoodnessWeigherTestCase, self).setUp()
def test_goodness_weigher_passing_host(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -54,7 +54,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(50, weight)
def test_goodness_weigher_capabilities_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -68,7 +68,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(60, weight)
def test_goodness_weigher_extra_specs_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -87,7 +87,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(60, weight)
def test_goodness_weigher_volume_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -106,7 +106,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(60, weight)
def test_goodness_weigher_qos_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -123,7 +123,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(60, weight)
def test_goodness_weigher_stats_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -137,7 +137,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(100, weight)
def test_goodness_weigher_invalid_substitution(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -151,7 +151,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(0, weight)
def test_goodness_weigher_host_rating_out_of_bounds(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
@ -172,7 +172,7 @@ class GoodnessWeigherTestCase(test.TestCase):
self.assertEqual(0, weight)
def test_goodness_weigher_invalid_goodness_function(self):
weigher = GoodnessWeigher()
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {

View File

@ -21,8 +21,8 @@ from oslo_config import cfg
from cinder import context
from cinder.db.sqlalchemy import api
from cinder.openstack.common.scheduler.weights import HostWeightHandler
from cinder.scheduler.weights.volume_number import VolumeNumberWeigher
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.weights import volume_number
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
@ -51,14 +51,16 @@ class VolumeNumberWeigherTestCase(test.TestCase):
super(VolumeNumberWeigherTestCase, self).setUp()
self.context = context.get_admin_context()
self.host_manager = fakes.FakeHostManager()
self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
self.weight_handler = weights.HostWeightHandler(
'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'context': self.context}
return self.weight_handler.get_weighed_objects([VolumeNumberWeigher],
hosts,
weight_properties)[0]
return self.weight_handler.get_weighed_objects(
[volume_number.VolumeNumberWeigher],
hosts,
weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):

View File

@ -30,8 +30,7 @@ from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup.fake_service_with_verify import\
get_backup_driver
from cinder.tests.backup import fake_service_with_verify as fake_service
CONF = cfg.CONF
@ -646,7 +645,7 @@ class BackupTestCaseWithVerify(BaseBackupTest):
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
get_backup_driver(self.ctxt)
fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup_id,
'available')

View File

@ -27,14 +27,14 @@ import mock
from oslo_config import cfg
from swiftclient import client as swift
from cinder.backup.drivers.swift import SwiftBackupDriver
from cinder.backup.drivers import swift as swift_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.backup.fake_swift_client import FakeSwiftClient
from cinder.tests.backup import fake_swift_client
LOG = logging.getLogger(__name__)
@ -75,7 +75,8 @@ class BackupSwiftTestCase(test.TestCase):
self.ctxt = context.get_admin_context()
self.ctxt.service_catalog = service_catalog
self.stubs.Set(swift, 'Connection', FakeSwiftClient.Connection)
self.stubs.Set(swift, 'Connection',
fake_swift_client.FakeSwiftClient.Connection)
self.stubs.Set(hashlib, 'md5', fake_md5)
self._create_volume_db_entry()
@ -91,7 +92,7 @@ class BackupSwiftTestCase(test.TestCase):
u'adminURL': u'http://example.com'}]
}]
self.assertRaises(exception.BackupDriverException,
SwiftBackupDriver,
swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_swift_url_conf(self):
@ -102,7 +103,7 @@ class BackupSwiftTestCase(test.TestCase):
}]
self.ctxt.project_id = "12345678"
self.override_config("backup_swift_url", "http://public.example.com/")
backup = SwiftBackupDriver(self.ctxt)
backup = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertEqual("%s%s" % (CONF.backup_swift_url,
self.ctxt.project_id),
backup.swift_url)
@ -110,13 +111,13 @@ class BackupSwiftTestCase(test.TestCase):
def test_backup_swift_info(self):
self.override_config("swift_catalog_info", "dummy")
self.assertRaises(exception.BackupDriverException,
SwiftBackupDriver,
swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_uncompressed(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -124,7 +125,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_backup_bz2(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='bz2')
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -132,14 +133,14 @@ class BackupSwiftTestCase(test.TestCase):
def test_backup_zlib(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='zlib')
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
self._create_backup_db_entry(container=None)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -157,7 +158,7 @@ class BackupSwiftTestCase(test.TestCase):
# the _send_progress method will be called for sure.
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_swift_enable_progress_timer", False)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -169,7 +170,7 @@ class BackupSwiftTestCase(test.TestCase):
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -182,7 +183,7 @@ class BackupSwiftTestCase(test.TestCase):
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_swift_enable_progress_timer", True)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -192,7 +193,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_backup_custom_container(self):
container_name = 'fake99'
self._create_backup_db_entry(container=container_name)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
@ -202,7 +203,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_create_backup_put_object_wraps_socket_error(self):
container_name = 'socket_error_on_put'
self._create_backup_db_entry(container=container_name)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
@ -218,7 +219,7 @@ class BackupSwiftTestCase(test.TestCase):
"""
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
@ -226,7 +227,7 @@ class BackupSwiftTestCase(test.TestCase):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.stubs.Set(SwiftBackupDriver, '_backup_metadata',
self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata',
fake_backup_metadata)
# We expect that an exception be notified directly.
@ -243,7 +244,7 @@ class BackupSwiftTestCase(test.TestCase):
"""
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
@ -251,14 +252,14 @@ class BackupSwiftTestCase(test.TestCase):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.stubs.Set(SwiftBackupDriver, '_backup_metadata',
self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata',
fake_backup_metadata)
def fake_delete(self, backup):
raise exception.BackupOperationError()
# Raise a pseudo exception.BackupOperationError.
self.stubs.Set(SwiftBackupDriver, 'delete', fake_delete)
self.stubs.Set(swift_dr.SwiftBackupDriver, 'delete', fake_delete)
# We expect that the second exception is notified.
self.assertRaises(exception.BackupOperationError,
@ -267,7 +268,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_restore(self):
self._create_backup_db_entry()
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
@ -276,7 +277,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_restore_wraps_socket_error(self):
container_name = 'socket_error_on_get'
self._create_backup_db_entry(container=container_name)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
@ -287,7 +288,7 @@ class BackupSwiftTestCase(test.TestCase):
def test_restore_unsupported_version(self):
container_name = 'unsupported_version'
self._create_backup_db_entry(container=container_name)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
@ -297,21 +298,21 @@ class BackupSwiftTestCase(test.TestCase):
def test_delete(self):
self._create_backup_db_entry()
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123)
service.delete(backup)
def test_delete_wraps_socket_error(self):
container_name = 'socket_error_on_delete'
self._create_backup_db_entry(container=container_name)
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
service.delete,
backup)
def test_get_compressor(self):
service = SwiftBackupDriver(self.ctxt)
service = swift_dr.SwiftBackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')

View File

@ -21,7 +21,7 @@ from cinder.db.sqlalchemy import api
import cinder.exception
import cinder.test
from cinder.volume import configuration as conf
from cinder.volume.drivers.block_device import BlockDeviceDriver
from cinder.volume.drivers import block_device
from cinder.volume import utils as volutils
@ -35,8 +35,9 @@ class TestBlockDeviceDriver(cinder.test.TestCase):
self.host = 'localhost'
self.configuration.iscsi_port = 3260
self.configuration.volume_dd_blocksize = 1234
self.drv = BlockDeviceDriver(configuration=self.configuration,
host='localhost')
self.drv = block_device.BlockDeviceDriver(
configuration=self.configuration,
host='localhost')
def test_initialize_connection(self):
TEST_VOLUME1 = {'host': 'localhost1',

View File

@ -23,12 +23,11 @@ import json
import mock
import testtools
from testtools import ExpectedException
from testtools.matchers import Contains
from testtools import matchers
from cinder import exception
from cinder.volume import configuration as conf
from cinder.volume.drivers.cloudbyte.cloudbyte import CloudByteISCSIDriver
from cinder.volume.drivers.cloudbyte import cloudbyte
# A fake list account response of cloudbyte's elasticenter
FAKE_LIST_ACCOUNT_RESPONSE = """{ "listAccountResponse" : {
@ -634,7 +633,8 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
configuration = conf.Configuration(None, None)
# initialize the elasticenter iscsi driver
self.driver = CloudByteISCSIDriver(configuration=configuration)
self.driver = cloudbyte.CloudByteISCSIDriver(
configuration=configuration)
# override some parts of driver configuration
self.driver.configuration.cb_tsm_name = 'openstack'
@ -751,7 +751,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
return volume_id
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_execute_and_get_response_details')
def test_api_request_for_cloudbyte(self, mock_conn):
@ -773,7 +773,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_conn.side_effect = self._side_effect_get_err_connection
# run the test
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to execute CloudByte API'):
@ -786,12 +786,12 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_conn.side_effect = self._side_effect_get_err_connection2
# run the test
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Error executing CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_volume(self, mock_api_req):
@ -822,7 +822,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_snapshot(self, mock_api_req):
@ -860,7 +860,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_snapshot(self, mock_api_req):
@ -899,7 +899,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_api_req.side_effect = self._side_effect_api_req
# now run the test & assert the exception
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to create snapshot'):
@ -908,7 +908,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_volume(self, mock_api_req):
@ -935,7 +935,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
'CustomerA', self.driver.configuration.cb_account_name)
self.assertThat(
provider_details['provider_location'],
Contains('172.16.50.35:3260'))
matchers.Contains('172.16.50.35:3260'))
# assert that 9 api calls were invoked
self.assertEqual(9, mock_api_req.call_count)
@ -951,7 +951,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_api_req.side_effect = self._side_effect_api_req
# now run the test & assert the exception
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Volume \[NotExists\] not found in "
@ -970,7 +970,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_api_req.side_effect = self._side_effect_api_req_to_create_vol
# now run the test & assert the exception
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Null response received while '
@ -987,7 +987,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_api_req.side_effect = self._side_effect_api_req_to_list_filesystem
# now run the test
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
@ -1006,7 +1006,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
self._side_effect_api_req_to_list_vol_iscsi_service)
# now run the test
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
@ -1024,18 +1024,18 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
self._side_effect_api_req_to_list_iscsi_initiator)
# now run the test
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
"list iscsi initiators."):
self.driver.create_volume(volume)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'create_volume_from_snapshot')
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'create_snapshot')
def test_create_cloned_volume(self, mock_create_snapshot,
mock_create_vol_from_snap, mock_api_req):
@ -1066,7 +1066,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert that n api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_volume_from_snapshot(self, mock_api_req):
@ -1098,7 +1098,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert n api calls were invoked
self.assertEqual(1, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_extend_volume(self, mock_api_req):
@ -1123,7 +1123,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert n api calls were invoked
self.assertEqual(1, mock_api_req.call_count)
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_export(self, mock_api_req):
@ -1138,7 +1138,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert the result
self.assertEqual(None, model_update['provider_auth'])
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_ensure_export(self, mock_api_req):
@ -1153,7 +1153,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
# assert the result to have a provider_auth attribute
self.assertEqual(None, model_update['provider_auth'])
@mock.patch.object(CloudByteISCSIDriver,
@mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_get_volume_stats(self, mock_api_req):
@ -1192,7 +1192,7 @@ class CloudByteISCSIDriverTestCase(testtools.TestCase):
mock_api_req.side_effect = self._side_effect_api_req_to_list_tsm
# run the test with refresh as True
with ExpectedException(
with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: No response was received from CloudByte "

View File

@ -22,7 +22,7 @@ from oslo_utils import uuidutils
from cinder import context
from cinder import db
from cinder import exception
from cinder.quota import ReservableResource
from cinder import quota
from cinder import test
@ -48,8 +48,8 @@ def _quota_reserve(context, project_id):
for i, resource in enumerate(('volumes', 'gigabytes')):
quotas[resource] = db.quota_create(context, project_id,
resource, i + 1)
resources[resource] = ReservableResource(resource,
'_sync_%s' % resource)
resources[resource] = quota.ReservableResource(resource,
'_sync_%s' % resource)
deltas[resource] = i + 1
return db.quota_reserve(
context, resources, quotas, deltas,

View File

@ -19,7 +19,7 @@ from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
import mock
from requests.models import Response
from requests import models
import uuid
@ -1096,19 +1096,19 @@ class DellSCSanAPITestCase(test.TestCase):
FLDR_PATH = 'StorageCenter/ScVolumeFolder/'
# Create a Response object that indicates OK
response_ok = Response()
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates created
response_created = Response()
response_created = models.Response()
response_created.status_code = 201
response_created.reason = u'created'
RESPONSE_201 = response_created
# Create a Response object that indicates a failure (no content)
response_nc = Response()
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
@ -3274,13 +3274,13 @@ class DellSCSanAPIConnectionTestCase(test.TestCase):
'''
# Create a Response object that indicates OK
response_ok = Response()
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates a failure (no content)
response_nc = Response()
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc

View File

@ -68,7 +68,7 @@ sys.modules['drbdmanage.exceptions'] = collections.namedtuple(
'module', ['DM_EEXIST'])
from cinder.volume.drivers.drbdmanagedrv import DrbdManageDriver
from cinder.volume.drivers import drbdmanagedrv
LOG = logging.getLogger(__name__)
@ -179,9 +179,11 @@ class DrbdManageTestCase(test.TestCase):
self.stubs.Set(importutils, 'import_object',
self.fake_import_object)
self.stubs.Set(DrbdManageDriver, 'call_or_reconnect',
self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
'call_or_reconnect',
self.fake_issue_dbus_call)
self.stubs.Set(DrbdManageDriver, 'dbus_connect',
self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
'dbus_connect',
self.fake_issue_dbus_connect)
sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_const \
@ -216,7 +218,7 @@ class DrbdManageTestCase(test.TestCase):
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "create_resource")
@ -232,7 +234,7 @@ class DrbdManageTestCase(test.TestCase):
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
@ -247,7 +249,7 @@ class DrbdManageTestCase(test.TestCase):
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
data = dmd.local_path(testvol)
self.assertTrue(data.startswith("/dev/drbd"))
@ -256,7 +258,7 @@ class DrbdManageTestCase(test.TestCase):
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111',
'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
@ -267,7 +269,7 @@ class DrbdManageTestCase(test.TestCase):
def test_delete_snapshot(self):
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_snapshots")
@ -281,7 +283,7 @@ class DrbdManageTestCase(test.TestCase):
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.extend_volume(testvol, 5)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
@ -302,7 +304,7 @@ class DrbdManageTestCase(test.TestCase):
newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_cloned_volume(newvol, srcvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")

View File

@ -17,7 +17,7 @@ import os
import shutil
import tempfile
import time
from xml.dom.minidom import Document
from xml.dom import minidom
import mock
import six
@ -601,7 +601,7 @@ class FakeEcomConnection():
result = None
if ResultClass == 'EMC_StorageHardwareID':
result = self._assoc_hdwid()
elif ResultClass == 'EMC_iSCSIProtocolEndpoint':
elif ResultClass == 'EMC_iSHEADCSIProtocolEndpoint':
result = self._assoc_endpoint()
elif ResultClass == 'EMC_StorageVolume':
result = self._assoc_storagevolume(objectpath)
@ -1421,7 +1421,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
def create_fake_config_file_no_fast(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
doc = self.add_array_info(doc, emc)
@ -1434,7 +1434,7 @@ class EMCVMAXISCSIDriverNoFastTestCase(test.TestCase):
def create_fake_config_file_no_fast_with_add_ons(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
doc = self.add_array_info(doc, emc)
@ -2527,7 +2527,7 @@ class EMCVMAXISCSIDriverFastTestCase(test.TestCase):
def create_fake_config_file_fast(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
@ -3151,7 +3151,7 @@ class EMCVMAXFCDriverNoFastTestCase(test.TestCase):
def create_fake_config_file_no_fast(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
@ -3628,7 +3628,7 @@ class EMCVMAXFCDriverFastTestCase(test.TestCase):
def create_fake_config_file_fast(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
@ -4255,7 +4255,7 @@ class EMCV3DriverTestCase(test.TestCase):
def create_fake_config_file_fast(self):
doc = Document()
doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)

View File

@ -20,14 +20,13 @@ from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.tests.utils import ZeroIntervalLoopingCall
from cinder.tests import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc.emc_cli_fc import EMCCLIFCDriver
from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver
import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
from cinder.volume.drivers.emc import emc_cli_fc
from cinder.volume.drivers.emc import emc_cli_iscsi
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import fc_san_lookup_service as fc_service
SUCCEED = ("", 0)
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
@ -873,9 +872,9 @@ class DriverTestCaseBase(test.TestCase):
def setUp(self):
super(DriverTestCaseBase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute',
self.fake_command_execute_for_driver_setup)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
'fakeSerial'}))
self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
@ -971,7 +970,7 @@ class DriverTestCaseBase(test.TestCase):
class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
def generateDriver(self, conf):
return EMCCLIISCSIDriver(configuration=conf)
return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
@mock.patch(
"eventlet.event.Event.wait",
@ -1703,7 +1702,8 @@ Time Remaining: 0 second(s)
def test_terminate_connection(self):
os.path.exists = mock.Mock(return_value=1)
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
@ -1735,7 +1735,7 @@ Time Remaining: 0 second(s)
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
@ -1748,10 +1748,9 @@ Time Remaining: 0 second(s)
fake_cli = self.driverSetup(commands, results)
# case
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_snapshot,
self.testData.test_failed_snapshot)
# verification
expect_cmd = [
mock.call(
@ -1802,7 +1801,7 @@ Time Remaining: 0 second(s)
fake_cli.assert_has_calls(expect_cmd)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
new=ZeroIntervalLoopingCall)
new=utils.ZeroIntervalLoopingCall)
def test_create_volume_from_snapshot_sync_failed(self):
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
@ -1935,7 +1934,7 @@ Time Remaining: 0 second(s)
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_failed_volume)
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
@ -1947,10 +1946,10 @@ Time Remaining: 0 second(s)
results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
self.testData.LUN_DELETE_IN_SG_ERROR(False)]
self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume1_in_sg)
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume2_in_sg)
@ -2016,7 +2015,7 @@ Time Remaining: 0 second(s)
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
@ -2025,7 +2024,7 @@ Time Remaining: 0 second(s)
fake_cli.assert_has_calls(expected)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
new=ZeroIntervalLoopingCall)
new=utils.ZeroIntervalLoopingCall)
def test_extend_volume_failed(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
@ -2052,7 +2051,8 @@ Time Remaining: 0 second(s)
results = [SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
fake_command_execute = self.get_command_execute_simulator(
@ -2074,7 +2074,8 @@ Time Remaining: 0 second(s)
results = [self.testData.LUN_PROPERTY('lun_name')]
invalid_pool_name = "fake_pool"
self.configuration.storage_vnx_pool_name = invalid_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
fake_command_execute = self.get_command_execute_simulator(
@ -2102,7 +2103,8 @@ Time Remaining: 0 second(s)
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
@ -2174,7 +2176,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2243,7 +2245,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2317,7 +2319,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2380,7 +2382,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2433,7 +2435,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
@ -2495,7 +2497,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2557,7 +2559,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
@ -2608,7 +2610,7 @@ Time Remaining: 0 second(s)
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
@ -2645,7 +2647,8 @@ Time Remaining: 0 second(s)
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
cli_helper = self.driver.cli._client
@ -2925,7 +2928,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
'volume_backend_name': 'namedbackend'})
def generateDriver(self, conf):
driver = EMCCLIISCSIDriver(configuration=conf)
driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
self.assertTrue(isinstance(driver.cli,
emc_vnx_cli.EMCVnxCliArray))
return driver
@ -3176,7 +3179,7 @@ class EMCVNXCLIDArrayBasedDriverTestCase(DriverTestCaseBase):
class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
def generateDriver(self, conf):
return EMCCLIFCDriver(configuration=conf)
return emc_cli_fc.EMCCLIFCDriver(configuration=conf)
@mock.patch(
"oslo_concurrency.processutils.execute",
@ -3277,8 +3280,8 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
('', 0),
self.testData.FC_PORTS]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
self.driver.cli.zonemanager_lookup_service =\
fc_service.FCSanLookupService(configuration=self.configuration)
conn_info = self.driver.initialize_connection(
self.testData.test_volume,
@ -3308,7 +3311,8 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_false(self):
self.driver = EMCCLIFCDriver(configuration=self.configuration)
self.driver = emc_cli_fc.EMCCLIFCDriver(
configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
@ -3317,8 +3321,8 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
cli_helper.get_storage_group = mock.Mock(
return_value=data)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
self.driver.cli.zonemanager_lookup_service =\
fc_service.FCSanLookupService(configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
@ -3333,7 +3337,8 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_true(self):
self.driver = EMCCLIFCDriver(configuration=self.configuration)
self.driver = emc_cli_fc.EMCCLIFCDriver(
configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
@ -3342,8 +3347,8 @@ class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
cli_helper.get_storage_group = mock.Mock(
return_value=data)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
self.driver.cli.zonemanager_lookup_service =\
fc_service.FCSanLookupService(configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)

View File

@ -20,8 +20,7 @@ import six
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume.drivers.emc.xtremio import XtremIOFibreChannelDriver
from cinder.volume.drivers.emc.xtremio import XtremIOISCSIDriver
from cinder.volume.drivers.emc import xtremio
LOG = logging.getLogger(__name__)
@ -210,7 +209,7 @@ class EMCXIODriverISCSITestCase(test.TestCase):
configuration.san_login = ''
configuration.san_password = ''
configuration.san_ip = ''
self.driver = XtremIOISCSIDriver(configuration=configuration)
self.driver = xtremio.XtremIOISCSIDriver(configuration=configuration)
self.data = CommonData()
@ -294,7 +293,8 @@ class EMCXIODriverFibreChannelTestCase(test.TestCase):
configuration.san_login = ''
configuration.san_password = ''
configuration.san_ip = ''
self.driver = XtremIOFibreChannelDriver(configuration=configuration)
self.driver = xtremio.XtremIOFibreChannelDriver(
configuration=configuration)
self.data = CommonData()

View File

@ -14,111 +14,118 @@
# under the License.
from cinder import exception
from cinder.scheduler.evaluator.evaluator import evaluate
from cinder.scheduler.evaluator import evaluator
from cinder import test
class EvaluatorTestCase(test.TestCase):
def test_simple_integer(self):
self.assertEqual(2, evaluate("1+1"))
self.assertEqual(9, evaluate("2+3+4"))
self.assertEqual(23, evaluate("11+12"))
self.assertEqual(30, evaluate("5*6"))
self.assertEqual(2, evaluate("22/11"))
self.assertEqual(38, evaluate("109-71"))
self.assertEqual(493, evaluate("872 - 453 + 44 / 22 * 4 + 66"))
self.assertEqual(2, evaluator.evaluate("1+1"))
self.assertEqual(9, evaluator.evaluate("2+3+4"))
self.assertEqual(23, evaluator.evaluate("11+12"))
self.assertEqual(30, evaluator.evaluate("5*6"))
self.assertEqual(2, evaluator.evaluate("22/11"))
self.assertEqual(38, evaluator.evaluate("109-71"))
self.assertEqual(
493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66"))
def test_simple_float(self):
self.assertEqual(2.0, evaluate("1.0 + 1.0"))
self.assertEqual(2.5, evaluate("1.5 + 1.0"))
self.assertEqual(3.0, evaluate("1.5 * 2.0"))
self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0"))
self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0"))
self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0"))
def test_int_float_mix(self):
self.assertEqual(2.5, evaluate("1.5 + 1"))
self.assertEqual(4.25, evaluate("8.5 / 2"))
self.assertEqual(5.25, evaluate("10/4+0.75 + 2"))
self.assertEqual(2.5, evaluator.evaluate("1.5 + 1"))
self.assertEqual(4.25, evaluator.evaluate("8.5 / 2"))
self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2"))
def test_negative_numbers(self):
self.assertEqual(-2, evaluate("-2"))
self.assertEqual(-1, evaluate("-2+1"))
self.assertEqual(3, evaluate("5+-2"))
self.assertEqual(-2, evaluator.evaluate("-2"))
self.assertEqual(-1, evaluator.evaluate("-2+1"))
self.assertEqual(3, evaluator.evaluate("5+-2"))
def test_exponent(self):
self.assertEqual(8, evaluate("2^3"))
self.assertEqual(-8, evaluate("-2 ^ 3"))
self.assertEqual(15.625, evaluate("2.5 ^ 3"))
self.assertEqual(8, evaluate("4 ^ 1.5"))
self.assertEqual(8, evaluator.evaluate("2^3"))
self.assertEqual(-8, evaluator.evaluate("-2 ^ 3"))
self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3"))
self.assertEqual(8, evaluator.evaluate("4 ^ 1.5"))
def test_function(self):
self.assertEqual(5, evaluate("abs(-5)"))
self.assertEqual(2, evaluate("abs(2)"))
self.assertEqual(1, evaluate("min(1, 100)"))
self.assertEqual(100, evaluate("max(1, 100)"))
self.assertEqual(5, evaluator.evaluate("abs(-5)"))
self.assertEqual(2, evaluator.evaluate("abs(2)"))
self.assertEqual(1, evaluator.evaluate("min(1, 100)"))
self.assertEqual(100, evaluator.evaluate("max(1, 100)"))
def test_parentheses(self):
self.assertEqual(1, evaluate("(1)"))
self.assertEqual(-1, evaluate("(-1)"))
self.assertEqual(2, evaluate("(1+1)"))
self.assertEqual(15, evaluate("(1+2) * 5"))
self.assertEqual(3, evaluate("(1+2)*(3-1)/((1+(2-1)))"))
self.assertEqual(-8.0, evaluate("((1.0 / 0.5) * (2)) *(-2)"))
self.assertEqual(1, evaluator.evaluate("(1)"))
self.assertEqual(-1, evaluator.evaluate("(-1)"))
self.assertEqual(2, evaluator.evaluate("(1+1)"))
self.assertEqual(15, evaluator.evaluate("(1+2) * 5"))
self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))"))
self.assertEqual(
-8.0, evaluator. evaluate("((1.0 / 0.5) * (2)) *(-2)"))
def test_comparisons(self):
self.assertEqual(True, evaluate("1 < 2"))
self.assertEqual(True, evaluate("2 > 1"))
self.assertEqual(True, evaluate("2 != 1"))
self.assertEqual(False, evaluate("1 > 2"))
self.assertEqual(False, evaluate("2 < 1"))
self.assertEqual(False, evaluate("2 == 1"))
self.assertEqual(True, evaluate("(1 == 1) == !(1 == 2)"))
self.assertEqual(True, evaluator.evaluate("1 < 2"))
self.assertEqual(True, evaluator.evaluate("2 > 1"))
self.assertEqual(True, evaluator.evaluate("2 != 1"))
self.assertEqual(False, evaluator.evaluate("1 > 2"))
self.assertEqual(False, evaluator.evaluate("2 < 1"))
self.assertEqual(False, evaluator.evaluate("2 == 1"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) == !(1 == 2)"))
def test_logic_ops(self):
self.assertEqual(True, evaluate("(1 == 1) AND (2 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) and (2 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) && (2 == 2)"))
self.assertEqual(False, evaluate("(1 == 1) && (5 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) AND (2 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) and (2 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) && (2 == 2)"))
self.assertEqual(False, evaluator.evaluate("(1 == 1) && (5 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) OR (5 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) or (5 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) || (5 == 2)"))
self.assertEqual(False, evaluate("(5 == 1) || (5 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) OR (5 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) or (5 == 2)"))
self.assertEqual(True, evaluator.evaluate("(1 == 1) || (5 == 2)"))
self.assertEqual(False, evaluator.evaluate("(5 == 1) || (5 == 2)"))
self.assertEqual(False, evaluate("(1 == 1) AND NOT (2 == 2)"))
self.assertEqual(False, evaluate("(1 == 1) AND not (2 == 2)"))
self.assertEqual(False, evaluate("(1 == 1) AND !(2 == 2)"))
self.assertEqual(True, evaluate("(1 == 1) AND NOT (5 == 2)"))
self.assertEqual(True,
evaluate("(1 == 1) OR NOT (2 == 2) AND (5 == 5)"))
self.assertEqual(
False, evaluator.evaluate("(1 == 1) AND NOT (2 == 2)"))
self.assertEqual(
False, evaluator.evaluate("(1 == 1) AND not (2 == 2)"))
self.assertEqual(
False, evaluator.evaluate("(1 == 1) AND !(2 == 2)"))
self.assertEqual(
True, evaluator.evaluate("(1 == 1) AND NOT (5 == 2)"))
self.assertEqual(
True, evaluator.evaluate("(1 == 1) OR NOT (2 == 2) AND (5 == 5)"))
def test_ternary_conditional(self):
self.assertEqual(5, evaluate("(1 < 2) ? 5 : 10"))
self.assertEqual(10, evaluate("(1 > 2) ? 5 : 10"))
self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10"))
self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 5 : 10"))
def test_variables_dict(self):
stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407}
request = {'iops': 500, 'size': 4}
self.assertEqual(1500, evaluate("stats.iops + request.iops",
stats=stats, request=request))
self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops",
stats=stats,
request=request))
def test_missing_var(self):
stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407}
request = {'iops': 500, 'size': 4}
self.assertRaises(exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"foo.bob + 5",
stats=stats, request=request)
self.assertRaises(exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"stats.bob + 5",
stats=stats, request=request)
self.assertRaises(exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"fake.var + 1",
stats=stats, request=request, fake=None)
def test_bad_expression(self):
self.assertRaises(exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"1/*1")
def test_nonnumber_comparison(self):
@ -126,11 +133,11 @@ class EvaluatorTestCase(test.TestCase):
request = {'test': 'bar'}
self.assertRaises(
exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"nonnumber.test != request.test",
nonnumber=nonnumber, request=request)
def test_div_zero(self):
self.assertRaises(exception.EvaluatorParseException,
evaluate,
evaluator.evaluate,
"7 / 0")

View File

@ -735,14 +735,17 @@ class FJISCSIDriverTestCase(test.TestCase):
# self.stubs.Set(self.driver.configuration, 'safe_get',
# self.fake_configuration_safe_get)
self.stubs.Set(eternus_dx_iscsi.FJDXISCSIDriver, '_do_iscsi_discovery',
self.stubs.Set(eternus_dx_iscsi.FJDXISCSIDriver,
'_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
self.stubs.Set(eternus_dx_common.FJDXCommon, '_get_ecom_connection',
self.stubs.Set(eternus_dx_common.FJDXCommon,
'_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
self.stubs.Set(eternus_dx_common.FJDXCommon, '_getinstancename',
self.stubs.Set(eternus_dx_common.FJDXCommon,
'_getinstancename',
instancename.fake_getinstancename)
# set iscsi driver to self.driver

View File

@ -22,13 +22,12 @@ from oslo_utils import units
import requests
from cinder import context
from cinder.db.sqlalchemy.models import VolumeMetadata
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.fusionio.ioControl import FIOconnection
from cinder.volume.drivers.fusionio.ioControl import FIOioControlDriver
from cinder.volume.drivers.fusionio import ioControl
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -189,16 +188,18 @@ class FIOioControlConnectionTests(test.TestCase):
super(FIOioControlConnectionTests, self).setUp()
self.configuration = create_configuration()
self.ctxt = context.get_admin_context()
return_text = json.dumps({"Version": FIOconnection.APIVERSION})
return_text = json.dumps(
{"Version": ioControl.FIOconnection.APIVERSION})
get_return = FIOFakeResponse(code=200,
text=return_text)
requests.get = mock.Mock(return_value=get_return)
self.conn = FIOconnection(self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.fusionio_iocontrol_retry,
(self.configuration.
fusionio_iocontrol_verify_cert),)
self.conn = ioControl.FIOconnection(
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.fusionio_iocontrol_retry,
(self.configuration.
fusionio_iocontrol_verify_cert),)
def test_conn_init_sucess(self):
expected = [mock.call(url=("https://" +
@ -209,12 +210,13 @@ class FIOioControlConnectionTests(test.TestCase):
requests.get.assert_has_calls(expected)
def test_wrong_version(self):
expected = json.dumps({"Version": (FIOconnection.APIVERSION + ".1")})
expected = json.dumps(
{"Version": (ioControl.FIOconnection.APIVERSION + ".1")})
get_return = FIOFakeResponse(code=200,
text=expected)
requests.get = mock.Mock(return_value=get_return)
self.assertRaises(exception.VolumeDriverException,
FIOconnection,
ioControl.FIOconnection,
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
@ -415,7 +417,8 @@ class FIOioControlTestCases(test.TestCase):
super(FIOioControlTestCases, self).setUp()
self.configuration = create_configuration()
self.ctxt = context.get_admin_context()
self.drv = FIOioControlDriver(configuration=self.configuration)
self.drv = ioControl.FIOioControlDriver(
configuration=self.configuration)
self.drv.fio_qos_dict = self.policyTable
def test_do_setup_sucess(self, connmock):
@ -783,7 +786,7 @@ class FIOioControlTestCases(test.TestCase):
"Driver/Test version Mismatch")
def test_create_volume_QoS_by_presets(self, connmock):
preset_qos = VolumeMetadata(key='fio-qos', value='Policy 2')
preset_qos = models.VolumeMetadata(key='fio-qos', value='Policy 2')
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,7 @@ Self test for Hitachi Unified Storage (HUS-HNAS) platform.
"""
import os
from StringIO import StringIO
import StringIO
import tempfile
import mock
@ -325,18 +325,18 @@ class HNASiSCSIDriverTest(test.TestCase):
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
m_open.return_value = StringIO(HNASCONF)
m_open.return_value = StringIO.StringIO(HNASCONF)
self.assertRaises(exception.NotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
m_open.return_value = StringIO(HNAS_WRONG_CONF1)
m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
m_open.return_value = StringIO(HNAS_WRONG_CONF2)
m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '')

View File

@ -15,7 +15,7 @@
#
import os
from StringIO import StringIO
import StringIO
import tempfile
import mock
@ -169,18 +169,18 @@ class HDSNFSDriverTest(test.TestCase):
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
m_open.return_value = StringIO(HNASCONF)
m_open.return_value = StringIO.StringIO(HNASCONF)
self.assertRaises(exception.NotFound, nfs._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
m_open.return_value = StringIO(HNAS_WRONG_CONF1)
m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, nfs._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
m_open.return_value = StringIO(HNAS_WRONG_CONF2)
m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')

View File

@ -18,7 +18,7 @@ import os
import shutil
import tempfile
import time
from xml.dom.minidom import Document
from xml.dom import minidom
import mock
@ -697,7 +697,7 @@ class Huawei18000ISCSIDriverTestCase(test.TestCase):
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
doc = Document()
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
@ -905,7 +905,7 @@ class Huawei18000FCDriverTestCase(test.TestCase):
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
doc = Document()
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)

View File

@ -23,7 +23,7 @@ import shutil
import socket
import tempfile
import time
from xml.dom.minidom import Document
from xml.dom import minidom
from xml.etree import ElementTree as ET
import mox
@ -33,8 +33,8 @@ from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import huawei
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import HuaweiVolumeDriver
from cinder.volume.drivers.huawei import ssh_common
from cinder.volume import volume_types
@ -247,7 +247,7 @@ def Fake_change_file_mode(obj, filepath):
def create_fake_conf_file(filename):
doc = Document()
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
@ -1086,7 +1086,8 @@ class HuaweiTISCSIDriverTestCase(test.TestCase):
def _init_driver(self):
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_conf_invalid(self):
@ -1096,14 +1097,14 @@ class HuaweiTISCSIDriverTestCase(test.TestCase):
tmp_configuration.cinder_huawei_conf_file = tmp_fonf_file
tmp_configuration.append_config_values(mox.IgnoreArg())
self.assertRaises(IOError,
HuaweiVolumeDriver,
huawei.HuaweiVolumeDriver,
configuration=tmp_configuration)
# Test Product and Protocol invalid
tmp_dict = {'Storage/Product': 'T', 'Storage/Protocol': 'iSCSI'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, 'xx')
self.assertRaises(exception.InvalidInput,
HuaweiVolumeDriver,
huawei.HuaweiVolumeDriver,
configuration=self.configuration)
modify_conf(self.fake_conf_file, k, v)
# Test ctr ip, UserName and password unspecified
@ -1113,20 +1114,23 @@ class HuaweiTISCSIDriverTestCase(test.TestCase):
'Storage/UserPassword': '123456'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, '')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, k, v)
# Test StoragePool unspecified
modify_conf(self.fake_conf_file, 'LUN/StoragePool', '', attrib='Name')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver = huawei. HuaweiVolumeDriver(
configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'LUN/StoragePool', 'RAID_001',
attrib='Name')
# Test LUN type invalid
modify_conf(self.fake_conf_file, 'LUN/LUNType', 'thick')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
tmp_driver.do_setup(None)
self.assertRaises(exception.InvalidInput,
tmp_driver.create_volume, FAKE_VOLUME)
@ -1134,14 +1138,16 @@ class HuaweiTISCSIDriverTestCase(test.TestCase):
# Test OSType invalid
modify_conf(self.fake_conf_file, 'Host', 'invalid_type',
attrib='OSType')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'Host', 'Linux', attrib='OSType')
# Test TargetIP not found
modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '')
modify_conf(self.fake_conf_file, 'iSCSI/Initiator', '', attrib='Name')
tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
tmp_driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
tmp_driver.do_setup(None)
tmp_driver.create_volume(FAKE_VOLUME)
self.assertRaises(exception.InvalidInput,
@ -1480,7 +1486,8 @@ class HuaweiTFCDriverTestCase(test.TestCase):
def _init_driver(self):
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_validate_connector_failed(self):
@ -1557,7 +1564,8 @@ class HuaweiDorado5100FCDriverTestCase(HuaweiTFCDriverTestCase):
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
@ -1582,7 +1590,8 @@ class HuaweiDorado2100G2FCDriverTestCase(HuaweiTFCDriverTestCase):
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
@ -1617,7 +1626,8 @@ class HuaweiDorado5100ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase):
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_create_delete_cloned_volume(self):
@ -1645,7 +1655,8 @@ class HuaweiDorado2100G2ISCSIDriverTestCase(HuaweiTISCSIDriverTestCase):
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_conf_invalid(self):
@ -1709,7 +1720,8 @@ class SSHMethodTestCase(test.TestCase):
self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
Fake_change_file_mode)
Curr_test[0] = 'T'
self.driver = HuaweiVolumeDriver(configuration=self.configuration)
self.driver = huawei.HuaweiVolumeDriver(
configuration=self.configuration)
self.driver.do_setup(None)
def test_reach_max_connection_limit(self):

View File

@ -31,12 +31,7 @@ from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp.options import netapp_7mode_opts
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_cluster_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils
@ -45,12 +40,12 @@ LOG = logging.getLogger("cinder.volume.driver")
def create_configuration():
configuration = conf.Configuration(None)
configuration.append_config_values(netapp_connection_opts)
configuration.append_config_values(netapp_transport_opts)
configuration.append_config_values(netapp_basicauth_opts)
configuration.append_config_values(netapp_cluster_opts)
configuration.append_config_values(netapp_7mode_opts)
configuration.append_config_values(netapp_provisioning_opts)
configuration.append_config_values(options.netapp_connection_opts)
configuration.append_config_values(options.netapp_transport_opts)
configuration.append_config_values(options.netapp_basicauth_opts)
configuration.append_config_values(options.netapp_cluster_opts)
configuration.append_config_values(options.netapp_7mode_opts)
configuration.append_config_values(options.netapp_provisioning_opts)
return configuration

View File

@ -33,10 +33,8 @@ from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import iscsi
from cinder.volume.drivers.netapp.eseries.iscsi import LOG as driver_log
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
from cinder.volume.drivers.netapp import options
import cinder.volume.drivers.netapp.utils as na_utils
@ -45,8 +43,8 @@ LOG = logging.getLogger(__name__)
def create_configuration():
configuration = conf.Configuration(None)
configuration.append_config_values(netapp_basicauth_opts)
configuration.append_config_values(netapp_eseries_opts)
configuration.append_config_values(options.netapp_basicauth_opts)
configuration.append_config_values(options.netapp_eseries_opts)
return configuration
@ -845,7 +843,7 @@ class NetAppEseriesISCSIDriverTestCase(test.TestCase):
self.driver._create_volume, wrong_eseries_pool_label,
self.fake_eseries_volume_label, self.fake_size_gb)
@mock.patch.object(driver_log, 'info')
@mock.patch.object(iscsi.LOG, 'info')
@mock.patch.object(client.RestClient, 'list_storage_pools')
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(return_value='CorrectVolume'))
@ -867,7 +865,7 @@ class NetAppEseriesISCSIDriverTestCase(test.TestCase):
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(
side_effect=exception.NetAppDriverException))
@mock.patch.object(driver_log, 'info', mock.Mock())
@mock.patch.object(iscsi.LOG, 'info', mock.Mock())
def test_create_volume_check_exception(self, fake_list_pools):
fake_pool = {}
fake_pool['label'] = self.fake_eseries_pool_label

View File

@ -14,13 +14,12 @@
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from itertools import chain
import itertools
import os
from lxml import etree
import mock
import mox
from mox import IgnoreArg
import mox as mox_lib
import six
from cinder import exception
@ -54,14 +53,15 @@ CONNECTION_INFO = {'hostname': 'fake_host',
'port': 443,
'username': 'admin',
'password': 'passw0rd'}
SEVEN_MODE_CONNECTION_INFO = dict(chain(CONNECTION_INFO.items(),
{'vfiler': 'test_vfiler'}.items()))
SEVEN_MODE_CONNECTION_INFO = dict(
itertools.chain(CONNECTION_INFO.items(),
{'vfiler': 'test_vfiler'}.items()))
FAKE_VSERVER = 'fake_vserver'
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration = mox_lib.MockObject(conf.Configuration)
configuration.append_config_values(mox_lib.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
configuration.nas_mount_options = None
@ -136,7 +136,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._clone_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
@ -158,11 +160,13 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions(IgnoreArg())
drv._clone_volume(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
@ -183,16 +187,16 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
drv._get_provider_location(mox_lib.IgnoreArg())
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
mox.ReplayAll()
@ -251,9 +255,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'nfsvol')
drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(IgnoreArg())
drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
@ -371,7 +375,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
@ -396,7 +400,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox_lib.IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
@ -413,7 +417,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
@ -472,9 +476,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
@ -500,9 +505,11 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
@ -531,18 +538,20 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share(
mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
@ -569,22 +578,24 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
run_as_root=True)
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
@ -612,21 +623,24 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
run_as_root=True)
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(),
'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
@ -661,24 +675,28 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
image_utils.convert_image(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg(), 'raw',
run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._register_image_in_cache(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
@ -718,7 +736,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
@ -728,7 +747,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
@ -738,7 +758,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
@ -748,7 +769,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
@ -758,7 +780,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
drv._check_share_in_use(mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
@ -779,7 +802,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
@ -792,7 +815,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
@ -914,7 +937,7 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
@mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
@mock.patch.object(utils, 'get_volume_extra_specs')
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
@ -936,7 +959,7 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
drv.ssc_enabled = False
extra_specs = {'netapp:raid_type': 'raid4'}
mock_volume_extra_specs = mock.Mock()
self.mock_object(netapp_nfs_cmode,
self.mock_object(utils,
'get_volume_extra_specs',
mock_volume_extra_specs)
mock_volume_extra_specs.return_value = extra_specs
@ -957,7 +980,7 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
mock_volume_extra_specs = mock.Mock()
self.mock_object(netapp_nfs_cmode,
self.mock_object(utils,
'get_volume_extra_specs',
mock_volume_extra_specs)
mock_volume_extra_specs.return_value = extra_specs
@ -976,7 +999,7 @@ class NetAppCmodeNfsDriverOnlyTestCase(test.TestCase):
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
@mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
@mock.patch.object(utils, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
@ -1225,12 +1248,12 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
drv._get_provider_location(mox_lib.IgnoreArg())
drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
@ -1279,7 +1302,8 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
mox.StubOutWithMock(drv, '_get_export_ip_path')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
mox_lib.IgnoreArg(),
mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
def test_clone_volume_clear(self):
@ -1288,7 +1312,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(IgnoreArg(), IgnoreArg())
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
@ -1321,7 +1345,7 @@ class NetApp7modeNfsDriverTestCase(NetAppCmodeNfsDriverTestCase):
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
drv.zapi_client.clone_file(IgnoreArg(), IgnoreArg())
drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()

View File

@ -19,7 +19,7 @@ import copy
import httplib
from lxml import etree
from mox import IgnoreArg
import mox
import six
from cinder import exception
@ -395,14 +395,14 @@ class SscUtilsTestCase(test.TestCase):
mirrored)
raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
ssc_cmode.query_aggr_options(
na_server, IgnoreArg()).AndReturn(raiddp)
na_server, mox.IgnoreArg()).AndReturn(raiddp)
ssc_cmode.query_aggr_storage_disk(
na_server, IgnoreArg()).AndReturn('SSD')
na_server, mox.IgnoreArg()).AndReturn('SSD')
raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
ssc_cmode.query_aggr_options(
na_server, IgnoreArg()).AndReturn(raid4)
na_server, mox.IgnoreArg()).AndReturn(raid4)
ssc_cmode.query_aggr_storage_disk(
na_server, IgnoreArg()).AndReturn('SAS')
na_server, mox.IgnoreArg()).AndReturn('SAS')
self.mox.ReplayAll()
res_vols = ssc_cmode.get_cluster_vols_with_ssc(

View File

@ -19,8 +19,6 @@ import os
import mock
import mox as mox_lib
from mox import IgnoreArg
from mox import IsA
from mox import stubout
from oslo_utils import units
@ -612,7 +610,7 @@ class NfsDriverTestCase(test.TestCase):
self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
self.assertRaises(exception.NfsException,
drv.do_setup, IsA(context.RequestContext))
drv.do_setup, mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_oversub_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_oversub_ratio is less than 0."""
@ -620,7 +618,7 @@ class NfsDriverTestCase(test.TestCase):
self.configuration.nfs_oversub_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
IsA(context.RequestContext))
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_used_ratio is less than 0."""
@ -628,7 +626,7 @@ class NfsDriverTestCase(test.TestCase):
self.configuration.nfs_used_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
IsA(context.RequestContext))
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
"""do_setup should throw error if nfs_used_ratio is greater than 1."""
@ -636,7 +634,7 @@ class NfsDriverTestCase(test.TestCase):
self.configuration.nfs_used_ratio = 2
self.assertRaises(exception.NfsException,
drv.do_setup,
IsA(context.RequestContext))
mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
@ -652,7 +650,7 @@ class NfsDriverTestCase(test.TestCase):
mox.ReplayAll()
self.assertRaises(exception.NfsException,
drv.do_setup, IsA(context.RequestContext))
drv.do_setup, mox_lib.IsA(context.RequestContext))
mox.VerifyAll()
@ -732,8 +730,8 @@ class NfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_create_sparsed_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
drv._set_rw_permissions(IgnoreArg())
drv._create_sparsed_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
@ -752,8 +750,8 @@ class NfsDriverTestCase(test.TestCase):
mox.StubOutWithMock(drv, '_create_regular_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
drv._create_regular_file(IgnoreArg(), IgnoreArg())
drv._set_rw_permissions(IgnoreArg())
drv._create_regular_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()

View File

@ -31,7 +31,7 @@ from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
from cinder.tests import test_volume
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
@ -1021,7 +1021,7 @@ class RBDImageIOWrapperTestCase(test.TestCase):
self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(DriverTestCase):
class ManagedRBDTestCase(test_volume.DriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
def setUp(self):

View File

@ -23,7 +23,7 @@ from oslo_utils import units
from cinder.image import image_utils
from cinder import test
from cinder.volume.drivers.sheepdog import SheepdogDriver
from cinder.volume.drivers import sheepdog
COLLIE_NODE_INFO = """
@ -58,7 +58,7 @@ class FakeImageService:
class SheepdogTestCase(test.TestCase):
def setUp(self):
super(SheepdogTestCase, self).setUp()
self.driver = SheepdogDriver()
self.driver = sheepdog.SheepdogDriver()
def test_update_volume_stats(self):
def fake_stats(*args):
@ -120,7 +120,9 @@ class SheepdogTestCase(test.TestCase):
lambda w, x, y, z: None)
self.stubs.Set(image_utils, 'convert_image',
lambda x, y, z: None)
self.stubs.Set(SheepdogDriver, '_try_execute', fake_try_execute)
self.stubs.Set(sheepdog.SheepdogDriver,
'_try_execute',
fake_try_execute)
self.driver.copy_image_to_volume(None, {'name': 'test',
'size': 1},
FakeImageService(), None)
@ -154,7 +156,8 @@ class SheepdogTestCase(test.TestCase):
'id': ss_uuid,
'size': fake_size}
with mock.patch.object(SheepdogDriver, '_try_execute') as mock_exe:
with mock.patch.object(sheepdog.SheepdogDriver,
'_try_execute') as mock_exe:
self.driver.create_volume_from_snapshot(fake_vol, fake_snapshot)
args = ['qemu-img', 'create', '-b',
"sheepdog:%s:%s" % (fake_snapshot['volume_name'],

View File

@ -26,7 +26,7 @@ from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.solidfire import SolidFireDriver
from cinder.volume.drivers import solidfire
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -55,9 +55,11 @@ class SolidFireVolumeTestCase(test.TestCase):
self.configuration.sf_allow_template_caching = False
super(SolidFireVolumeTestCase, self).setUp()
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_build_endpoint_info',
self.stubs.Set(solidfire.SolidFireDriver,
'_build_endpoint_info',
self.fake_build_endpoint_info)
self.expected_qos_results = {'minIOPS': 1000,
@ -213,9 +215,11 @@ class SolidFireVolumeTestCase(test.TestCase):
return {'fake': 'fake-model'}
def test_create_with_qos_type(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_set_qos_by_volume_type',
self.stubs.Set(solidfire.SolidFireDriver,
'_set_qos_by_volume_type',
self.fake_set_qos_by_volume_type)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -224,12 +228,13 @@ class SolidFireVolumeTestCase(test.TestCase):
'volume_type_id': 'fast',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -238,13 +243,14 @@ class SolidFireVolumeTestCase(test.TestCase):
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
self.assertIsNone(model_update.get('provider_geometry', None))
def test_create_volume_non_512(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -254,16 +260,18 @@ class SolidFireVolumeTestCase(test.TestCase):
'created_at': timeutils.utcnow()}
self.configuration.sf_emulate_512 = False
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertEqual(model_update.get('provider_geometry', None),
'4096 4096')
self.configuration.sf_emulate_512 = True
def test_create_snapshot(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_get_model_info',
self.stubs.Set(solidfire.SolidFireDriver,
'_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -280,14 +288,16 @@ class SolidFireVolumeTestCase(test.TestCase):
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.create_volume(testvol)
sfv.create_snapshot(testsnap)
def test_create_clone(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_get_model_info',
self.stubs.Set(solidfire.SolidFireDriver,
'_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -303,7 +313,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.create_cloned_volume(testvol_b, testvol)
def test_initialize_connector_with_blocksizes(self):
@ -322,7 +332,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'created_at': timeutils.utcnow(),
}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
properties = sfv.initialize_connection(testvol, connector)
self.assertEqual('4096', properties['data']['physical_block_size'])
self.assertEqual('4096', properties['data']['logical_block_size'])
@ -330,7 +340,8 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_create_volume_with_qos(self):
preset_qos = {}
preset_qos['qos'] = 'fast'
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
@ -341,23 +352,25 @@ class SolidFireVolumeTestCase(test.TestCase):
'volume_type_id': None,
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume_fails(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.stubs.Set(solidfire.SolidFireDriver,
'_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
try:
sfv.create_volume(testvol)
self.fail("Should have thrown Error")
@ -365,35 +378,40 @@ class SolidFireVolumeTestCase(test.TestCase):
pass
def test_create_sfaccount(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
account = sfv._create_sfaccount('project-id')
self.assertIsNotNone(account)
def test_create_sfaccount_fails(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._create_sfaccount('project-id')
self.assertIsNone(account)
def test_get_sfaccount_by_name(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNotNone(account)
def test_get_sfaccount_by_name_fails(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNone(account)
def test_delete_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
@ -401,11 +419,12 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.delete_volume(testvol)
def test_delete_volume_fails_no_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
@ -413,7 +432,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
try:
sfv.delete_volume(testvol)
self.fail("Should have thrown Error")
@ -423,9 +442,11 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_delete_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.stubs.Set(solidfire.SolidFireDriver,
'_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
@ -433,30 +454,34 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.delete_volume,
testvol)
def test_get_cluster_info(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._get_cluster_info()
def test_get_cluster_info_fail(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.stubs.Set(solidfire.SolidFireDriver,
'_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAPIException,
sfv._get_cluster_info)
def test_extend_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
@ -464,17 +489,18 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.extend_volume(testvol, 2)
def test_extend_volume_fails_no_volume(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'not-found'}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.VolumeNotFound,
sfv.extend_volume,
testvol, 2)
@ -482,9 +508,11 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_extend_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
self.stubs.Set(SolidFireDriver, '_update_cluster_status',
self.stubs.Set(solidfire.SolidFireDriver,
'_update_cluster_status',
self.fake_update_cluster_status)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
@ -492,13 +520,13 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.extend_volume,
testvol, 2)
def test_set_by_qos_spec_with_scoping(self):
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'qos:minIOPS': '1000',
'qos:maxIOPS': '10000',
@ -514,7 +542,7 @@ class SolidFireVolumeTestCase(test.TestCase):
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_spec(self):
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'minIOPS': '1000',
'maxIOPS': '10000',
@ -530,7 +558,7 @@ class SolidFireVolumeTestCase(test.TestCase):
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_by_type_only(self):
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "100",
"qos:burstIOPS": "300",
@ -541,8 +569,9 @@ class SolidFireVolumeTestCase(test.TestCase):
'burstIOPS': 300})
def test_accept_transfer(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
@ -556,8 +585,9 @@ class SolidFireVolumeTestCase(test.TestCase):
expected)
def test_accept_transfer_volume_not_found_raises(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
@ -572,8 +602,9 @@ class SolidFireVolumeTestCase(test.TestCase):
'new_project')
def test_retype(self):
sfv = SolidFireDriver(configuration=self.configuration)
self.stubs.Set(SolidFireDriver, '_issue_api_request',
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "500",
@ -615,14 +646,15 @@ class SolidFireVolumeTestCase(test.TestCase):
def _fake_get_qos_spec(ctxt, spec_id):
return test_qos_spec
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(volume_types, 'get_volume_type',
_fake_get_volume_type)
self.stubs.Set(qos_specs, 'get_qos_specs',
_fake_get_qos_spec)
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
diff = {'encryption': {}, 'extra_specs': {},
'qos_specs': {'burstIOPS': ('10000', '2000'),
@ -635,15 +667,16 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertTrue(sfv.retype(self.ctxt,
testvol,
test_type, diff, host))
def test_update_cluster_status(self):
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._update_cluster_status()
self.assertEqual(sfv.cluster_stats['free_capacity_gb'], 99.0)
self.assertEqual(sfv.cluster_stats['total_capacity_gb'], 100.0)
@ -655,9 +688,10 @@ class SolidFireVolumeTestCase(test.TestCase):
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.manage_existing(testvol, external_ref)
self.assertIsNotNone(model_update)
self.assertIsNone(model_update.get('provider_geometry', None))
@ -666,9 +700,11 @@ class SolidFireVolumeTestCase(test.TestCase):
def _fake_do_v_create(self, project_id, params):
return project_id, params
self.stubs.Set(SolidFireDriver, '_issue_api_request',
self.stubs.Set(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(SolidFireDriver, '_do_volume_create', _fake_do_v_create)
self.stubs.Set(solidfire.SolidFireDriver,
'_do_volume_create', _fake_do_v_create)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
@ -679,7 +715,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'migration_status': 'target:'
'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
proj_id, sf_vol_object = sfv.create_volume(testvol)
self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66',
sf_vol_object['attributes']['uuid'])
@ -688,10 +724,10 @@ class SolidFireVolumeTestCase(test.TestCase):
self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66',
sf_vol_object['name'])
@mock.patch.object(SolidFireDriver, '_issue_api_request')
@mock.patch.object(SolidFireDriver, '_get_sfaccount')
@mock.patch.object(SolidFireDriver, '_get_sf_volume')
@mock.patch.object(SolidFireDriver, '_create_image_volume')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
@mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume')
def test_verify_image_volume_out_of_date(self,
_mock_create_image_volume,
_mock_get_sf_volume,
@ -726,15 +762,15 @@ class SolidFireVolumeTestCase(test.TestCase):
325355)}
image_service = 'null'
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
_mock_issue_api_request.return_value = {'result': 'ok'}
sfv._verify_image_volume(self.ctxt, image_meta, image_service)
self.assertTrue(_mock_create_image_volume.called)
@mock.patch.object(SolidFireDriver, '_issue_api_request')
@mock.patch.object(SolidFireDriver, '_get_sfaccount')
@mock.patch.object(SolidFireDriver, '_get_sf_volume')
@mock.patch.object(SolidFireDriver, '_create_image_volume')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
@mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
@mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume')
def test_verify_image_volume_ok(self,
_mock_create_image_volume,
_mock_get_sf_volume,
@ -760,17 +796,17 @@ class SolidFireVolumeTestCase(test.TestCase):
325355)}
image_service = 'null'
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
_mock_issue_api_request.return_value = {'result': 'ok'}
sfv._verify_image_volume(self.ctxt, image_meta, image_service)
self.assertFalse(_mock_create_image_volume.called)
@mock.patch.object(SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_not_configured(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertEqual((None, False),
sfv.clone_image(self.ctxt,
self.mock_volume,
@ -778,11 +814,11 @@ class SolidFireVolumeTestCase(test.TestCase):
self.fake_image_meta,
'fake'))
@mock.patch.object(SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_authorization(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
self.configuration.sf_allow_template_caching = True
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
# Make sure if it's NOT public and we're NOT the owner it
# doesn't try and cache
@ -818,11 +854,11 @@ class SolidFireVolumeTestCase(test.TestCase):
self.mock_volume, 'fake',
_fake_image_meta, 'fake')
@mock.patch.object(SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_virt_size_not_set(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
self.configuration.sf_allow_template_caching = True
sfv = SolidFireDriver(configuration=self.configuration)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
# Don't run clone_image if virtual_size property not on image
_fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501',

View File

@ -17,7 +17,7 @@
Test suite for VMware VMDK driver.
"""
from distutils.version import LooseVersion
from distutils import version as ver
import mock
import mox
@ -1850,12 +1850,12 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
self.assertEqual(LooseVersion(self.DEFAULT_VC_VERSION), version)
self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
self.assertEqual(LooseVersion('6.0.1'), version)
self.assertEqual(ver.LooseVersion('6.0.1'), version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@ -1864,7 +1864,7 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
get_vc_version.return_value = LooseVersion('5.0')
get_vc_version.return_value = ver.LooseVersion('5.0')
self._driver.do_setup(mock.ANY)
@ -1878,7 +1878,7 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
'_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
get_pbm_wsdl_location):
vc_version = LooseVersion('5.5')
vc_version = ver.LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = None
@ -1900,7 +1900,7 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
vc_version = LooseVersion('5.5')
vc_version = ver.LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'

View File

@ -23,7 +23,7 @@ import datetime
import os
import shutil
import socket
from sys import platform
import sys
import tempfile
import eventlet
@ -49,7 +49,7 @@ from cinder.openstack.common import log as logging
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.tests.brick import fake_lvm
from cinder.tests import conf_fixture
from cinder.tests import fake_driver
from cinder.tests import fake_notifier
@ -61,7 +61,7 @@ import cinder.volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.volume.manager import VolumeManager
from cinder.volume import manager as vol_manager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume.targets import tgt
from cinder.volume import utils as volutils
@ -74,7 +74,7 @@ CGQUOTAS = quota.CGQUOTAS
CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
PLATFORM = platform
PLATFORM = sys.platform
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
@ -624,7 +624,7 @@ class VolumeTestCase(BaseVolumeTestCase):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = VolumeManager()
manager = vol_manager.VolumeManager()
manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
@ -638,7 +638,8 @@ class VolumeTestCase(BaseVolumeTestCase):
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
self.assertRaises(exception.CinderException, VolumeManager)
self.assertRaises(exception.CinderException,
vol_manager.VolumeManager)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
@ -2348,10 +2349,10 @@ class VolumeTestCase(BaseVolumeTestCase):
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
@ -2374,14 +2375,14 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
@test.testtools.skipIf(platform == "darwin", "SKIP on OSX")
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_delete_no_dev_fails(self):
"""Test delete snapshot with no dev file fails."""
self.stubs.Set(os.path, 'exists', lambda x: False)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
@ -3960,10 +3961,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x: False)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
lambda x: True)
@ -4032,10 +4033,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
'cinder-volumes:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.migrate_volume, self.context,
@ -4054,10 +4055,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
@ -4101,10 +4102,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
self.stubs.Set(self.volume.driver, 'create_export',
lambda x, y, vg='vg': None)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, True)
@ -4121,10 +4122,10 @@ class LVMISCSIVolumeDriverTestCase(DriverTestCase):
def _setup_stubs_for_manage_existing(self):
"""Helper to set up common stubs for the manage_existing tests."""
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.stubs.Set(self.volume.driver.vg, 'get_volume',
self._get_manage_existing_lvs)

View File

@ -24,8 +24,7 @@ from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.zadara import zadara_opts
from cinder.volume.drivers.zadara import ZadaraVPSAISCSIDriver
from cinder.volume.drivers import zadara
LOG = logging.getLogger("cinder.volume.driver")
@ -482,13 +481,14 @@ class ZadaraVPSADriverTestCase(test.TestCase):
RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)
self.configuration = conf.Configuration(None)
self.configuration.append_config_values(zadara_opts)
self.configuration.append_config_values(zadara.zadara_opts)
self.configuration.reserved_percentage = 10
self.configuration.zadara_user = 'test'
self.configuration.zadara_password = 'test_password'
self.configuration.zadara_vpsa_poolname = 'pool-0001'
self.driver = ZadaraVPSAISCSIDriver(configuration=self.configuration)
self.driver = zadara.ZadaraVPSAISCSIDriver(
configuration=self.configuration)
self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection)
self.driver.do_setup(None)

View File

@ -15,10 +15,9 @@
Unit tests for Oracle's ZFSSA Cinder volume driver
"""
import json
import mock
from json import JSONEncoder
from oslo_utils import units
from cinder.openstack.common import log as logging
@ -429,9 +428,9 @@ class FakeAddIni2InitGrp(object):
def get(self, path, **kwargs):
result = client.RestResult()
result.status = client.Status.OK
result.data = JSONEncoder().encode({'group':
{'initiators':
['iqn.1-0.org.deb:01:d7']}})
result.data = json.JSONEncoder().encode({'group':
{'initiators':
['iqn.1-0.org.deb:01:d7']}})
return result
def put(self, path, body="", **kwargs):

View File

@ -22,8 +22,7 @@ Tests for NetApp API layer
from cinder.i18n import _
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
class NetAppApiElementTransTests(test.TestCase):
@ -34,7 +33,7 @@ class NetAppApiElementTransTests(test.TestCase):
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
root = NaElement('root')
root = netapp_api.NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
@ -44,7 +43,7 @@ class NetAppApiElementTransTests(test.TestCase):
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
root = NaElement('root')
root = netapp_api.NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
@ -57,7 +56,7 @@ class NetAppApiElementTransTests(test.TestCase):
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
root = NaElement('root')
root = netapp_api.NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
@ -66,7 +65,7 @@ class NetAppApiElementTransTests(test.TestCase):
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
root = NaElement('root')
root = netapp_api.NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
@ -75,13 +74,13 @@ class NetAppApiElementTransTests(test.TestCase):
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
root = NaElement('root')
root = netapp_api.NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
root = NaElement('root')
root = netapp_api.NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
@ -94,19 +93,20 @@ class NetAppApiElementTransTests(test.TestCase):
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
root = NaElement('root')
root['e1'] = NaElement('nested')
root = netapp_api.NaElement('root')
root['e1'] = netapp_api.NaElement('nested')
self.assertEqual(len(root.get_children()), 1)
e1 = root.get_child_by_name('e1')
self.assertIsInstance(e1, NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'), NaElement)
self.assertIsInstance(e1, netapp_api.NaElement)
self.assertIsInstance(e1.get_child_by_name('nested'),
netapp_api.NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
root = NaElement('root')
root = netapp_api.NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
self.assertIsInstance(e1, NaElement)
self.assertIsInstance(e1, netapp_api.NaElement)
sub_ch = e1.get_children()
self.assertEqual(len(sub_ch), 2)
for c in sub_ch:
@ -118,13 +118,13 @@ class NetAppApiElementTransTests(test.TestCase):
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
root = NaElement('root')
root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
self.assertIsInstance(l, NaElement)
self.assertIsInstance(l, netapp_api.NaElement)
t = root.get_child_by_name('t')
self.assertIsInstance(t, NaElement)
self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
@ -132,22 +132,22 @@ class NetAppApiElementTransTests(test.TestCase):
def test_setter_no_value(self):
"""Tests key with None value."""
root = NaElement('root')
root = netapp_api.NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
root = NaElement('root')
root = netapp_api.NaElement('root')
try:
root['k'] = NaServer('localhost')
root['k'] = netapp_api.NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
root = NaElement('root')
root = netapp_api.NaElement('root')
try:
root[None] = 'value'
except Exception as e:

View File

@ -23,7 +23,7 @@ from cinder import test
from cinder.tests.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.utils import hashabledict
from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
@ -168,8 +168,9 @@ class NetApp7modeClientTestCase(test.TestCase):
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set([hashabledict(igroup) for igroup in igroups])
expected = set([hashabledict(fake.IGROUP1)])
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)
@ -211,8 +212,9 @@ class NetApp7modeClientTestCase(test.TestCase):
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set([hashabledict(igroup) for igroup in igroups])
expected = set([hashabledict(fake.IGROUP1)])
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)

View File

@ -23,7 +23,7 @@ from cinder import exception
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.utils import hashabledict
from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
@ -283,8 +283,9 @@ class NetAppCmodeClientTestCase(test.TestCase):
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set([hashabledict(igroup) for igroup in igroups])
expected = set([hashabledict(expected_igroup)])
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
@ -328,8 +329,9 @@ class NetAppCmodeClientTestCase(test.TestCase):
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
igroups = set([hashabledict(igroup) for igroup in igroups])
expected = set([hashabledict(expected_igroup)])
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
@ -400,9 +402,10 @@ class NetAppCmodeClientTestCase(test.TestCase):
igroups = self.client.get_igroup_by_initiators([initiator])
# make these lists of dicts comparable using hashable dictionaries
igroups = set([hashabledict(igroup) for igroup in igroups])
expected = set([hashabledict(expected_igroup1),
hashabledict(expected_igroup2)])
igroups = set(
[netapp_utils.hashabledict(igroup) for igroup in igroups])
expected = set([netapp_utils.hashabledict(expected_igroup1),
netapp_utils.hashabledict(expected_igroup2)])
self.assertSetEqual(igroups, expected)

View File

@ -25,13 +25,8 @@ from cinder import test
import cinder.tests.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.volume.drivers.netapp.dataontap.block_7mode import \
NetAppBlockStorage7modeLibrary as block_lib_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.block_base import \
NetAppBlockStorageLibrary as block_lib
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
@ -43,7 +38,8 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
self.library = block_lib_7mode('driver', 'protocol', **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
@ -64,9 +60,11 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(block_lib_7mode, '_get_root_volume_name')
@mock.patch.object(block_lib_7mode, '_do_partner_setup')
@mock.patch.object(block_lib, 'do_setup')
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_get_root_volume_name')
@mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
'_do_partner_setup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_do_partner_setup,
mock_get_root_volume_name):
mock_get_root_volume_name.return_value = 'vol0'
@ -95,7 +93,8 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
self.assertFalse(hasattr(self.library, 'partner_zapi_client'))
@mock.patch.object(block_lib, 'check_for_setup_error')
@mock.patch.object(
block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
def test_check_for_setup_error(self, super_check_for_setup_error):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
@ -201,9 +200,9 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_raises(self):
self.zapi_client.get_lun_map.side_effect = NaApiError
self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
initiators = fake.FC_FORMATTED_INITIATORS
self.assertRaises(NaApiError,
self.assertRaises(netapp_api.NaApiError,
self.library._find_mapped_lun_igroup,
'path',
initiators)

View File

@ -27,9 +27,7 @@ from cinder import exception
from cinder import test
from cinder.tests.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.block_base import \
NetAppBlockStorageLibrary as block_lib
from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
@ -39,7 +37,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
super(NetAppBlockStorageLibraryTestCase, self).setUp()
kwargs = {'configuration': mock.Mock()}
self.library = block_lib('driver', 'protocol', **kwargs)
self.library = block_base.NetAppBlockStorageLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.mock_request = mock.Mock()
@ -47,27 +46,33 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
def tearDown(self):
super(NetAppBlockStorageLibraryTestCase, self).tearDown()
@mock.patch.object(block_lib, '_get_lun_attr',
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value={'Volume': 'vol1'}))
def test_get_pool(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, 'vol1')
@mock.patch.object(block_lib, '_get_lun_attr',
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value=None))
def test_get_pool_no_metadata(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, None)
@mock.patch.object(block_lib, '_get_lun_attr',
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value=dict()))
def test_get_pool_volume_unknown(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, None)
@mock.patch.object(block_lib, '_create_lun', mock.Mock())
@mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
@mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun_handle', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=None))
@mock.patch.object(block_base, 'LOG', mock.Mock())
@ -86,8 +91,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'id': uuid.uuid4(),
'host': 'hostname@backend'}) # missing pool
@mock.patch.object(block_lib, '_get_lun_attr')
@mock.patch.object(block_lib, '_get_or_create_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
@ -105,9 +112,12 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.zapi_client.map_lun.assert_called_once_with(
fake.LUN1, fake.IGROUP1_NAME, lun_id=None)
@mock.patch.object(block_lib, '_get_lun_attr')
@mock.patch.object(block_lib, '_get_or_create_igroup')
@mock.patch.object(block_lib, '_find_mapped_lun_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
@ -115,7 +125,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
mock_get_or_create_igroup.return_value = fake.IGROUP1_NAME
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
self.zapi_client.map_lun.side_effect = NaApiError
self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
lun_id = self.library._map_lun(
'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)
@ -124,9 +134,12 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_find_mapped_lun_igroup.assert_called_once_with(
fake.LUN1, fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_lib, '_get_lun_attr')
@mock.patch.object(block_lib, '_get_or_create_igroup')
@mock.patch.object(block_lib, '_find_mapped_lun_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_map_lun_api_error(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
@ -134,12 +147,14 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
mock_get_or_create_igroup.return_value = fake.IGROUP1_NAME
mock_find_mapped_lun_igroup.return_value = (None, None)
self.zapi_client.map_lun.side_effect = NaApiError
self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
self.assertRaises(NaApiError, self.library._map_lun, 'fake_volume',
fake.FC_FORMATTED_INITIATORS, protocol, None)
self.assertRaises(netapp_api.NaApiError, self.library._map_lun,
'fake_volume', fake.FC_FORMATTED_INITIATORS,
protocol, None)
@mock.patch.object(block_lib, '_find_mapped_lun_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_unmap_lun(self, mock_find_mapped_lun_igroup):
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)
@ -188,8 +203,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.assertRaises(NotImplementedError,
self.library._get_fc_target_wwpns)
@mock.patch.object(block_lib, '_build_initiator_target_map')
@mock.patch.object(block_lib, '_map_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_map_lun')
def test_initialize_connection_fc(self, mock_map_lun,
mock_build_initiator_target_map):
self.maxDiff = None
@ -204,8 +221,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_map_lun.assert_called_once_with(
'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None)
@mock.patch.object(block_lib, '_build_initiator_target_map')
@mock.patch.object(block_lib, '_map_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_map_lun')
def test_initialize_connection_fc_no_wwpns(
self, mock_map_lun, mock_build_initiator_target_map):
@ -216,9 +235,12 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
fake.FC_VOLUME,
fake.FC_CONNECTOR)
@mock.patch.object(block_lib, '_has_luns_mapped_to_initiators')
@mock.patch.object(block_lib, '_unmap_lun')
@mock.patch.object(block_lib, '_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators):
@ -233,10 +255,14 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_unmap_lun.assert_called_once_with(fake.LUN1,
fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_lib, '_build_initiator_target_map')
@mock.patch.object(block_lib, '_has_luns_mapped_to_initiators')
@mock.patch.object(block_lib, '_unmap_lun')
@mock.patch.object(block_lib, '_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
def test_terminate_connection_fc_no_more_luns(
self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators,
@ -253,7 +279,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)
@mock.patch.object(block_lib, '_get_fc_target_wwpns')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_fc_target_wwpns')
def test_build_initiator_target_map_no_lookup_service(
self, mock_get_fc_target_wwpns):
@ -267,7 +294,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map)
self.assertEqual(0, num_paths)
@mock.patch.object(block_lib, '_get_fc_target_wwpns')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_fc_target_wwpns')
def test_build_initiator_target_map_with_lookup_service(
self, mock_get_fc_target_wwpns):
@ -283,9 +311,12 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
self.assertEqual(4, num_paths)
@mock.patch.object(block_lib, '_create_lun', mock.Mock())
@mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
@mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun_handle', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'LOG', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value={'netapp:raid_type': 'raid4'}))
@ -300,9 +331,12 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'Use netapp_raid_type instead.'
na_utils.LOG.warning.assert_called_once_with(warn_msg)
@mock.patch.object(block_lib, '_create_lun', mock.Mock())
@mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
@mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_create_lun_handle', mock.Mock())
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'LOG', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value={'netapp_thick_provisioned':
@ -337,7 +371,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
'source-name': 'lun_path'})
self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
@mock.patch.object(block_lib, '_extract_lun_info',
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_extract_lun_info',
mock.Mock(return_value=block_base.NetAppLun(
'lun0', 'lun0', '3', {'UUID': 'src_id'})))
def test_get_existing_vol_manage_lun(self):
@ -348,7 +383,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
self.library._extract_lun_info.assert_called_once_with('lun0')
self.assertEqual('lun0', lun.name)
@mock.patch.object(block_lib, '_get_existing_vol_with_manage_ref',
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_existing_vol_with_manage_ref',
mock.Mock(return_value=block_base.NetAppLun(
'handle', 'name', '1073742824', {})))
def test_manage_existing_get_size(self):

View File

@ -24,11 +24,7 @@ from cinder import test
import cinder.tests.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.block_base import \
NetAppBlockStorageLibrary as block_lib
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.block_cmode import \
NetAppBlockStorageCmodeLibrary as block_lib_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
@ -42,7 +38,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_cmode()}
self.library = block_lib_cmode('driver', 'protocol', **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
@ -66,7 +63,7 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(na_utils, 'check_flags')
@mock.patch.object(block_lib, 'do_setup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_check_flags):
context = mock.Mock()
@ -75,7 +72,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
super_do_setup.assert_called_once_with(context)
self.assertEqual(1, mock_check_flags.call_count)
@mock.patch.object(block_lib, 'check_for_setup_error')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'check_for_setup_error')
@mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
def test_check_for_setup_error(self, mock_check_ssc_api_permissions,
super_check_for_setup_error):

View File

@ -23,7 +23,7 @@ from cinder import test
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.nfs import NfsDriver as nfs_lib
from cinder.volume.drivers import nfs
class NetAppNfsDriverTestCase(test.TestCase):
@ -38,7 +38,7 @@ class NetAppNfsDriverTestCase(test.TestCase):
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
@mock.patch.object(nfs_lib, 'do_setup')
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())

View File

@ -25,7 +25,7 @@ from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.nfs import NfsDriver as nfs_lib
from cinder.volume.drivers import nfs
class NetAppCmodeNfsDriverTestCase(test.TestCase):
@ -52,7 +52,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
return config
@mock.patch.object(client_cmode, 'Client', mock.Mock())
@mock.patch.object(nfs_lib, 'do_setup')
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())

View File

@ -48,8 +48,8 @@ class VHDUtilsTestCase(test.TestCase):
fake_ctypes.c_ulong = lambda x: x
mock.patch.multiple(
'cinder.volume.drivers.windows.vhdutils', ctypes=fake_ctypes,
windll=mock.DEFAULT, wintypes=mock.DEFAULT, kernel32=mock.DEFAULT,
'cinder.volume.drivers.windows.vhdutils',
ctypes=fake_ctypes, kernel32=mock.DEFAULT,
virtdisk=mock.DEFAULT, Win32_GUID=mock.DEFAULT,
Win32_RESIZE_VIRTUAL_DISK_PARAMETERS=mock.DEFAULT,
Win32_CREATE_VIRTUAL_DISK_PARAMETERS=mock.DEFAULT,
@ -109,7 +109,7 @@ class VHDUtilsTestCase(test.TestCase):
vhdutils.VIRTUAL_DISK_ACCESS_NONE, None,
vhdutils.CREATE_VIRTUAL_DISK_FLAG_NONE, 0,
vhdutils.ctypes.byref(fake_params), None,
vhdutils.ctypes.byref(vhdutils.wintypes.HANDLE()))
vhdutils.ctypes.byref(vhdutils.ctypes.wintypes.HANDLE()))
self.assertTrue(self._vhdutils._close.called)
def test_create_vhd_exception(self):
@ -150,7 +150,7 @@ class VHDUtilsTestCase(test.TestCase):
vhdutils.ctypes.byref(fake_vst),
vhdutils.ctypes.c_wchar_p(self._FAKE_VHD_PATH),
fake_access_mask, fake_open_flag, fake_params,
vhdutils.ctypes.byref(vhdutils.wintypes.HANDLE()))
vhdutils.ctypes.byref(vhdutils.ctypes.wintypes.HANDLE()))
self.assertEqual(fake_device_id, fake_vst.DeviceId)
@ -309,8 +309,8 @@ class VHDUtilsTestCase(test.TestCase):
fake_info_member = vhdutils.GET_VIRTUAL_DISK_INFO_SIZE
fake_info = mock.Mock()
fake_info.VhdInfo.Size._fields_ = [
("VirtualSize", vhdutils.wintypes.ULARGE_INTEGER),
("PhysicalSize", vhdutils.wintypes.ULARGE_INTEGER)]
("VirtualSize", vhdutils.ctypes.wintypes.ULARGE_INTEGER),
("PhysicalSize", vhdutils.ctypes.wintypes.ULARGE_INTEGER)]
fake_info.VhdInfo.Size.VirtualSize = self._FAKE_VHD_SIZE
fake_info.VhdInfo.Size.PhysicalSize = fake_physical_size

View File

@ -20,14 +20,13 @@
"""Unit tests for brcd fc zone client cli."""
import mock
from mock import patch
from oslo_concurrency import processutils
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli \
import BrcdFCZoneClientCLI
from cinder.zonemanager.drivers.brocade \
import brcd_fc_zone_client_cli as client_cli
import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
@ -64,7 +63,7 @@ supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1']
unsupported_firmware = ['Fabric OS: v6.2.1']
class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
def setUp(self):
super(TestBrcdFCZoneClientCLI, self).setUp()
@ -73,7 +72,7 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@patch.object(BrcdFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG]
get_switch_info_mock.return_value = cfgactvshow
@ -81,15 +80,15 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(active_zoneset_returned, active_zoneset)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,
self.get_active_zone_set)
@mock.patch.object(BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(BrcdFCZoneClientCLI, '_cfg_save')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
apply_zone_change_mock,
get_active_zs_mock):
@ -99,9 +98,9 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
self.assertEqual(3, apply_zone_change_mock.call_count)
cfg_save_mock.assert_called_once_with()
@mock.patch.object(BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
apply_zone_change_mock,
get_active_zs_mock):
@ -111,20 +110,20 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
activate_zoneset_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@mock.patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_activate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.activate_zoneset('zoneset1')
self.assertTrue(return_value)
@mock.patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_deactivate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.deactivate_zoneset()
self.assertTrue(return_value)
@mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(BrcdFCZoneClientCLI, '_cfg_save')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_delete_zones_activate_false(self, cfg_save_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
@ -134,8 +133,8 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
cfg_save_mock.assert_called_once_with()
@patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@patch.object(BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_delete_zones_activate_true(self, activate_zs_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') \
@ -147,7 +146,7 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
activate_zs_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@patch.object(BrcdFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
@ -155,26 +154,26 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list, ns_info_list_expected)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,
self.get_nameserver_info)
@patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test__cfg_save(self, ssh_execute_mock):
cmd_list = [ZoneConstant.CFG_SAVE]
self._cfg_save()
ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__zone_delete(self, apply_zone_change_mock):
zone_name = 'testzone'
cmd_list = ['zonedelete', '"testzone"']
self._zone_delete(zone_name)
apply_zone_change_mock.assert_called_once_with(cmd_list)
@patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__cfg_trans_abort(self, apply_zone_change_mock):
cmd_list = [ZoneConstant.CFG_ZONE_TRANS_ABORT]
with mock.patch.object(self, '_is_trans_abortable') \
@ -184,7 +183,7 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
is_trans_abortable_mock.assert_called_once_with()
apply_zone_change_mock.assert_called_once_with(cmd_list)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_true(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SHOW_TRANS]
run_ssh_mock.return_value = (Stream(ZoneConstant.TRANS_ABORTABLE),
@ -193,13 +192,13 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
self.assertTrue(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
run_ssh_mock.return_value = (Stream(), Stream())
self.assertRaises(exception.BrocadeZoningCliException,
self._is_trans_abortable)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_false(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SHOW_TRANS]
cfgtransshow = 'There is no outstanding zoning transaction'
@ -208,14 +207,14 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
self.assertFalse(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_apply_zone_change(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SAVE]
run_ssh_mock.return_value = (None, None)
self.apply_zone_change(cmd_list)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(BrcdFCZoneClientCLI, '_run_ssh')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [ZoneConstant.NS_SHOW]
nsshow_list = [nsshow]
@ -233,22 +232,22 @@ class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
self.assertRaises(exception.InvalidParameterValue,
self._parse_ns_output, invalid_switch_data)
@patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (supported_firmware, None)
self.assertTrue(self.is_supported_firmware())
@patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
self.assertFalse(self.is_supported_firmware())
@patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (None, Stream())
self.assertFalse(self.is_supported_firmware())
@patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,

View File

@ -29,8 +29,7 @@ from cinder.i18n import _LI
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver \
import BrcdFCZoneDriver
from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver
LOG = logging.getLogger(__name__)
@ -119,7 +118,7 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
fabric_map = {}
return fabric_map
@mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_add_connection(self, get_active_zs_mock):
"""Normal flow for i-t mode."""
GlobalVars._is_normal_test = True
@ -132,7 +131,7 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state)
@mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_delete_connection(self, get_active_zs_mock):
GlobalVars._is_normal_test = True
get_active_zs_mock.return_value = _active_cfg_before_delete
@ -140,7 +139,7 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
'BRCD_FAB_1', _initiator_target_map)
self.assertFalse(_zone_name in GlobalVars._zone_state)
@mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_add_connection_for_initiator_mode(self, get_active_zs_mock):
"""Normal flow for i mode."""
GlobalVars._is_normal_test = True
@ -149,7 +148,7 @@ class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state)
@mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
@mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_delete_connection_for_initiator_mode(self, get_active_zs_mock):
GlobalVars._is_normal_test = True
get_active_zs_mock.return_value = _active_cfg_before_delete

View File

@ -23,7 +23,7 @@ from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
from cinder.zonemanager import fc_san_lookup_service as san_service
LOG = logging.getLogger(__name__)
@ -36,7 +36,7 @@ _device_map_to_verify = {
_fabric_wwn = '100000051e55a100'
class TestFCSanLookupService(FCSanLookupService, test.TestCase):
class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase):
def setUp(self):
super(TestFCSanLookupService, self).setUp()

View File

@ -26,7 +26,7 @@ from cinder.volume import configuration as conf
import cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service \
as cisco_lookup
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
from cinder.zonemanager.utils import get_formatted_wwn
from cinder.zonemanager import utils as zm_utils
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = ['VSAN 304\n',
@ -111,7 +111,7 @@ class TestCiscoFCSanLookupService(cisco_lookup.CiscoFCSanLookupService,
wwn_list = ['10008c7cff523b01']
return_wwn_list = []
expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01']
return_wwn_list.append(get_formatted_wwn(wwn_list[0]))
return_wwn_list.append(zm_utils.get_formatted_wwn(wwn_list[0]))
self.assertEqual(return_wwn_list, expected_wwn_list)
@mock.patch.object(cisco_lookup.CiscoFCSanLookupService,

View File

@ -17,13 +17,13 @@
"""Unit tests for Cisco fc zone client cli."""
from mock import patch
import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli \
import CiscoFCZoneClientCLI
from cinder.zonemanager.drivers.cisco \
import cisco_fc_zone_client_cli as cli
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
nsshow = '20:1a:00:05:1e:e8:e3:29'
@ -122,7 +122,7 @@ new_zones = {'openstack10000012345678902001009876543210':
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
class TestCiscoFCZoneClientCLI(cli.CiscoFCZoneClientCLI, test.TestCase):
def setUp(self):
super(TestCiscoFCZoneClientCLI, self).setUp()
@ -132,7 +132,7 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more']
@ -141,13 +141,13 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(active_zoneset_returned, active_zoneset)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_active_zone_set)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_basic(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_basic
@ -155,7 +155,7 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
get_zoning_status_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(zoning_status_returned, zoning_status_basic)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value =\
@ -165,7 +165,7 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_nosess)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess
@ -174,7 +174,7 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_sess)
@patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
@ -182,19 +182,19 @@ class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list, ns_info_list_expected)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_nameserver_info)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test__cfg_save(self, run_ssh_mock):
cmd_list = ['copy', 'running-config', 'startup-config']
self._cfg_save()
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@patch.object(CiscoFCZoneClientCLI, '_run_ssh')
@mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan]
nsshow_list = [nsshow]

View File

@ -20,7 +20,7 @@
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
from cinder.zonemanager import fc_san_lookup_service as san_service
_target_ns_map = {'100000051e55a100': ['20240002ac000a50']}
_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']}
@ -31,7 +31,7 @@ _device_map_to_verify = {
_fabric_wwn = '100000051e55a100'
class TestFCSanLookupService(FCSanLookupService, test.TestCase):
class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase):
def setUp(self):
super(TestFCSanLookupService, self).setUp()

View File

@ -20,12 +20,11 @@
"""Unit tests for FC Zone Manager."""
import mock
from mock import Mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver
from cinder.zonemanager.drivers import fc_zone_driver
from cinder.zonemanager import fc_zone_manager
fabric_name = 'BRCD_FAB_3'
@ -43,7 +42,7 @@ class TestFCZoneManager(test.TestCase):
config.fc_fabric_names = fabric_name
def fake_build_driver(self):
self.driver = Mock(FCZoneDriver)
self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)
self.stubs.Set(fc_zone_manager.ZoneManager, '_build_driver',
fake_build_driver)
@ -51,7 +50,7 @@ class TestFCZoneManager(test.TestCase):
self.zm = fc_zone_manager.ZoneManager(configuration=config)
self.configuration = conf.Configuration(None)
self.configuration.fc_fabric_names = fabric_name
self.driver = Mock(FCZoneDriver)
self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)
def __init__(self, *args, **kwargs):
super(TestFCZoneManager, self).__init__(*args, **kwargs)

View File

@ -25,11 +25,7 @@ from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.cloudbyte.options import (
cloudbyte_create_volume_opts
)
from cinder.volume.drivers.cloudbyte.options import cloudbyte_add_qosgroup_opts
from cinder.volume.drivers.cloudbyte.options import cloudbyte_connection_opts
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
@ -47,9 +43,12 @@ class CloudByteISCSIDriver(san.SanISCSIDriver):
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(cloudbyte_create_volume_opts)
self.configuration.append_config_values(cloudbyte_connection_opts)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):

View File

@ -20,8 +20,7 @@ Fibre Channel Driver for EMC VNX array based on CLI.
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager.utils import AddFCZone
from cinder.zonemanager.utils import RemoveFCZone
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
@ -118,7 +117,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
"""Make sure volume is exported."""
pass
@AddFCZone
@zm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
@ -169,7 +168,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
% {'conn_info': conn_info})
return conn_info
@RemoveFCZone
@zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)

View File

@ -31,7 +31,7 @@ from cinder.openstack.common import log as logging
# Handle case where we are running in a monkey patched environment
if patcher.is_monkey_patched('socket'):
from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
from eventlet.green.OpenSSL import SSL
else:
raise ImportError
@ -78,7 +78,7 @@ class OpenSSLConnectionDelegator(object):
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
self.connection = Connection(*args, **kwargs)
self.connection = SSL.Connection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)

View File

@ -16,7 +16,7 @@
import datetime
import random
import re
from xml.dom.minidom import parseString
from xml.dom import minidom
import six
@ -671,7 +671,7 @@ class EMCVMAXUtils(object):
myFile = open(fileName, 'r')
data = myFile.read()
myFile.close()
dom = parseString(data)
dom = minidom.parseString(data)
portGroupElements = dom.getElementsByTagName('PortGroup')
if portGroupElements is not None and len(portGroupElements) > 0:
@ -714,7 +714,7 @@ class EMCVMAXUtils(object):
myFile = open(fileName, 'r')
data = myFile.read()
myFile.close()
dom = parseString(data)
dom = minidom.parseString(data)
tag = dom.getElementsByTagName(stringToParse)
if tag is not None and len(tag) > 0:
strXml = tag[0].toxml()

View File

@ -36,12 +36,11 @@ from taskflow import task
from taskflow.types import failure
from cinder import exception
from cinder.exception import EMCVnxCLICmdError
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.configuration import Configuration
from cinder.volume import configuration as config
from cinder.volume.drivers.san import san
from cinder.volume import manager
from cinder.volume import utils as vol_utils
@ -352,10 +351,10 @@ class CommandLineHelper(object):
'-tieringPolicy', 'noMovement']}
def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs):
raise EMCVnxCLICmdError(cmd=cmd,
rc=rc,
out=out.split('\n'),
**kwargs)
raise exception.EMCVnxCLICmdError(cmd=cmd,
rc=rc,
out=out.split('\n'),
**kwargs)
def create_lun_with_advance_feature(self, pool, name, size,
provisioning, tiering,
@ -383,7 +382,7 @@ class CommandLineHelper(object):
if provisioning == 'compressed':
self.enable_or_disable_compression_on_lun(
name, 'on')
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on enable compression on lun %s."),
@ -394,7 +393,7 @@ class CommandLineHelper(object):
if consistencygroup_id:
self.add_lun_to_consistency_group(
consistencygroup_id, data['lun_id'])
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on adding lun to consistency"
@ -418,7 +417,7 @@ class CommandLineHelper(object):
return (data[self.LUN_STATE.key] == 'Ready' and
data[self.LUN_STATUS.key] == 'OK(0x0)' and
data[self.LUN_OPERATION.key] == 'None')
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
orig_out = "\n".join(ex.kwargs["out"])
if orig_out.find(
self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
@ -820,7 +819,7 @@ class CommandLineHelper(object):
dst_name=None):
try:
self.migrate_lun(src_id, dst_id)
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
migration_succeed = False
orig_out = "\n".join(ex.kwargs["out"])
if self._is_sp_unavailable_error(orig_out):
@ -1579,13 +1578,12 @@ class EMCVnxCliBase(object):
self.configuration.check_max_pool_luns_threshold)
# if zoning_mode is fabric, use lookup service to build itor_tgt_map
self.zonemanager_lookup_service = None
zm_conf = Configuration(manager.volume_manager_opts)
zm_conf = config.Configuration(manager.volume_manager_opts)
if (zm_conf.safe_get('zoning_mode') == 'fabric' or
self.configuration.safe_get('zoning_mode') == 'fabric'):
from cinder.zonemanager.fc_san_lookup_service \
import FCSanLookupService
from cinder.zonemanager import fc_san_lookup_service as fc_service
self.zonemanager_lookup_service = \
FCSanLookupService(configuration=configuration)
fc_service.FCSanLookupService(configuration=configuration)
self.max_retries = 5
if self.destroy_empty_sg:
LOG.warning(_LW("destroy_empty_storage_group: True. "
@ -1759,7 +1757,7 @@ class EMCVnxCliBase(object):
"""Deletes an EMC volume."""
try:
self._client.delete_lun(volume['name'])
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
orig_out = "\n".join(ex.kwargs["out"])
if (self.force_delete_lun_in_sg and
(self._client.CLI_RESP_PATTERN_LUN_IN_SG_1 in orig_out or
@ -2288,7 +2286,7 @@ class EMCVnxCliBase(object):
def assure_host_in_storage_group(self, hostname, storage_group):
try:
self._client.connect_host_to_storage_group(hostname, storage_group)
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] == 83:
# SG was not created or was destroyed by another concurrent
# operation before connected.
@ -2498,7 +2496,7 @@ class EMCVnxCliBase(object):
try:
sgdata = self._client.get_storage_group(hostname,
poll=False)
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] != 83:
raise ex
# Storage Group has not existed yet
@ -2540,7 +2538,7 @@ class EMCVnxCliBase(object):
self.hlu_cache[hostname] = {}
self.hlu_cache[hostname][lun_id] = hlu
return hlu, sgdata
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
LOG.debug("Add HLU to storagegroup failed, retry %s",
tried)
elif tried == 1:
@ -2705,7 +2703,7 @@ class EMCVnxCliBase(object):
try:
lun_map = self.get_lun_map(hostname)
self.hlu_cache[hostname] = lun_map
except EMCVnxCLICmdError as ex:
except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] == 83:
LOG.warning(_LW("Storage Group %s is not found. "
"terminate_connection() is "

View File

@ -30,7 +30,7 @@ from cinder.i18n import _, _LE, _LW, _LI
from cinder.openstack.common import log as logging
from cinder import ssh_utils
from cinder import utils
from cinder.volume.drivers.san import SanISCSIDriver
from cinder.volume.drivers import san
LOG = logging.getLogger(__name__)
@ -90,7 +90,7 @@ def with_timeout(f):
return __inner
class DellEQLSanISCSIDriver(SanISCSIDriver):
class DellEQLSanISCSIDriver(san.SanISCSIDriver):
"""Implements commands for Dell EqualLogic SAN ISCSI management.
To enable the driver add the following line to the cinder configuration:

View File

@ -23,7 +23,7 @@ This common class is for FUJITSU ETERNUS DX volume drivers based on SMI-S.
import base64
import hashlib
import time
from xml.dom.minidom import parseString
from xml.dom import minidom
from oslo_config import cfg
from oslo_utils import units
@ -1109,7 +1109,7 @@ class FJDXCommon(object):
file = open(filename, 'r')
data = file.read()
file.close()
dom = parseString(data)
dom = minidom.parseString(data)
storageTypes = dom.getElementsByTagName('StorageType')
if storageTypes is not None and len(storageTypes) > 0:
storageType = storageTypes[0].toxml()
@ -1134,7 +1134,7 @@ class FJDXCommon(object):
file = open(filename, 'r')
data = file.read()
file.close()
dom = parseString(data)
dom = minidom.parseString(data)
snappools = dom.getElementsByTagName('SnapPool')
if snappools is not None and len(snappools) > 0:
snappool = snappools[0].toxml()
@ -1155,7 +1155,7 @@ class FJDXCommon(object):
file = open(filename, 'r')
data = file.read()
file.close()
dom = parseString(data)
dom = minidom.parseString(data)
timeouts = dom.getElementsByTagName('Timeout')
if timeouts is not None and len(timeouts) > 0:
timeout = timeouts[0].toxml().replace('<Timeout>', '')
@ -1173,7 +1173,7 @@ class FJDXCommon(object):
file = open(filename, 'r')
data = file.read()
file.close()
dom = parseString(data)
dom = minidom.parseString(data)
ecomUsers = dom.getElementsByTagName('EcomUserName')
if ecomUsers is not None and len(ecomUsers) > 0:
ecomUser = ecomUsers[0].toxml().replace('<EcomUserName>', '')
@ -1195,7 +1195,7 @@ class FJDXCommon(object):
file = open(filename, 'r')
data = file.read()
file.close()
dom = parseString(data)
dom = minidom.parseString(data)
ecomIps = dom.getElementsByTagName('EcomServerIp')
if ecomIps is not None and len(ecomIps) > 0:
ecomIp = ecomIps[0].toxml().replace('<EcomServerIp>', '')

View File

@ -31,7 +31,7 @@ from cinder import exception
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -191,7 +191,7 @@ class FIOconnection(object):
return
class FIOioControlDriver(SanISCSIDriver):
class FIOioControlDriver(san.SanISCSIDriver):
"""Fusion-io ioControl iSCSI volume driver."""
VERSION = '1.0.0'

View File

@ -29,7 +29,7 @@ from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds.hus_backend import HusBackend
from cinder.volume.drivers.hds import hus_backend
HDS_VERSION = '1.0.2'
@ -53,7 +53,7 @@ HUS_DEFAULT_CONFIG = {'hus_cmd': 'hus-cmd',
def factory_bend():
"""Factory over-ride in self-tests."""
return HusBackend()
return hus_backend.HusBackend()
def _loc_info(loc):

View File

@ -28,7 +28,7 @@ from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers.hds import hnas_backend
from cinder.volume import utils
@ -48,7 +48,7 @@ HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'chap_enabled': 'True'}
def factory_bend(type):
return HnasBackend()
return hnas_backend.HnasBackend()
def _loc_info(loc):

View File

@ -30,7 +30,7 @@ from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers.hds import hnas_backend
from cinder.volume.drivers import nfs
@ -125,7 +125,7 @@ def _read_config(xml_config_file):
def factory_bend():
"""Factory over-ride in self-tests."""
return HnasBackend()
return hnas_backend.HnasBackend()
class HDSNFSDriver(nfs.NfsDriver):

View File

@ -16,7 +16,6 @@ Common class for Hitachi storage drivers.
"""
from contextlib import nested
import re
import threading
@ -528,7 +527,7 @@ class HBSDCommon(object):
pool_id = self.configuration.hitachi_pool_id
lock = basic_lib.get_process_lock(self.storage_lock_file)
with nested(self.storage_obj_lock, lock):
with self.storage_obj_lock, lock:
ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol)
return ldev

View File

@ -16,7 +16,6 @@ Fibre channel Cinder volume driver for Hitachi storage.
"""
from contextlib import nested
import os
import threading
@ -416,8 +415,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
@ -457,8 +456,8 @@ class HBSDFCDriver(cinder.volume.driver.FibreChannelDriver):
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)

View File

@ -12,8 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import nested
from functools import wraps
import functools
import os
import re
import shlex
@ -119,7 +118,7 @@ CONF.register_opts(volume_opts)
def horcm_synchronized(function):
@wraps(function)
@functools.wraps(function)
def wrapper(*args, **kargs):
if len(args) == 1:
inst = args[0].conf.hitachi_horcm_numbers[0]
@ -129,19 +128,19 @@ def horcm_synchronized(function):
raidcom_obj_lock = args[0].raidcom_pair_lock
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with nested(raidcom_obj_lock, lock):
with raidcom_obj_lock, lock:
return function(*args, **kargs)
return wrapper
def storage_synchronized(function):
@wraps(function)
@functools.wraps(function)
def wrapper(*args, **kargs):
serial = args[0].conf.hitachi_serial_number
resource_lock = args[0].resource_lock
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
lock = basic_lib.get_process_lock(resource_lock_file)
with nested(resource_lock, lock):
with resource_lock, lock:
return function(*args, **kargs)
return wrapper
@ -270,7 +269,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with nested(raidcom_obj_lock, lock):
with raidcom_obj_lock, lock:
ret, stdout, stderr = self.exec_command(cmd, args=args,
printflag=printflag)
@ -290,7 +289,7 @@ class HBSDHORCM(basic_lib.HBSDBasicLib):
elif ret in HORCM_ERROR:
_ret = 0
with nested(raidcom_obj_lock, lock):
with raidcom_obj_lock, lock:
if self.check_horcm(inst) != HORCM_RUNNING:
_ret, _stdout, _stderr = self.start_horcm(inst)
if _ret and _ret != HORCM_RUNNING:

View File

@ -16,7 +16,6 @@ iSCSI Cinder volume driver for Hitachi storage.
"""
from contextlib import nested
import os
import threading
@ -348,8 +347,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
@ -390,8 +389,8 @@ class HBSDISCSIDriver(cinder.volume.driver.ISCSIDriver):
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
with nested(self.common.volume_info[ldev]['lock'],
self.common.volume_info[ldev]['in_use']):
with self.common.volume_info[ldev]['lock'],\
self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
def create_export(self, context, volume):

View File

@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import nested
import re
import shlex
import threading
@ -66,7 +65,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with nested(self.hsnm_lock, lock):
with self.hsnm_lock, lock:
ret, stdout, stderr = self.exec_command('env', args=args,
printflag=printflag)
@ -617,7 +616,7 @@ class HBSDSNM2(basic_lib.HBSDBasicLib):
import pexpect
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with nested(self.hsnm_lock, lock):
with self.hsnm_lock, lock:
try:
child = pexpect.spawn(cmd)
child.expect('Secret: ', timeout=CHAP_TIMEOUT)

View File

@ -17,7 +17,7 @@ Volume Drivers for Huawei OceanStor 18000 storage arrays.
"""
from cinder.volume import driver
from cinder.volume.drivers.huawei.rest_common import RestCommon
from cinder.volume.drivers.huawei import rest_common
from cinder.zonemanager import utils as fczm_utils
@ -36,7 +36,7 @@ class Huawei18000ISCSIDriver(driver.ISCSIDriver):
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
self.common = RestCommon(configuration=self.configuration)
self.common = rest_common.RestCommon(configuration=self.configuration)
return self.common.login()
def check_for_setup_error(self):
@ -124,7 +124,7 @@ class Huawei18000FCDriver(driver.FibreChannelDriver):
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
self.common = RestCommon(configuration=self.configuration)
self.common = rest_common.RestCommon(configuration=self.configuration)
return self.common.login()
def check_for_setup_error(self):

View File

@ -42,7 +42,7 @@ from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers import nfs
from cinder.volume.drivers.remotefs import nas_opts
from cinder.volume.drivers import remotefs
from cinder.volume.drivers.san import san
VERSION = '1.1.0'
@ -78,7 +78,7 @@ class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
def __init__(self, execute=utils.execute, *args, **kwargs):
self._context = None
super(IBMNAS_NFSDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(nas_opts)
self.configuration.append_config_values(remotefs.nas_opts)
self.configuration.append_config_values(platform_opts)
self.configuration.san_ip = self.configuration.nas_ip
self.configuration.san_login = self.configuration.nas_login

View File

@ -24,7 +24,7 @@ from cinder import exception
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.options import netapp_proxy_opts
from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils as na_utils
@ -69,7 +69,7 @@ class NetAppDriver(driver.ProxyVD):
raise exception.InvalidInput(
reason=_('Required configuration not found'))
config.append_config_values(netapp_proxy_opts)
config.append_config_values(options.netapp_proxy_opts)
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info()

View File

@ -28,7 +28,7 @@ import six
from cinder import exception
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.volume.configuration import Configuration
from cinder.volume import configuration
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp import options as na_opts
@ -79,7 +79,8 @@ class NetAppBlockStorage7modeLibrary(block_base.
def _do_partner_setup(self):
partner_backend = self.configuration.netapp_partner_backend_name
if partner_backend:
config = Configuration(na_opts.netapp_7mode_opts, partner_backend)
config = configuration.Configuration(na_opts.netapp_7mode_opts,
partner_backend)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_transport_opts)

View File

@ -32,7 +32,7 @@ import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
@ -267,7 +267,7 @@ class NetAppBlockStorageLibrary(object):
initiator_type, os)
try:
return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
except NaApiError:
except na_api.NaApiError:
exc_info = sys.exc_info()
(_igroup, lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)

View File

@ -24,9 +24,6 @@ import six
from cinder.i18n import _LE, _LW, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
LOG = logging.getLogger(__name__)
@ -35,11 +32,12 @@ LOG = logging.getLogger(__name__)
class Client(object):
def __init__(self, **kwargs):
self.connection = NaServer(host=kwargs['hostname'],
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=kwargs['username'],
password=kwargs['password'])
self.connection = netapp_api.NaServer(
host=kwargs['hostname'],
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=kwargs['username'],
password=kwargs['password'])
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
@ -58,7 +56,7 @@ class Client(object):
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, NaElement):
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
@ -255,7 +253,7 @@ class Client(object):
"""
def _create_ems(netapp_backend, app_version, server_type):
"""Create ems API request."""
ems_log = NaElement('ems-autosupport-log')
ems_log = netapp_api.NaElement('ems-autosupport-log')
host = socket.getfqdn() or 'Cinder_node'
if server_type == "cluster":
dest = "cluster node"
@ -275,13 +273,13 @@ class Client(object):
def _create_vs_get():
"""Create vs_get API request."""
vs_get = NaElement('vserver-get-iter')
vs_get = netapp_api.NaElement('vserver-get-iter')
vs_get.add_new_child('max-records', '1')
query = NaElement('query')
query = netapp_api.NaElement('query')
query.add_node_with_children('vserver-info',
**{'vserver-type': 'node'})
vs_get.add_child_elem(query)
desired = NaElement('desired-attributes')
desired = netapp_api.NaElement('desired-attributes')
desired.add_node_with_children(
'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
vs_get.add_child_elem(desired)
@ -315,21 +313,23 @@ class Client(object):
if api_version:
major, minor = api_version
else:
raise NaApiError(code='Not found',
message='No API version found')
raise netapp_api.NaApiError(
code='Not found',
message='No API version found')
if major == 1 and minor > 15:
node = getattr(requester, 'vserver', None)
else:
node = _get_cluster_node(na_server)
if node is None:
raise NaApiError(code='Not found',
message='No vserver found')
raise netapp_api.NaApiError(
code='Not found',
message='No vserver found')
na_server.set_vserver(node)
else:
na_server.set_vfiler(None)
na_server.invoke_successfully(ems, True)
LOG.debug("ems executed successfully.")
except NaApiError as e:
except netapp_api.NaApiError as e:
LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
finally:
requester.last_ems = timeutils.utcnow()

View File

@ -17,8 +17,7 @@ Volume driver for NetApp Data ONTAP (7-mode) FibreChannel storage systems.
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.block_7mode import \
NetAppBlockStorage7modeLibrary as lib_7mode
from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@ -31,7 +30,8 @@ class NetApp7modeFibreChannelDriver(driver.FibreChannelDriver):
def __init__(self, *args, **kwargs):
super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = lib_7mode(self.DRIVER_NAME, 'FC', **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)

View File

@ -17,8 +17,7 @@ Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.block_cmode import \
NetAppBlockStorageCmodeLibrary as lib_cmode
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@ -31,7 +30,8 @@ class NetAppCmodeFibreChannelDriver(driver.FibreChannelDriver):
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = lib_cmode(self.DRIVER_NAME, 'FC', **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)

View File

@ -17,8 +17,7 @@ Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems.
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.block_7mode import \
NetAppBlockStorage7modeLibrary as lib_7mode
from cinder.volume.drivers.netapp.dataontap import block_7mode
LOG = logging.getLogger(__name__)
@ -31,7 +30,8 @@ class NetApp7modeISCSIDriver(driver.ISCSIDriver):
def __init__(self, *args, **kwargs):
super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
self.library = lib_7mode(self.DRIVER_NAME, 'iSCSI', **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)

View File

@ -17,8 +17,7 @@ Volume driver for NetApp Data ONTAP (C-mode) iSCSI storage systems.
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.block_cmode import \
NetAppBlockStorageCmodeLibrary as lib_cmode
from cinder.volume.drivers.netapp.dataontap import block_cmode
LOG = logging.getLogger(__name__)
@ -31,7 +30,8 @@ class NetAppCmodeISCSIDriver(driver.ISCSIDriver):
def __init__(self, *args, **kwargs):
super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
self.library = lib_cmode(self.DRIVER_NAME, 'iSCSI', **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)

View File

@ -22,7 +22,7 @@ Volume driver for NetApp NFS storage.
import os
import re
from threading import Timer
import threading
import time
from oslo_concurrency import processutils
@ -276,7 +276,7 @@ class NetAppNfsDriver(nfs.NfsDriver):
else:
# Set cleaning to True
self.cleaning = True
t = Timer(0, self._clean_image_cache)
t = threading.Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):

View File

@ -36,7 +36,6 @@ from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume import utils as volume_utils
@ -91,7 +90,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = get_volume_extra_specs(volume)
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
@ -107,7 +106,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_LW("Exception creating vol %(name)s on "
LOG.error(_LW("Exception creattest_nfs.pying vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
@ -329,7 +328,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = get_volume_extra_specs(volume)
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols

View File

@ -19,7 +19,7 @@ Storage service catalog utility functions and classes for NetApp systems.
"""
import copy
from threading import Timer
import threading
from oslo_utils import timeutils
import six
@ -511,8 +511,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
if synchronous:
get_cluster_latest_ssc(backend, na_server, vserver)
else:
t = Timer(0, get_cluster_latest_ssc,
args=[backend, na_server, vserver])
t = threading.Timer(0, get_cluster_latest_ssc,
args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warning(_LW('refresh stale ssc job in progress. Returning... '))
@ -522,8 +522,8 @@ def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
if synchronous:
refresh_cluster_stale_ssc(backend, na_server, vserver)
else:
t = Timer(0, refresh_cluster_stale_ssc,
args=[backend, na_server, vserver])
t = threading.Timer(0, refresh_cluster_stale_ssc,
args=[backend, na_server, vserver])
t.start()

View File

@ -35,10 +35,7 @@ from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
from cinder.volume.drivers.netapp.options import netapp_connection_opts
from cinder.volume.drivers.netapp.options import netapp_eseries_opts
from cinder.volume.drivers.netapp.options import netapp_transport_opts
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
@ -47,10 +44,10 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_connection_opts)
CONF.register_opts(netapp_eseries_opts)
CONF.register_opts(netapp_transport_opts)
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
@ -96,10 +93,11 @@ class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
def __init__(self, *args, **kwargs):
super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs)
na_utils.validate_instantiation(**kwargs)
self.configuration.append_config_values(netapp_basicauth_opts)
self.configuration.append_config_values(netapp_connection_opts)
self.configuration.append_config_values(netapp_transport_opts)
self.configuration.append_config_values(netapp_eseries_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self._backend_name = self.configuration.safe_get("volume_backend_name")\
or "NetApp_ESeries"
self._objects = {'disk_pool_refs': [], 'pools': [],

View File

@ -31,7 +31,7 @@ from suds import client
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume.drivers.san import san
DRIVER_VERSION = '1.0'
@ -67,7 +67,7 @@ class NimbleAPIException(exception.VolumeBackendAPIException):
message = _("Unexpected response from Nimble API")
class NimbleISCSIDriver(SanISCSIDriver):
class NimbleISCSIDriver(san.SanISCSIDriver):
"""OpenStack driver to enable Nimble Controller.

View File

@ -27,13 +27,13 @@ from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class HPLeftHandCLIQProxy(SanISCSIDriver):
class HPLeftHandCLIQProxy(san.SanISCSIDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
We use the CLIQ interface, over SSH.

View File

@ -34,7 +34,7 @@ LeftHand array.
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
from cinder.volume.driver import VolumeDriver
from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
@ -43,7 +43,7 @@ LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '1.0.3'
class HPLeftHandISCSIDriver(VolumeDriver):
class HPLeftHandISCSIDriver(driver.VolumeDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:

View File

@ -22,7 +22,7 @@ from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.driver import ISCSIDriver
from cinder.volume import driver
from cinder.volume import utils
from cinder.volume import volume_types
@ -30,7 +30,6 @@ LOG = logging.getLogger(__name__)
try:
import hplefthandclient
from hplefthandclient import client as hp_lh_client
from hplefthandclient import exceptions as hpexceptions
except ImportError:
import cinder.tests.fake_hp_lefthand_client as hplefthandclient
@ -80,7 +79,7 @@ extra_specs_value_map = {
}
class HPLeftHandRESTProxy(ISCSIDriver):
class HPLeftHandRESTProxy(driver.ISCSIDriver):
"""Executes REST commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
@ -121,7 +120,7 @@ class HPLeftHandRESTProxy(ISCSIDriver):
client.logout()
def _create_client(self):
return hp_lh_client.HPLeftHandClient(
return hplefthandclient.client.HPLeftHandClient(
self.configuration.hplefthand_api_url)
def do_setup(self, context):

View File

@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
from hashlib import md5
import hashlib
import urllib2
from lxml import etree
@ -49,7 +49,7 @@ class HPMSAClient(object):
def login(self):
"""Authenticates the service on the device."""
hash = md5("%s_%s" % (self._login, self._password))
hash = hashlib.md5("%s_%s" % (self._login, self._password))
digest = hash.hexdigest()
url = self._base_url + "/login/" + digest

View File

@ -24,14 +24,14 @@ from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import units
import requests
from six import wraps
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
@ -76,7 +76,7 @@ CONF.register_opts(sf_opts)
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@wraps(f)
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
@ -100,7 +100,7 @@ def retry(exc_tuple, tries=5, delay=1, backoff=2):
return retry_dec
class SolidFireDriver(SanISCSIDriver):
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:

View File

@ -30,11 +30,8 @@ import ctypes
import os
if os.name == 'nt':
from ctypes import windll
from ctypes import wintypes
kernel32 = windll.kernel32
virtdisk = windll.virtdisk
kernel32 = ctypes.windll.kernel32
virtdisk = ctypes.windll.virtdisk
from cinder import exception
from cinder.i18n import _
@ -45,88 +42,88 @@ LOG = logging.getLogger(__name__)
if os.name == 'nt':
class Win32_GUID(ctypes.Structure):
_fields_ = [("Data1", wintypes.DWORD),
("Data2", wintypes.WORD),
("Data3", wintypes.WORD),
("Data4", wintypes.BYTE * 8)]
_fields_ = [("Data1", ctypes.wintypes.DWORD),
("Data2", ctypes.wintypes.WORD),
("Data3", ctypes.wintypes.WORD),
("Data4", ctypes.wintypes.BYTE * 8)]
class Win32_VIRTUAL_STORAGE_TYPE(ctypes.Structure):
_fields_ = [
('DeviceId', wintypes.ULONG),
('DeviceId', ctypes.wintypes.ULONG),
('VendorId', Win32_GUID)
]
class Win32_RESIZE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('Version', ctypes.wintypes.DWORD),
('NewSize', ctypes.c_ulonglong)
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('Version', ctypes.wintypes.DWORD),
('RWDepth', ctypes.c_ulong),
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('GetInfoOnly', wintypes.BOOL),
('ReadOnly', wintypes.BOOL),
('Version', ctypes.wintypes.DWORD),
('GetInfoOnly', ctypes.wintypes.BOOL),
('ReadOnly', ctypes.wintypes.BOOL),
('ResiliencyGuid', Win32_GUID)
]
class Win32_MERGE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('Version', ctypes.wintypes.DWORD),
('MergeDepth', ctypes.c_ulong)
]
class Win32_CREATE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('Version', ctypes.wintypes.DWORD),
('UniqueId', Win32_GUID),
('MaximumSize', ctypes.c_ulonglong),
('BlockSizeInBytes', wintypes.ULONG),
('SectorSizeInBytes', wintypes.ULONG),
('PhysicalSectorSizeInBytes', wintypes.ULONG),
('ParentPath', wintypes.LPCWSTR),
('SourcePath', wintypes.LPCWSTR),
('OpenFlags', wintypes.DWORD),
('BlockSizeInBytes', ctypes.wintypes.ULONG),
('SectorSizeInBytes', ctypes.wintypes.ULONG),
('PhysicalSectorSizeInBytes', ctypes.wintypes.ULONG),
('ParentPath', ctypes.wintypes.LPCWSTR),
('SourcePath', ctypes.wintypes.LPCWSTR),
('OpenFlags', ctypes.wintypes.DWORD),
('ParentVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('SourceVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('ResiliencyGuid', Win32_GUID)
]
class Win32_SIZE(ctypes.Structure):
_fields_ = [("VirtualSize", wintypes.ULARGE_INTEGER),
("PhysicalSize", wintypes.ULARGE_INTEGER),
("BlockSize", wintypes.ULONG),
("SectorSize", wintypes.ULONG)]
_fields_ = [("VirtualSize", ctypes.wintypes.ULARGE_INTEGER),
("PhysicalSize", ctypes.wintypes.ULARGE_INTEGER),
("BlockSize", ctypes.wintypes.ULONG),
("SectorSize", ctypes.wintypes.ULONG)]
class Win32_PARENT_LOCATION(ctypes.Structure):
_fields_ = [('ParentResolved', wintypes.BOOL),
('ParentLocationBuffer', wintypes.WCHAR * 512)]
_fields_ = [('ParentResolved', ctypes.wintypes.BOOL),
('ParentLocationBuffer', ctypes.wintypes.WCHAR * 512)]
class Win32_PHYSICAL_DISK(ctypes.Structure):
_fields_ = [("LogicalSectorSize", wintypes.ULONG),
("PhysicalSectorSize", wintypes.ULONG),
("IsRemote", wintypes.BOOL)]
_fields_ = [("LogicalSectorSize", ctypes.wintypes.ULONG),
("PhysicalSectorSize", ctypes.wintypes.ULONG),
("IsRemote", ctypes.wintypes.BOOL)]
class Win32_VHD_INFO(ctypes.Union):
_fields_ = [("Size", Win32_SIZE),
("Identifier", Win32_GUID),
("ParentLocation", Win32_PARENT_LOCATION),
("ParentIdentifier", Win32_GUID),
("ParentTimestamp", wintypes.ULONG),
("ParentTimestamp", ctypes.wintypes.ULONG),
("VirtualStorageType", Win32_VIRTUAL_STORAGE_TYPE),
("ProviderSubtype", wintypes.ULONG),
("Is4kAligned", wintypes.BOOL),
("ProviderSubtype", ctypes.wintypes.ULONG),
("Is4kAligned", ctypes.wintypes.BOOL),
("PhysicalDisk", Win32_PHYSICAL_DISK),
("VhdPhysicalSectorSize", wintypes.ULONG),
("VhdPhysicalSectorSize", ctypes.wintypes.ULONG),
("SmallestSafeVirtualSize",
wintypes.ULARGE_INTEGER),
("FragmentationPercentage", wintypes.ULONG)]
ctypes.wintypes.ULARGE_INTEGER),
("FragmentationPercentage", ctypes.wintypes.ULONG)]
class Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [("VERSION", ctypes.wintypes.UINT),
@ -134,8 +131,8 @@ if os.name == 'nt':
class Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [
('Version', wintypes.DWORD),
('ParentFilePath', wintypes.LPCWSTR)
('Version', ctypes.wintypes.DWORD),
('ParentFilePath', ctypes.wintypes.LPCWSTR)
]
@ -194,7 +191,7 @@ class VHDUtils(object):
guid.Data1 = 0xec984aec
guid.Data2 = 0xa0f9
guid.Data3 = 0x47e9
ByteArray8 = wintypes.BYTE * 8
ByteArray8 = ctypes.wintypes.BYTE * 8
guid.Data4 = ByteArray8(0x90, 0x1f, 0x71, 0x41, 0x5a, 0x66, 0x34, 0x5b)
return guid
@ -207,7 +204,7 @@ class VHDUtils(object):
vst.DeviceId = device_id
vst.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
handle = ctypes.wintypes.HANDLE()
ret_val = virtdisk.OpenVirtualDisk(ctypes.byref(vst),
ctypes.c_wchar_p(vhd_path),
@ -297,7 +294,7 @@ class VHDUtils(object):
params.SourceVirtualStorageType.DeviceId = src_device_id
params.SourceVirtualStorageType.VendorId = self._msft_vendor_id
handle = wintypes.HANDLE()
handle = ctypes.wintypes.HANDLE()
create_virtual_disk_flag = self.create_virtual_disk_flags.get(
new_vhd_type)
@ -337,7 +334,7 @@ class VHDUtils(object):
infoSize = ctypes.sizeof(virt_disk_info)
virtdisk.GetVirtualDiskInformation.restype = wintypes.DWORD
virtdisk.GetVirtualDiskInformation.restype = ctypes.wintypes.DWORD
ret_val = virtdisk.GetVirtualDiskInformation(
vhd_file, ctypes.byref(ctypes.c_ulong(infoSize)),

Some files were not shown because too many files have changed in this diff Show More