Move driver initialization check into the method

The volume and backup managers' methods are decorated with
`require_driver_initialized`, which checks whether the driver has been
initialized and raises a `DriverNotInitialized` exception if it has
not.

This early failure leaves volumes and backups in a wrong status, which
is not just confusing for the user but also makes it difficult to do
anything with those resources once they have been left in a bogus
state.

For example, when a volume creation is requested, the volume is first
created in the database with its status set to 'creating'. The
scheduler then picks an available volume node and sends the task to
it. If the driver on that node has not been initialized, the volume
status is left as 'creating' instead of being set to 'error'.

This patch fixes that issue by moving the driver initialization check
into the various managers' methods. In some cases the check is done at
the very beginning of the method; in others it is done later, either
to avoid code duplication or because the lines above the check needed
to be executed first.
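
The resulting in-method pattern looks roughly like the following
condensed sketch, based on the extend_volume hunk in
cinder/volume/manager.py below (the actual extend logic and its own
error handling are omitted):

    def extend_volume(self, context, volume_id, new_size):
        try:
            # Verify the driver is ready before doing any work. If it
            # isn't, record a status the user can act on and re-raise.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_extending'})
        # ... the usual extend logic runs only when the driver is ready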

NOTE: Regardless of the conflicts noted below, this patch should be
backported. The issue it fixes is the source of several bug reports
and of user frustration and confusion. The conflicts were caused by
additions in the master branch and were straightforward to resolve.

Conflicts:
	cinder/tests/test_volume.py
	cinder/utils.py
	cinder/volume/flows/create_volume/__init__.py
	cinder/volume/manager.py

Closes-bug: #1242942
(cherry picked from commit 5be4620ae5)
Change-Id: I2610be6ba1aa7df417f1a1f7bb27af30273e4814
Author: Flavio Percoco
Date: 2013-12-10 12:31:50 +01:00
parent 240c81d00a
commit 4228c0ebc2
6 changed files with 259 additions and 32 deletions

cinder/backup/manager.py

@ -217,7 +217,6 @@ class BackupManager(manager.SchedulerDependentManager):
LOG.info(_('Resuming delete on backup: %s.') % backup['id'])
self.delete_backup(ctxt, backup['id'])
@utils.require_driver_initialized
def create_backup(self, context, backup_id):
"""Create volume backups using configured backup service."""
backup = self.db.backup_get(context, backup_id)
@ -258,6 +257,12 @@ class BackupManager(manager.SchedulerDependentManager):
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).backup_volume(context, backup,
backup_service)
@ -276,7 +281,6 @@ class BackupManager(manager.SchedulerDependentManager):
self.az})
LOG.info(_('Create backup finished. backup: %s.'), backup_id)
@utils.require_driver_initialized
def restore_backup(self, context, backup_id, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_('Restore backup started, backup: %(backup_id)s '
@ -334,6 +338,12 @@ class BackupManager(manager.SchedulerDependentManager):
raise exception.InvalidBackup(reason=err)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught,
# the volume status will be set back to available and
# the backup status to 'error'
utils.require_driver_initialized(self.driver)
backup_service = self.service.get_backup_driver(context)
self._get_driver(backend).restore_backup(context, backup,
volume,
@ -351,9 +361,21 @@ class BackupManager(manager.SchedulerDependentManager):
' to volume %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
@utils.require_driver_initialized
def delete_backup(self, context, backup_id):
"""Delete volume backup from configured backup service."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
# are no other status to change but backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason':
unicode(err)})
LOG.info(_('Delete backup started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
self.db.backup_update(context, backup_id, {'host': self.host})

cinder/exception.py

@ -127,7 +127,7 @@ class ImageNotAuthorized(CinderException):
class DriverNotInitialized(CinderException):
message = _("Volume driver '%(driver)s' not initialized.")
message = _("Volume driver not ready.")
class Invalid(CinderException):

cinder/tests/test_volume.py

@ -140,6 +140,73 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertEqual(volume['status'], "error")
self.volume.delete_volume(self.context, volume_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
def test_create_driver_not_initialized(self, reserve, commit, rollback):
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id)
# NOTE(flaper87): The volume status should be error.
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume.status, "error")
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
def test_delete_driver_not_initialized(self, reserve, commit, rollback):
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
self.assertRaises(exception.DriverNotInitialized,
self.volume.delete_volume,
self.context, volume_id)
# NOTE(flaper87): The volume status should be error_deleting.
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume.status, "error_deleting")
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
# Need to stub out reserve, commit, and rollback
@ -442,6 +509,30 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
def test_create_snapshot_driver_not_initialized(self):
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])['id']
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_snapshot,
self.context, volume_src['id'],
snapshot_id)
# NOTE(flaper87): The snapshot status should be error.
snapshot = db.snapshot_get(context.get_admin_context(), snapshot_id)
self.assertEqual(snapshot.status, "error")
# NOTE(flaper87): Set initialized to True,
# let's clean up the mess
self.volume.driver._initialized = True
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of
an encrypted volume.
@ -1516,6 +1607,29 @@ class VolumeTestCase(BaseVolumeTestCase):
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_driver_not_initialized(self):
"""Test volume can be extended at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, size=2,
status='available',
host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.extend_volume,
self.context, volume['id'], 3)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume.status, 'error_extending')
# NOTE(flaper87): Set initialized to True,
# let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_manager(self):
"""Test volume can be extended at the manager level."""
def fake_reserve(context, expire=None, project_id=None, **deltas):
@ -1773,6 +1887,25 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertEqual(volume['host'], 'newhost')
self.assertEqual(volume['migration_status'], None)
def test_migrate_driver_not_initialized(self):
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.migrate_volume,
self.context, volume['id'],
host_obj, True)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume.migration_status, 'error')
# NOTE(flaper87): Set initialized to True,
# let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host

cinder/utils.py

@ -22,7 +22,6 @@
import contextlib
import datetime
import functools
import hashlib
import inspect
import os
@ -812,17 +811,6 @@ def brick_get_connector(protocol, driver=None,
*args, **kwargs)
def require_driver_initialized(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
raise exception.DriverNotInitialized(driver=driver_name)
return func(self, *args, **kwargs)
return wrapper
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
@ -831,3 +819,18 @@ def get_file_mode(path):
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
:params driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_("Volume driver %s not initialized") % driver_name)
raise exception.DriverNotInitialized()

cinder/volume/flows/create_volume/__init__.py

@ -1470,19 +1470,25 @@ class CreateVolumeFromSpecTask(base.CinderTask):
return self.driver.create_volume(volume_ref)
def __call__(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
# we can't do anything if the driver didn't init
if not self.driver.initialized:
LOG.error(_("Unable to create volume, driver not initialized"))
driver_name = self.driver.__class__.__name__
raise exception.DriverNotInitialized(driver=driver_name)
LOG.error(_("Unable to create volume. "
"Volume driver %s not initialized") % driver_name)
# NOTE(flaper87): Set the error status before
# raising any exception.
self.db.volume_update(context, volume_id, dict(status='error'))
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
create_functor = self._create_func_mapping.get(create_type)
if not create_functor:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
if not volume_id:
volume_id = volume_ref['id']
LOG.info(_("Volume %(volume_id)s: being created using %(functor)s "

cinder/volume/manager.py

@ -232,7 +232,6 @@ class VolumeManager(manager.SchedulerDependentManager):
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
@utils.require_driver_initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None):
@ -262,7 +261,6 @@ class VolumeManager(manager.SchedulerDependentManager):
self._reset_stats()
return volume_id
@utils.require_driver_initialized
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()
@ -284,6 +282,11 @@ class VolumeManager(manager.SchedulerDependentManager):
self._notify_about_volume_usage(context, volume_ref, "delete.start")
self._reset_stats()
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("volume %s: removing export"), volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['id'])
@ -340,7 +343,6 @@ class VolumeManager(manager.SchedulerDependentManager):
return True
@utils.require_driver_initialized
def create_snapshot(self, context, volume_id, snapshot_id):
"""Creates and exports the snapshot."""
caller_context = context
@ -352,6 +354,11 @@ class VolumeManager(manager.SchedulerDependentManager):
context, snapshot_ref, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %(snap_id)s: creating"),
{'snap_id': snapshot_ref['id']})
@ -390,7 +397,6 @@ class VolumeManager(manager.SchedulerDependentManager):
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
@utils.require_driver_initialized
def delete_snapshot(self, context, snapshot_id):
"""Deletes and unexports snapshot."""
caller_context = context
@ -403,6 +409,11 @@ class VolumeManager(manager.SchedulerDependentManager):
context, snapshot_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
# Pass context so that drivers that want to use it, can,
@ -452,7 +463,6 @@ class VolumeManager(manager.SchedulerDependentManager):
QUOTAS.commit(context, reservations, project_id=project_id)
return True
@utils.require_driver_initialized
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached"""
@ -508,6 +518,11 @@ class VolumeManager(manager.SchedulerDependentManager):
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
@ -525,7 +540,6 @@ class VolumeManager(manager.SchedulerDependentManager):
mountpoint)
return do_attach()
@utils.require_driver_initialized
def detach_volume(self, context, volume_id):
"""Updates db to show volume is detached"""
# TODO(vish): refactor this into a more general "unreserve"
@ -533,6 +547,11 @@ class VolumeManager(manager.SchedulerDependentManager):
volume = self.db.volume_get(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume)
except Exception:
with excutils.save_and_reraise_exception():
@ -550,7 +569,6 @@ class VolumeManager(manager.SchedulerDependentManager):
volume['name'] not in volume['provider_location']):
self.driver.ensure_export(context, volume)
@utils.require_driver_initialized
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
@ -560,6 +578,11 @@ class VolumeManager(manager.SchedulerDependentManager):
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
self.driver.ensure_export(context.elevated(), volume)
image_service, image_id = \
@ -581,7 +604,6 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
@utils.require_driver_initialized
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
@ -619,6 +641,11 @@ class VolumeManager(manager.SchedulerDependentManager):
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
self.driver.validate_connector(connector)
conn_info = self.driver.initialize_connection(volume, connector)
@ -650,17 +677,25 @@ class VolumeManager(manager.SchedulerDependentManager):
conn_info['data']['access_mode'] = access_mode
return conn_info
@utils.require_driver_initialized
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
self.driver.terminate_connection(volume_ref, connector, force=force)
@utils.require_driver_initialized
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
@ -732,6 +767,16 @@ class VolumeManager(manager.SchedulerDependentManager):
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
@ -757,9 +802,18 @@ class VolumeManager(manager.SchedulerDependentManager):
self.db.volume_update(ctxt, volume_id, {'migration_status': None})
return volume['id']
@utils.require_driver_initialized
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
@ -843,8 +897,17 @@ class VolumeManager(manager.SchedulerDependentManager):
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@utils.require_driver_initialized
def extend_volume(self, context, volume_id, new_size):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
size_increase = (int(new_size)) - volume['size']