Delete volume when rescheduling
If a reschedulable failure happens in the driver after the volume was
created on the backend, it will get orphaned and the only way to remove it
will be through backend internals. This commit adds an additional
delete_volume call to the driver after the volume got rescheduled to
make sure no volumes will get orphaned.
Change-Id: Idd86a4842bdc6ecf0cabbeff0a9c9704e030302a
Closes-Bug: 1561579
(cherry picked from commit 7f09229d6c)
This commit is contained in:
parent
31e013fd9f
commit
b3b2e136c7
|
@ -630,6 +630,7 @@ class VolumeTestCase(BaseVolumeTestCase):
|
||||||
|
|
||||||
def test_create_driver_not_initialized_rescheduling(self):
|
def test_create_driver_not_initialized_rescheduling(self):
|
||||||
self.volume.driver._initialized = False
|
self.volume.driver._initialized = False
|
||||||
|
mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
|
||||||
|
|
||||||
volume = tests_utils.create_volume(
|
volume = tests_utils.create_volume(
|
||||||
self.context,
|
self.context,
|
||||||
|
@ -648,6 +649,10 @@ class VolumeTestCase(BaseVolumeTestCase):
|
||||||
# allocated_capacity tracking.
|
# allocated_capacity tracking.
|
||||||
self.assertEqual({}, self.volume.stats['pools'])
|
self.assertEqual({}, self.volume.stats['pools'])
|
||||||
|
|
||||||
|
# NOTE(dulek): As we've rescheduled, make sure delete_volume was
|
||||||
|
# called.
|
||||||
|
self.assertTrue(mock_delete.called)
|
||||||
|
|
||||||
db.volume_destroy(context.get_admin_context(), volume_id)
|
db.volume_destroy(context.get_admin_context(), volume_id)
|
||||||
|
|
||||||
def test_create_non_cinder_exception_rescheduling(self):
|
def test_create_non_cinder_exception_rescheduling(self):
|
||||||
|
@ -3683,12 +3688,16 @@ class VolumeTestCase(BaseVolumeTestCase):
|
||||||
|
|
||||||
self.stubs.Set(self.volume.driver, 'copy_image_to_volume',
|
self.stubs.Set(self.volume.driver, 'copy_image_to_volume',
|
||||||
fake_copy_image_to_volume)
|
fake_copy_image_to_volume)
|
||||||
|
mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
|
||||||
self.assertRaises(exception.ImageCopyFailure,
|
self.assertRaises(exception.ImageCopyFailure,
|
||||||
self._create_volume_from_image)
|
self._create_volume_from_image)
|
||||||
# NOTE(dulek): Rescheduling should not occur, so lets assert that
|
# NOTE(dulek): Rescheduling should not occur, so lets assert that
|
||||||
# allocated_capacity is incremented.
|
# allocated_capacity is incremented.
|
||||||
self.assertDictEqual(self.volume.stats['pools'],
|
self.assertDictEqual(self.volume.stats['pools'],
|
||||||
{'_pool0': {'allocated_capacity_gb': 1}})
|
{'_pool0': {'allocated_capacity_gb': 1}})
|
||||||
|
# NOTE(dulek): As we haven't rescheduled, make sure no delete_volume
|
||||||
|
# was called.
|
||||||
|
self.assertFalse(mock_delete.called)
|
||||||
|
|
||||||
@mock.patch('cinder.utils.brick_get_connector_properties')
|
@mock.patch('cinder.utils.brick_get_connector_properties')
|
||||||
@mock.patch('cinder.utils.brick_get_connector')
|
@mock.patch('cinder.utils.brick_get_connector')
|
||||||
|
|
|
@ -59,7 +59,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
|
||||||
this volume elsewhere.
|
this volume elsewhere.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, reschedule_context, db, scheduler_rpcapi,
|
def __init__(self, reschedule_context, db, driver, scheduler_rpcapi,
|
||||||
do_reschedule):
|
do_reschedule):
|
||||||
requires = ['filter_properties', 'request_spec', 'volume',
|
requires = ['filter_properties', 'request_spec', 'volume',
|
||||||
'context']
|
'context']
|
||||||
|
@ -68,6 +68,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
|
||||||
self.do_reschedule = do_reschedule
|
self.do_reschedule = do_reschedule
|
||||||
self.scheduler_rpcapi = scheduler_rpcapi
|
self.scheduler_rpcapi = scheduler_rpcapi
|
||||||
self.db = db
|
self.db = db
|
||||||
|
self.driver = driver
|
||||||
self.reschedule_context = reschedule_context
|
self.reschedule_context = reschedule_context
|
||||||
# These exception types will trigger the volume to be set into error
|
# These exception types will trigger the volume to be set into error
|
||||||
# status rather than being rescheduled.
|
# status rather than being rescheduled.
|
||||||
|
@ -154,6 +155,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
|
||||||
|
|
||||||
LOG.debug("Volume %s: re-scheduled", volume.id)
|
LOG.debug("Volume %s: re-scheduled", volume.id)
|
||||||
|
|
||||||
|
# NOTE(dulek): Here we should be sure that rescheduling occurred and
|
||||||
|
# host field will be erased. Just in case volume was already created at
|
||||||
|
# the backend, we attempt to delete it.
|
||||||
|
try:
|
||||||
|
self.driver.delete_volume(volume)
|
||||||
|
except Exception:
|
||||||
|
# Most likely the volume weren't created at the backend. We can
|
||||||
|
# safely ignore this.
|
||||||
|
pass
|
||||||
|
|
||||||
def revert(self, context, result, flow_failures, volume, **kwargs):
|
def revert(self, context, result, flow_failures, volume, **kwargs):
|
||||||
# NOTE(dulek): Revert is occurring and manager need to know if
|
# NOTE(dulek): Revert is occurring and manager need to know if
|
||||||
# rescheduling happened. We're returning boolean flag that will
|
# rescheduling happened. We're returning boolean flag that will
|
||||||
|
@ -943,9 +954,8 @@ def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
|
||||||
# status when reverting the flow. Meanwhile, no need to revert process of
|
# status when reverting the flow. Meanwhile, no need to revert process of
|
||||||
# ExtractVolumeRefTask.
|
# ExtractVolumeRefTask.
|
||||||
do_reschedule = allow_reschedule and request_spec and retry
|
do_reschedule = allow_reschedule and request_spec and retry
|
||||||
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db,
|
volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, driver,
|
||||||
scheduler_rpcapi,
|
scheduler_rpcapi, do_reschedule))
|
||||||
do_reschedule))
|
|
||||||
|
|
||||||
LOG.debug("Volume reschedule parameters: %(allow)s "
|
LOG.debug("Volume reschedule parameters: %(allow)s "
|
||||||
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
|
"retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
|
||||||
|
|
Loading…
Reference in New Issue