Handle volume API failure in _post_live_migration
Previously, if the call to Cinder in _post_live_migration failed, the exception went unhandled and prevented us from calling post_live_migration_at_destination - which is where we set instance host and task state. This left the system in an inconsistent state, with the instance actually running on the destination, but with instance.host still set to the source. This patch simply wraps the Cinder API calls in a try/except, and logs the exception instead of blowing up. While "dumb", this has the virtue of being simple and minimizing potential side effects. A comprehensive refactoring of when, where and how we set instance host and task state to try to guarantee consistency is left as a TODO. Conflicts in nova/compute/manager.py due to absence of new Cinder flow conditional (and corresponding modifications to tests). Partial-bug: 1628606 Change-Id: Icb0bdaf454935b3713c35339394d260b33520de5 (cherry picked from commit 5513f48dea) (cherry picked from commit cf3c2f391a) (cherry picked from commit 53f9c8e510)
This commit is contained in:
parent
3e07bcb327
commit
28bc3c8221
|
@ -5750,8 +5750,18 @@ class ComputeManager(manager.Manager):
|
|||
# remove the volume connection without detaching from hypervisor
|
||||
# because the instance is not running anymore on the current host
|
||||
if bdm.is_volume:
|
||||
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
|
||||
connector)
|
||||
# Detaching volumes is a call to an external API that can fail.
|
||||
# If it does, we need to handle it gracefully so that the call
|
||||
# to post_live_migration_at_destination - where we set instance
|
||||
# host and task state - still happens.
|
||||
try:
|
||||
self.volume_api.terminate_connection(ctxt, bdm.volume_id,
|
||||
connector)
|
||||
except Exception as e:
|
||||
LOG.error('Connection for volume %s not terminated on '
|
||||
'source host %s during post_live_migration: %s',
|
||||
bdm.volume_id, self.host, six.text_type(e),
|
||||
instance=instance)
|
||||
|
||||
# Releasing vlan.
|
||||
# (not necessary in current implementation?)
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
# Copyright 2018 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova import exception
|
||||
from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional import integrated_helpers
|
||||
from nova.tests.unit import fake_notifier
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
from nova.virt import fake
|
||||
|
||||
|
||||
class FakeCinderError(object):
    """Stand-in callable used where mock.patch is not available.

    Stubbed in for both terminate_connection and attachment_delete. It
    raises on the first invocation only, and records how many times it
    raised and how many times it was invoked, so a test can simulate a
    single failing volume while still asserting that every one of an
    instance's volumes was processed.
    """

    def __init__(self):
        # How many times we raised (at most 1) and how many times we
        # were invoked, respectively.
        self.raise_count = 0
        self.call_count = 0

    def __call__(self, *args, **kwargs):
        self.call_count += 1
        # Fail exactly once: only the very first call raises.
        if not self.raise_count:
            self.raise_count = 1
            raise exception.CinderConnectionFailed(reason='Fake Cinder error')
|
||||
|
||||
|
||||
class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
                                 integrated_helpers.InstanceHelperMixin):
    """Regression test for bug 1628606.

    A Cinder API failure while cleaning up volume connections during
    post_live_migration must not prevent the migration from completing.
    """
    api_major_version = 'v2.1'
    microversion = 'latest'

    def setUp(self):
        super(LiveMigrationCinderFailure, self).setUp()
        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)
        # Start a second compute node (the first one was started for us by
        # _IntegratedTestBase). set_nodes() is needed to avoid duplicate
        # nodenames. See comments in test_bug_1702454.py.
        fake.set_nodes(['host2'])
        self.addCleanup(fake.restore_nodes)
        self.compute2 = self.start_service('compute', host='host2')

    def test_live_migrate_terminate_connection_fails(self):
        self.useFixture(nova_fixtures.CinderFixture(self))
        # Boot a server with two volumes attached; the stub below will make
        # terminate_connection fail for the first volume it is asked about.
        server = self.api.post_server({
            'server': {
                'flavorRef': 1,
                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                'name': 'live-migrate-terminate-connection-fail-test',
                'networks': 'none',
                'block_device_mapping_v2': [
                    {'boot_index': 0,
                     'uuid': uuids.broken_volume,
                     'source_type': 'volume',
                     'destination_type': 'volume'},
                    {'boot_index': 1,
                     'uuid': uuids.working_volume,
                     'source_type': 'volume',
                     'destination_type': 'volume'}]}})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # Migrate to whichever of the two computes the server did NOT
        # land on.
        source = server['OS-EXT-SRV-ATTR:host']
        dest = (self.compute2.host if source == self.compute.host
                else self.compute.host)

        stub_terminate_connection = FakeCinderError()
        self.stub_out('nova.volume.cinder.API.terminate_connection',
                      stub_terminate_connection)
        self.api.post_server_action(
            server['id'],
            {'os-migrateLive': {'host': dest, 'block_migration': False}})
        # Live migration should complete despite a volume failing to detach.
        # Waiting for ACTIVE on dest is essentially an assert for just that.
        self._wait_for_server_parameter(self.api, server,
                                        {'OS-EXT-SRV-ATTR:host': dest,
                                         'status': 'ACTIVE'})
        # Both volumes were processed, and only the broken one raised.
        self.assertEqual(2, stub_terminate_connection.call_count)
        self.assertEqual(1, stub_terminate_connection.raise_count)
|
Loading…
Reference in New Issue