Regression test for local delete with an attached volume

Once an instance is in a cell and we do a local delete from
the API, we aren't actually detaching volumes or destroying
the BDMs (block device mappings). A sketch of the expected
cleanup follows the commit metadata below.

Related-Bug: #1675570

Change-Id: Ie3e2dfd4b0f1bb3dff4080f460bf8bb40d69f4f4
(cherry picked from commit 3ae12fdc6f)
Matt Riedemann 2017-03-23 15:49:27 -04:00
parent 60a9b725a6
commit 2b66c6780a
3 changed files with 229 additions and 10 deletions
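
For background, the cleanup the bug report expects from a local delete is sketched below. This is a minimal, self-contained illustration rather than Nova's actual compute API code: FakeVolumeAPI, FakeBDM and local_cleanup_bdms are hypothetical stand-ins for the real volume API, BlockDeviceMapping objects and delete path; the actual fix lands in a separate change.

# Minimal sketch (not Nova code): what a local delete is expected to do
# with attached volumes -- detach each one and destroy its BDM.
class FakeVolumeAPI(object):
    def __init__(self):
        self.detached = []

    def detach(self, context, volume_id, instance_uuid):
        # Record the detach so a caller can assert on it.
        self.detached.append((volume_id, instance_uuid))


class FakeBDM(object):
    def __init__(self, volume_id, instance_uuid):
        self.volume_id = volume_id
        self.instance_uuid = instance_uuid
        self.destroyed = False

    def destroy(self):
        self.destroyed = True


def local_cleanup_bdms(context, bdms, volume_api):
    """Detach every attached volume and destroy its BDM."""
    for bdm in bdms:
        if bdm.volume_id:
            volume_api.detach(context, bdm.volume_id, bdm.instance_uuid)
        bdm.destroy()


if __name__ == '__main__':
    # 'vol-1' and 'inst-1' are arbitrary placeholder IDs.
    api = FakeVolumeAPI()
    bdms = [FakeBDM('vol-1', 'inst-1')]
    local_cleanup_bdms(context=None, bdms=bdms, volume_api=api)
    assert api.detached == [('vol-1', 'inst-1')], 'volume should be detached'
    assert bdms[0].destroyed, 'BDM should be destroyed'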


@@ -17,6 +17,7 @@
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import collections
from contextlib import contextmanager
import logging as std_logging
import os
@@ -1089,11 +1090,15 @@ class CinderFixture(fixtures.Fixture):
self.swap_error = False
self.swap_volume_instance_uuid = None
self.swap_volume_instance_error_uuid = None
# This is a map of instance UUIDs to lists of attached volume IDs.
# It gets updated on attach/detach operations.
self.attachments = collections.defaultdict(list)
def setUp(self):
super(CinderFixture, self).setUp()
def fake_get(self_api, context, volume_id):
# Check for the special swap volumes.
if volume_id in (CinderFixture.SWAP_OLD_VOL,
CinderFixture.SWAP_ERR_OLD_VOL):
volume = {
@@ -1122,14 +1127,33 @@
'attach_status': 'attached'
})
return volume
# Check to see if the volume is attached.
for instance_uuid, volumes in self.attachments.items():
if volume_id in volumes:
# The volume is attached.
return {
'status': 'in-use',
'display_name': volume_id,
'attach_status': 'attached',
'id': volume_id,
'size': 1,
'attachments': {
instance_uuid: {
'attachment_id': volume_id,
'mountpoint': '/dev/vdb'
}
}
}
# The volume is not attached, so return a generic available volume
# for tests that do not care about the actual details.
return {
'status': 'available',
'display_name': 'TEST2',
'attach_status': 'detached',
'id': volume_id,
'size': 1
}
def fake_initialize_connection(self, context, volume_id, connector):
if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
@@ -1147,8 +1171,35 @@
# the reservation on SWAP_ERR_NEW_VOL.
self.swap_error = True
def fake_attach(_self, context, volume_id, instance_uuid,
mountpoint, mode='rw'):
# Check to see if the volume is already attached to any server.
for instance, volumes in self.attachments.items():
if volume_id in volumes:
raise exception.InvalidInput(
reason='Volume %s is already attached to '
'instance %s' % (volume_id, instance))
# It's not attached so let's "attach" it.
self.attachments[instance_uuid].append(volume_id)
self.test.stub_out('nova.volume.cinder.API.attach',
fake_attach)
def fake_detach(_self, context, volume_id, instance_uuid=None,
attachment_id=None):
if instance_uuid is not None:
# If the volume isn't attached to this instance, this will
# raise a ValueError, which indicates a broken test or broken
# code, so we just let it propagate.
self.attachments[instance_uuid].remove(volume_id)
else:
for instance, volumes in self.attachments.items():
if volume_id in volumes:
volumes.remove(volume_id)
break
self.test.stub_out('nova.volume.cinder.API.detach', fake_detach)
self.test.stub_out('nova.volume.cinder.API.begin_detaching',
lambda *args, **kwargs: None)
self.test.stub_out('nova.volume.cinder.API.check_attach',


@@ -192,7 +192,7 @@ class TestOpenStackClient(object):
raise OpenStackApiAuthorizationException(response=response)
else:
raise OpenStackApiException(
message="Unexpected status code",
message="Unexpected status code: %s" % response.text,
response=response)
return response


@@ -0,0 +1,168 @@
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
LOG = logging.getLogger(__name__)
class TestLocalDeleteAttachedVolumes(test.TestCase):
"""Test local delete in the API of a server with a volume attached.
This test creates a server, then shelve-offloads it, attaches a
volume, and then deletes the server.
Since the server is shelved-offloaded, it does not have instance.host
set, which should trigger the local delete flow in the API. During a
local delete we should try to clean up any attached volumes.
This test asserts that on a local delete we also detach any volumes
and destroy the related BlockDeviceMappings.
"""
def setUp(self):
super(TestLocalDeleteAttachedVolumes, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# We need the CinderFixture to stub out the volume API.
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
# Use the PlacementFixture to avoid annoying warnings in the logs.
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# We need at least microversion 2.37 to pass networks='none' on server
# create, and at least 2.20 to attach a volume to a shelved-offloaded
# server, so just use the latest microversion.
self.api.microversion = 'latest'
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.flags(driver='chance_scheduler', group='scheduler')
self.start_service('scheduler')
self.start_service('compute')
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.flavor_id = self.api.get_flavors()[0]['id']
def _wait_for_instance_status(self, server_id, status):
timeout = 0.0
server = self.api.get_server(server_id)
while server['status'] != status and timeout < 10.0:
time.sleep(.1)
timeout += .1
server = self.api.get_server(server_id)
if server['status'] != status:
self.fail('Timed out waiting for server %s to have status: %s. '
'Current status: %s' %
(server_id, status, server['status']))
return server
def _wait_for_instance_delete(self, server_id):
timeout = 0.0
while timeout < 10.0:
try:
server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
# the instance is gone so we're happy
return
else:
time.sleep(.1)
timeout += .1
self.fail('Timed out waiting for server %s to be deleted. '
'Current vm_state: %s. Current task_state: %s' %
(server_id, server['OS-EXT-STS:vm_state'],
server['OS-EXT-STS:task_state']))
def _delete_server(self, server_id):
try:
self.api.delete_server(server_id)
except client.OpenStackApiNotFoundException:
pass
def _wait_for_volume_attach(self, server_id, volume_id):
timeout = 0.0
server = self.api.get_server(server_id)
attached_vols = [vol['id'] for vol in
server['os-extended-volumes:volumes_attached']]
while volume_id not in attached_vols and timeout < 10.0:
time.sleep(.1)
timeout += .1
server = self.api.get_server(server_id)
attached_vols = [vol['id'] for vol in
server['os-extended-volumes:volumes_attached']]
if volume_id not in attached_vols:
self.fail('Timed out waiting for volume %s to be attached to '
'server %s. Currently attached volumes: %s' %
(volume_id, server_id, attached_vols))
def test_local_delete_with_volume_attached(self):
LOG.info('Creating server and waiting for it to be ACTIVE.')
server = dict(
name='local-delete-volume-attach-test',
# The image ID comes from nova.tests.unit.image.fake.
imageRef='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
flavorRef=self.flavor_id,
# Bypass network setup on the compute.
networks='none')
server = self.api.post_server({'server': server})
server_id = server['id']
self.addCleanup(self._delete_server, server_id)
self._wait_for_instance_status(server_id, 'ACTIVE')
LOG.info('Shelve-offloading server %s', server_id)
self.api.post_server_action(server_id, {'shelve': None})
# Wait for the server to be offloaded.
self._wait_for_instance_status(server_id, 'SHELVED_OFFLOADED')
volume_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'
LOG.info('Attaching volume %s to server %s', volume_id, server_id)
self.api.post_server_volume(
server_id, dict(volumeAttachment=dict(volumeId=volume_id)))
# Wait for the volume to show up in the server's list of attached
# volumes.
LOG.info('Validating that volume %s is attached to server %s.',
volume_id, server_id)
self._wait_for_volume_attach(server_id, volume_id)
# Check to see that the fixture is tracking the server and volume
# attachment.
self.assertIn(volume_id, self.cinder.attachments[server_id])
# At this point the instance.host is no longer set, so deleting
# the server will take the local delete path in the API.
LOG.info('Deleting shelved-offloaded server %s.', server_id)
self._delete_server(server_id)
# Now wait for the server to be gone.
self._wait_for_instance_delete(server_id)
LOG.info('Validating that volume %s was detached from server %s.',
volume_id, server_id)
# When bug 1675570 is fixed, this should be assertNotIn.
self.assertIn(volume_id, self.cinder.attachments[server_id])