Merge "Add regression test for rebuilding a volume-backed server" into stable/ocata

Zuul 2017-12-14 11:34:44 +00:00 committed by Gerrit Code Review
commit 85e89a752c
2 changed files with 127 additions and 9 deletions


@@ -1084,6 +1084,10 @@ class CinderFixture(fixtures.Fixture):
     SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
     SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
 
+    # This represents a bootable image-backed volume to test
+    # boot-from-volume scenarios.
+    IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
+
     def __init__(self, test):
         super(CinderFixture, self).__init__()
         self.test = test
@@ -1132,7 +1136,7 @@ class CinderFixture(fixtures.Fixture):
             for instance_uuid, volumes in self.attachments.items():
                 if volume_id in volumes:
                     # The volume is attached.
-                    return {
+                    volume = {
                         'status': 'in-use',
                         'display_name': volume_id,
                         'attach_status': 'attached',
@@ -1145,15 +1149,34 @@ class CinderFixture(fixtures.Fixture):
                             }
                         }
                     }
-
-            # This is a test that does not care about the actual details.
-            return {
-                'status': 'available',
-                'display_name': 'TEST2',
-                'attach_status': 'detached',
-                'id': volume_id,
-                'size': 1
-            }
+                    break
+            else:
+                # This is a test that does not care about the actual details.
+                volume = {
+                    'status': 'available',
+                    'display_name': 'TEST2',
+                    'attach_status': 'detached',
+                    'id': volume_id,
+                    'size': 1
+                }
+
+            # update the status based on existing attachments
+            has_attachment = any(
+                [volume['id'] in attachments
+                 for attachments in self.attachments.values()])
+            volume['status'] = 'attached' if has_attachment else 'detached'
+
+            # Check for our special image-backed volume.
+            if volume_id == self.IMAGE_BACKED_VOL:
+                # Make it a bootable volume.
+                volume['bootable'] = True
+                # Add the image_id metadata.
+                volume['volume_image_metadata'] = {
+                    # There would normally be more image metadata in here...
+                    'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+                }
+
+            return volume
 
         def fake_initialize_connection(self, context, volume_id, connector):
             if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
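
For reference, the fixture change above amounts to one post-processing step in fake_get: the special volume is reported as bootable, with volume_image_metadata naming the image it was created from, which is what nova consults for boot-from-volume and rebuild. A minimal standalone sketch of that step (illustrative names, not the fixture itself):

    # Constants copied from the fixture above.
    IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
    IMAGE_ID = '155d900f-4e14-4e4c-a73d-069cbf4541e6'

    def describe_volume(volume_id):
        # Generic details for tests that do not care about specifics.
        volume = {'status': 'available', 'id': volume_id, 'size': 1}
        if volume_id == IMAGE_BACKED_VOL:
            # Mark the volume bootable and record its backing image.
            volume['bootable'] = True
            volume['volume_image_metadata'] = {'image_id': IMAGE_ID}
        return volume

    assert describe_volume(IMAGE_BACKED_VOL)['volume_image_metadata'] == {
        'image_id': IMAGE_ID}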


@@ -0,0 +1,95 @@
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import nova.conf
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers

CONF = nova.conf.CONF


class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
                                   integrated_helpers.InstanceHelperMixin):
"""Tests the regression in bug 1732947 where rebuilding a volume-backed
instance with the original image still results in conductor calling the
scheduler to validate the image. This is because the instance.image_ref
is not set for a volume-backed instance, so the conditional check in the
API to see if the provided image_ref for rebuild is different than the
original image.
"""
    api_major_version = 'v2.1'
    microversion = 'latest'

    def setUp(self):
        super(RebuildVolumeBackedSameImage, self).setUp()
        # We are creating a volume-backed server so we need the CinderFixture.
        self.useFixture(nova_fixtures.CinderFixture(self))

    def _setup_scheduler_service(self):
        self.useFixture(nova_fixtures.PlacementFixture())
        # Add the IsolatedHostsFilter to the list of enabled filters since it
        # is not enabled by default.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        enabled_filters.append('IsolatedHostsFilter')
        # Remove the DiskFilter since we're using Placement for filtering on
        # DISK_GB.
        if 'DiskFilter' in enabled_filters:
            enabled_filters.remove('DiskFilter')
        self.flags(enabled_filters=enabled_filters, group='filter_scheduler')
        return self.start_service('scheduler')

    def test_volume_backed_rebuild_same_image(self):
        # First create our server as normal.
        server_req_body = {
            # There is no imageRef because this is boot from volume.
            'server': {
                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture
                'name': 'test_volume_backed_rebuild_same_image',
                # We don't care about networking for this test. This requires
                # microversion >= 2.37.
                'networks': 'none',
                'block_device_mapping_v2': [{
                    'boot_index': 0,
                    'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
                    'source_type': 'volume',
                    'destination_type': 'volume'
                }]
            }
        }
        server = self.api.post_server(server_req_body)
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')

        # For a volume-backed server, the image ref will be an empty string
        # in the server response.
        self.assertEqual('', server['image'])

        # Now we mark the host that the instance is running on as isolated
        # but we won't mark the image as isolated, meaning the rebuild
        # will fail for that image on that host.
        self.flags(isolated_hosts=[self.compute.host],
                   group='filter_scheduler')

        # Now rebuild the server with the same image that was used to create
        # our fake volume.
        rebuild_req_body = {
            'rebuild': {
                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
            }
        }
        # Since we're using the CastAsCall fixture, the NoValidHost error
        # should actually come back to the API and result in a 500 error.
        # Normally the user would get a 202 response because nova-api RPC
        # casts to nova-conductor which RPC calls the scheduler which raises
        # the NoValidHost.
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body, check_response_status=[500])
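
The conditional the test docstring refers to reduces to a string comparison against the instance's stored image ref. A simplified sketch of that check, using illustrative names rather than nova's actual API code:

    def rebuild_looks_like_new_image(instance_image_ref, rebuild_image_ref):
        # A volume-backed instance stores '' as its image ref, so a rebuild
        # with the volume's original image UUID still compares as different,
        # and nova-api asks the scheduler to re-validate the image on the
        # host. With the host isolated above, that turns into NoValidHost.
        return instance_image_ref != rebuild_image_ref

    # Volume-backed server: empty image ref, so even the original image
    # looks new and triggers a scheduler run.
    assert rebuild_looks_like_new_image(
        '', '155d900f-4e14-4e4c-a73d-069cbf4541e6')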