Add regression test for rebuilding a volume-backed server
Commit 984dd8ad6a
makes rebuild check whether the user is rebuilding an instance with a new image and, if so, run the scheduler filters again, since the new image might not work with the current host for the instance, and we rebuild to the same host that the instance is already running on. The problem is that the instance.image_ref attribute is not set for a volume-backed (boot-from-volume) instance, so the conditional in the rebuild() method is always True, which means we always run through the scheduler for volume-backed instances during rebuild, even if the image in the root disk isn't changing. This adds a functional regression test to recreate the bug. Conflicts: nova/tests/fixtures.py NOTE(mriedem): The CinderFixture conflict is due to not having 1928ec5606
in Ocata. However, this change needs the part of that patch to the CinderFixture to work, so it's added here. NOTE(mriedem): The functional test needed to change slightly to use the PlacementFixture and disable the DiskFilter. Change-Id: If79c554b46c44a7f70f8367426e7da362d7234c8 Related-Bug: #1732947 (cherry picked from commit a4eebd5de7
) (cherry picked from commit c7991653a4
)
This commit is contained in:
parent
bbfc4230ef
commit
fe5e3e7e61
|
@ -1084,6 +1084,10 @@ class CinderFixture(fixtures.Fixture):
|
|||
SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
|
||||
SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
|
||||
|
||||
# This represents a bootable image-backed volume to test
|
||||
# boot-from-volume scenarios.
|
||||
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
|
||||
|
||||
def __init__(self, test):
|
||||
super(CinderFixture, self).__init__()
|
||||
self.test = test
|
||||
|
@ -1132,7 +1136,7 @@ class CinderFixture(fixtures.Fixture):
|
|||
for instance_uuid, volumes in self.attachments.items():
|
||||
if volume_id in volumes:
|
||||
# The volume is attached.
|
||||
return {
|
||||
volume = {
|
||||
'status': 'in-use',
|
||||
'display_name': volume_id,
|
||||
'attach_status': 'attached',
|
||||
|
@ -1145,15 +1149,34 @@ class CinderFixture(fixtures.Fixture):
|
|||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
else:
|
||||
# This is a test that does not care about the actual details.
|
||||
volume = {
|
||||
'status': 'available',
|
||||
'display_name': 'TEST2',
|
||||
'attach_status': 'detached',
|
||||
'id': volume_id,
|
||||
'size': 1
|
||||
}
|
||||
|
||||
# This is a test that does not care about the actual details.
|
||||
return {
|
||||
'status': 'available',
|
||||
'display_name': 'TEST2',
|
||||
'attach_status': 'detached',
|
||||
'id': volume_id,
|
||||
'size': 1
|
||||
}
|
||||
# update the status based on existing attachments
|
||||
has_attachment = any(
|
||||
[volume['id'] in attachments
|
||||
for attachments in self.attachments.values()])
|
||||
volume['status'] = 'attached' if has_attachment else 'detached'
|
||||
|
||||
# Check for our special image-backed volume.
|
||||
if volume_id == self.IMAGE_BACKED_VOL:
|
||||
# Make it a bootable volume.
|
||||
volume['bootable'] = True
|
||||
# Add the image_id metadata.
|
||||
volume['volume_image_metadata'] = {
|
||||
# There would normally be more image metadata in here...
|
||||
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
|
||||
}
|
||||
|
||||
return volume
|
||||
|
||||
def fake_initialize_connection(self, context, volume_id, connector):
|
||||
if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
|
||||
|
|
|
@ -0,0 +1,95 @@
|
|||
# Copyright 2017 Huawei Technologies Co.,LTD.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import nova.conf
|
||||
from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional import integrated_helpers
|
||||
|
||||
CONF = nova.conf.CONF
|
||||
|
||||
|
||||
class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
                                   integrated_helpers.InstanceHelperMixin):
    """Tests the regression in bug 1732947 where rebuilding a volume-backed
    instance with the original image still results in conductor calling the
    scheduler to validate the image. This is because the instance.image_ref
    is not set for a volume-backed instance, so the conditional check in the
    API to see if the provided image_ref for rebuild is different from the
    original image is always True, and the rebuild is always sent through
    the scheduler even when the image is unchanged.
    """
    api_major_version = 'v2.1'
    # Latest microversion is needed for 'networks': 'none' in the server
    # create request (requires microversion >= 2.37).
    microversion = 'latest'

    def setUp(self):
        super(RebuildVolumeBackedSameImage, self).setUp()
        # We are creating a volume-backed server so we need the CinderFixture.
        self.useFixture(nova_fixtures.CinderFixture(self))

    def _setup_scheduler_service(self):
        """Start the scheduler with an adjusted filter list.

        Enables the IsolatedHostsFilter (not on by default) so the test can
        force a NoValidHost during rebuild, and removes the DiskFilter since
        Placement handles DISK_GB filtering.
        """
        self.useFixture(nova_fixtures.PlacementFixture())
        # Add the IsolatedHostsFilter to the list of enabled filters since it
        # is not enabled by default.
        enabled_filters = CONF.filter_scheduler.enabled_filters
        enabled_filters.append('IsolatedHostsFilter')
        # Remove the DiskFilter since we're using Placement for filtering on
        # DISK_GB.
        if 'DiskFilter' in enabled_filters:
            enabled_filters.remove('DiskFilter')
        self.flags(enabled_filters=enabled_filters, group='filter_scheduler')
        return self.start_service('scheduler')

    def test_volume_backed_rebuild_same_image(self):
        # First create our server as normal.
        server_req_body = {
            # There is no imageRef because this is boot from volume.
            'server': {
                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture,
                'name': 'test_volume_backed_rebuild_same_image',
                # We don't care about networking for this test. This requires
                # microversion >= 2.37.
                'networks': 'none',
                'block_device_mapping_v2': [{
                    'boot_index': 0,
                    'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
                    'source_type': 'volume',
                    'destination_type': 'volume'
                }]
            }
        }
        server = self.api.post_server(server_req_body)
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        # For a volume-backed server, the image ref will be an empty string
        # in the server response.
        self.assertEqual('', server['image'])

        # Now we mark the host that the instance is running on as isolated
        # but we won't mark the image as isolated, meaning the rebuild
        # will fail for that image on that host.
        self.flags(isolated_hosts=[self.compute.host],
                   group='filter_scheduler')

        # Now rebuild the server with the same image that was used to create
        # our fake volume.
        rebuild_req_body = {
            'rebuild': {
                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
            }
        }
        # Since we're using the CastAsCall fixture, the NoValidHost error
        # should actually come back to the API and result in a 500 error.
        # Normally the user would get a 202 response because nova-api RPC casts
        # to nova-conductor which RPC calls the scheduler which raises the
        # NoValidHost.
        self.api.api_post('/servers/%s/action' % server['id'],
                          rebuild_req_body, check_response_status=[500])
|
Loading…
Reference in New Issue