Validate new image via scheduler during rebuild

During a rebuild we bypass the scheduler because we are
always rebuilding the instance on the same host it's already
on. However, we allow passing a new image during rebuild,
and that new image needs to be validated against the
instance's host by running it through the scheduler filters,
such as the ImagePropertiesFilter. Otherwise the new image
could violate constraints placed on the host by the admin.
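
To make the failure mode concrete, here is a minimal sketch
of the kind of check a filter like the ImagePropertiesFilter
performs; the function and attribute names are simplified
stand-ins, not nova's actual implementation:

    def host_passes(supported_architectures, image_properties):
        # Reject the host if the image requests an architecture
        # the host does not support; no request means any host
        # is acceptable.
        arch = image_properties.get('hw_architecture')
        return arch is None or arch in supported_architectures

    # An x86_64-only host passes for an x86_64 image but is
    # filtered out for a unicore32 image, which surfaces as
    # NoValidHost when no other hosts remain.
    assert host_passes({'x86_64'}, {'hw_architecture': 'x86_64'})
    assert not host_passes({'x86_64'}, {'hw_architecture': 'unicore32'})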

This change checks whether a new image is provided and, if
so, modifies the request spec passed to the scheduler so
that the new image is validated, while restricting the
scheduler to pick only the host that the instance is already
running on. If the image is not valid for the host, the
scheduler raises NoValidHost and the rebuild stops.
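
In outline, the new flow works like the sketch below;
Destination and the helper name are simplified stand-ins
rather than nova's real API (see the nova/compute/api.py
diff below for the actual change):

    class Destination(object):
        # Stand-in for objects.Destination: pins scheduling to
        # one host/node pair.
        def __init__(self, host, node):
            self.host = host
            self.node = node

    def prepare_rebuild(request_spec, instance, orig_image_ref,
                        image_href, new_image_meta):
        # If the image changed, validate it via the scheduler,
        # but only against the host the instance is already on.
        # Returning None for the host tells conductor to call
        # the scheduler.
        if orig_image_ref != image_href:
            request_spec.requested_destination = Destination(
                instance.host, instance.node)
            request_spec.image = new_image_meta
            return None
        # Unchanged image: keep bypassing the scheduler entirely.
        return instance.host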

A functional test is added to recreate the bug and show
that we now properly stop the rebuild in conductor by
calling the scheduler to validate the image.

Co-Authored-By: Sylvain Bauza <sbauza@redhat.com>

Closes-Bug: #1664931

Conflicts:
      nova/conductor/manager.py
      nova/tests/functional/integrated_helpers.py
      nova/tests/functional/test_servers.py

NOTE(mriedem): There are a few changes needed for Ocata:

1. I6590f0eda4ec4996543ad40d8c2640b83fc3dd9d changed some
   of the conditional logic in the conductor rebuild_instance
   method. That refactor is not replayed here, just the
   necessary change for the fix.
2. The _wait_for_action_fail_completion method didn't exist
   in Ocata.
3. The PlacementFixture wasn't used in _IntegratedTestBase
   so it's done as part of the test setup.
4. The default scheduler filters were different in Ocata so
   the test just restricts to using ImagePropertiesFilter.
5. A few imports were needed in the test module.

Change-Id: I11746d1ea996a0f18b7c54b4c9c21df58cc4714b
(cherry picked from commit 984dd8ad6a)
(cherry picked from commit 9e2d63da94)
Matt Riedemann 2017-10-27 16:03:15 -04:00
parent f8a9b10736
commit b72105c1c4
7 changed files with 159 additions and 6 deletions

nova/compute/api.py

@@ -3051,9 +3051,24 @@ class API(base.Base):
         # sure that all our instances are currently migrated to have an
         # attached RequestSpec object but let's consider that the operator only
         # half migrated all their instances in the meantime.
+        host = instance.host
         try:
             request_spec = objects.RequestSpec.get_by_instance_uuid(
                 context, instance.uuid)
+            # If a new image is provided on rebuild, we will need to run
+            # through the scheduler again, but we want the instance to be
+            # rebuilt on the same host it's already on.
+            if orig_image_ref != image_href:
+                request_spec.requested_destination = objects.Destination(
+                    host=instance.host,
+                    node=instance.node)
+                # We have to modify the request spec that goes to the scheduler
+                # to contain the new image. We persist this since we've already
+                # changed the instance.image_ref above so we're being
+                # consistent.
+                request_spec.image = objects.ImageMeta.from_dict(image)
+                request_spec.save()
+                host = None  # This tells conductor to call the scheduler.
         except exception.RequestSpecNotFound:
             # Some old instances can still have no RequestSpec object attached
             # to them, we need to support the old way
@@ -3063,7 +3078,7 @@ class API(base.Base):
                 new_pass=admin_password, injected_files=files_to_inject,
                 image_ref=image_href, orig_image_ref=orig_image_ref,
                 orig_sys_metadata=orig_sys_metadata, bdms=bdms,
-                preserve_ephemeral=preserve_ephemeral, host=instance.host,
+                preserve_ephemeral=preserve_ephemeral, host=host,
                 request_spec=request_spec,
                 kwargs=kwargs)

nova/conductor/manager.py

@@ -705,7 +705,7 @@ class ComputeTaskManager(base.Base):
             filter_properties = {'ignore_hosts': [instance.host]}
             request_spec = scheduler_utils.build_request_spec(
                 context, image_ref, [instance])
-        else:
+        elif recreate:
             # NOTE(sbauza): Augment the RequestSpec object by excluding
             # the source host for avoiding the scheduler to pick it
             request_spec.ignore_hosts = request_spec.ignore_hosts or []
@@ -720,6 +720,10 @@ class ComputeTaskManager(base.Base):
             filter_properties = request_spec.\
                 to_legacy_filter_properties_dict()
             request_spec = request_spec.to_legacy_request_spec_dict()
+        else:
+            filter_properties = request_spec.\
+                to_legacy_filter_properties_dict()
+            request_spec = request_spec.to_legacy_request_spec_dict()
         try:
             hosts = self._schedule_instances(
                 context, request_spec, filter_properties)

nova/tests/functional/api/client.py

@@ -303,6 +303,16 @@ class TestOpenStackClient(object):
     def delete_image(self, image_id):
         return self.api_delete('/images/%s' % image_id)
 
+    def put_image_meta_key(self, image_id, key, value):
+        """Creates or updates a given image metadata key/value pair."""
+        req_body = {
+            'meta': {
+                key: value
+            }
+        }
+        return self.api_put('/images/%s/metadata/%s' % (image_id, key),
+                            req_body)
+
     def get_flavor(self, flavor_id):
         return self.api_get('/flavors/%s' % flavor_id).body['flavor']
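
For illustration, a hypothetical call to this helper (client and
image_id are placeholders) and the request it produces against the
compute images metadata proxy API, which the test below can only
use up to microversion 2.38:

    # Sets hw_architecture on the image, as the functional test
    # below does.
    client.put_image_meta_key(image_id, 'hw_architecture', 'unicore32')
    # Issues: PUT /images/<image_id>/metadata/hw_architecture
    # Body:   {"meta": {"hw_architecture": "unicore32"}}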

nova/tests/functional/integrated_helpers.py

@@ -77,10 +77,11 @@ class _IntegratedTestBase(test.TestCase):
         self.flags(use_neutron=self.USE_NEUTRON)
 
         nova.tests.unit.image.fake.stub_out_image_service(self)
-        self._setup_services()
 
         self.useFixture(cast_as_call.CastAsCall(self.stubs))
 
+        self._setup_services()
+
         self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
 
     def _setup_compute_service(self):
@@ -248,3 +249,40 @@ class InstanceHelperMixin(object):
         if networks is not None:
             server['networks'] = networks
         return server
+
+    def _wait_for_action_fail_completion(
+            self, server, expected_action, event_name, api=None):
+        """Polls instance action events for the given instance, action and
+        action event name until it finds the action event with an error
+        result.
+        """
+        if api is None:
+            api = self.api
+        completion_event = None
+        for attempt in range(10):
+            actions = api.get_instance_actions(server['id'])
+            # Look for the expected action.
+            for action in actions:
+                if action['action'] == expected_action:
+                    events = (
+                        api.api_get(
+                            '/servers/%s/os-instance-actions/%s' %
+                            (server['id'], action['request_id'])
+                        ).body['instanceAction']['events'])
+                    # Look for the action event being in error state.
+                    for event in events:
+                        if (event['event'] == event_name and
+                                event['result'] is not None and
+                                event['result'].lower() == 'error'):
+                            completion_event = event
+                            # Break out of the events loop.
+                            break
+                    if completion_event:
+                        # Break out of the actions loop.
+                        break
+            # We didn't find the completion event yet, so wait a bit.
+            time.sleep(0.5)
+
+        if completion_event is None:
+            self.fail('Timed out waiting for %s failure event. Current '
+                      'instance actions: %s' % (event_name, actions))

nova/tests/functional/test_servers.py

@@ -23,17 +23,20 @@
 from oslo_serialization import base64
 from oslo_utils import timeutils
 
 from nova.compute import api as compute_api
+from nova.compute import instance_actions
 from nova.compute import rpcapi
 from nova import context
 from nova import exception
 from nova import objects
 from nova.objects import block_device as block_device_obj
 from nova import test
+from nova.tests import fixtures as nova_fixtures
 from nova.tests.functional.api import client
 from nova.tests.functional import integrated_helpers
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import fake_block_device
 from nova.tests.unit import fake_network
+import nova.tests.unit.image.fake
 from nova import volume
@@ -907,3 +910,75 @@ class ServerTestV220(ServersTestBase):
         self.assertTrue(mock_clean_vols.called)
         self._delete_server(server_id)
+
+
+class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
+                            integrated_helpers.InstanceHelperMixin):
+    api_major_version = 'v2.1'
+    # We have to cap the microversion at 2.38 because that's the max we
+    # can use to update image metadata via our compute images proxy API.
+    microversion = '2.38'
+
+    # We need the ImagePropertiesFilter so override the base class setup
+    # which configures to use the chance_scheduler.
+    def _setup_scheduler_service(self):
+        # Make sure we're using the PlacementFixture before starting the
+        # scheduler since the FilterScheduler relies on Placement.
+        self.useFixture(nova_fixtures.PlacementFixture())
+        self.flags(enabled_filters=['ImagePropertiesFilter'],
+                   group='filter_scheduler')
+        return self.start_service('scheduler')
+
+    def test_rebuild_with_image_novalidhost(self):
+        """Creates a server with an image that is valid for the single compute
+        that we have. Then rebuilds the server, passing in an image with
+        metadata that does not fit the single compute which should result in
+        a NoValidHost error. The ImagePropertiesFilter filter is enabled by
+        default so that should filter out the host based on the image meta.
+        """
+        server_req_body = {
+            'server': {
+                # We hard-code from a fake image since we can't get images
+                # via the compute /images proxy API with microversion > 2.35.
+                'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture
+                'name': 'test_rebuild_with_image_novalidhost',
+                # We don't care about networking for this test. This requires
+                # microversion >= 2.37.
+                'networks': 'none'
+            }
+        }
+        server = self.api.post_server(server_req_body)
+        self._wait_for_state_change(self.api, server, 'ACTIVE')
+        # Now update the image metadata to be something that won't work with
+        # the fake compute driver we're using since the fake driver has an
+        # "x86_64" architecture.
+        rebuild_image_ref = (
+            nova.tests.unit.image.fake.AUTO_DISK_CONFIG_ENABLED_IMAGE_UUID)
+        self.api.put_image_meta_key(
+            rebuild_image_ref, 'hw_architecture', 'unicore32')
+        # Now rebuild the server with that updated image and it should result
+        # in a NoValidHost failure from the scheduler.
+        rebuild_req_body = {
+            'rebuild': {
+                'imageRef': rebuild_image_ref
+            }
+        }
+        # Since we're using the CastAsCall fixture, the NoValidHost error
+        # should actually come back to the API and result in a 500 error.
+        # Normally the user would get a 202 response because nova-api RPC casts
+        # to nova-conductor which RPC calls the scheduler which raises the
+        # NoValidHost. We can mimic the end user way to figure out the failure
+        # by looking for the failed 'rebuild' instance action event.
+        self.api.api_post('/servers/%s/action' % server['id'],
+                          rebuild_req_body, check_response_status=[500])
+        # Look for the failed rebuild action.
+        self._wait_for_action_fail_completion(
+            server, instance_actions.REBUILD, 'rebuild_server',
+            # Before microversion 2.51 events are only returned for instance
+            # actions if you're an admin.
+            self.api_fixture.admin_api)
+        # Unfortunately the server's image_ref is updated to be the new image
+        # even though the rebuild should not work.
+        server = self.api.get_server(server['id'])
+        self.assertEqual(rebuild_image_ref, server['image']['id'])

nova/tests/unit/compute/test_compute_api.py

@@ -3182,6 +3182,7 @@ class _ComputeAPIUnitTestMixIn(object):
             None, image, flavor, {}, [], None)
         self.assertNotEqual(orig_system_metadata, instance.system_metadata)
 
+    @mock.patch.object(objects.RequestSpec, 'save')
     @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
     @mock.patch.object(objects.Instance, 'save')
     @mock.patch.object(objects.Instance, 'get_flavor')
@@ -3193,7 +3194,7 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_rebuild_change_image(self, _record_action_start,
             _checks_for_create_and_rebuild, _check_auto_disk_config,
             _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save,
-            req_spec_get_by_inst_uuid):
+            req_spec_get_by_inst_uuid, req_spec_save):
         orig_system_metadata = {}
         get_flavor.return_value = test_flavor.fake_flavor
         orig_image_href = 'orig_image'
@@ -3240,8 +3241,15 @@ class _ComputeAPIUnitTestMixIn(object):
             injected_files=files_to_inject, image_ref=new_image_href,
             orig_image_ref=orig_image_href,
             orig_sys_metadata=orig_system_metadata, bdms=bdms,
-            preserve_ephemeral=False, host=instance.host,
+            preserve_ephemeral=False, host=None,
             request_spec=fake_spec, kwargs={})
+        # assert the request spec was modified so the scheduler picks
+        # the existing instance host/node
+        req_spec_save.assert_called_once_with()
+        self.assertIn('requested_destination', fake_spec)
+        requested_destination = fake_spec.requested_destination
+        self.assertEqual(instance.host, requested_destination.host)
+        self.assertEqual(instance.node, requested_destination.node)
 
         _check_auto_disk_config.assert_called_once_with(image=new_image)
         _checks_for_create_and_rebuild.assert_called_once_with(self.context,

nova/tests/unit/conductor/test_conductor.py

@@ -1378,7 +1378,10 @@ class _BaseTaskTestCase(object):
             self.conductor_manager.rebuild_instance(context=self.context,
                                                     instance=inst_obj,
                                                     **rebuild_args)
-            reset_fd.assert_called_once_with()
+            if rebuild_args['recreate']:
+                reset_fd.assert_called_once_with()
+            else:
+                reset_fd.assert_not_called()
             to_reqspec.assert_called_once_with()
             to_filtprops.assert_called_once_with()
             fp_mock.assert_called_once_with(self.context, request_spec,