Change conductor to cast to build_and_run_instance

Now that build_and_run_instance has taken over for run_instance in the
compute manager, the conductor switches to calling it directly.  With
this change the build process no longer casts back to the scheduler on
a build retry.

This is the final part of a series of patches to build up the
build_and_run_instance method.

bp remove-cast-to-schedule-run-instance
Change-Id: Iefab71047996b7cc08107794d5bc628c11680a70
Andrew Laski 2013-11-20 10:16:25 -05:00
parent e239791fc7
commit 70196c4854
5 changed files with 222 additions and 33 deletions
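In outline, build_instances() in the conductor now calls the scheduler's
select_destinations() once and then casts build_and_run_instance directly to
each chosen compute host; scheduling failures are handled in place rather
than re-cast to the scheduler. Below is a condensed, illustrative sketch of
that flow with names taken from the diff hunks that follow (the helper
build_instances_sketch is not part of the change, and the real method also
refreshes each instance, re-fetches block device mappings, and populates
per-host filter properties):

from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils


def build_instances_sketch(context, scheduler_rpcapi, compute_rpcapi,
                           instances, image, filter_properties, **kwargs):
    # Build one request spec covering all requested instances.
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    try:
        # Ask the scheduler for one destination per instance up front,
        # instead of casting run_instance back to the scheduler.
        hosts = scheduler_rpcapi.select_destinations(context, request_spec,
                                                     filter_properties)
    except Exception as exc:
        # Scheduling errors are handled here; there is no retry cast.
        for instance in instances:
            scheduler_driver.handle_schedule_error(context, exc,
                                                   instance.uuid,
                                                   request_spec)
        return
    # Fan the builds out directly to the selected compute hosts.
    for instance, host in zip(instances, hosts):
        compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec, filter_properties=filter_properties,
            node=host['nodename'], limits=host['limits'], **kwargs)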


@@ -241,6 +241,8 @@ class ComputeAPI(object):
3.21 - Made rebuild take new-world BDM objects
3.22 - Made terminate_instance take new-world BDM objects
3.23 - Added external_instance_event()
- build_and_run_instance was added in Havana and not used or
documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
@@ -991,6 +993,21 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'external_instance_event', instances=instances,
events=events)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
cctxt = self.client.prepare(server=host, version='3.23')
cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
image=image, request_spec=request_spec,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping, node=node,
limits=limits)
class SecurityGroupAPI(object):
'''Client side of the security group rpc API.

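For reference, a minimal sketch of a caller of the new client method added
above; ComputeAPI here is the existing client class in nova.compute.rpcapi,
while cast_build is an illustrative helper and not part of this change:

from nova.compute import rpcapi as compute_rpcapi


def cast_build(context, instance, host, image, request_spec,
               filter_properties):
    # cast() is fire-and-forget: this returns as soon as the message is
    # sent, and the compute manager on `host` runs the build asynchronously.
    client = compute_rpcapi.ComputeAPI()
    client.build_and_run_instance(context, instance=instance, host=host,
                                  image=image, request_spec=request_spec,
                                  filter_properties=filter_properties)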

@@ -14,6 +14,9 @@
"""Handles database requests from other nova services."""
import copy
import itertools
from oslo import messaging
import six
@@ -34,6 +37,7 @@ from nova import network
from nova.network.security_group import openstack_driver
from nova import notifications
from nova.objects import base as nova_object
from nova.objects import block_device as block_device_object
from nova.objects import instance as instance_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
@@ -43,6 +47,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
@@ -788,14 +793,40 @@ class ComputeTaskManager(base.Base):
security_groups, block_device_mapping, legacy_bdm=True):
request_spec = scheduler_utils.build_request_spec(context, image,
instances)
# NOTE(alaski): For compatibility until a new scheduler method is used.
request_spec.update({'block_device_mapping': block_device_mapping,
'security_group': security_groups})
self.scheduler_rpcapi.run_instance(context, request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
requested_networks=requested_networks, is_first_time=True,
filter_properties=filter_properties,
legacy_bdm_in_spec=legacy_bdm)
try:
hosts = self.scheduler_rpcapi.select_destinations(context,
request_spec, filter_properties)
except Exception as exc:
for instance in instances:
scheduler_driver.handle_schedule_error(context, exc,
instance.uuid, request_spec)
return
for (instance, host) in itertools.izip(instances, hosts):
try:
instance.refresh()
except exception.InstanceNotFound:
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdo = block_device_object
bdms = bdo.BlockDeviceMappingList.get_by_instance_uuid(context,
instance.uuid)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
def _delete_image(self, context, image_id):
(image_service, image_id) = glance.get_remote_image_service(context,


@@ -821,3 +821,12 @@ class ComputeRpcAPITestCase(test.TestCase):
instances=[self.fake_instance],
events=['event'],
version='3.23')
def test_build_and_run_instance(self):
self._test_compute_api('build_and_run_instance', 'cast',
instance=self.fake_instance, host='host', image='image',
request_spec={'request': 'spec'}, filter_properties=[],
admin_password='passwd', injected_files=None,
requested_networks=['network1'], security_groups=None,
block_device_mapping=None, node='node', limits=[],
version='3.23')


@@ -35,6 +35,7 @@ from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as instance_obj
@@ -44,6 +45,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import quota
from nova import rpc
from nova.scheduler import driver as scheduler_driver
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import cast_as_call
@@ -1334,47 +1336,92 @@ class _BaseTaskTestCase(object):
False, False, flavor, None, None, [])
def test_build_instances(self):
instance_type = flavors.get_default_flavor()
system_metadata = flavors.save_flavor_info({}, instance_type)
# NOTE(alaski): instance_type -> system_metadata -> instance_type
# loses some data (extra_specs). This build process is using
# scheduler/utils:build_request_spec() which extracts flavor from
# system_metadata and will re-query the DB for extra_specs.. so
# we need to test this properly
expected_instance_type = flavors.extract_flavor(
{'system_metadata': system_metadata})
expected_instance_type['extra_specs'] = 'fake-specs'
system_metadata = flavors.save_flavor_info({},
flavors.get_default_flavor())
instances = [fake_instance.fake_instance_obj(
self.context,
system_metadata=system_metadata,
expected_attrs=['system_metadata']) for i in xrange(2)]
instance_type = flavors.extract_flavor(instances[0])
instance_type['extra_specs'] = 'fake-specs'
instance_properties = jsonutils.to_primitive(instances[0])
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'run_instance')
'select_destinations')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
self.conductor_manager.scheduler_rpcapi.select_destinations(
self.context, {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': jsonutils.to_primitive(
instances[0]),
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2}, {}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.instance_get_by_uuid(self.context, instances[0].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[0]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[0].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host1',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': {'system_metadata': system_metadata,
'uuid': 'fakeuuid'},
'instance_type': expected_instance_type,
'instance_uuids': ['fakeuuid', 'fakeuuid2'],
'block_device_mapping': 'block_device_mapping',
'security_group': 'security_groups',
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks', is_first_time=True,
filter_properties={}, legacy_bdm_in_spec=False)
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
db.instance_get_by_uuid(self.context, instances[1].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[1]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[1].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host2',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type,
'instance_uuids': [inst.uuid for inst in instances],
'num_instances': 2},
filter_properties={'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=[{'uuid': 'fakeuuid',
'system_metadata': system_metadata},
{'uuid': 'fakeuuid2'}],
instances=instances,
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
@@ -1384,6 +1431,40 @@ class _BaseTaskTestCase(object):
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs'}
exception = exc.NoValidHost(reason='fake-reason')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'select_destinations')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
self.conductor_manager.scheduler_rpcapi.select_destinations(
self.context, spec, {}).AndRaise(exception)
for instance in instances:
scheduler_driver.handle_schedule_error(self.context, exception,
instance.uuid, spec)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
@@ -1819,6 +1900,55 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.context, inst_obj, 'flavor',
filter_props, [resvs])
def test_build_instances_instance_not_found(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
self.mox.StubOutWithMock(instances[0], 'refresh')
self.mox.StubOutWithMock(instances[1], 'refresh')
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs'}
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'select_destinations')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
self.conductor_manager.scheduler_rpcapi.select_destinations(
self.context, spec, {}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
instances[0].refresh().AndRaise(
exc.InstanceNotFound(instance_id=instances[0].uuid))
instances[1].refresh()
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context, instance=instances[1], host='host2',
image={'fake-data': 'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):


@@ -18,6 +18,7 @@ import time
import zlib
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.tests import fake_network
@@ -68,8 +69,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
raise Exception()
def throw_error(*args, **kwargs):
raise exception.BuildAbortException(reason='',
instance_uuid='fake')
self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)