Add Selection.availability_zone field

This adds an availability_zone field to the Selection object,
using the same field type and nullable setting as the
corresponding field on the Instance object. It will be used to
store the AZ of the selected service_host so the superconductor
layer can pass it down to the compute and cell conductor layers,
avoiding an up-call to the API DB for the host AZ information
during a reschedule.

Note that the field is nullable because a host may not be in an
AZ, and CONF.default_availability_zone can technically be set to
None even though it defaults to "nova".
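
For illustration (not part of this change), a minimal sketch of
how the nullable field and the version bump behave when the
object is serialized for an older consumer; it mirrors the new
obj_make_compatible unit test below:

    # Hypothetical standalone sketch, not part of this change.
    from nova import objects

    objects.register_all()
    selection = objects.Selection(service_host='host1',
                                  availability_zone=None)  # no AZ
    # A 1.1 consumer sees the (possibly None) availability_zone.
    data = selection.obj_to_primitive(
        target_version='1.1')['nova_object.data']
    assert 'availability_zone' in data
    # A 1.0 consumer never sees it; obj_make_compatible drops it.
    data = selection.obj_to_primitive(
        target_version='1.0')['nova_object.data']
    assert 'availability_zone' not in data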

Change-Id: Ia50c5f4dd2204f1cafa669097d1e744479c4d8c8
Related-Bug: #1781286

Author: Matt Riedemann, 2019-09-30 17:56:58 -04:00
Parent: f07697ebff
Commit: bcd4584a7c
4 changed files with 89 additions and 15 deletions


@@ -13,11 +13,16 @@
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields
from nova import conf
from nova import objects
from nova.objects import base
from nova.scheduler.filters import utils as filter_utils
CONF = conf.CONF
@base.NovaObjectRegistry.register
@@ -27,7 +32,8 @@ class Selection(base.NovaObject, ovo_base.ComparableVersionedObject):
"""
# Version 1.0: Initial version
VERSION = "1.0"
# Version 1.1: Added availability_zone field.
VERSION = "1.1"
fields = {
"compute_node_uuid": fields.UUIDField(),
@@ -39,8 +45,23 @@ class Selection(base.NovaObject, ovo_base.ComparableVersionedObject):
# as an encoded string.
"allocation_request": fields.StringField(nullable=True),
"allocation_request_version": fields.StringField(nullable=True),
# The availability_zone represents the AZ the service_host is in at
# the time of scheduling. This is nullable for two reasons:
# 1. The Instance.availability_zone field is nullable - though that's
# not a great reason, the bigger reason is:
# 2. The host may not be in an AZ, and CONF.default_availability_zone
# is a StrOpt which technically could be set to None, so we have to
# account for it being a None value (rather than just not setting
# the field).
'availability_zone': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
super(Selection, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1):
primitive.pop('availability_zone', None)
@classmethod
def from_host_state(cls, host_state, allocation_request=None,
allocation_request_version=None):
@@ -51,13 +72,24 @@ class Selection(base.NovaObject, ovo_base.ComparableVersionedObject):
"""
allocation_request_json = jsonutils.dumps(allocation_request)
limits = objects.SchedulerLimits.from_dict(host_state.limits)
# Note that the AZ logic here is similar to the AvailabilityZoneFilter.
metadata = filter_utils.aggregate_metadata_get_by_host(
host_state, key='availability_zone')
availability_zone = metadata.get('availability_zone')
if availability_zone:
# aggregate_metadata_get_by_host returns a set for the value but
# a host can only be in one AZ.
availability_zone = list(availability_zone)[0]
else:
availability_zone = CONF.default_availability_zone
return cls(compute_node_uuid=host_state.uuid,
service_host=host_state.host,
nodename=host_state.nodename,
cell_uuid=host_state.cell_uuid,
limits=limits,
allocation_request=allocation_request_json,
-allocation_request_version=allocation_request_version)
+allocation_request_version=allocation_request_version,
+availability_zone=availability_zone)
def to_dict(self):
if self.limits is not None:

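For context (not part of this change), a minimal sketch of the AZ
resolution in from_host_state() above: aggregate_metadata_get_by_host()
collapses the host's aggregate metadata into a dict of value sets, and
the selection either takes the single AZ from that set or falls back to
CONF.default_availability_zone. It mirrors the new test_selection.py
tests further down:

    # Hypothetical standalone sketch, not part of this change.
    from oslo_utils.fixture import uuidsentinel as uuids

    from nova import conf
    from nova import objects
    from nova.scheduler import host_manager

    CONF = conf.CONF
    objects.register_all()

    host_state = host_manager.HostState('host1', 'node1', uuids.cell)
    host_state.uuid = uuids.compute_node
    host_state.limits = {}

    # Host is in an AZ aggregate: the single value in the set wins.
    host_state.aggregates = [
        objects.Aggregate(metadata={'availability_zone': 'zone1'})]
    selection = objects.Selection.from_host_state(host_state)
    assert selection.availability_zone == 'zone1'

    # Host is in no AZ aggregate: fall back to config ("nova" by default).
    host_state.aggregates = []
    selection = objects.Selection.from_host_state(host_state)
    assert selection.availability_zone == CONF.default_availability_zone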

@@ -1132,7 +1132,7 @@ object_data = {
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
'SecurityGroupRuleList': '1.2-0005c47fcd0fb78dd6d7fd32a1409f5b',
-'Selection': '1.0-7f5c065097371fe527dd1245f1530653',
+'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
'Service': '1.22-8a740459ab9bf258a19c8fcb875c2d9a',
'ServiceList': '1.19-5325bce13eebcbf22edc9678285270cc',
'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',

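The Selection hash in object_data changes because oslo.versionedobjects
fingerprints each registered object over its fields and remotable
methods, so adding availability_zone (and bumping to 1.1) produces a new
value. A minimal sketch, assuming the standard ObjectVersionChecker test
fixture, of how such hashes can be recomputed when updating this list:

    # Hypothetical standalone sketch, not part of this change.
    from oslo_versionedobjects import fixture

    from nova import objects
    from nova.objects import base

    objects.register_all()
    checker = fixture.ObjectVersionChecker(
        base.NovaObjectRegistry.obj_classes())
    # Maps each registered object name to '<version>-<fingerprint>'.
    print(checker.get_hashes()['Selection'])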

@@ -13,11 +13,13 @@
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova import objects
from nova.objects import numa
from nova.scheduler import host_manager
from nova.tests.unit.objects import test_objects
CONF = conf.CONF
fake_numa_limit1 = numa.NUMATopologyLimits(cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0)
@@ -148,7 +150,47 @@ class _TestSelectionObject(object):
class TestSelectionObject(test_objects._LocalTest,
_TestSelectionObject):
pass
# NOTE(mriedem): The tests below are for methods which are not remotable
# so they can go in the local-only test class rather than the mixin above.
def test_obj_make_compatible(self):
selection = objects.Selection(service_host='host1',
availability_zone='zone1')
primitive = selection.obj_to_primitive(
target_version='1.1')['nova_object.data']
self.assertIn('availability_zone', primitive)
primitive = selection.obj_to_primitive(
target_version='1.0')['nova_object.data']
self.assertNotIn('availability_zone', primitive)
self.assertIn('service_host', primitive)
def test_from_host_state_az_via_aggregate_metadata(self):
"""Tests the scenario that the host is in multiple aggregates and
one has the availability_zone aggregate metadata key which is used
on the selection object.
"""
host_state = host_manager.HostState('host', 'node', uuids.cell_uuid)
host_state.uuid = uuids.compute_node_uuid
host_state.limits = {}
host_state.aggregates = [
objects.Aggregate(metadata={'foo': 'bar'}),
objects.Aggregate(metadata={'availability_zone': 'zone1'})
]
selection = objects.Selection.from_host_state(host_state)
self.assertEqual('zone1', selection.availability_zone)
def test_from_host_state_az_via_config(self):
"""Tests the scenario that the host is not in an aggregate with the
availability_zone metadata key so the AZ comes from config.
"""
host_state = host_manager.HostState('host', 'node', uuids.cell_uuid)
host_state.uuid = uuids.compute_node_uuid
host_state.limits = {}
host_state.aggregates = []
selection = objects.Selection.from_host_state(host_state)
self.assertEqual(CONF.default_availability_zone,
selection.availability_zone)
class TestRemoteSelectionObject(test_objects._RemoteTest,


@@ -92,7 +92,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
-limits={})
+limits={}, aggregates=[])
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -158,7 +158,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
-limits={}, cell_uuid=uuids.cell, instances={})
+limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
@@ -210,7 +210,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = mock.Mock(spec=host_manager.HostState,
host="fake_host", nodename="fake_node", uuid=uuids.cn1,
-cell_uuid=uuids.cell1, limits={})
+cell_uuid=uuids.cell1, limits={}, aggregates=[])
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
@@ -384,13 +384,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
@@ -450,13 +450,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state0 = mock.Mock(spec=host_manager.HostState,
host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
host_state1 = mock.Mock(spec=host_manager.HostState,
host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
host_state2 = mock.Mock(spec=host_manager.HostState,
host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
-cell_uuid=uuids.cell, limits={})
+cell_uuid=uuids.cell, limits={}, aggregates=[])
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
mock_get_hosts.return_value = all_host_states
@@ -524,10 +524,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
nodename="node1", limits={}, uuid=uuids.cn1,
-cell_uuid=uuids.cell1, instances={})
+cell_uuid=uuids.cell1, instances={}, aggregates=[])
hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
nodename="node2", limits={}, uuid=uuids.cn2,
-cell_uuid=uuids.cell2, instances={})
+cell_uuid=uuids.cell2, instances={}, aggregates=[])
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
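
The aggregates=[] additions above are needed because
Selection.from_host_state() now reads host_state.aggregates (via
aggregate_metadata_get_by_host) and the spec'd Mock objects do not
provide a usable value for it on their own. A minimal sketch (not part
of this change) of the updated mock shape and the resulting fallback:

    # Hypothetical standalone sketch, not part of this change.
    from unittest import mock

    from oslo_utils.fixture import uuidsentinel as uuids

    from nova import objects
    from nova.scheduler import host_manager

    objects.register_all()
    host_state = mock.Mock(spec=host_manager.HostState, host='fake_host',
                           nodename='fake_node', uuid=uuids.cn1,
                           cell_uuid=uuids.cell, limits={}, aggregates=[])
    # With no aggregates the AZ falls back to
    # CONF.default_availability_zone ("nova" unless overridden).
    selection = objects.Selection.from_host_state(host_state)
    print(selection.availability_zone)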