Merge "Adapt _validate_instance_group_policy to new policy model"

commit 706257d20b
Authored by Zuul on 2018-07-13 13:30:58 +00:00; committed by Gerrit Code Review
3 changed files with 48 additions and 6 deletions

nova/compute/manager.py

@@ -1275,15 +1275,28 @@ class ComputeManager(manager.Manager):
         @utils.synchronized(group_hint)
         def _do_validation(context, instance, group_hint):
             group = objects.InstanceGroup.get_by_hint(context, group_hint)
-            if 'anti-affinity' in group.policies:
-                group_hosts = group.get_hosts(exclude=[instance.uuid])
-                if self.host in group_hosts:
+            if group.policy and 'anti-affinity' == group.policy:
+                instances_uuids = objects.InstanceList.get_uuids_by_host(
+                    context, self.host)
+                ins_on_host = set(instances_uuids)
+                members = set(group.members)
+                # Determine the set of instance group members on this host
+                # which are not the instance in question. This is used to
+                # determine how many other members from the same anti-affinity
+                # group can be on this host.
+                members_on_host = ins_on_host & members - set([instance.uuid])
+                rules = group.rules
+                if rules and 'max_server_per_host' in rules:
+                    max_server = rules['max_server_per_host']
+                else:
+                    max_server = 1
+                if len(members_on_host) >= max_server:
                     msg = _("Anti-affinity instance group policy "
                             "was violated.")
                     raise exception.RescheduledException(
                         instance_uuid=instance.uuid,
                         reason=msg)
-            elif 'affinity' in group.policies:
+            elif group.policy and 'affinity' == group.policy:
                 group_hosts = group.get_hosts(exclude=[instance.uuid])
                 if group_hosts and self.host not in group_hosts:
                     msg = _("Affinity instance group policy was violated.")
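The new anti-affinity branch no longer just asks whether the host already appears in the group's host list; it counts how many other group members are already on this host and compares that count against the group's 'max_server_per_host' rule, which defaults to 1. A minimal standalone sketch of that counting logic (the function name and the int() coercion are this sketch's own simplifications, not nova code):

    def check_anti_affinity(instance_uuid, instances_on_host, group_members,
                            rules=None):
        """Return True if the placement stays within 'max_server_per_host'."""
        # Other members of the anti-affinity group already on the candidate host.
        members_on_host = ((set(instances_on_host) & set(group_members))
                           - {instance_uuid})
        # Without an explicit rule, at most one member per host is allowed.
        max_server = int((rules or {}).get('max_server_per_host', 1))
        return len(members_on_host) < max_server

    # inst2 is already on the host: a limit of 1 rejects the placement,
    # raising it to 2 accepts it (mirroring the new unit test below).
    assert not check_anti_affinity('inst1', ['inst1', 'inst2'],
                                   ['inst1', 'inst2'],
                                   rules={'max_server_per_host': '1'})
    assert check_anti_affinity('inst1', ['inst1', 'inst2'],
                               ['inst1', 'inst2'],
                               rules={'max_server_per_host': 2})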

nova/objects/service.py

@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
 # NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 32
+SERVICE_VERSION = 33
 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -138,6 +138,9 @@ SERVICE_VERSION_HISTORY = (
     # migration if 'file_backed_memory' is enabled and the source does not
     # support 'file_backed_memory'
     {'compute_rpc': '5.0'},
+    # Version 33: Add support for check on the server group with
+    # 'max_server_per_host' rules
+    {'compute_rpc': '5.0'},
 )
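The SERVICE_VERSION bump to 33 gives other services a way to tell whether every nova-compute in the deployment understands the new 'max_server_per_host' rule before relying on it. A toy sketch of that kind of version gate (the constant and function names are illustrative only, not nova's API):

    # Computes at service version 33 or newer understand the
    # 'max_server_per_host' rule, per the history entry above.
    SUPPORT_MAX_SERVER_PER_HOST = 33

    def rules_supported(min_compute_service_version):
        # Only trust the rule once the oldest nova-compute in the deployment
        # has been upgraded past the version that introduced the check.
        return min_compute_service_version >= SUPPORT_MAX_SERVER_PER_HOST

    assert not rules_supported(32)  # mixed/old computes: cannot rely on the rule
    assert rules_supported(33)      # fully upgraded deployment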

nova/tests/unit/compute/test_compute_mgr.py

@@ -5720,7 +5720,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
     def test_validate_policy_honors_workaround_disabled(self, mock_get):
         instance = objects.Instance(uuid=uuids.instance)
         hints = {'group': 'foo'}
-        mock_get.return_value = objects.InstanceGroup(policies=[])
+        mock_get.return_value = objects.InstanceGroup(policy=None)
         self.compute._validate_instance_group_policy(self.context,
                                                      instance, hints)
         mock_get.assert_called_once_with(self.context, 'foo')
@@ -5746,6 +5746,32 @@
                                                      instance, hints)
         mock_get.assert_called_once_with(self.context, uuids.group_hint)

+    @mock.patch('nova.objects.InstanceList.get_uuids_by_host')
+    @mock.patch('nova.objects.InstanceGroup.get_by_hint')
+    def test_validate_instance_group_policy_with_rules(self, mock_get_by_hint,
+                                                       mock_get_by_host):
+        # Create 2 instances on the same host, inst2 created before inst1
+        instance = objects.Instance(uuid=uuids.inst1)
+        hints = {'group': [uuids.group_hint]}
+        existing_insts = [uuids.inst1, uuids.inst2]
+        members_uuids = [uuids.inst1, uuids.inst2]
+        mock_get_by_host.return_value = existing_insts
+        # if group policy rules limit to 1, raise RescheduledException
+        mock_get_by_hint.return_value = objects.InstanceGroup(
+            policy='anti-affinity', rules={'max_server_per_host': '1'},
+            hosts=['host1'], members=members_uuids)
+        self.assertRaises(exception.RescheduledException,
+                          self.compute._validate_instance_group_policy,
+                          self.context, instance, hints)
+
+        # if group policy rules limit changes to 2, validation passes
+        mock_get_by_hint.return_value = objects.InstanceGroup(
+            policy='anti-affinity', rules={'max_server_per_host': 2},
+            hosts=['host1'], members=members_uuids)
+        self.compute._validate_instance_group_policy(self.context,
+                                                     instance, hints)
+
     @mock.patch.object(virt_driver.ComputeDriver,
                        'prepare_networks_before_block_device_mapping')
     @mock.patch.object(virt_driver.ComputeDriver,