From 345510ca1adb158c811ee06c85a6274f1d2b493d Mon Sep 17 00:00:00 2001 From: Rodolfo Alonso Hernandez Date: Wed, 28 Sep 2016 15:18:48 +0100 Subject: [PATCH] Add new Nova Scheduler filter: AggregateExtraSpecsFilter This new filter will deprecate the current AggregateInstanceExtraSpecsFilter filter. This filter, AggregateInstanceTypeFilter, will be backwards compatible with the deprecated one and will introduce the use of sentinel values to add more flexibility during the process of creating filtering rules. Change-Id: Id5b04db40f9e9d6379bafafd562b76d8b9f20ec7 --- doc/source/index.rst | 10 +- .../aggregate-instance-type-filter.rst | 247 +++++++++ doc/source/usage.rst | 4 +- nfv_filters/nova/__init__.py | 0 nfv_filters/nova/scheduler/__init__.py | 0 .../nova/scheduler/filters/__init__.py | 0 .../filters/aggregate_instance_type_filter.py | 287 +++++++++++ nfv_filters/tests/test_nfv_filters.py | 28 - nfv_filters/tests/unit/__init__.py | 0 nfv_filters/tests/unit/nova/__init__.py | 0 .../tests/unit/nova/scheduler/__init__.py | 0 .../unit/nova/scheduler/filters/__init__.py | 0 .../test_aggregate_instance_type_filter.py | 481 ++++++++++++++++++ ...instance-type-filter-99d668f52dc6337a.yaml | 4 + test-requirements.txt | 3 + tox.ini | 2 +- 16 files changed, 1034 insertions(+), 32 deletions(-) create mode 100644 doc/source/scheduler_filters/aggregate-instance-type-filter.rst create mode 100644 nfv_filters/nova/__init__.py create mode 100644 nfv_filters/nova/scheduler/__init__.py create mode 100644 nfv_filters/nova/scheduler/filters/__init__.py create mode 100644 nfv_filters/nova/scheduler/filters/aggregate_instance_type_filter.py delete mode 100644 nfv_filters/tests/test_nfv_filters.py create mode 100644 nfv_filters/tests/unit/__init__.py create mode 100644 nfv_filters/tests/unit/nova/__init__.py create mode 100644 nfv_filters/tests/unit/nova/scheduler/__init__.py create mode 100644 nfv_filters/tests/unit/nova/scheduler/filters/__init__.py create mode 100644 nfv_filters/tests/unit/nova/scheduler/filters/test_aggregate_instance_type_filter.py create mode 100644 releasenotes/notes/add-aggregate-instance-type-filter-99d668f52dc6337a.yaml diff --git a/doc/source/index.rst b/doc/source/index.rst index 33b4d7e..c80506b 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -3,8 +3,9 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. +======================================= Welcome to nfv-filters's documentation! -======================================================== +======================================= Contents: @@ -23,3 +24,10 @@ Indices and tables * :ref:`modindex` * :ref:`search` +Nova scheduler filters +====================== + +.. toctree:: + :maxdepth: 2 + + scheduler_filters/aggregate-instance-type-filter diff --git a/doc/source/scheduler_filters/aggregate-instance-type-filter.rst b/doc/source/scheduler_filters/aggregate-instance-type-filter.rst new file mode 100644 index 0000000..5bb1082 --- /dev/null +++ b/doc/source/scheduler_filters/aggregate-instance-type-filter.rst @@ -0,0 +1,247 @@ +======================================= +Filter - Aggregate Instance Type Filter +======================================= + +Problem description +=================== + +At present the filter scheduler allows operators to associate an instance +type with a host aggregate via the ``AggregateInstanceExtraSpecsFilter`` [1]. 
+This filter enforces that an aggregate of hosts satisfies all the conditions
+defined as extra specifications in the flavor; these conditions are set as
+metadata in the host aggregate.
+
+However, the operator must currently include in the extra specs all the
+values to be matched in the aggregate metadata and cannot specify a single
+wildcard value to represent any data [use case 1]. Also, each variable
+included in the extra specs must be present in the aggregate metadata; the
+operator has no way to make a variable optional [use case 2] or to force the
+absence of a variable [use case 3].
+
+Another limitation of the current ``AggregateInstanceExtraSpecsFilter``
+implementation is the logic imposed. The current logic is injective, from
+flavor extra specifications to aggregate metadata: all elements present in
+the flavor must be present in the aggregate to pass the filter. If the flavor
+has, for example, three extra specs, the aggregate must have those three keys
+and their values must satisfy the conditions present in the flavor extra
+specs. This new filter offers, as an opt-in behaviour, a surjective logic:
+instead of forcing the aggregate to satisfy the extra spec conditions present
+in the flavor, the flavor is forced to satisfy the conditions defined in the
+aggregate metadata. In this case, all metadata elements must be present in
+the flavor extra specs and must satisfy the logic conditions present
+[use case 4].
+
+To add more flexibility to this new feature, the new sentinels defined in
+this filter can also be used in the aggregate metadata [use case 5].
+
+Another limitation of the old filter [1] is the inability to use, in the
+aggregate metadata, the operators defined for the flavor extra specs, such as
+``<or>`` [use case 6].
+
+Finally, the last limitation detected is how namespaced variables are
+filtered. Only the variables using a defined scope are actually used to
+filter the host; the rest of them are skipped and not used as part of the
+filter [use case 7].
+
+
+Description
+===========
+
+A new host aggregate metadata key, ``force_metadata_check``, is added.
+
+The sentinel value asterisk ``*`` may be used to specify that any value is
+valid if the key is present, e.g. "key1" = "*".
+
+The sentinel value tilde ``~`` may be used to specify that a key may
+optionally be omitted, e.g. "key1" = "<or> 1 <or> ~".
+
+The sentinel value exclamation mark ``!`` may be used to specify that the key
+must not be present, e.g. "key1" = "!".
+
+The tilde (optional key) and asterisk (present with any value) sentinels are
+not mutually exclusive. The exclamation mark sentinel is exclusive with tilde
+and asterisk. E.g. "key" = "<or> * <or> ~" is logically correct, meaning the
+key may or may not exist, with any value. When the key
+"force_metadata_check" is set to "True", this is equivalent to not specifying
+the key in the aggregate metadata at all. E.g.:
+
+flavor 1 extra specs: {"key": "~"}
+flavor 2 extra specs: {"key": "<or> * <or> ~"}
+flavor 3 extra specs: {"key": "*"}
+flavor 4 extra specs: {}
+aggregate 1 metadata: {"key": "1", "force_metadata_check": "False"}
+aggregate 2 metadata: {}
+aggregate 3 metadata: {"other key": "1", "force_metadata_check": "False"}
+
+In this example:
+
+* Flavor 1 can be booted on aggregates 2 and 3.
+
+* Flavor 2 can be booted on all aggregates.
+
+* Flavor 3 can be booted on aggregate 1 only.
+
+* Flavor 4 can be booted on all aggregates.
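+
+As a rough illustration of the sentinel semantics described above, the
+following self-contained Python sketch mirrors the boot results listed in
+this example (``value_matches`` is a hypothetical helper written only for
+this document, not the filter implementation)::
+
+    def value_matches(extra_spec, aggregate_value):
+        # Evaluate one flavor extra spec string against one aggregate value
+        # (None means the key is absent from the aggregate metadata).
+        tokens = [w for w in extra_spec.split() if w != '<or>']
+        sentinels = {t for t in tokens if t in ('*', '~', '!')}
+        values = [t for t in tokens if t not in ('*', '~', '!')]
+        if '!' in sentinels:              # key must not be present
+            return aggregate_value is None
+        if aggregate_value is None:       # key absent in the aggregate
+            return '~' in sentinels       # allowed only if optional
+        if '*' in sentinels:              # any value accepted
+            return True
+        return aggregate_value in values  # plain value comparison
+
+    assert value_matches('~', '1') is False             # flavor 1, agg 1
+    assert value_matches('<or> * <or> ~', None) is True  # flavor 2, agg 2
+    assert value_matches('*', '1') is True               # flavor 3, agg 1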
+
+The aggregate metadata key ``force_metadata_check`` is a reserved word. Its
+use is explained in [use case 4], [use case 5] and [use case 6]. It holds a
+boolean value; if the key is present and its value is "True", the logic
+explained in those use cases applies. If the key is not present or its value
+is "False", the filter does not apply this new logic.
+
+
+Use Cases
+=========
+
+Use case 1
+----------
+An operator wants to filter hosts having a key in their aggregate metadata,
+independently of its value, e.g.:
+
+flavor extra specs: {"key": "*"}
+
+All hosts inside a host aggregate containing this key, regardless of its
+value, will pass this check.
+
+Use case 2
+----------
+An operator wants to filter hosts having a specific value, but if the
+aggregate doesn't have this key, the host should pass anyway, e.g.:
+
+flavor extra specs: {"key": "<or> 1 <or> ~"}
+aggregate 1 metadata: {"key": "1"}
+aggregate 2 metadata: {"key": "2"}
+aggregate 3 metadata: {}
+
+Hosts in aggregates 1 and 3 will pass this filter.
+
+Use case 3
+----------
+In this case, the operator wants to reject any host inside an aggregate
+containing a given key, e.g.:
+
+flavor extra specs: {"key": "!"}
+aggregate 1 metadata: {"key": "1"}
+aggregate 2 metadata: {}
+
+Only hosts in aggregate 2 will pass.
+
+Use case 4
+----------
+This use case can be used to force a flavor to contain a set of keys present
+in the aggregate metadata. This constraint is added to the normal filter
+process, which tries to match the keys present in the flavor with the keys
+in the aggregate metadata. To activate this new verification logic in the
+filter, a new metadata key is introduced: {"force_metadata_check": "True"}.
+E.g.:
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {}
+aggregate metadata: {"key": "1", "force_metadata_check": "True"}
+
+In this example, hosts in this aggregate will pass only with flavor 1.
+Without the key ``force_metadata_check`` set to "True", flavor 3 would also
+allow the use of hosts in the aggregate.
+
+Use case 5
+----------
+If the key ``force_metadata_check``, explained in the previous use case, is
+set, the administrator can also use the sentinels in the aggregate metadata
+values, e.g.:
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {}
+aggregate metadata: {"key": "*", "force_metadata_check": "True"}
+
+In this example, flavors 1 and 2 will pass for hosts belonging to this
+aggregate.
+
+Using this example, if the key ``force_metadata_check`` is removed (or set to
+"False"), the only accepted flavor will be flavor 3. E.g.:
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {}
+flavor 4 extra specs: {"key": "*", "key2": "2"}
+aggregate metadata: {"key": "*"}
+
+* Flavor 1 key value, "1", doesn't match the literal string "*".
+
+* The same behaviour applies to flavor 2.
+
+* Because flavor 3 doesn't have any requirement, it's accepted in this host
+  aggregate; any flavor without extra specs will be accepted.
+
+* Flavor 4 won't pass because "key2" doesn't exist in the aggregate metadata.
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {}
+aggregate metadata: {"key": "!", "force_metadata_check": "True"}
+
+In this third example, only flavor 3 will be allowed on hosts in the
+aggregate.
+
+This additional logic is backwards compatible with the existing one.
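+
+A simplified sketch of this reversed check direction follows (an assumption
+for illustration only: ``aggregate_allows_flavor`` is a hypothetical helper
+that reuses the ``value_matches`` sketch shown earlier and ignores the
+regular forward check performed when the key is absent)::
+
+    def aggregate_allows_flavor(aggregate_metadata, extra_specs):
+        # Surjective direction: every aggregate metadata key must be
+        # satisfied by the flavor extra specs. Roles are swapped here:
+        # the metadata value is the one carrying the sentinels.
+        metadata = dict(aggregate_metadata)
+        if metadata.pop('force_metadata_check', 'False') != 'True':
+            return True  # reversed logic only applies when forced
+        return all(value_matches(value, extra_specs.get(key))
+                   for key, value in metadata.items())
+
+    # Use case 4: only the flavor carrying {"key": "1"} is accepted.
+    metadata = {'key': '1', 'force_metadata_check': 'True'}
+    assert aggregate_allows_flavor(metadata, {'key': '1'}) is True
+    assert aggregate_allows_flavor(metadata, {'key': '2'}) is False
+    assert aggregate_allows_flavor(metadata, {}) is False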
+
+Use case 6
+----------
+Again, if the key ``force_metadata_check`` is set in the aggregate metadata,
+the operator can use the ``<or>`` operator to define multiple values for a
+key. This change doesn't break the logic of the old filter: the aggregate
+metadata checked inside the filter is, for each key, a set of values
+combining the data from every aggregate the host belongs to; this set will
+now also contain the values inside the ``<or>`` junction. E.g.:
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {"key": "<or> 2 <or> 3"}
+flavor 4 extra specs: {}
+aggregate metadata: {"key": "<or> 1 <or> 2", "force_metadata_check": "True"}
+
+In this example, only flavor 4 won't pass for the hosts inside the aggregate.
+
+It should be noted that if the key ``force_metadata_check`` is not set, the
+strings contained in the aggregate metadata keys will be checked literally.
+Using the last example, if the key ``force_metadata_check`` is removed (or
+set to "False"), the filter will use the aggregate metadata key value strings
+without the new logic added by this filter, to maintain backwards
+compatibility. E.g.:
+
+flavor 1 extra specs: {"key": "1"}
+flavor 2 extra specs: {"key": "2"}
+flavor 3 extra specs: {"key": "<or> 2 <or> 3"}
+flavor 4 extra specs: {}
+flavor 5 extra specs: {"key": "<or> 1 <or> 2"}
+aggregate metadata: {"key": "<or> 1 <or> 2"}
+
+In this second example, no flavor will pass the filter. Flavor 5 has the
+same string value in "key", but the current filter,
+``AggregateInstanceExtraSpecsFilter``, compares each value in the flavor's
+key, "1" and "2", independently.
+
+Use case 7
+----------
+The use of namespaced variables could be extended, allowing the operator to
+filter hosts by these values. To maintain backward compatibility, any
+namespaced key without the escape scope used in the old filter,
+``aggregate_instance_extra_specs``, will be considered optional: if the key
+is not present in the aggregate metadata, the filter will skip this key; if
+the key is present in the aggregate metadata, the value will be checked as a
+regular key. E.g.:
+
+flavor extra specs: {"hw:cpu_policy": "shared"}
+aggregate 1 metadata: {"hw:cpu_policy": "shared"}
+aggregate 2 metadata: {"hw:cpu_policy": "dedicated"}
+aggregate 3 metadata: {}
+
+In this example, hosts in aggregates 1 and 3 will pass, but hosts in
+aggregate 2 won't, because the namespaced key is present in both the extra
+specs and the metadata and the values differ. This new feature could collide
+with the old behaviour.
+
+If the key ``force_metadata_check`` is set in the aggregate metadata, all
+keys, with or without a namespace, will be checked. This new check allows
+the operator to define host aggregates with restrictions on spawning virtual
+machines on their hosts. With this extension, the operator can easily define
+sets of hosts with specific properties and usage restrictions by modifying
+only the aggregate metadata (instead of the flavor definition).
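+
+As an illustration of the scope handling described in this use case, one
+possible interpretation is sketched below (``is_mandatory_key`` is a
+hypothetical helper used only for this example, not part of the filter
+code)::
+
+    def is_mandatory_key(extra_spec_key):
+        # Keys scoped with 'aggregate_instance_extra_specs:' stay mandatory,
+        # as do un-namespaced keys; any other namespaced key is optional and
+        # is simply skipped when the aggregate does not define it.
+        scope, sep, _name = extra_spec_key.partition(':')
+        if not sep:
+            return True
+        return scope == 'aggregate_instance_extra_specs'
+
+    assert is_mandatory_key('opt1') is True
+    assert is_mandatory_key('aggregate_instance_extra_specs:opt2') is True
+    assert is_mandatory_key('hw:cpu_policy') is False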
diff --git a/doc/source/usage.rst b/doc/source/usage.rst index 389c6f7..a807b45 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -1,6 +1,6 @@ -======== +===== Usage -======== +===== To use nfv-filters in a project:: diff --git a/nfv_filters/nova/__init__.py b/nfv_filters/nova/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/nova/scheduler/__init__.py b/nfv_filters/nova/scheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/nova/scheduler/filters/__init__.py b/nfv_filters/nova/scheduler/filters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/nova/scheduler/filters/aggregate_instance_type_filter.py b/nfv_filters/nova/scheduler/filters/aggregate_instance_type_filter.py new file mode 100644 index 0000000..cbe27aa --- /dev/null +++ b/nfv_filters/nova/scheduler/filters/aggregate_instance_type_filter.py @@ -0,0 +1,287 @@ +# Copyright (c) 2016, Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +import six + +from nova.i18n import _LW +from nova.scheduler import filters +from nova.scheduler.filters import extra_specs_ops as ops +from nova.scheduler.filters import utils + + +LOG = logging.getLogger(__name__) + +_SCOPE = 'aggregate_instance_extra_specs' +FORCE_METADATA_CHECK = 'force_metadata_check' + +ASTERISK = '*' +TILDE = '~' +EXCLAMATION = '!' +SENTINELS = [ASTERISK, TILDE, EXCLAMATION] + +OR = '' + + +class AggregateInstanceTypeFilter(filters.BaseHostFilter): + """AggregateInstanceTypeFilter works with InstanceType records.""" + + # Aggregate data and instance type does not change within a request + run_filter_once_per_request = True + + @staticmethod + def _read_sentinels(value): + """Read sentinel values. + + :param value: value or set of values contained in a dictionary entry of + host aggregate metadata or instance extra specs. + :type value: set or string, no other types will be allowed. + + Flavor extra specs values can contain also the following sentinel + values: + + * (asterisk): may be used to specify that any value is valid, can be + used alone or inside a junction. + ~ (tilde): may be used to specify that a key may optionally be omitted, + can be used alone or inside a junction. + ! (exclamation): may be used to specify that the key must not be + present, it's exclusive. + """ + + if not isinstance(value, (set, basestring)): + LOG.warning(_LW("Value passed to '_read_sentinels' is not valid: " + "%(value)s"), {'value': value}) + return False, None + + if not isinstance(value, set): + value = {value} + + sentinels = set() + other_values = set() + for val in [val for val in value if val]: + words = val.split(' ') + op = words.pop(0) + if op == OR: + sentinels |= set([word for word in words + if word in SENTINELS]) + other_values |= set([word for word in words if + word not in SENTINELS + [OR]]) + if EXCLAMATION in sentinels and len(sentinels) > 1: + LOG.warning(_LW("Sentinel value '!' 
is exclusive and " + "cannot be joint with other values")) + return False, None + elif op in SENTINELS: + sentinels.add(op) + else: + other_values.add(op) + + return sentinels, other_values + + def _execute_sentinel_actions(self, check_val, req_val): + """Execute the filter actions depending on the possible sentinel values + + :param check_val: set of values for a key stored in aggregate metadata + or flavor extra specs + :type check_val: set + :param req_val: string value of a key, stored in aggregate metadata or + in flavor extra specs, containing sentinel values + :type req_val: str + """ + + sentinels, other_values = self._read_sentinels(req_val) + if sentinels is False: + return False + + if EXCLAMATION in sentinels: + if check_val is None: + return True + else: + return False + if TILDE in sentinels and (not req_val or + (req_val and not other_values)): + return True + if ASTERISK in sentinels: + if check_val is not None and len(check_val) > 0: + return True + else: + return False + + return None + + @staticmethod + def _split_values(value): + ret_value = set() + if not value: + return None + + if isinstance(value, basestring): + value = {value} + + for element in value: + words = element.split() + if words and words[0] == OR: + ret_value |= set([v.strip() + for v in element.split(OR) if v]) + else: + ret_value.add(element.strip()) + + return ret_value + + def _check_by_instance_type(self, host_state, spec_obj, metadata): + """Checks if the image extra_specs are satisfied in the HA metadata + + :param host_state: host information + :type host_state: class nova.scheduler.host_manager.HostState + :param spec_obj: filter_properties + :type spec_obj: class nova.objects.request_spec.RequestSpec + :param metadata: aggregate metadata + :type metadata: dict of sets + :return: True if the aggregate metadata fulfills the conditions defined + in the flavor extra specs. + """ + + instance_type = spec_obj.flavor + if not instance_type.get('extra_specs', None): + return True + + LOG.debug("Instance extra specs: %s", instance_type.extra_specs) + LOG.debug("Host aggregate metadata: %s", metadata) + for key, especs_vals in six.iteritems(instance_type.extra_specs): + scope = key.split(':', 1) + scoped = False + if len(scope) > 1: + if scope[0] == _SCOPE: + key = scope[1] + else: + scoped = True + + aggregate_vals = self._split_values(metadata.get(key)) + ret = self._execute_sentinel_actions(aggregate_vals, especs_vals) + if ret is True: + continue + elif ret is False: + return ret + + if not aggregate_vals and not scoped: + LOG.debug("%(host_state)s fails instance type extra specs " + "requirements. Extra spec %(key)s is not in " + "aggregate metadata.", + {'host_state': host_state, 'key': key}) + return False + elif not aggregate_vals and scoped: + LOG.debug("Not mandatory extra_spec %(key)s is not in " + "aggregate metadata.", {'key': key}) + continue + + if not aggregate_vals: + LOG.debug("%(host_state)s fails instance type extra specs " + "requirements. Extra spec %(key)s is not in " + "aggregate.", {'host_state': host_state, 'key': key}) + return False + + for aggregate_value in aggregate_vals: + if ops.match(aggregate_value, especs_vals): + break + else: + LOG.debug("%(host_state)s fails instance type extra specs " + "requirements. 
'%(aggregate_vals)s' do not " + "match '%(req)s'", + {'host_state': host_state, 'req': especs_vals, + 'aggregate_vals': aggregate_vals}) + return False + return True + + def _check_by_aggregate(self, host_state, spec_obj, metadata): + """Checks if the HA metadata is satisfied in the image extra_specs + + :param host_state: host information + :type host_state: class nova.scheduler.host_manager.HostState + :param spec_obj: filter_properties + :type spec_obj: class nova.objects.request_spec.RequestSpec + :param metadata: aggregate metadata + :type metadata: dict of sets + :return: True if the flavor extra specs fulfills the conditions defined + in the aggregate metadata. + """ + + if not metadata: + return True + instance_type = spec_obj.flavor + extra_specs = instance_type.get('extra_specs', {})\ + + LOG.debug("Instance extra specs: %s", extra_specs) + LOG.debug("Host aggregate metadata: %s", metadata) + for key, aggregate_vals in six.iteritems(metadata): + scope = key.split(':', 1) + scoped = False + if len(scope) > 1: + if scope[0] == _SCOPE: + key = scope[1] + else: + scoped = True + + especs_vals = self._split_values(extra_specs.get(key)) + ret = self._execute_sentinel_actions(especs_vals, aggregate_vals) + if ret is True: + continue + elif ret is False: + return ret + + if not especs_vals and not scoped: + LOG.debug("%(host_state)s fails instance type extra specs " + "requirements. Aggregate metadata key %(key)s is " + "not in extra specs", + {'host_state': host_state, 'key': key}) + return False + elif not especs_vals and scoped: + LOG.debug("Not mandatory aggregate metadata key %(key)s is " + "not in instance type extra specs", {'key': key}) + continue + + if not especs_vals: + LOG.debug("%(host_state)s fails instance type extra_specs " + "requirements. Aggregate metadata key %(key)s is " + "not in instance type extra specs.", + {'host_state': host_state, 'key': key}) + return False + + for aggregate_value in aggregate_vals: + for specs_value in especs_vals: + if ops.match(specs_value, aggregate_value): + break + else: + break + else: + continue + + LOG.debug("%(host_state)s fails instance type extra specs " + "requirements. '%(aggregate_vals)s' do not " + "match '%(req)s'", + {'host_state': host_state, 'req': especs_vals, + 'aggregate_vals': aggregate_vals}) + return False + return True + + def host_passes(self, host_state, spec_obj): + """Checks host in aggregate metadata key/value match image properties + + """ + + metadata = utils.aggregate_metadata_get_by_host(host_state) + if FORCE_METADATA_CHECK in six.iterkeys(metadata): + metadata.pop(FORCE_METADATA_CHECK) + return self._check_by_aggregate(host_state, spec_obj, metadata) + else: + return self._check_by_instance_type(host_state, spec_obj, metadata) diff --git a/nfv_filters/tests/test_nfv_filters.py b/nfv_filters/tests/test_nfv_filters.py deleted file mode 100644 index aa5c0ce..0000000 --- a/nfv_filters/tests/test_nfv_filters.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -test_nfv_filters ----------------------------------- - -Tests for `nfv_filters` module. -""" - -from nfv_filters.tests import base - - -class TestNfv_filters(base.TestCase): - - def test_something(self): - pass diff --git a/nfv_filters/tests/unit/__init__.py b/nfv_filters/tests/unit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/tests/unit/nova/__init__.py b/nfv_filters/tests/unit/nova/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/tests/unit/nova/scheduler/__init__.py b/nfv_filters/tests/unit/nova/scheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/tests/unit/nova/scheduler/filters/__init__.py b/nfv_filters/tests/unit/nova/scheduler/filters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nfv_filters/tests/unit/nova/scheduler/filters/test_aggregate_instance_type_filter.py b/nfv_filters/tests/unit/nova/scheduler/filters/test_aggregate_instance_type_filter.py new file mode 100644 index 0000000..ec75621 --- /dev/null +++ b/nfv_filters/tests/unit/nova/scheduler/filters/test_aggregate_instance_type_filter.py @@ -0,0 +1,481 @@ +# Copyright (c) 2016, Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging + +import mock + +from nfv_filters.nova.scheduler.filters import (aggregate_instance_type_filter + as agg_ins_type) +from nova import objects +from nova import test +from nova.tests.unit.scheduler import fakes + + +class TestAggregateExtraSpecsFilter(test.NoDBTestCase): + + def setUp(self): + super(TestAggregateExtraSpecsFilter, self).setUp() + self.filt_cls = agg_ins_type.AggregateInstanceTypeFilter() + + def _create_host_state(self, + host=None, + node=None, + capabilities=None, + aggr_metadata=None): + host = 'host1' if host is None else host + node = 'node1' if node is None else node + capabilities = {'opt1': 1} if capabilities is None else capabilities + host_state = fakes.FakeHostState(host, node, capabilities) + if not isinstance(aggr_metadata, list): + aggr_metadata = [aggr_metadata] + for aggr_metadata_item in aggr_metadata: + aggr = objects.Aggregate(context='test') + aggr.metadata = aggr_metadata_item + host_state.aggregates.append(aggr) + return host_state + + def _do_test_aggr_filter_extra_specs(self, extra_specs, aggr_metadata, + passes): + spec_obj = objects.RequestSpec( + context=mock.sentinel.ctx, + flavor=objects.Flavor(memory_mb=1024, extra_specs=extra_specs)) + capabilities = {'free_ram_mb': 1024} + host = self._create_host_state(capabilities=capabilities, + aggr_metadata=aggr_metadata) + assertion = self.assertTrue if passes else self.assertFalse + assertion(self.filt_cls.host_passes(host, spec_obj)) + + @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') + def test_passes_empty_extra_specs(self, agg_mock): + capabilities = {'opt1': 1, 'opt2': 2} + spec_obj = objects.RequestSpec( + context=mock.sentinel.ctx, + flavor=objects.Flavor(memory_mb=1024, extra_specs={})) + host = fakes.FakeHostState('host1', 'node1', capabilities) + self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + self.assertTrue(agg_mock.called) + + # Sentinel checks. + @mock.patch.object(logging.LoggerAdapter, 'warning') + def test_exclamation_exclusive(self, mock_logging): + aggr_metadata = { + 'opt1': '1', + } + extra_specs = { + 'opt1': ' ! *', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + mock_logging.assert_called_with("Sentinel value '!' is exclusive and " + "cannot be joint with other values") + + def test_read_sentinels(self): + # Sentinels alone. + self.assertEqual(({'!'}, set()), + self.filt_cls._read_sentinels('!')) + self.assertEqual(({'*'}, set()), + self.filt_cls._read_sentinels('*')) + self.assertEqual(({'~'}, set()), + self.filt_cls._read_sentinels('~')) + # Value strings without sentinels. + self.assertEqual((set(), {'value'}), + self.filt_cls._read_sentinels('value')) + self.assertEqual((set(), set()), + self.filt_cls._read_sentinels('')) + self.assertEqual((False, None), + self.filt_cls._read_sentinels(None)) + # Combination of sentinels. + self.assertEqual(({'~'}, {'value'}), + self.filt_cls._read_sentinels(' value ~')) + self.assertEqual(({'~', '*'}, {'value'}), + self.filt_cls._read_sentinels( + ' value ~ *')) + self.assertEqual((False, None), + self.filt_cls._read_sentinels( + ' value ! *')) + + # _split_value checks. + def test_split_values(self): + def _check(ref_vals, input_vals): + self.assertEqual(ref_vals, self.filt_cls._split_values(input_vals)) + + # Test string input. + _check({'val_1'}, 'val_1') + # Test set input. + _check({'val_1', 'val_2'}, {'val_1', 'val_2'}) + # Remove blank spaces. 
+ _check({'val_1'}, 'val_1 ') + _check({'val_1'}, ' val_1') + _check({'val_1'}, ' val_1 ') + _check({'val_1', 'val_2', 'val_3'}, {' val_1', 'val_2 ', ' val_3 '}) + # Test junction inside a string; string must start with . + _check({'val_1', 'val_2'}, ' val_1 val_2') + _check({'val_1', 'val_2', 'val_3'}, {'val_3', ' val_1 val_2'}) + _check({'val_1 val_2'}, 'val_1 val_2') + _check({'val_1', 'val_2', 'val_3', 'val_4'}, + {'val_1', ' val_1 val_2', ' val_3 ', + ' val_3 val_4 '}) + + # No 'force_metadata_check' value in aggregate metadata. + def test_check_by_instance_no_metadata(self): + aggr_metadata = {} + extra_specs = { + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_instance_correct_metadata(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2' + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_scoped_key_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2' + } + extra_specs = { + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_scoped_no_present(self): + aggr_metadata = { + 'opt1': '1' + } + extra_specs = { + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_instance_no_scoped_key_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2', + 'trust:trusted_host': 'true', + } + extra_specs = { + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_no_scoped_key_no_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2', + } + extra_specs = { + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_sentinel_asterisk_key_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2', + } + extra_specs = { + 'opt1': '1', + 'opt2': '*', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_sentinel_asterisk_key_not_present(self): + aggr_metadata = { + 'opt1': '1', + } + extra_specs = { + 'opt1': '1', + 'opt2': '*', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_instance_sentinel_any_key_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2', + } + extra_specs = { + 'opt1': '1', + 'opt2': '~', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_sentinel_any_and_other_value_key_present(self): + aggr_metadata1 = { + 'opt1': '1', + 'opt2': '2', + } + aggr_metadata2 = { + 'opt1': '1', + 'opt2': '20', + } + extra_specs = { + 'opt1': '1', + 'opt2': ' ~ 2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata1, passes=True) + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata2, passes=False) + + def 
test_check_by_instance_sentinel_any_key_not_present(self): + aggr_metadata = { + 'opt1': '1', + } + extra_specs = { + 'opt1': '1', + 'opt2': '~', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_sentinel_exclamation_key_present(self): + aggr_metadata = { + 'opt1': '1', + 'opt2': '2', + } + extra_specs = { + 'opt1': '1', + 'opt2': '!', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_instance_sentinel_exclamation_key_not_present(self): + aggr_metadata = { + 'opt1': '1', + } + extra_specs = { + 'opt1': '1', + 'opt2': '!', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_instance_multiple_aggregates(self): + aggr1 = { + 'opt2': '20', + } + aggr2 = { + 'opt1': '10', + } + extra_specs = { + 'opt1': '10', + 'opt2': '20', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr1, passes=False) + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr2, passes=False) + self._do_test_aggr_filter_extra_specs( + extra_specs, [aggr1, aggr2], passes=True) + + # 'force_metadata_check' value in aggregate metadata. + def test_check_by_aggregate_no_extra_specs(self): + aggr_metadata = { + 'force_metadata_check': '', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_correct_extra_specs(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '2', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_scoped_key_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_scoped_key_not_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'aggregate_instance_extra_specs:opt2': '2', + 'trust:trusted_host': 'true', + } + extra_specs = { + 'opt1': '1', + 'trust:trusted_host': 'true', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_aggregate_sentinel_asterisk_key_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '*', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_sentinel_asterisk_key_not_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '*', + } + extra_specs = { + 'opt1': '1', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_aggregate_sentinel_any_key_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '~', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_sentinel_any_key_not_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '~', + } + extra_specs = { + 'opt1': '1', + } + 
self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_sentinel_any_and_other_value_key_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': ' ~ 2', + } + extra_specs1 = { + 'opt1': '1', + 'opt2': '2', + } + extra_specs2 = { + 'opt1': '1', + 'opt2': '20', + } + self._do_test_aggr_filter_extra_specs( + extra_specs1, aggr_metadata, passes=True) + self._do_test_aggr_filter_extra_specs( + extra_specs2, aggr_metadata, passes=False) + + def test_check_by_aggregate_sentinel_exclamation_key_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '!', + } + extra_specs = { + 'opt1': '1', + 'opt2': '2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=False) + + def test_check_by_aggregate_sentinel_exclamation_key_not_present(self): + aggr_metadata = { + 'force_metadata_check': '', + 'opt1': '1', + 'opt2': '!', + } + extra_specs = { + 'opt1': '1', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata, passes=True) + + def test_check_by_aggregate_multiple_aggregates(self): + aggr_metadata1 = { + 'force_metadata_check': '', + 'opt1': '1', + } + aggr_metadata2 = { + 'force_metadata_check': '', + 'opt2': '2', + } + + extra_specs = { + 'opt1': '1', + 'opt2': '2', + } + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata1, passes=True) + self._do_test_aggr_filter_extra_specs( + extra_specs, aggr_metadata2, passes=True) + self._do_test_aggr_filter_extra_specs( + extra_specs, [aggr_metadata1, aggr_metadata2], passes=True) diff --git a/releasenotes/notes/add-aggregate-instance-type-filter-99d668f52dc6337a.yaml b/releasenotes/notes/add-aggregate-instance-type-filter-99d668f52dc6337a.yaml new file mode 100644 index 0000000..6457782 --- /dev/null +++ b/releasenotes/notes/add-aggregate-instance-type-filter-99d668f52dc6337a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added new Nova Scheduler filter: ``AggregateInstanceTypeFilter``. diff --git a/test-requirements.txt b/test-requirements.txt index a3fcd81..c899a4c 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -15,3 +15,6 @@ testtools>=1.4.0 # MIT # releasenotes reno>=1.8.0 # Apache2 + +# OpenStack projects +-e git+https://github.com/openstack/nova.git@master#egg=nova diff --git a/tox.ini b/tox.ini index 1d761d5..eb10deb 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] minversion = 2.0 -envlist = py27,pep8 +envlist = py27,pep8,docs skipsdist = True [testenv]