From 6c1efee2d085334cb00d0a4fbf69008b99af73f4 Mon Sep 17 00:00:00 2001 From: James Vaughn Date: Mon, 9 Aug 2021 14:09:19 +0100 Subject: [PATCH] Add aggregate filtering and isolation options Add limit-tenants-to-placement-aggregate, placement-aggregate-required-for-tenants and enable-isolated-aggregate-filtering options. Closes-Bug: #1920982 Change-Id: Ibbb0e412d1a821cdb56e447ca88b91afca50c8c6 --- config.yaml | 79 +++++++++++++++++++++++++++++ hooks/nova_cc_context.py | 6 +++ templates/train/nova.conf | 10 ++++ unit_tests/test_nova_cc_contexts.py | 8 +++ 4 files changed, 103 insertions(+) diff --git a/config.yaml b/config.yaml index bbef82d5..d2249a87 100644 --- a/config.yaml +++ b/config.yaml @@ -684,3 +684,82 @@ options: cloud utilization. . Note: only effective from Pike onward + limit-tenants-to-placement-aggregate: + type: boolean + default: False + description: | + This setting causes the scheduler to look up a host aggregate with the + metadata key of filter_tenant_id set to the project of an incoming + request, and request results from placement be limited to that + aggregate. Multiple tenants may be added to a single aggregate by + appending a serial number to the key, such as filter_tenant_id:123. + . + The matching aggregate UUID must be mirrored in placement for proper + operation. If no host aggregate with the tenant id is found, or that + aggregate does not match one in placement, the result will be the same + as not finding any suitable hosts for the request. + . + Set this option to True if you require instances for a particular tenant + to be placed in a particular host aggregate (i.e. a particular host or + set of hosts). After enabling this option, follow + https://docs.openstack.org/nova/latest/admin/aggregates.html#tenant-isolation-with-placement + for details on creating and configuring host aggregates and resource + providers. + . 
+ Note that this will not prevent other tenants, who aren't associated with + a host aggregate, from launching instances on hosts within this + aggregate. + . + Also see the placement-aggregate-required-for-tenants and + enable-isolated-aggregate-filtering options. + . + This is only supported on OpenStack Train or later releases. + placement-aggregate-required-for-tenants: + type: boolean + default: False + description: | + This setting, which only has an effect when + limit-tenants-to-placement-aggregate is set to True, will control whether + or not a tenant with no aggregate affinity will be allowed to schedule to + any available node. If aggregates are used to limit some tenants but not + all, then this should be False. If all tenants should be confined via + aggregate, then this should be True to prevent them from receiving + unrestricted scheduling to any available node. + . + Set this option to True under the rare circumstance where you want to + manually control instance placement by associating every tenant with + a host aggregate. If you set this option to True and have tenants that + are not associated with a host aggregate, those tenants will no longer be + able to launch instances. + . + Also see the limit-tenants-to-placement-aggregate and + enable-isolated-aggregate-filtering options. + . + This is only supported on OpenStack Train or later releases. + enable-isolated-aggregate-filtering: + type: boolean + default: False + description: | + This setting allows the scheduler to restrict hosts in aggregates based + on matching required traits in the aggregate metadata and the instance + flavor/image. If an aggregate is configured with a property with key + trait:$TRAIT_NAME and value required, the instance flavor extra_specs + and/or image metadata must also contain trait:$TRAIT_NAME=required to be + eligible to be scheduled to hosts in that aggregate. + . 
+ Set this option to True if you require that only instances with matching + traits (via flavor or image metadata) be placed on particular + hosts. This may also be a suitable workaround approach if you need to + give a tenant or tenants exclusivity for a compute host or set of hosts + (through the use of a custom trait) but otherwise want placement to + function normally for other hosts. + . + After enabling this option, follow + https://docs.openstack.org/nova/latest/reference/isolate-aggregates.html + for details on creating and configuring traits, resource + providers and host aggregates. + . + Also see the limit-tenants-to-placement-aggregate and + placement-aggregate-required-for-tenants options. + . + This is only supported on OpenStack Train or later releases. diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 83731346..063a1f17 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -439,6 +439,12 @@ class NovaConfigContext(ch_context.WorkerConfigContext): ctxt['unique_server_names'] = hookenv.config('unique-server-names') ctxt['skip_hosts_with_build_failures'] = hookenv.config( 'skip-hosts-with-build-failures') + ctxt['limit_tenants_to_placement_aggregate'] = hookenv.config( + 'limit-tenants-to-placement-aggregate') + ctxt['placement_aggregate_required_for_tenants'] = hookenv.config( + 'placement-aggregate-required-for-tenants') + ctxt['enable_isolated_aggregate_filtering'] = hookenv.config( + 'enable-isolated-aggregate-filtering') return ctxt diff --git a/templates/train/nova.conf b/templates/train/nova.conf index 95131a46..487267b6 100644 --- a/templates/train/nova.conf +++ b/templates/train/nova.conf @@ -201,6 +201,16 @@ discover_hosts_in_cells_interval = 30 workers = {{ workers }} +{% if limit_tenants_to_placement_aggregate -%} +limit_tenants_to_placement_aggregate = True +{%- endif %} +{%- if placement_aggregate_required_for_tenants %} +placement_aggregate_required_for_tenants = True +{%- endif %} +{%- 
if enable_isolated_aggregate_filtering %} +enable_isolated_aggregate_filtering = True +{%- endif %} + [filter_scheduler] {% if additional_neutron_filters is defined %} enabled_filters = {{ scheduler_default_filters }},{{ additional_neutron_filters }} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index b0085933..ea9f6afa 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -393,6 +393,14 @@ class NovaComputeContextTests(CharmTestCase): self.config('unique-server-names')) self.assertEqual(ctxt['skip_hosts_with_build_failures'], self.config('skip-hosts-with-build-failures')) + self.assertEqual(ctxt['limit_tenants_to_placement_aggregate'], + self.config('limit-tenants-to-placement-aggregate')) + self.assertEqual( + ctxt["placement_aggregate_required_for_tenants"], + self.config("placement-aggregate-required-for-tenants"), + ) + self.assertEqual(ctxt['enable_isolated_aggregate_filtering'], + self.config('enable-isolated-aggregate-filtering')) _pci_alias1 = { "name": "IntelNIC",