From 5e749150649bd5eb9916f43c9121f9a1904f36e7 Mon Sep 17 00:00:00 2001
From: melanie witt
Date: Wed, 29 May 2019 17:46:13 +0000
Subject: [PATCH] Follow up for counting quota usage from placement

This addresses comments from the series:

* Remove usage-specific info from docstring
* Add note to nova-next job description "changelog"
* Add info about data migration to config option help
* Consolidate code under count_usage_from_placement conditional
* Consolidate variables for checking data migration doneness
* Remove hard-coded user_id and project_id from func test
* Re-word code comment about checking data migration doneness

Related to blueprint count-quota-usage-from-placement

Change-Id: Ida2de9256fcc9e092fb9977b8ac067fc1472c316
---
 .zuul.yaml                             |  1 +
 nova/conf/quota.py                     |  9 +++++++++
 nova/objects/instance_mapping.py       |  4 ----
 nova/quota.py                          | 23 +++++++++++------------
 nova/tests/functional/db/test_quota.py |  4 +++-
 5 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/.zuul.yaml b/.zuul.yaml
index b9e0213b3f59..742c68f31aaa 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -165,6 +165,7 @@
       TLS console proxy code in the libvirt driver.
       Starting in Stein, the job was changed to run with python 3 and
       enabled volume multi-attach testing.
+      Starting in Train, the job enabled counting quota usage from placement.
       Runs all tempest compute API and most scenario tests concurrently.
     run: playbooks/legacy/nova-next/run.yaml
     post-run: playbooks/legacy/nova-next/post.yaml
diff --git a/nova/conf/quota.py b/nova/conf/quota.py
index b856096b1a62..06c10bc1ba00 100644
--- a/nova/conf/quota.py
+++ b/nova/conf/quota.py
@@ -332,6 +332,15 @@
 consume quota usage for cores and ram. Note that because of this, it will be
 possible for a request to unshelve a server to be rejected if the user does
 not have enough quota available to support the cores and ram needed by the
 server to be unshelved.
+
+The ``populate_queued_for_delete`` and ``populate_user_id`` online data
+migrations must be completed before usage can be counted from placement. If
+this configuration option is set to True before the data migration is
+complete, the system will fall back to legacy quota usage counting from cell
+databases, based on the result of an EXISTS database query run during each
+quota check. Operators who want to avoid the performance hit from the EXISTS
+queries should wait to set this option to True until after completing their
+online data migrations via ``nova-manage db online_data_migrations``.
 """),
 ]
diff --git a/nova/objects/instance_mapping.py b/nova/objects/instance_mapping.py
index 224594ed92f0..2affea3f1dcc 100644
--- a/nova/objects/instance_mapping.py
+++ b/nova/objects/instance_mapping.py
@@ -417,10 +417,6 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
         not included in the count (deleted and SOFT_DELETED instances).
         Instances that are queued_for_deleted=None are not included in the
         count because we are not certain about whether or not they are
         deleted.
-        When counting quota usage, we will fall back on the legacy counting
-        method and count instances, cores, and ram from cell databases if any
-        unmigrated instance mappings (user_id=None or queued_for_delete=None)
-        are detected, to avoid using a potentially inaccurate count.
         :param context: The request context for database access
         :param project_id: The project_id to count across
diff --git a/nova/quota.py b/nova/quota.py
index 2222f63349bb..8fe142e2d5d7 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -1176,8 +1176,9 @@
                 {'user': {'server_group_members': <count across user>}}
     """
     # Because server group members quota counting is not scoped to a project,
-    # but scoped to a particular InstanceGroup and user, we cannot filter our
-    # user_id/queued_for_delete populated check on project_id or user_id.
+    # but scoped to a particular InstanceGroup and user, we have no reasonable
+    # way of pruning down our migration check to only a subset of all instance
+    # mapping records.
     # So, we check whether user_id/queued_for_delete is populated for all
     # records and cache the result to prevent unnecessary checking once the
     # data migration has been completed.
@@ -1185,9 +1186,8 @@
     if not UID_QFD_POPULATED_CACHE_ALL:
         LOG.debug('Checking whether user_id and queued_for_delete are '
                   'populated for all projects')
-        uid_qfd_populated = _user_id_queued_for_delete_populated(context)
-        if uid_qfd_populated:
-            UID_QFD_POPULATED_CACHE_ALL = True
+        UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
+            context)

     if UID_QFD_POPULATED_CACHE_ALL:
         count = objects.InstanceMappingList.get_count_by_uuids_and_user(
@@ -1308,13 +1308,12 @@
                 UID_QFD_POPULATED_CACHE_BY_PROJECT.add(project_id)
         else:
             uid_qfd_populated = True
-    if not uid_qfd_populated:
-        LOG.warning('Falling back to legacy quota counting method for '
-                    'instances, cores, and ram')
-
-    if CONF.quota.count_usage_from_placement and uid_qfd_populated:
-        return _instances_cores_ram_count_api_db_placement(context, project_id,
-                                                           user_id=user_id)
+        if uid_qfd_populated:
+            return _instances_cores_ram_count_api_db_placement(context,
+                                                               project_id,
+                                                               user_id=user_id)
+        LOG.warning('Falling back to legacy quota counting method for instances, '
+                    'cores, and ram')
     return _instances_cores_ram_count_legacy(context, project_id,
                                              user_id=user_id)
diff --git a/nova/tests/functional/db/test_quota.py b/nova/tests/functional/db/test_quota.py
index e2b65b0deb59..4554db9c09e5 100644
--- a/nova/tests/functional/db/test_quota.py
+++ b/nova/tests/functional/db/test_quota.py
@@ -196,7 +196,9 @@ class QuotaTestCase(test.NoDBTestCase):
         self.assertEqual(1536, count['user']['ram'])

     def test_user_id_queued_for_delete_populated(self):
-        ctxt = context.RequestContext('fake-user', 'fake-project')
+        ctxt = context.RequestContext(
+            test_instance_mapping.sample_mapping['user_id'],
+            test_instance_mapping.sample_mapping['project_id'])
         # One deleted or SOFT_DELETED instance with user_id=None, should not be
         # considered by the check.
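
For readers tracing the consolidated logic above, here is a minimal,
self-contained sketch of the decision flow that _instances_cores_ram_count()
in nova/quota.py implements after this patch. It is illustrative only and not
part of the patch: every name in it (COUNT_USAGE_FROM_PLACEMENT,
MIGRATION_DONE_CACHE, migration_complete, count_from_placement, count_legacy)
is an invented stand-in for the corresponding nova internal, and the returned
counts are dummy values.

COUNT_USAGE_FROM_PLACEMENT = True  # stands in for CONF.quota.count_usage_from_placement
MIGRATION_DONE_CACHE = set()       # like UID_QFD_POPULATED_CACHE_BY_PROJECT


def migration_complete(project_id):
    # Stand-in for _user_id_queued_for_delete_populated(), which runs an
    # EXISTS query looking for instance mappings with user_id=None or
    # queued_for_delete=None. Here we simply pretend the online data
    # migrations have finished.
    return True


def count_from_placement(project_id, user_id=None):
    # Stand-in for _instances_cores_ram_count_api_db_placement().
    return {'project': {'instances': 2, 'cores': 4, 'ram': 2048}}


def count_legacy(project_id, user_id=None):
    # Stand-in for _instances_cores_ram_count_legacy().
    return {'project': {'instances': 2, 'cores': 4, 'ram': 2048}}


def instances_cores_ram_count(project_id, user_id=None):
    if COUNT_USAGE_FROM_PLACEMENT:
        # Run the (potentially expensive) EXISTS check at most once per
        # project by caching a positive result.
        if project_id not in MIGRATION_DONE_CACHE:
            if migration_complete(project_id):
                MIGRATION_DONE_CACHE.add(project_id)
        if project_id in MIGRATION_DONE_CACHE:
            return count_from_placement(project_id, user_id=user_id)
        print('WARNING: falling back to legacy quota counting')
    # Option disabled, or data migration not yet complete: count usage
    # from the cell databases as before.
    return count_legacy(project_id, user_id=user_id)


print(instances_cores_ram_count('demo-project'))

The design point the sketch captures is the caching: the migration-doneness
check runs at most once per project, after which quota checks go straight to
the placement-based counting path. _server_group_count_members_by_user()
applies the same idea with a single global flag (UID_QFD_POPULATED_CACHE_ALL)
because, as the re-worded comment in the patch explains, its check cannot be
scoped down to one project or user.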