From f6996903d2ef0fdb40135b506c83ed6517b28e19 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 1 Nov 2018 12:55:20 -0400 Subject: [PATCH] Add DownCellFixture This adds a new testing fixture which allows controlling when nova.context.scatter_gather_cells returns exception results for down cells. This will be useful in functional tests for the down-cell support API microversion to show the API returning minimal construct results for resources in down cells like instances and services. Part of blueprint handling-down-cell Change-Id: Iabc1e55550e8d9d82e20facfaf84316892c5564a --- nova/tests/fixtures.py | 90 ++++++++++++++++++++++++++++++++ nova/tests/unit/test_fixtures.py | 64 +++++++++++++++++++++++ 2 files changed, 154 insertions(+) diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py index 3f062b36aac1..db8b374c733d 100644 --- a/nova/tests/fixtures.py +++ b/nova/tests/fixtures.py @@ -30,6 +30,7 @@ import mock from neutronclient.common import exceptions as neutron_client_exc from oslo_concurrency import lockutils from oslo_config import cfg +from oslo_db import exception as db_exc import oslo_messaging as messaging from oslo_messaging import conffixture as messaging_conffixture from oslo_privsep import daemon as privsep_daemon @@ -41,6 +42,7 @@ from wsgi_intercept import interceptor from nova.api.openstack.compute import tenant_networks from nova.api.openstack import wsgi_app from nova.api import wsgi +from nova.compute import multi_cell_list from nova.compute import rpcapi as compute_rpcapi from nova import context from nova.db import migration @@ -1891,3 +1893,91 @@ class NoopQuotaDriverFixture(fixtures.Fixture): # When using self.flags, the concurrent test failures returned. 
class DownCellFixture(fixtures.Fixture):
    """A fixture to simulate when a cell is down either due to error or
    timeout.

    This fixture will stub out the scatter_gather_cells routine used in
    various cells-related API operations like listing/showing server details
    to return a ``oslo_db.exception.DBError`` per cell in the results.
    Therefore it is best used with a test scenario like this:

    1. Create a server successfully.
    2. Using the fixture, list/show servers. Depending on the microversion
       used, the API should either return minimal results or by default skip
       the results from down cells.

    Example usage::

        with nova_fixtures.DownCellFixture():
            # List servers with down cells.
            self.api.get_servers()
            # Show a server in a down cell.
            self.api.get_server(server['id'])
            # List services with down cells.
            self.admin_api.api_get('/os-services')
    """
    def __init__(self, down_cell_mappings=None):
        """Initialize the fixture.

        :param down_cell_mappings: None (default) to treat *every* cell as
            down, or a CellMappingList holding the subset of cells that
            should be treated as down while the rest behave normally.
        """
        self.down_cell_mappings = down_cell_mappings

    def setUp(self):
        super(DownCellFixture, self).setUp()

        def stub_scatter_gather_cells(ctxt, cell_mappings, timeout, fn, *args,
                                      **kwargs):
            # Return a dict with an entry per cell mapping where the results
            # are some kind of exception.
            up_cell_mappings = objects.CellMappingList()
            if not self.down_cell_mappings:
                # User has not passed any down cells explicitly, so all cells
                # are considered as down cells. Use a local variable rather
                # than assigning back to self.down_cell_mappings so the
                # fixture's state is not mutated as a side effect of a
                # scatter-gather call.
                down_cell_mappings = cell_mappings
            else:
                # User has passed down cell mappings, so the rest of the cells
                # should be up meaning we should return the right results.
                # We assume that down cells will be a subset of the
                # cell_mappings.
                down_cell_mappings = self.down_cell_mappings
                down_cell_uuids = [cell.uuid
                                   for cell in down_cell_mappings]
                up_cell_mappings.objects = [cell
                                            for cell in cell_mappings
                                            if cell.uuid not in
                                            down_cell_uuids]

            def wrap(cell_uuid, thing):
                # We should embed the cell_uuid into the context before
                # wrapping since it's used to calculate the cells_timed_out
                # and cells_failed properties in the object.
                ctxt.cell_uuid = cell_uuid
                return multi_cell_list.RecordWrapper(ctxt, sort_ctx, thing)

            if fn is multi_cell_list.query_wrapper:
                # If the function called through scatter-gather utility is the
                # multi_cell_list.query_wrapper, we should wrap the exception
                # object into the multi_cell_list.RecordWrapper. This is
                # because unlike the other functions where the exception
                # object is returned directly, the query_wrapper wraps this
                # into the RecordWrapper object format. So if we do not wrap
                # it will blow up at the point of generating results from
                # heapq further down the stack.
                sort_ctx = multi_cell_list.RecordSortContext([], [])
                ret1 = {
                    cell_mapping.uuid: [wrap(cell_mapping.uuid,
                                             db_exc.DBError())]
                    for cell_mapping in down_cell_mappings
                }
            else:
                ret1 = {
                    cell_mapping.uuid: db_exc.DBError()
                    for cell_mapping in down_cell_mappings
                }
            # Gather real results from the cells that are still up.
            ret2 = {}
            for cell in up_cell_mappings:
                with context.target_cell(ctxt, cell) as cctxt:
                    ctxt.cell_uuid = cell.uuid
                    result = fn(cctxt, *args, **kwargs)
                    ret2[cell.uuid] = result
            return dict(list(ret1.items()) + list(ret2.items()))

        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.scatter_gather_cells', stub_scatter_gather_cells))
class TestDownCellFixture(test.TestCase):
    """Tests for the DownCellFixture which fakes down cells in scatter-gather.
    """

    def _create_test_instance(self, ctxt, cell_mapping):
        # Create a single fake instance in the given cell and return it.
        with context.target_cell(ctxt, cell_mapping) as cctxt:
            instance = fake_instance.fake_instance_obj(cctxt)
            if 'id' in instance:
                delattr(instance, 'id')
            instance.create()
        return instance

    def test_fixture(self):
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. First list servers across all cells while they are "up"
        # to make sure that works as expected; a single instance lives in
        # cell1.
        ctxt = context.get_admin_context()
        cell1 = self.cell_mappings[test.CELL1_NAME]
        self._create_test_instance(ctxt, cell1)

        # List all instances from all cells: one entry per cell, with the
        # lone instance showing up in cell1 and nothing in cell0.
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
        self.assertEqual(1, len(results[cell1.uuid]))

        # Repeat with the DownCellFixture active: every cell should now
        # yield an exception result.
        with fixtures.DownCellFixture():
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for result in results.values():
                self.assertIsInstance(result, db_exc.DBError)

    def test_fixture_when_explicitly_passing_down_cell_mappings(self):
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. Create one instance per cell and pass cell0 as the down
        # cell; expect db_exc.DBError for cell0 and a correct InstanceList
        # from cell1.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        self._create_test_instance(ctxt, cell0)
        inst_in_up_cell = self._create_test_instance(ctxt, cell1)

        with fixtures.DownCellFixture([cell0]):
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
            self.assertEqual(2, len(results))
            for cell_uuid, result in results.items():
                if cell_uuid == cell0.uuid:
                    # The down cell reports an error...
                    self.assertIsInstance(result, db_exc.DBError)
                else:
                    # ...while the up cell returns its real instance.
                    self.assertIsInstance(result, objects.InstanceList)
                    self.assertEqual(1, len(result))
                    self.assertEqual(inst_in_up_cell.uuid, result[0].uuid)