Cleanup mapping/reqspec after archive instance

This patch deletes the records of archived instances from the
instance_mappings and request_specs tables in the API database
immediately after those instances are archived from the instances
table to the shadow_instances table. Upon running the
'nova-manage db archive_deleted_rows' command, the records of the
archived instances are therefore removed from the instance_mappings
and request_specs tables automatically. A warning has also been added
so that 'nova-manage cell_v2 verify_instance' no longer reports a
valid instance mapping for an instance that has already been deleted.

The patch also adds InstanceMappingList.destroy_bulk() and
RequestSpec.destroy_bulk() methods to make bulk deletion of these
records easier.

Change-Id: I483701a55576c245d091ff086b32081b392f746e
Closes-Bug: #1724621
Closes-Bug: #1678056
(cherry picked from commit 32fd58813f)
Authored by Surya Seetharaman on 2017-10-25 13:43:43 +02:00; committed by Matt Riedemann
parent 966a5a2154
commit 70de423255
10 changed files with 223 additions and 31 deletions
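
Taken together, the changed pieces compose as in the following minimal sketch (illustrative only, assuming a configured nova deployment in which both the cell database and the API database are reachable; the names are the ones this patch introduces):

from nova import context
from nova import db
from nova import objects

ctxt = context.get_admin_context()
# db.archive_deleted_rows() now returns a 2-tuple: the per-table counts of
# archived rows, plus the uuids of the instances that were just archived.
table_to_rows_archived, deleted_instance_uuids = db.archive_deleted_rows(
    max_rows=1000)
if deleted_instance_uuids:
    # The new bulk-destroy methods remove the now-orphaned API database
    # records and return the number of rows they deleted.
    objects.InstanceMappingList.destroy_bulk(ctxt, deleted_instance_uuids)
    objects.RequestSpec.destroy_bulk(ctxt, deleted_instance_uuids)

This is essentially what each iteration of the archiving loop in 'nova-manage db archive_deleted_rows' now does.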


@@ -154,10 +154,12 @@ Nova Cells v2
   the cells v2 environment is properly setup, specifically in terms of the
   cell, host, and instance mapping records required. Returns 0 when the
   instance is successfully mapped to a cell, 1 if the instance is not
-  mapped to a cell (see the ``map_instances`` command), and 2 if the cell
+  mapped to a cell (see the ``map_instances`` command), 2 if the cell
   mapping is missing (see the ``map_cell_and_hosts`` command if you are
   upgrading from a cells v1 environment, and the ``simple_cell_setup`` if
-  you are upgrading from a non-cells v1 environment).
+  you are upgrading from a non-cells v1 environment), 3 if it is a deleted
+  instance which has instance mapping, and 4 if it is an archived instance
+  which still has an instance mapping.
 ``nova-manage cell_v2 create_cell [--name <cell_name>] [--transport-url <transport_url>] [--database_connection <database_connection>] [--verbose]``
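
Scripts driving this command can branch on the exit code. A hedged wrapper might look like the following (Python 3.5+ for subprocess.run; it assumes nova-manage is on PATH, and the helper name is ours, not nova's):

import subprocess

VERIFY_INSTANCE_CODES = {
    0: 'mapped to a cell',
    1: 'not mapped to a cell (run map_instances)',
    2: 'cell mapping missing (map_cell_and_hosts / simple_cell_setup)',
    3: 'deleted instance that still has an instance mapping',
    4: 'archived instance that still has an instance mapping',
}


def verify_instance(uuid):
    # nova-manage reports the result through its exit code.
    proc = subprocess.run(
        ['nova-manage', 'cell_v2', 'verify_instance', '--uuid', uuid])
    return VERIFY_INSTANCE_CODES.get(
        proc.returncode, 'unexpected exit code %d' % proc.returncode)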


@@ -745,11 +745,12 @@ Error: %s""") % six.text_type(e))
             return 2
         table_to_rows_archived = {}
+        deleted_instance_uuids = []
         if until_complete and verbose:
             sys.stdout.write(_('Archiving') + '..')  # noqa
         while True:
             try:
-                run = db.archive_deleted_rows(max_rows)
+                run, deleted_instance_uuids = db.archive_deleted_rows(max_rows)
             except KeyboardInterrupt:
                 run = {}
                 if until_complete and verbose:
@@ -758,6 +759,16 @@ Error: %s""") % six.text_type(e))
             for k, v in run.items():
                 table_to_rows_archived.setdefault(k, 0)
                 table_to_rows_archived[k] += v
+            if deleted_instance_uuids:
+                table_to_rows_archived.setdefault('instance_mappings', 0)
+                table_to_rows_archived.setdefault('request_specs', 0)
+                ctxt = context.get_admin_context()
+                deleted_mappings = objects.InstanceMappingList.destroy_bulk(
+                    ctxt, deleted_instance_uuids)
+                table_to_rows_archived['instance_mappings'] += deleted_mappings
+                deleted_specs = objects.RequestSpec.destroy_bulk(
+                    ctxt, deleted_instance_uuids)
+                table_to_rows_archived['request_specs'] += deleted_specs
             if not until_complete:
                 break
             elif not run:
@@ -1537,14 +1548,15 @@ class CellV2Commands(object):
         This prints one of three strings (and exits with a code) indicating
         whether the instance is successfully mapped to a cell (0), is unmapped
-        due to an incomplete upgrade (1), or unmapped due to normally transient
-        state (2).
+        due to an incomplete upgrade (1), unmapped due to normally transient
+        state (2), it is a deleted instance which has instance mapping (3),
+        or it is an archived instance which still has an instance mapping (4).
         """
         def say(string):
             if not quiet:
                 print(string)
 
-        ctxt = context.RequestContext()
+        ctxt = context.get_admin_context()
         try:
             mapping = objects.InstanceMapping.get_by_instance_uuid(
                 ctxt, uuid)
@@ -1557,6 +1569,28 @@ class CellV2Commands(object):
             say('Instance %s is not mapped to a cell' % uuid)
             return 2
         else:
+            with context.target_cell(ctxt, mapping.cell_mapping) as cctxt:
+                try:
+                    instance = objects.Instance.get_by_uuid(cctxt, uuid)
+                except exception.InstanceNotFound:
+                    try:
+                        el_ctx = cctxt.elevated(read_deleted='yes')
+                        instance = objects.Instance.get_by_uuid(el_ctx, uuid)
+                        # instance is deleted
+                        if instance:
+                            say('The instance with uuid %s has been deleted.'
+                                % uuid)
+                            say('Execute `nova-manage db archive_deleted_rows`'
+                                'command to archive this deleted instance and'
+                                'remove its instance_mapping.')
+                            return 3
+                    except exception.InstanceNotFound:
+                        # instance is archived
+                        say('The instance with uuid %s has been archived.'
+                            % uuid)
+                        say('However its instance_mapping remains.')
+                        return 4
+            # instance is alive and mapped to a cell
             say('Instance %s is in cell: %s (%s)' % (
                 uuid,
                 mapping.cell_mapping.name,
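
The lookup cascade added here distinguishes the three outcomes solely through Instance.get_by_uuid under different contexts. A condensed restatement (an illustrative helper, not part of the patch; cctxt is assumed to be a context already targeted at the instance's cell via context.target_cell):

from nova import exception
from nova import objects


def classify_instance(cctxt, uuid):
    try:
        objects.Instance.get_by_uuid(cctxt, uuid)
        return 'alive'  # the exit-code-0 path
    except exception.InstanceNotFound:
        try:
            # read_deleted='yes' also sees soft-deleted rows that are still
            # in the instances table, i.e. deleted but not yet archived.
            objects.Instance.get_by_uuid(
                cctxt.elevated(read_deleted='yes'), uuid)
            return 'deleted'  # the exit-code-3 path
        except exception.InstanceNotFound:
            # the row has been moved to shadow_instances by archiving
            return 'archived'  # the exit-code-4 path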


@@ -6404,11 +6404,12 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
     shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
     rows_archived = 0
+    deleted_instance_uuids = []
     try:
         shadow_table = Table(shadow_tablename, metadata, autoload=True)
     except NoSuchTableError:
         # No corresponding shadow table; skip it.
-        return rows_archived
+        return rows_archived, deleted_instance_uuids
 
     if tablename == "dns_domains":
         # We have one table (dns_domains) where the key is called
@@ -6462,10 +6463,24 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
             order_by(column).limit(max_rows)
 
     delete_statement = DeleteFromSelect(table, query_delete, column)
+
+    # NOTE(tssurya): In order to facilitate the deletion of records from
+    # instance_mappings table in the nova_api DB, the rows of deleted instances
+    # from the instances table are stored prior to their deletion from
+    # the instances table. Basically the uuids of the archived instances
+    # are queried and returned.
+    if tablename == "instances":
+        query_delete = query_delete.column(table.c.uuid)
+        rows = conn.execute(query_delete).fetchall()
+        deleted_instance_uuids = [r[1] for r in rows]
+
     try:
         # Group the insert and delete in a transaction.
         with conn.begin():
             conn.execute(insert)
+            if tablename == "instances":
+                delete_statement = table.delete().where(table.c.uuid.in_(
+                    deleted_instance_uuids))
             result_delete = conn.execute(delete_statement)
             rows_archived = result_delete.rowcount
     except db_exc.DBReferenceError as ex:
@@ -6484,7 +6499,7 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
                 conn, limit)
             rows_archived += extra
 
-    return rows_archived
+    return rows_archived, deleted_instance_uuids
 
 
 def archive_deleted_rows(max_rows=None):
@@ -6504,26 +6519,31 @@ def archive_deleted_rows(max_rows=None):
     """
     table_to_rows_archived = {}
+    deleted_instance_uuids = []
     total_rows_archived = 0
    meta = MetaData(get_engine(use_slave=True))
     meta.reflect()
     # Reverse sort the tables so we get the leaf nodes first for processing.
     for table in reversed(meta.sorted_tables):
         tablename = table.name
+        rows_archived = 0
         # skip the special sqlalchemy-migrate migrate_version table and any
         # shadow tables
         if (tablename == 'migrate_version' or
                 tablename.startswith(_SHADOW_TABLE_PREFIX)):
             continue
-        rows_archived = _archive_deleted_rows_for_table(
-            tablename, max_rows=max_rows - total_rows_archived)
+        rows_archived,\
+            deleted_instance_uuid = _archive_deleted_rows_for_table(
+                tablename, max_rows=max_rows - total_rows_archived)
         total_rows_archived += rows_archived
+        if tablename == 'instances':
+            deleted_instance_uuids = deleted_instance_uuid
         # Only report results for tables that had updates.
         if rows_archived:
             table_to_rows_archived[tablename] = rows_archived
         if total_rows_archived >= max_rows:
             break
-    return table_to_rows_archived
+    return table_to_rows_archived, deleted_instance_uuids
 
 
 @pick_context_manager_writer
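
The NOTE(tssurya) block above is the heart of the DB-layer change: the uuids have to be read out while the soft-deleted rows still exist in the instances table, and the delete is then keyed on exactly those uuids inside the same transaction as the shadow-table insert. A self-contained toy version of that pattern (plain SQLAlchemy 1.x on in-memory SQLite; the two tables stand in for nova's real schema):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
instances = sa.Table(
    'instances', meta,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('uuid', sa.String(36)),
    sa.Column('deleted', sa.Integer, default=0))
shadow_instances = sa.Table(
    'shadow_instances', meta,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('uuid', sa.String(36)),
    sa.Column('deleted', sa.Integer))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(instances.insert(), [
        {'uuid': 'u1', 'deleted': 1},   # soft-deleted: will be archived
        {'uuid': 'u2', 'deleted': 0}])  # live: left alone

with engine.begin() as conn:
    # Capture (id, uuid) of the rows about to be archived; r[1] is the
    # uuid, mirroring the diff above.
    rows = conn.execute(
        sa.select([instances.c.id, instances.c.uuid])
        .where(instances.c.deleted != 0)).fetchall()
    deleted_instance_uuids = [r[1] for r in rows]
    # Copy the soft-deleted rows into the shadow table and, in the same
    # transaction, delete them from the main table keyed on those uuids.
    conn.execute(shadow_instances.insert().from_select(
        ['id', 'uuid', 'deleted'],
        sa.select([instances.c.id, instances.c.uuid, instances.c.deleted])
        .where(instances.c.deleted != 0)))
    conn.execute(instances.delete().where(
        instances.c.uuid.in_(deleted_instance_uuids)))

print(deleted_instance_uuids)  # ['u1'], which destroy_bulk() is then fed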


@@ -184,3 +184,14 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
         db_mappings = cls._get_by_instance_uuids_from_db(context, uuids)
         return base.obj_make_list(context, cls(), objects.InstanceMapping,
                                   db_mappings)
+
+    @staticmethod
+    @db_api.api_context_manager.writer
+    def _destroy_bulk_in_db(context, instance_uuids):
+        return context.session.query(api_models.InstanceMapping).filter(
+            api_models.InstanceMapping.instance_uuid.in_(instance_uuids)).\
+            delete(synchronize_session=False)
+
+    @classmethod
+    def destroy_bulk(cls, context, instance_uuids):
+        return cls._destroy_bulk_in_db(context, instance_uuids)


@@ -556,6 +556,17 @@ class RequestSpec(base.NovaObject):
     def destroy(self):
         self._destroy_in_db(self._context, self.instance_uuid)
 
+    @staticmethod
+    @db.api_context_manager.writer
+    def _destroy_bulk_in_db(context, instance_uuids):
+        return context.session.query(api_models.RequestSpec).filter(
+            api_models.RequestSpec.instance_uuid.in_(instance_uuids)).\
+            delete(synchronize_session=False)
+
+    @classmethod
+    def destroy_bulk(cls, context, instance_uuids):
+        return cls._destroy_bulk_in_db(context, instance_uuids)
+
     def reset_forced_destinations(self):
         """Clears the forced destination fields from the RequestSpec object.

@@ -91,7 +91,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
         self.assertTrue(len(instance.system_metadata),
                         'No system_metadata for instance: %s' % server_id)
         # Now try and archive the soft deleted records.
-        results = db.archive_deleted_rows(max_rows=100)
+        results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
         # verify system_metadata was dropped
         self.assertIn('instance_system_metadata', results)
         self.assertEqual(len(instance.system_metadata),
@@ -133,7 +133,7 @@ class TestDatabaseArchive(test_servers.ServersTestBase):
         self.assertTrue(len(instance.system_metadata),
                         'No system_metadata for instance: %s' % server_id)
         # Now try and archive the soft deleted records.
-        results = db.archive_deleted_rows(max_rows=100)
+        results, deleted_instance_uuids = db.archive_deleted_rows(max_rows=100)
         # verify system_metadata was dropped
         self.assertIn('instance_system_metadata', results)
         self.assertEqual(len(instance.system_metadata),


@@ -9207,7 +9207,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # Archive 2 rows
         results = db.archive_deleted_rows(max_rows=2)
         expected = dict(instance_id_mappings=2)
-        self._assertEqualObjects(expected, results)
+        self._assertEqualObjects(expected, results[0])
         rows = self.conn.execute(qiim).fetchall()
         # Verify we have 4 left in main
         self.assertEqual(len(rows), 4)
@@ -9217,7 +9217,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # Archive 2 more rows
         results = db.archive_deleted_rows(max_rows=2)
         expected = dict(instance_id_mappings=2)
-        self._assertEqualObjects(expected, results)
+        self._assertEqualObjects(expected, results[0])
         rows = self.conn.execute(qiim).fetchall()
         # Verify we have 2 left in main
         self.assertEqual(len(rows), 2)
@@ -9227,7 +9227,7 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # Try to archive more, but there are no deleted rows left.
         results = db.archive_deleted_rows(max_rows=2)
         expected = dict()
-        self._assertEqualObjects(expected, results)
+        self._assertEqualObjects(expected, results[0])
         rows = self.conn.execute(qiim).fetchall()
         # Verify we still have 2 left in main
         self.assertEqual(len(rows), 2)
@@ -9364,15 +9364,15 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # The first try to archive console_pools should fail, due to FK.
         num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
                                                              max_rows=None)
-        self.assertEqual(num, 0)
+        self.assertEqual(num[0], 0)
         # Then archiving consoles should work.
         num = sqlalchemy_api._archive_deleted_rows_for_table("consoles",
                                                              max_rows=None)
-        self.assertEqual(num, 1)
+        self.assertEqual(num[0], 1)
         # Then archiving console_pools should work.
         num = sqlalchemy_api._archive_deleted_rows_for_table("console_pools",
                                                              max_rows=None)
-        self.assertEqual(num, 1)
+        self.assertEqual(num[0], 1)
         self._assert_shadow_tables_empty_except(
             'shadow_console_pools',
             'shadow_consoles'
@@ -9391,15 +9391,15 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
         # The first try to archive instances should fail, due to FK.
         num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
                                                              max_rows=None)
-        self.assertEqual(0, num)
+        self.assertEqual(0, num[0])
         # Then archiving migrations should work.
         num = sqlalchemy_api._archive_deleted_rows_for_table("migrations",
                                                              max_rows=None)
-        self.assertEqual(1, num)
+        self.assertEqual(1, num[0])
         # Then archiving instances should work.
         num = sqlalchemy_api._archive_deleted_rows_for_table("instances",
                                                              max_rows=None)
-        self.assertEqual(1, num)
+        self.assertEqual(1, num[0])
         self._assert_shadow_tables_empty_except(
             'shadow_instances',
             'shadow_migrations'


@@ -168,6 +168,20 @@ class _TestInstanceMappingListObject(object):
             comparators={
                 'cell_mapping': self._check_cell_map_value})
 
+    @mock.patch.object(instance_mapping.InstanceMappingList,
+                       '_destroy_bulk_in_db')
+    def test_destroy_bulk(self, destroy_bulk_in_db):
+        uuids_to_be_deleted = []
+        for i in range(0, 5):
+            uuid = uuidutils.generate_uuid()
+            uuids_to_be_deleted.append(uuid)
+        destroy_bulk_in_db.return_value = 5
+        result = objects.InstanceMappingList.destroy_bulk(self.context,
+                                                          uuids_to_be_deleted)
+        destroy_bulk_in_db.assert_called_once_with(self.context,
+                                                   uuids_to_be_deleted)
+        self.assertEqual(5, result)
+
 
 class TestInstanceMappingListObject(test_objects._LocalTest,
                                     _TestInstanceMappingListObject):


@@ -584,6 +584,19 @@ class _TestRequestSpecObject(object):
         destroy_in_db.assert_called_once_with(req_obj._context,
                                               req_obj.instance_uuid)
 
+    @mock.patch.object(request_spec.RequestSpec, '_destroy_bulk_in_db')
+    def test_destroy_bulk(self, destroy_bulk_in_db):
+        uuids_to_be_deleted = []
+        for i in range(0, 5):
+            uuid = uuidutils.generate_uuid()
+            uuids_to_be_deleted.append(uuid)
+        destroy_bulk_in_db.return_value = 5
+        result = objects.RequestSpec.destroy_bulk(self.context,
+                                                  uuids_to_be_deleted)
+        destroy_bulk_in_db.assert_called_once_with(self.context,
+                                                   uuids_to_be_deleted)
+        self.assertEqual(5, result)
+
     def test_reset_forced_destinations(self):
         req_obj = fake_request_spec.fake_spec_obj()
         # Making sure the fake object has forced hosts and nodes


@@ -392,6 +392,8 @@ class ProjectCommandsTestCase(test.TestCase):
 
 class DBCommandsTestCase(test.NoDBTestCase):
+    USES_DB_SELF = True
+
     def setUp(self):
         super(DBCommandsTestCase, self).setUp()
         self.output = StringIO()
@@ -406,7 +408,7 @@ class DBCommandsTestCase(test.NoDBTestCase):
         self.assertEqual(2, self.commands.archive_deleted_rows(large_number))
 
     @mock.patch.object(db, 'archive_deleted_rows',
-                       return_value=dict(instances=10, consoles=5))
+                       return_value=(dict(instances=10, consoles=5), list()))
     def _test_archive_deleted_rows(self, mock_db_archive, verbose=False):
         result = self.commands.archive_deleted_rows(20, verbose=verbose)
         mock_db_archive.assert_called_once_with(20)
@@ -437,9 +439,9 @@ class DBCommandsTestCase(test.NoDBTestCase):
     def test_archive_deleted_rows_until_complete(self, mock_db_archive,
                                                  verbose=False):
         mock_db_archive.side_effect = [
-            {'instances': 10, 'instance_extra': 5},
-            {'instances': 5, 'instance_faults': 1},
-            {}]
+            ({'instances': 10, 'instance_extra': 5}, list()),
+            ({'instances': 5, 'instance_faults': 1}, list()),
+            ({}, list())]
         result = self.commands.archive_deleted_rows(20, verbose=verbose,
                                                     until_complete=True)
         self.assertEqual(1, result)
@@ -469,8 +471,8 @@ Archiving.....complete
     def test_archive_deleted_rows_until_stopped(self, mock_db_archive,
                                                 verbose=True):
         mock_db_archive.side_effect = [
-            {'instances': 10, 'instance_extra': 5},
-            {'instances': 5, 'instance_faults': 1},
+            ({'instances': 10, 'instance_extra': 5}, list()),
+            ({'instances': 5, 'instance_faults': 1}, list()),
             KeyboardInterrupt]
         result = self.commands.archive_deleted_rows(20, verbose=verbose,
                                                     until_complete=True)
@@ -497,7 +499,7 @@ Archiving.....stopped
     def test_archive_deleted_rows_until_stopped_quiet(self):
         self.test_archive_deleted_rows_until_stopped(verbose=False)
 
-    @mock.patch.object(db, 'archive_deleted_rows', return_value={})
+    @mock.patch.object(db, 'archive_deleted_rows', return_value=({}, []))
     def test_archive_deleted_rows_verbose_no_results(self, mock_db_archive):
         result = self.commands.archive_deleted_rows(20, verbose=True)
         mock_db_archive.assert_called_once_with(20)
@@ -505,6 +507,54 @@ Archiving.....stopped
         self.assertIn('Nothing was archived.', output)
         self.assertEqual(0, result)
 
+    @mock.patch.object(db, 'archive_deleted_rows')
+    @mock.patch.object(objects.RequestSpec, 'destroy_bulk')
+    def test_archive_deleted_rows_and_instance_mappings_and_request_specs(self,
+            mock_destroy, mock_db_archive, verbose=True):
+        self.useFixture(nova_fixtures.Database())
+        self.useFixture(nova_fixtures.Database(database='api'))
+        ctxt = context.RequestContext('fake-user', 'fake_project')
+        cell_uuid = uuidutils.generate_uuid()
+        cell_mapping = objects.CellMapping(context=ctxt,
+                                           uuid=cell_uuid,
+                                           database_connection='fake:///db',
+                                           transport_url='fake:///mq')
+        cell_mapping.create()
+        uuids = []
+        for i in range(2):
+            uuid = uuidutils.generate_uuid()
+            uuids.append(uuid)
+            objects.Instance(ctxt, project_id=ctxt.project_id, uuid=uuid)\
+                .create()
+            objects.InstanceMapping(ctxt, project_id=ctxt.project_id,
+                        cell_mapping=cell_mapping, instance_uuid=uuid)\
+                        .create()
+
+        mock_db_archive.return_value = (dict(instances=2, consoles=5), uuids)
+        mock_destroy.return_value = 2
+        result = self.commands.archive_deleted_rows(20, verbose=verbose)
+
+        self.assertEqual(1, result)
+        mock_db_archive.assert_called_once_with(20)
+        self.assertEqual(1, mock_destroy.call_count)
+
+        output = self.output.getvalue()
+        if verbose:
+            expected = '''\
++-------------------+-------------------------+
+| Table             | Number of Rows Archived |
++-------------------+-------------------------+
+| consoles          | 5                       |
+| instance_mappings | 2                       |
+| instances         | 2                       |
+| request_specs     | 2                       |
++-------------------+-------------------------+
+'''
+            self.assertEqual(expected, output)
+        else:
+            self.assertEqual(0, len(output))
+
     @mock.patch.object(migration, 'db_null_instance_uuid_scan',
                        return_value={'foo': 0})
     def test_null_instance_uuid_scan_no_records_found(self, mock_scan):
@@ -1278,10 +1328,14 @@ class CellV2CommandsTestCase(test.NoDBTestCase):
         self.assertEqual(2, r)
 
     @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
-    def test_instance_verify_has_all_mappings(self, mock_get):
+    @mock.patch('nova.objects.Instance.get_by_uuid')
+    @mock.patch.object(context, 'target_cell')
+    def test_instance_verify_has_all_mappings(self, mock_target_cell,
+                                              mock_get2, mock_get1):
         cm = objects.CellMapping(name='foo', uuid=uuidsentinel.cel)
         im = objects.InstanceMapping(cell_mapping=cm)
-        mock_get.return_value = im
+        mock_get1.return_value = im
+        mock_get2.return_value = None
         r = self.commands.verify_instance(uuidsentinel.instance)
         self.assertEqual(0, r)
@@ -1291,6 +1345,39 @@ class CellV2CommandsTestCase(test.NoDBTestCase):
         self.assertEqual(1, self.commands.verify_instance(uuidsentinel.foo,
                                                           quiet=True))
 
+    @mock.patch.object(context, 'target_cell')
+    def test_instance_verify_has_instance_mapping_but_no_instance(self,
+                                                        mock_target_cell):
+        ctxt = context.RequestContext('fake-user', 'fake_project')
+        cell_uuid = uuidutils.generate_uuid()
+        cell_mapping = objects.CellMapping(context=ctxt,
+                                           uuid=cell_uuid,
+                                           database_connection='fake:///db',
+                                           transport_url='fake:///mq')
+        cell_mapping.create()
+        mock_target_cell.return_value.__enter__.return_value = ctxt
+        uuid = uuidutils.generate_uuid()
+        objects.Instance(ctxt, project_id=ctxt.project_id, uuid=uuid).create()
+        objects.InstanceMapping(ctxt, project_id=ctxt.project_id,
+                    cell_mapping=cell_mapping, instance_uuid=uuid)\
+                    .create()
+        # a scenario where an instance is deleted, but not archived.
+        inst = objects.Instance.get_by_uuid(ctxt, uuid)
+        inst.destroy()
+        r = self.commands.verify_instance(uuid)
+        self.assertEqual(3, r)
+        self.assertIn('has been deleted', self.output.getvalue())
+        # a scenario where there is only the instance mapping but no instance
+        # like when an instance has been archived but the instance mapping
+        # was not deleted.
+        uuid = uuidutils.generate_uuid()
+        objects.InstanceMapping(ctxt, project_id=ctxt.project_id,
+                    cell_mapping=cell_mapping, instance_uuid=uuid)\
+                    .create()
+        r = self.commands.verify_instance(uuid)
+        self.assertEqual(4, r)
+        self.assertIn('has been archived', self.output.getvalue())
+
     def _return_compute_nodes(self, ctxt, num=1):
         nodes = []
         for i in range(num):