Clean up RPC versions and database migrations for Rocky

Change-Id: I08f5db70cdbdae4b61b2384ffdc17f6533c57896
Dmitry Tantsur 2018-02-09 09:32:00 +01:00
parent a0e08d026b
commit 6b995c07ed
5 changed files with 0 additions and 227 deletions


@@ -62,9 +62,6 @@ dbapi = db_api.get_instance()
 # object, in case it is lazy loaded. The attribute will be accessed when needed
 # by doing getattr on the object
 ONLINE_MIGRATIONS = (
-    # Added in Pike, modified in Queens
-    # TODO(rloo): remove in Rocky
-    (dbapi, 'backfill_version_column'),
     # TODO(dtantsur): remove when classic drivers are removed (Rocky?)
     (dbapi, 'migrate_to_hardware_types'),
 )
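Aside, not part of this change: the (object, method-name) tuples above are resolved lazily with getattr, as the comment notes. A minimal sketch of how such a table can be driven (run_online_migrations and its output format are illustrative, not the actual ironic-dbsync code):

def run_online_migrations(migrations, context, max_count):
    """Run each (obj, method-name) migration in batches of max_count rows."""
    all_done = True
    for obj, method_name in migrations:
        # The attribute is looked up only now, so a lazily loaded dbapi
        # object is fine at module import time.
        method = getattr(obj, method_name)
        total, migrated = method(context, max_count)
        print('%s: migrated %d of %d rows' % (method_name, migrated, total))
        if migrated < total:
            all_done = False  # the operator should run the command again
    return all_done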


@@ -55,45 +55,6 @@
 # oldest named release.
 RELEASE_MAPPING = {
-    '8.0': {
-        'api': '1.31',
-        'rpc': '1.40',
-        'objects': {
-            'Node': ['1.21'],
-            'Conductor': ['1.2'],
-            'Chassis': ['1.3'],
-            'Port': ['1.6'],
-            'Portgroup': ['1.3'],
-            'VolumeConnector': ['1.0'],
-            'VolumeTarget': ['1.0'],
-        }
-    },
-    '9.0': {
-        'api': '1.34',
-        'rpc': '1.41',
-        'objects': {
-            'Node': ['1.21'],
-            'Conductor': ['1.2'],
-            'Chassis': ['1.3'],
-            'Port': ['1.7'],
-            'Portgroup': ['1.3'],
-            'VolumeConnector': ['1.0'],
-            'VolumeTarget': ['1.0'],
-        }
-    },
-    '9.1': {
-        'api': '1.34',
-        'rpc': '1.41',
-        'objects': {
-            'Node': ['1.21'],
-            'Conductor': ['1.2'],
-            'Chassis': ['1.3'],
-            'Port': ['1.7'],
-            'Portgroup': ['1.3'],
-            'VolumeConnector': ['1.0'],
-            'VolumeTarget': ['1.0'],
-        }
-    },
     '9.2': {
         'rpc': '1.41',
         'api': '1.35',
@@ -164,7 +125,6 @@ RELEASE_MAPPING = {
 # release (that we are no longer supporting for a rolling upgrade).
 #
 # There should be at most two named mappings here.
-RELEASE_MAPPING['pike'] = RELEASE_MAPPING['9.1']
 RELEASE_MAPPING['queens'] = RELEASE_MAPPING['10.1']
 # List of available versions with named versions first; 'master' is excluded.
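Aside, not part of this change: a sketch of how RELEASE_MAPPING is typically consulted during a rolling upgrade (pinned_versions is illustrative; only the lookup pattern is taken from the code above):

def pinned_versions(release_mapping, pin):
    """Return the RPC and object versions to pin to for a release.

    'pin' may be a numbered key ('10.1') or a named alias ('queens');
    both resolve to the same dict object.
    """
    mapping = release_mapping[pin]
    return {
        'rpc': mapping['rpc'],
        # Assumption for illustration: pin each object to the first
        # version listed for that release.
        'objects': {name: versions[0]
                    for name, versions in mapping['objects'].items()},
    }

# e.g. pinned_versions(RELEASE_MAPPING, 'queens')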


@@ -899,31 +899,6 @@ class Connection(object):
             False otherwise.
         """
 
-    @abc.abstractmethod
-    def backfill_version_column(self, max_count):
-        """Backfill the Conductor version column with Pike version.
-
-        The version column was added to all the resource tables in the Pike
-        release (via 'ironic-dbsync upgrade'). After upgrading (from Ocata to
-        Pike), the 'ironic-dbsync online_data_migrations' command would have
-        populated (backfilled) the version column for all objects.
-
-        Unfortunately, in the Pike release, we forgot to set the value for the
-        conductor's version column. For the Queens release, we are setting
-        the conductor version, however, we still need to backfill in case new
-        conductors were added between the time the operator ran Pike's
-        'ironic-dbsync online_data_migrations' and their upgrade to Queens.
-
-        The version used will be the conductor object version from the Pike
-        release.
-
-        :param max_count: The maximum number of objects to migrate. Must be
-            >= 0. If zero, all the objects will be migrated.
-        :returns: A 2-tuple, 1. the total number of objects that need to be
-            migrated (at the beginning of this call) and 2. the number
-            of migrated objects.
-        """
-        # TODO(rloo) Delete this in Rocky cycle.
-
     @abc.abstractmethod
     def migrate_to_hardware_types(self, context, max_count,
                                   reset_unsupported_interfaces=False):
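Aside, not part of this change: the (total, migrated) 2-tuple documented here is what lets callers run migrations in bounded batches and decide when to stop. A hedged sketch of such a caller (migrate_until_done is illustrative, not ironic code):

def migrate_until_done(dbapi, context, batch_size=50):
    """Invoke one online migration repeatedly until nothing is left."""
    while True:
        total, migrated = dbapi.migrate_to_hardware_types(context, batch_size)
        if total == 0:
            return  # nothing left to migrate
        if migrated == 0:
            # Rows remain but none could be migrated this pass; stop
            # rather than loop forever, and let the operator investigate.
            raise RuntimeError('%d rows could not be migrated' % total)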


@@ -1207,98 +1207,6 @@ class Connection(api.Connection):
                 return False
         return True
 
-    @oslo_db_api.retry_on_deadlock
-    def backfill_version_column(self, context, max_count):
-        """Backfill the Conductor version column with Pike version.
-
-        The version column was added to all the resource tables in the Pike
-        release (via 'ironic-dbsync upgrade'). After upgrading (from Ocata to
-        Pike), the 'ironic-dbsync online_data_migrations' command would have
-        populated (backfilled) the version column for all objects.
-
-        Unfortunately, in the Pike release, we forgot to set the value for the
-        conductor's version column. For the Queens release, we are setting
-        the conductor version, however, we still need to backfill in case new
-        conductors were added between the time the operator ran Pike's
-        'ironic-dbsync online_data_migrations' and their upgrade to Queens.
-
-        The version used will be the conductor object version from the Pike
-        release.
-
-        :param context: the admin context (not used)
-        :param max_count: The maximum number of objects to migrate. Must be
-            >= 0. If zero, all the objects will be migrated.
-        :returns: A 2-tuple, 1. the total number of objects that need to be
-            migrated (at the beginning of this call) and 2. the number
-            of migrated objects.
-        """
-        # TODO(rloo): Delete this in Rocky cycle.
-        prior_release = 'pike'
-        mapping = release_mappings.RELEASE_MAPPING[prior_release]['objects']
-        total_to_migrate = 0
-        total_migrated = 0
-
-        # backfill only the Conductor.
-        # NOTE(rloo) This code was used in Pike to backfill all the objects.
-        # To make it easier to review, etc., we are re-using that code with
-        # minimal code changes to only backfill the 'Conductor' object.
-        sql_models = [models.Conductor]
-        for model in sql_models:
-            query = model_query(model).filter(model.version.is_(None))
-            total_to_migrate += query.count()
-
-        if not total_to_migrate:
-            return total_to_migrate, 0
-
-        # NOTE(xek): Each of these operations happen in different transactions.
-        # This is to ensure a minimal load on the database, but at the same
-        # time it can cause an inconsistency in the amount of total and
-        # migrated objects returned (total could be > migrated). This is
-        # because some objects may have already migrated or been deleted from
-        # the database between the time the total was computed (above) to the
-        # time we do the updating (below).
-        #
-        # By the time this script is run, only the new release version is
-        # running, so the impact of this error will be minimal - e.g. the
-        # operator will run this script more than once to ensure that all
-        # data have been migrated.
-
-        # If max_count is zero, we want to migrate all the objects.
-        max_to_migrate = max_count or total_to_migrate
-
-        for model in sql_models:
-            num_migrated = 0
-            with _session_for_write():
-                query = model_query(model).filter(model.version.is_(None))
-                # NOTE(rloo) Caution here; after doing query.count(), it is
-                #            possible that the value is different in the
-                #            next invocation of the query.
-                if max_to_migrate < query.count():
-                    # Only want to update max_to_migrate objects; cannot use
-                    # sql's limit(), so we generate a new query with
-                    # max_to_migrate objects.
-                    ids = []
-                    for obj in query.slice(0, max_to_migrate):
-                        ids.append(obj['id'])
-                    num_migrated = (
-                        model_query(model).
-                        filter(sql.and_(model.id.in_(ids),
-                                        model.version.is_(None))).
-                        update({model.version: mapping[model.__name__][0]},
-                               synchronize_session=False))
-                else:
-                    num_migrated = (
-                        model_query(model).
-                        filter(model.version.is_(None)).
-                        update({model.version: mapping[model.__name__][0]},
-                               synchronize_session=False))
-            total_migrated += num_migrated
-            max_to_migrate -= num_migrated
-            if max_to_migrate <= 0:
-                break
-
-        return total_to_migrate, total_migrated
-
     @oslo_db_api.retry_on_deadlock
     def migrate_to_hardware_types(self, context, max_count,
                                   reset_unsupported_interfaces=False):
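Aside, not part of this change: the IN(ids) construction above works around the lack of a portable UPDATE ... LIMIT in SQL. A self-contained sketch of the same pattern in SQLAlchemy 1.4+ Core (backfill_batch and the table layout are assumptions, not ironic code):

import sqlalchemy as sa

def backfill_batch(session, table, value, max_count):
    """Set table.c.version on at most max_count rows where it is NULL."""
    # Select the ids of up to max_count unmigrated rows first...
    ids = [row.id for row in session.execute(
        sa.select(table.c.id)
        .where(table.c.version.is_(None))
        .limit(max_count))]
    if not ids:
        return 0
    # ...then constrain the UPDATE with IN(ids). Re-checking IS NULL keeps
    # the update correct if another process migrated some rows in between.
    result = session.execute(
        sa.update(table)
        .where(sa.and_(table.c.id.in_(ids), table.c.version.is_(None)))
        .values(version=value))
    return result.rowcount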


@@ -74,73 +74,6 @@ class UpgradingTestCase(base.DbTestCase):
             self.assertFalse(self.dbapi.check_versions())
 
 
-class BackfillVersionTestCase(base.DbTestCase):
-
-    def setUp(self):
-        super(BackfillVersionTestCase, self).setUp()
-        self.context = context.get_admin_context()
-        self.dbapi = db_api.get_instance()
-        obj_mapping = release_mappings.RELEASE_MAPPING['pike']['objects']
-        self.conductor_ver = obj_mapping['Conductor'][0]
-
-    def test_empty_db(self):
-        self.assertEqual((0, 0),
-                         self.dbapi.backfill_version_column(self.context, 10))
-
-    def test_version_exists(self):
-        utils.create_test_conductor()
-        self.assertEqual((0, 0),
-                         self.dbapi.backfill_version_column(self.context, 10))
-
-    def test_one_conductor(self):
-        conductors = self._create_conductors(1)
-        self.assertEqual((1, 1),
-                         self.dbapi.backfill_version_column(self.context, 10))
-        res = self.dbapi.get_conductor(conductors[0])
-        self.assertEqual(self.conductor_ver, res.version)
-
-    def test_max_count_zero(self):
-        conductors = self._create_conductors(2)
-        self.assertEqual((2, 2),
-                         self.dbapi.backfill_version_column(self.context, 0))
-        for hostname in conductors:
-            conductor = self.dbapi.get_conductor(hostname)
-            self.assertEqual(self.conductor_ver, conductor.version)
-
-    def _create_conductors(self, num, version=None):
-        conductors = []
-        for i in range(0, num):
-            conductor = utils.create_test_conductor(
-                version=version,
-                hostname='test_name_%d' % i,
-                uuid=uuidutils.generate_uuid())
-            conductors.append(conductor.hostname)
-        for hostname in conductors:
-            conductor = self.dbapi.get_conductor(hostname)
-            self.assertEqual(version, conductor.version)
-        return conductors
-
-    def test_no_version_max_count_2_some_conductors(self):
-        conductors = self._create_conductors(5)
-
-        self.assertEqual((5, 2),
-                         self.dbapi.backfill_version_column(self.context, 2))
-        self.assertEqual((3, 3),
-                         self.dbapi.backfill_version_column(self.context, 10))
-        for hostname in conductors:
-            conductor = self.dbapi.get_conductor(hostname)
-            self.assertEqual(self.conductor_ver, conductor.version)
-
-    def test_no_version_max_count_same(self):
-        conductors = self._create_conductors(5)
-
-        self.assertEqual((5, 5),
-                         self.dbapi.backfill_version_column(self.context, 5))
-        for hostname in conductors:
-            conductor = self.dbapi.get_conductor(hostname)
-            self.assertEqual(self.conductor_ver, conductor.version)
-
-
 @mock.patch.object(driver_factory, 'calculate_migration_delta', autospec=True)
 @mock.patch.object(driver_factory, 'classic_drivers_to_migrate', autospec=True)
 class MigrateToHardwareTypesTestCase(base.DbTestCase):