Fix DBMS storage plugin

In previous patches (I469b4b9dd9966be8477f3cefbd6284bdaf17ca17 and
I4981ad558195895341c91fa37803713c8c0ab684) we assumed that unit tests
were failing because they needed to be updated, but that's only half of
the issue.  The other half is that the DBMS plugin needs to be updated
as well.

This misunderstanding led to a broken DBMS system that the unit tests
don't detect.

This patch fixes the DBMS system, reverts the unnecessary unit test
changes that those 2 patches introduced, and removes the FIXME comments
that are now resolved.

Here's a brief description of why some of the issues were happening:

- Locked database issues happened when a unit test from
  TestBasePersistence ran before a DBMS unit test.  They were caused by
  replacing the main_context_manager even when it had not been used.
  This is no longer a problem because we no longer replace the context
  manager itself; we only replace its transaction factory, and only
  when it has actually been used.

- "ValueError: Backend named fake_backend already exists with a
  different configuration" error was caused because in TestDBPersistence
  the tearDown method only called the parent's tearDown after the
  cleanup of the DB, so if the cleanup of the DB failed then the global
  existing backend would remain and a following test would have trouble
  with it.  Calling it first now to fix this.

- Splitting the unit test runs in tox.ini was necessary because methods
  in the Cinder DB API were always using the context manager
  transaction factory from the first DBMS test case class, so some DBMS
  plugin methods ended up writing data to the previous database while
  others wrote to the new one.  Now that we replace the transaction
  factory, everything goes to the same DB (see the sketch below).
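
For illustration, the transaction factory reset that the test helper
now performs (shown in full in the TestHelper.tearDownClass hunk below)
boils down to the following sketch.  The "api" argument is assumed to
be cinder.db.sqlalchemy.api, as used by the test helper; the function
name is made up for the example:

    def reset_db_transactions(api):
        # Don't swap the context manager itself: Cinder's DB API
        # methods are already decorated with it and would keep
        # pointing at the old database.
        cm = api.main_context_manager
        if cm.is_started:
            # Reset only the internal transaction factory.
            cm._root_factory = api.enginefacade._TransactionFactory()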

Change-Id: I74ce945291697bc1fcc63a35d60ef71494fdaf19
Gorka Eguileor 2023-01-13 12:47:45 +01:00
parent 436bc6d74b
commit a123ff5f82
6 changed files with 55 additions and 135 deletions

View File

@ -34,6 +34,31 @@ from cinderlib.persistence import base as persistence_base
LOG = log.getLogger(__name__)
def db_writer(func):
"""Decorator to start a DB writing transaction.
With the new Oslo DB Transaction Sessions everything needs to use the
sessions of the enginefacade using the function decorator or the context
manager approach: https://docs.openstack.org/oslo.db/ocata/usage.html
This plugin cannot use the decorator form because its functions don't
receive a Context object that the decorator can find and use, so we use
this decorator instead.
Cinder DB API methods already have a decorator, so methods calling them
don't require this decorator, but methods that directly call the DB using
sqlalchemy or using the model_query method do.
Using this decorator at this level also allows us to enclose everything in
a single transaction, and it doesn't have any problems with the existing
Cinder decorators.
"""
def wrapper(*args, **kwargs):
with sqla_api.main_context_manager.writer.using(objects.CONTEXT):
return func(*args, **kwargs)
return wrapper
class KeyValue(models.BASE, oslo_db_models.ModelBase, objects.KeyValue):
__tablename__ = 'cinderlib_persistence_key_value'
key = sa.Column(sa.String(255), primary_key=True)
@ -179,8 +204,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
for ovo in ovos.objects]
return result
def _get_kv(self, key=None, session=None):
session = objects.CONTEXT.session
def _get_kv(self, session, key=None):
query = session.query(KeyValue)
if key is not None:
query = query.filter_by(key=key)
@ -191,8 +215,10 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
return [objects.KeyValue(r.key, r.value) for r in res]
def get_key_values(self, key=None):
return self._get_kv(key)
with sqla_api.main_context_manager.reader.using(objects.CONTEXT) as s:
return self._get_kv(s, key)
@db_writer
def set_volume(self, volume):
changed = self.get_changed_fields(volume)
if not changed:
@ -255,6 +281,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
self.db.volume_update(objects.CONTEXT, volume.id, changed)
super(DBPersistence, self).set_volume(volume)
@db_writer
def set_snapshot(self, snapshot):
changed = self.get_changed_fields(snapshot)
if not changed:
@ -274,6 +301,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
self.db.snapshot_update(objects.CONTEXT, snapshot.id, changed)
super(DBPersistence, self).set_snapshot(snapshot)
@db_writer
def set_connection(self, connection):
changed = self.get_changed_fields(connection)
if not changed:
@ -300,13 +328,15 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
changed)
super(DBPersistence, self).set_connection(connection)
@db_writer
def set_key_value(self, key_value):
session = objects.CONTEXT.session
kv = self._get_kv(key_value.key, session)
kv = self._get_kv(session, key_value.key)
kv = kv[0] if kv else KeyValue(key=key_value.key)
kv.value = key_value.value
session.add(kv)
@db_writer
def delete_volume(self, volume):
delete_type = (volume.volume_type_id != self.DEFAULT_TYPE.id
and volume.volume_type_id)
@ -349,6 +379,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
)).delete()
super(DBPersistence, self).delete_volume(volume)
@db_writer
def delete_snapshot(self, snapshot):
if self.soft_deletes:
LOG.debug('soft deleting snapshot %s', snapshot.id)
@ -359,6 +390,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
query.filter_by(id=snapshot.id).delete()
super(DBPersistence, self).delete_snapshot(snapshot)
@db_writer
def delete_connection(self, connection):
if self.soft_deletes:
LOG.debug('soft deleting connection %s', connection.id)
@ -370,6 +402,7 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
query.filter_by(id=connection.id).delete()
super(DBPersistence, self).delete_connection(connection)
@db_writer
def delete_key_value(self, key_value):
session = objects.CONTEXT.session
query = session.query(KeyValue)
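
The db_writer docstring above refers to the oslo.db enginefacade usage
pattern.  As a self-contained illustration of that pattern (this is not
cinderlib code; the in-memory "sqlite://" connection and every name
here are made up for the example), a decorator along the lines of
db_writer can be sketched like this:

    import sqlalchemy as sa
    from oslo_db.sqlalchemy import enginefacade

    context_manager = enginefacade.transaction_context()
    context_manager.configure(connection='sqlite://')

    @enginefacade.transaction_context_provider
    class _Context(object):
        """Minimal context object enginefacade can attach a session to."""

    CONTEXT = _Context()

    def db_writer(func):
        # Open a writer transaction around the call because the wrapped
        # function has no context argument that enginefacade's own
        # decorators could find.
        def wrapper(*args, **kwargs):
            with context_manager.writer.using(CONTEXT):
                return func(*args, **kwargs)
        return wrapper

    @db_writer
    def touch_db():
        # Inside the transaction, CONTEXT.session is a live SQLAlchemy
        # session bound to the writer engine.
        return CONTEXT.session.execute(sa.text('SELECT 1')).scalar()

    print(touch_db())  # prints 1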

View File

@ -31,7 +31,7 @@ class BaseTest(unittest.TestCase):
if not self.PERSISTENCE_CFG:
cfg = {'storage': utils.get_mock_persistence()}
cinderlib.Backend.set_persistence(cfg)
self.backend_name = 'fake_backend_%s' % id(self)
self.backend_name = 'fake_backend'
self.backend = utils.FakeBackend(volume_backend_name=self.backend_name)
self.persistence = self.backend.persistence
cinderlib.Backend._volumes_inflight = {}

View File

@ -27,8 +27,6 @@ class BasePersistenceTest(helper.TestHelper):
def setUp(self):
super(BasePersistenceTest, self).setUp()
self.backend_name = 'fake_backend'
self.backend = utils.FakeBackend(volume_backend_name=self.backend_name)
def assertListEqualObj(self, expected, actual):
exp = [self._convert_to_dict(e) for e in expected]

View File

@ -45,7 +45,15 @@ class TestHelper(base.BaseTest):
def tearDownClass(cls):
volume_cmd.session.IMPL = cls.original_impl
cinderlib.Backend.global_initialization = False
api.main_context_manager = api.enginefacade.transaction_context()
# Cannot just replace the context manager itself because it is already
# decorating cinder DB methods and those would continue accessing the
# old database, so we replace the existing CM's internal transaction
# factory, effectively "resetting" the context manager.
cm = api.main_context_manager
if cm.is_started:
cm._root_factory = api.enginefacade._TransactionFactory()
for ovo_name, methods in cls.ovo_methods.items():
ovo_cls = getattr(objects, ovo_name)
for method_name, method in methods.items():
@ -69,12 +77,7 @@ class TestHelper(base.BaseTest):
d.setdefault('backend_or_vol', self.backend)
vol = cinderlib.Volume(**d)
vols.append(vol)
# db_instance is a property of DBMS plugin
if hasattr(self.persistence, 'db_instance'):
with api.main_context_manager.writer.using(self.context):
self.persistence.set_volume(vol)
else:
self.persistence.set_volume(vol)
self.persistence.set_volume(vol)
if sort:
return self.sorted(vols)
return vols
@ -103,12 +106,7 @@ class TestHelper(base.BaseTest):
for i in range(2):
kv = cinderlib.KeyValue(key='key%i' % i, value='value%i' % i)
kvs.append(kv)
# db_instance is a property of DBMS plugin
if hasattr(self.persistence, 'db_instance'):
with api.main_context_manager.writer.using(self.context):
self.persistence.set_key_value(kv)
else:
self.persistence.set_key_value(kv)
self.persistence.set_key_value(kv)
return kvs
def _convert_to_dict(self, obj):

View File

@ -32,15 +32,13 @@ class TestDBPersistence(base.BasePersistenceTest):
'connection': CONNECTION}
def tearDown(self):
super(TestDBPersistence, self).tearDown()
with sqla_api.main_context_manager.writer.using(self.context):
sqla_api.model_query(self.context, sqla_models.Snapshot).delete()
sqla_api.model_query(self.context,
sqla_models.VolumeAttachment).delete()
sqla_api.model_query(self.context, sqla_models.Volume).delete()
self.context.session.query(dbms.KeyValue).delete()
# FIXME: should this be inside or outside the context manager?
# Doesn't seem to matter for our current tests.
super(TestDBPersistence, self).tearDown()
def test_db(self):
self.assertIsInstance(self.persistence.db,
@ -106,8 +104,7 @@ class TestDBPersistence(base.BasePersistenceTest):
self.assertListEqual([], res)
expected = [dbms.KeyValue(key='key', value='value')]
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.set_key_value(expected[0])
self.persistence.set_key_value(expected[0])
with sqla_api.main_context_manager.reader.using(self.context):
actual = self.context.session.query(dbms.KeyValue).all()
@ -128,120 +125,21 @@ class TestDBPersistence(base.BasePersistenceTest):
self.assertEqual('__DEFAULT__', self.persistence.DEFAULT_TYPE.name)
def test_delete_volume_with_metadata(self):
# FIXME: figure out why this sometimes (often enough to skip)
# raises sqlite3.OperationalError: database is locked
if not isinstance(self, TestMemoryDBPersistence):
self.skipTest("FIXME!")
vols = self.create_volumes([{'size': i, 'name': 'disk%s' % i,
'metadata': {'k': 'v', 'k2': 'v2'},
'admin_metadata': {'k': '1'}}
for i in range(1, 3)])
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_volume(vols[0])
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_volumes()
self.persistence.delete_volume(vols[0])
res = self.persistence.get_volumes()
self.assertListEqualObj([vols[1]], res)
for model in (dbms.models.VolumeMetadata,
dbms.models.VolumeAdminMetadata):
with sqla_api.main_context_manager.reader.using(self.context):
query = dbms.sqla_api.model_query(self.context, model)
res = query.filter_by(volume_id=vols[0].id).all()
res = query.filter_by(volume_id=vols[0].id).all()
self.assertEqual([], res)
def test_delete_volume(self):
vols = self.create_n_volumes(2)
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_volume(vols[0])
res = self.persistence.get_volumes()
self.assertListEqualObj([vols[1]], res)
def test_delete_volume_not_found(self):
vols = self.create_n_volumes(2)
fake_vol = cinderlib.Volume(backend_or_vol=self.backend)
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_volume(fake_vol)
res = self.persistence.get_volumes()
self.assertListEqualObj(vols, self.sorted(res))
def test_get_snapshots_by_multiple_not_found(self):
with sqla_api.main_context_manager.writer.using(self.context):
snaps = self.create_snapshots()
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_snapshots(snapshot_name=snaps[1].name,
volume_id=snaps[0].volume.id)
self.assertListEqualObj([], res)
def test_delete_snapshot(self):
with sqla_api.main_context_manager.writer.using(self.context):
snaps = self.create_snapshots()
self.persistence.delete_snapshot(snaps[0])
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_snapshots()
self.assertListEqualObj([snaps[1]], res)
def test_delete_snapshot_not_found(self):
with sqla_api.main_context_manager.writer.using(self.context):
snaps = self.create_snapshots()
fake_snap = cinderlib.Snapshot(snaps[0].volume)
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_snapshot(fake_snap)
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_snapshots()
self.assertListEqualObj(snaps, self.sorted(res))
def test_delete_connection(self):
conns = self.create_connections()
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_connection(conns[1])
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_connections()
self.assertListEqualObj([conns[0]], res)
def test_delete_connection_not_found(self):
conns = self.create_connections()
fake_conn = cinderlib.Connection(
self.backend,
volume=conns[0].volume,
connection_info={'conn': {'data': {}}})
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_connection(fake_conn)
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_connections()
self.assertListEqualObj(conns, self.sorted(res))
def test_get_key_values_by_key(self):
with sqla_api.main_context_manager.writer.using(self.context):
kvs = self.create_key_values()
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_key_values(key=kvs[1].key)
self.assertListEqual([kvs[1]], res)
def test_get_key_values_by_key_not_found(self):
with sqla_api.main_context_manager.writer.using(self.context):
self.create_key_values()
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_key_values(key='fake-uuid')
self.assertListEqual([], res)
def test_delete_key_value(self):
kvs = self.create_key_values()
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_key_value(kvs[1])
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_key_values()
self.assertListEqual([kvs[0]], res)
def test_delete_key_not_found(self):
kvs = self.create_key_values()
fake_key = cinderlib.KeyValue('fake-key')
with sqla_api.main_context_manager.writer.using(self.context):
self.persistence.delete_key_value(fake_key)
with sqla_api.main_context_manager.reader.using(self.context):
res = self.persistence.get_key_values()
self.assertListEqual(kvs, self.sorted(res, 'key'))
class TestDBPersistenceNewerSchema(base.helper.TestHelper):
"""Test DBMS plugin can start when the DB has a newer schema."""

View File

@ -41,14 +41,7 @@ deps =
commands =
find . -ignore_readdir_race -type f -name "*.pyc" -delete
# FIXME: figure out why these need to be run in extreme isolation
stestr run --exclude-regex cinderlib.tests.unit.persistence.test_dbms {posargs}
stestr run --combine {posargs} cinderlib.tests.unit.persistence.test_dbms.TestDBPersistence
stestr run --combine {posargs} cinderlib.tests.unit.persistence.test_dbms.TestMemoryDBPersistence
# TODO: figure out how to arrange the above so that you can run a
# a single test. For example, invoking
# tox -e py310 -- cinderlib.tests.unit.test_cinderlib.TestCinderlib.test__set_priv_helper
# runs that test and then TestDBPersistence tests and then TestMemoryDBPersistence tests
stestr run {posargs}
stestr slowest
allowlist_externals =