Add support for IBM SVC

When initializing the IBM SVC driver with the memory persistence plugin we get an exception:

  return IMPL.volume_get_all_by_host(context, host, filters=filters)
  AttributeError: 'DB' object has no attribute 'volume_get_all_by_host'

The issue is caused by the SVC driver accessing the database directly,
which is no longer considered good practice in Cinder.

The problem with drivers using the DB directly is that cinderlib doesn't
require a DB; it can work storing everything in memory. To facilitate
storage of the resources' metadata, cinderlib has its own persistence
plugin mechanism and some ad-hoc "workarounds" in the DB class for
drivers that access the DB directly.
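
For context, this is a minimal sketch of how cinderlib runs without a
database (the LVM driver options are illustrative; any backend,
including IBM SVC, is configured the same way):

  import cinderlib

  # The default persistence plugin is 'memory': resource metadata lives
  # in Python objects and no SQL database is ever configured.
  cinderlib.setup(persistence_config={'storage': 'memory'})

  # A driver that calls cinder.db functions directly bypasses the
  # persistence plugin and lands in cinderlib's fake DB class, which
  # must provide a workaround for each DB method the driver uses.
  lvm = cinderlib.Backend(
      volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
      volume_group='cinder-volumes',
      volume_backend_name='lvm')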

The IBM driver has the following DB dependencies (the sketch after the
list shows how the first one surfaces):

- volume_get_all_by_host when using objects.VolumeList.get_all_by_host

- volume_type_get when using objects.VolumeType.get_by_name_or_id

- volume_type_get_by_name when using
  objects.VolumeType.get_by_name_or_id

- volume_get directly called by get_by_id when using
  objects.Volume.get_by_id

- _group_type_get_full directly called by get_by_id when using
  objects.GroupType.get_by_id

- volume_admin_metadata_get direct call

- volume_admin_metadata_update direct call

- volume_admin_metadata_delete direct call
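
To illustrate the first dependency, this sketch (with a made-up host
string) shows the call chain that produces the traceback above; the
other OVO-based dependencies follow the same path into the DB class:

  from cinder import context as cinder_context
  from cinder import objects

  ctxt = cinder_context.get_admin_context()
  # VolumeList.get_all_by_host delegates to db.volume_get_all_by_host,
  # the attribute the AttributeError says is missing from our DB class.
  volumes = objects.VolumeList.get_all_by_host(ctxt, 'myhost@svc_backend')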

In cinderlib we don't currently support groups or consistency groups,
so we don't need to implement `_group_type_get_full`, and we already
have workarounds for `volume_get` and `volume_type_get`. This patch
therefore implements the remaining 5 methods, volume_get_all_by_host,
volume_type_get_by_name, volume_admin_metadata_get,
volume_admin_metadata_update, and volume_admin_metadata_delete, in the
DB class.

Closes-Bug: #1821898
Change-Id: I94b4864c5823976f4a9cac2e40943b74aca4066e
Gorka Eguileor, 2019-03-27 19:46:05 +01:00
parent d6bec909e0, commit 385e6f4a1e
2 changed files with 195 additions and 1 deletion

cinderlib/persistence/base.py

@@ -158,8 +158,17 @@ class DB(object):
             ovo_cls = getattr(objects, ovo_name)
             ovo_cls.save = lambda *args, **kwargs: None
 
+    def __volume_get(self, volume_id, as_ovo=True):
+        in_memory = volume_id in cinderlib.Backend._volumes_inflight
+        if in_memory:
+            vol = cinderlib.Backend._volumes_inflight[volume_id]
+        else:
+            vol = self.persistence.get_volumes(volume_id)[0]
+        vol_result = vol._ovo if as_ovo else vol
+        return in_memory, vol_result
+
     def volume_get(self, context, volume_id, *args, **kwargs):
-        return self.persistence.get_volumes(volume_id)[0]._ovo
+        return self.__volume_get(volume_id)[1]
 
     def snapshot_get(self, context, snapshot_id, *args, **kwargs):
         return self.persistence.get_snapshots(snapshot_id)[0]._ovo
@@ -175,6 +184,10 @@ class DB(object):
             return None
         return vol_type_to_dict(vol._ovo.volume_type)
 
+    # Our volume type name is the same as the id and the volume name
+    def _volume_type_get_by_name(self, context, name, session=None):
+        return self.volume_type_get(context, name)
+
     def qos_specs_get(self, context, qos_specs_id, inactive=False):
         if qos_specs_id in cinderlib.Backend._volumes_inflight:
             vol = cinderlib.Backend._volumes_inflight[qos_specs_id]
@@ -192,6 +205,45 @@ class DB(object):
         method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
         return method(context, id)
 
+    def volume_get_all_by_host(self, context, host, filters=None):
+        backend_name = host.split('#')[0].split('@')[1]
+        result = self.persistence.get_volumes(backend_name=backend_name)
+        return [vol._ovo for vol in result]
+
+    def _volume_admin_metadata_get(self, context, volume_id, session=None):
+        vol = self.volume_get(context, volume_id)
+        return vol.admin_metadata
+
+    def _volume_admin_metadata_update(self, context, volume_id, metadata,
+                                      delete, session=None, add=True,
+                                      update=True):
+        vol_in_memory, vol = self.__volume_get(volume_id, as_ovo=False)
+        changed = False
+
+        if delete:
+            remove = set(vol.admin_metadata.keys()).difference(metadata.keys())
+            changed = bool(remove)
+            for k in remove:
+                del vol.admin_metadata[k]
+
+        for k, v in metadata.items():
+            is_in = k in vol.admin_metadata
+            if (not is_in and add) or (is_in and update):
+                vol.admin_metadata[k] = v
+                changed = True
+
+        if changed and not vol_in_memory:
+            vol._changed_fields.add('admin_metadata')
+            self.persistence.set_volume(vol)
+
+    def volume_admin_metadata_delete(self, context, volume_id, key):
+        vol_in_memory, vol = self.__volume_get(volume_id, as_ovo=False)
+        if key in vol.admin_metadata:
+            del vol.admin_metadata[key]
+            if not vol_in_memory:
+                vol._changed_fields.add('admin_metadata')
+                self.persistence.set_volume(vol)
+
 
 def vol_type_to_dict(volume_type):
     res = serialization.obj_to_primitive(volume_type)
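
The host parsing in volume_get_all_by_host relies on Cinder's host
string format, host@backend#pool; a quick illustration with a made-up
value:

  >>> host = 'myhost@svc_backend#pool1'
  >>> host.split('#')[0].split('@')[1]
  'svc_backend'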

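The delete/add/update flags in _volume_admin_metadata_update follow
Cinder's DB API semantics. This standalone rerun of the logic (plain
dicts instead of a volume object) matches the expectations of the tests
below:

  existing = {'k': 'v', 'k2': 'v2'}      # the volume's admin_metadata
  metadata = {'k2': 'v2.1', 'k3': 'v3'}  # metadata passed by the caller

  # delete=True drops existing keys absent from metadata ('k'), add=True
  # inserts new keys ('k3'), and update=True overwrites existing ('k2').
  for key in set(existing) - set(metadata):
      del existing[key]
  existing.update(metadata)
  assert existing == {'k2': 'v2.1', 'k3': 'v3'}
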
cinderlib/tests/unit/persistence/base.py

@@ -13,7 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import mock
+from oslo_config import cfg
+
 import cinderlib
+from cinderlib.persistence import base as persistence_base
 from cinderlib.tests.unit.persistence import helper
 from cinderlib.tests.unit import utils
 
@@ -378,3 +382,141 @@ class BasePersistenceTest(helper.TestHelper):
         self.persistence.delete_key_value(fake_key)
         res = self.persistence.get_key_values()
         self.assertListEqual(kvs, self.sorted(res, 'key'))
+
+    @mock.patch('cinderlib.persistence.base.DB.volume_type_get')
+    def test__volume_type_get_by_name(self, get_mock):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        # Volume type id and name are the same, so the method must be too
+        res = self.persistence.db._volume_type_get_by_name(self.context,
+                                                           mock.sentinel.name)
+        self.assertEqual(get_mock.return_value, res)
+        get_mock.assert_called_once_with(self.context, mock.sentinel.name)
+
+    def test_volume_type_get_by_id(self):
+        extra_specs = [{'k1': 'v1', 'k2': 'v2'},
+                       {'kk1': 'vv1', 'kk2': 'vv2', 'kk3': 'vv3'}]
+        vols = self.create_volumes(
+            [{'size': 1, 'extra_specs': extra_specs[0]},
+             {'size': 2, 'extra_specs': extra_specs[1]}],
+            sort=False)
+        res = self.persistence.db.volume_type_get(self.context, vols[0].id)
+        self.assertEqual(vols[0].id, res['id'])
+        self.assertEqual(vols[0].id, res['name'])
+        self.assertEqual(extra_specs[0], res['extra_specs'])
+
+    def test_volume_get_all_by_host(self):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        persistence_db = self.persistence.db
+        host = '%s@%s' % (cfg.CONF.host, self.backend.id)
+        vols = [v._ovo for v in self.create_n_volumes(2)]
+
+        backend2 = utils.FakeBackend(volume_backend_name='fake2')
+        vol = self.create_volumes([{'backend_or_vol': backend2, 'size': 3}])
+
+        # We should be able to get them using the host@backend
+        res = persistence_db.volume_get_all_by_host(self.context, host)
+        self.assertListEqualObj(vols, self.sorted(res))
+
+        # Confirm it also works when we pass a host that includes the pool
+        res = persistence_db.volume_get_all_by_host(self.context,
+                                                    vols[0].host)
+        self.assertListEqualObj(vols, self.sorted(res))
+
+        # Check we also get the other backend's volume
+        host = '%s@%s' % (cfg.CONF.host, backend2.id)
+        res = persistence_db.volume_get_all_by_host(self.context, host)
+        self.assertListEqualObj(vol[0]._ovo, res[0])
+
+    def test__volume_admin_metadata_get(self):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        admin_metadata = {'k': 'v'}
+        vols = self.create_volumes([{'size': 1,
+                                     'admin_metadata': admin_metadata}])
+        result = self.persistence.db._volume_admin_metadata_get(self.context,
+                                                                vols[0].id)
+        self.assertDictEqual(admin_metadata, result)
+
+    def test__volume_admin_metadata_update(self):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        create_admin_metadata = {'k': 'v', 'k2': 'v2'}
+        admin_metadata = {'k2': 'v2.1', 'k3': 'v3'}
+        vols = self.create_volumes([{'size': 1,
+                                     'admin_metadata': create_admin_metadata}])
+        self.persistence.db._volume_admin_metadata_update(self.context,
+                                                          vols[0].id,
+                                                          admin_metadata,
+                                                          delete=True,
+                                                          add=True,
+                                                          update=True)
+        result = self.persistence.db._volume_admin_metadata_get(self.context,
+                                                                vols[0].id)
+        self.assertDictEqual({'k2': 'v2.1', 'k3': 'v3'}, result)
+
+    def test__volume_admin_metadata_update_do_nothing(self):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        create_admin_metadata = {'k': 'v', 'k2': 'v2'}
+        admin_metadata = {'k2': 'v2.1', 'k3': 'v3'}
+        vols = self.create_volumes([{'size': 1,
+                                     'admin_metadata': create_admin_metadata}])
+        # Setting delete, add, and update to False means we don't do anything
+        self.persistence.db._volume_admin_metadata_update(self.context,
+                                                          vols[0].id,
+                                                          admin_metadata,
+                                                          delete=False,
+                                                          add=False,
+                                                          update=False)
+        result = self.persistence.db._volume_admin_metadata_get(self.context,
+                                                                vols[0].id)
+        self.assertDictEqual(create_admin_metadata, result)
+
+    def test_volume_admin_metadata_delete(self):
+        # Only test when using our fake DB class. We cannot use
+        # unittest2.skipUnless because persistence is configured in
+        # setUpClass, which is called after the decorator.
+        if not isinstance(cinderlib.objects.Backend.persistence.db,
+                          persistence_base.DB):
+            return
+
+        admin_metadata = {'k': 'v', 'k2': 'v2'}
+        vols = self.create_volumes([{'size': 1,
+                                     'admin_metadata': admin_metadata}])
+        self.persistence.db.volume_admin_metadata_delete(self.context,
+                                                         vols[0].id,
+                                                         'k2')
+        result = self.persistence.db._volume_admin_metadata_get(self.context,
+                                                                vols[0].id)
+        self.assertDictEqual({'k': 'v'}, result)