Add DB persistence plugin

This patch adds two new metadata storage options based on Cinder's
SQLAlchemy support:

- DB (db entrypoint)
- In-memory SQLite DB (memory_db entrypoint)

With the DB metadata storage we can use any of the DBMSs supported by
the Cinder project.
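
For illustration, a minimal sketch of how this could be wired up
(assuming setup() accepts a persistence_config dict whose 'storage' key
selects the stevedore entry point and whose remaining keys are passed to
the plugin's constructor, which in this patch takes connection,
sqlite_synchronous and soft_deletes):

    import cinderlib

    # Hypothetical configuration: 'storage' picks the entry point, the
    # remaining keys are handed to DBPersistence.__init__().
    cinderlib.setup(persistence_config={
        'storage': 'db',
        'connection': 'mysql+pymysql://user:password@127.0.0.1/cinderlib',
        'soft_deletes': False,
    })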

The benefit of the "memory_db" plugin is that drivers which access the
DB directly, through calls that Cinderlib hasn't monkey patched, may
fail with the default "memory" plugin but can work with this one.
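
An illustrative use of the in-memory variant (same persistence_config
assumption; MemoryDBPersistence takes no arguments and points the
connection at 'sqlite://'):

    import cinderlib

    # Metadata lives in an in-memory SQLite DB created by the Cinder
    # migrations, so direct DB queries from a driver hit real tables.
    cinderlib.setup(persistence_config={'storage': 'memory_db'})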

The JSON serialization mechanism that broke when we introduced the
metadata plugin mechanism has not been fixed in this patch.
Gorka Eguileor 2018-04-06 12:18:04 +02:00
parent ffc5312518
commit 153fd3deaf
8 changed files with 327 additions and 66 deletions

@ -11,6 +11,7 @@ History
- Support setting custom root_helper.
- Setting default project_id and user_id.
- Persistence plugin mechanism
- DB persistence plugin
- Bug fixes:


@ -1,13 +1,11 @@
from __future__ import absolute_import
import cinderlib.cinderlib as clib
from cinderlib.cinderlib import * # noqa
from cinderlib import cinderlib
from cinderlib import serialization
from cinderlib import objects
__author__ = """Gorka Eguileor"""
__email__ = 'geguileo@redhat.com'
__version__ = '0.1.0'
__all__ = clib.__all__
DEFAULT_PROJECT_ID = objects.DEFAULT_PROJECT_ID
DEFAULT_USER_ID = objects.DEFAULT_USER_ID
@ -18,3 +16,6 @@ Connection = objects.Connection
load = serialization.load
json = serialization.json
jsons = serialization.jsons
setup = cinderlib.setup
Backend = cinderlib.Backend

@ -21,10 +21,12 @@ from cinder import context
# NOTE(geguileo): Probably a good idea not to depend on cinder.cmd.volume
# having all the other imports as they could change.
from cinder.cmd import volume as volume_cmd
from cinder import objects as cinder_objs
from cinder.objects import base as cinder_base_ovo
from cinder import utils
from oslo_versionedobjects import base as base_ovo
from os_brick import exception as brick_exception
import six
from cinderlib import exception
@ -48,7 +50,10 @@ class Object(object):
overwrite=False)
def __init__(self, backend, **fields_data):
self.backend = backend
if isinstance(backend, six.string_types):
self.backend = self.backend_class.backends[backend]
else:
self.backend = backend
__ovo = fields_data.get('__ovo')
if __ovo:
@ -193,6 +198,10 @@ class Volume(NamedObject):
_ignore_keys = ('id', 'volume_attachment', 'snapshots')
def __init__(self, backend_or_vol, **kwargs):
# Accept backend name for convenience
if isinstance(backend_or_vol, six.string_types):
backend_or_vol = self.backend_class.backends[backend_or_vol]
# Accept a volume as additional source data
if isinstance(backend_or_vol, Volume):
# Availability zone (backend) will be the same as the source
@ -212,14 +221,42 @@ class Volume(NamedObject):
volume_cmd.objects.SnapshotList(context=self.CONTEXT))
super(Volume, self).__init__(backend_or_vol, **kwargs)
self.snapshots = set()
self.connections = []
self._snapshots = None
self._connections = None
self._populate_data()
def _to_primitive(self):
local_attach = self.local_attach.id if self.local_attach else None
return {'local_attach': local_attach}
@property
def snapshots(self):
# Lazy loading
if self._snapshots is None:
self._snapshots = self.persistence.get_snapshots(volume_id=self.id)
for snap in self._snapshots:
snap.volume = self
ovos = [snap._ovo for snap in self._snapshots]
self._ovo.snapshots = cinder_objs.SnapshotList(objects=ovos)
self._ovo.obj_reset_changes(('snapshots',))
return self._snapshots
@property
def connections(self):
# Lazy loading
if self._connections is None:
self._connections = self.persistence.get_connections(
volume_id=self.id)
for conn in self._connections:
conn.volume = self
ovos = [conn._ovo for conn in self._connections]
self._ovo.volume_attachment = cinder_objs.VolumeAttachmentList(
objects=ovos)
self._ovo.obj_reset_changes(('volume_attachment',))
return self._connections
@classmethod
def get_by_id(cls, volume_id):
result = cls.persistence.get_volumes(volume_id=volume_id)
@ -234,8 +271,8 @@ class Volume(NamedObject):
@classmethod
def _load(cls, backend, ovo):
# Restore snapshot's circular reference removed on serialization
for snap in ovo.snapshots:
snap.volume = ovo
# for snap in ovo.snapshots:
# snap.volume = ovo
# If this object is already present it will be replaced
obj = Object.objects['Volume'].get(ovo.id)
@ -250,37 +287,37 @@ class Volume(NamedObject):
self._populate_data()
def _populate_data(self):
old_snapshots = {snap.id: snap for snap in self.snapshots}
# old_snapshots = {snap.id: snap for snap in self.snapshots}
for snap_ovo in self._ovo.snapshots:
snap = Object.objects['Snapshot'].get(snap_ovo.id)
if snap:
snap._replace_ovo(snap_ovo)
del old_snapshots[snap.id]
else:
snap = Snapshot(self, __ovo=snap_ovo)
self.snapshots.add(snap)
# for snap_ovo in self._ovo.snapshots:
# snap = Object.objects['Snapshot'].get(snap_ovo.id)
# if snap:
# snap._replace_ovo(snap_ovo)
# del old_snapshots[snap.id]
# else:
# snap = Snapshot(self, __ovo=snap_ovo)
# self.snapshots.append(snap)
for snap_id, snap in old_snapshots.items():
self.snapshots.discard(snap)
# We leave snapshots in the global DB just in case...
# del Object.objects['Snapshot'][snap_id]
# for snap_id, snap in old_snapshots.items():
# self.snapshots.remove(snap)
# # We leave snapshots in the global DB just in case...
# # del Object.objects['Snapshot'][snap_id]
old_connections = {conn.id: conn for conn in self.connections}
# old_connections = {conn.id: conn for conn in self.connections}
for conn_ovo in self._ovo.volume_attachment:
conn = Object.objects['Connection'].get(conn_ovo.id)
if conn:
conn._replace_ovo(conn_ovo)
del old_connections[conn.id]
else:
conn = Connection(self.backend, volume=self, __ovo=conn_ovo)
self.connections.append(conn)
# for conn_ovo in self._ovo.volume_attachment:
# conn = Object.objects['Connection'].get(conn_ovo.id)
# if conn:
# conn._replace_ovo(conn_ovo)
# del old_connections[conn.id]
# else:
# conn = Connection(self.backend, volume=self, __ovo=conn_ovo)
# self.connections.append(conn)
for conn_id, conn in old_connections.items():
self.connections.remove(conn)
# We leave connections in the global DB just in case...
# del Object.objects['Connection'][conn_id]
# for conn_id, conn in old_connections.items():
# self.connections.remove(conn)
# # We leave connections in the global DB just in case...
# # del Object.objects['Connection'][conn_id]
data = getattr(self._ovo, 'cinderlib_data', {})
self.local_attach = data.get('local_attach', None)
@ -347,9 +384,9 @@ class Volume(NamedObject):
def create_snapshot(self, name='', description='', **kwargs):
snap = Snapshot(self, name=name, description=description, **kwargs)
snap.create()
self.snapshots.add(snap)
self._ovo.snapshots.objects.append(snap._ovo)
self.persistence.reset_change_tracker(self, 'snapshots')
if self._snapshots is not None:
self._snapshots.append(snap)
self._ovo.snapshots.objects.append(snap._ovo)
return snap
def attach(self):
@ -392,7 +429,9 @@ class Volume(NamedObject):
try:
conn = Connection.connect(self, connector_dict, **ovo_fields)
self.connections.append(conn)
if self._connections is not None:
self._connections.append(conn)
self._ovo.volume_attachment.objects.append(conn._ovo)
self._ovo.status = 'in-use'
self.persistence.set_volume(self)
except Exception:
@ -402,9 +441,14 @@ class Volume(NamedObject):
return conn
def _disconnect(self, connection):
self.connections.remove(connection)
self._remove_export()
self._ovo.status = 'available'
if self._connections is not None:
self._connections.remove(connection)
self._ovo.volume_attachment.objects.remove(connection._ovo)
if not self.connections:
self._ovo.status = 'available'
self.persistence.set_volume(self)
def disconnect(self, connection, force=False):
connection._disconnect(force)
@ -434,23 +478,34 @@ class Connection(Object):
connection_info=conn_info,
**kwargs)
cls.persistence.set_connection(conn)
volume._ovo.volume_attachment.objects.append(conn._ovo)
cls.persistence.reset_change_tracker(volume, 'volume_attachment')
return conn
def __init__(self, *args, **kwargs):
self.connected = True
self.volume = kwargs.pop('volume')
self._volume = kwargs.pop('volume')
self.connector = kwargs.pop('connector', None)
self.attach_info = kwargs.pop('attach_info', None)
if '__ovo' not in kwargs:
kwargs['volume'] = self.volume._ovo
kwargs['volume_id'] = self.volume._ovo.id
kwargs['volume'] = self._volume._ovo
kwargs['volume_id'] = self._volume._ovo.id
super(Connection, self).__init__(*args, **kwargs)
self._populate_data()
@property
def volume(self):
# Lazy loading
if self._volume is None:
self._volume = Volume.get_by_id(self.volume_id)
self._ovo.volume = self._volume._ovo
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
self._ovo.volume = value._ovo
def _to_primitive(self):
result = {
'connector': self.connector,
@ -492,7 +547,6 @@ class Connection(Object):
def _load(cls, backend, ovo):
# Turn this around and do a Volume load
volume = ovo.volume
volume.volume_attachment.objects.append(ovo)
# Remove circular reference
delattr(ovo, base_ovo._get_attrname('volume'))
Volume._load(backend, volume)
@ -504,7 +558,6 @@ class Connection(Object):
force=force)
self.connected = False
self._ovo.volume.volume_attachment.objects.remove(self._ovo)
self._ovo.status = 'detached'
self._ovo.deleted = True
self.persistence.delete_connection(self)
@ -548,6 +601,16 @@ class Connection(Object):
raise exception.ConnectionNotFound(filter=msg)
return result[0]
@property
def backend(self):
if self._backend is None:
self._backend = self.volume.backend
return self._backend
@backend.setter
def backend(self, value):
self._backend = value
class Snapshot(NamedObject):
OVO_CLASS = volume_cmd.objects.Snapshot
@ -557,10 +620,13 @@ class Snapshot(NamedObject):
}
def __init__(self, volume, **kwargs):
self.volume = volume
self._volume = volume
if '__ovo' in kwargs:
# Ensure circular reference is set
kwargs['__ovo'].volume = volume._ovo
# Ensure circular reference is set if present
if volume:
kwargs['__ovo'].volume = volume._ovo
backend = kwargs['__ovo']['progress']
else:
kwargs.setdefault('user_id', volume.user_id)
kwargs.setdefault('project_id', volume.project_id)
@ -568,14 +634,29 @@ class Snapshot(NamedObject):
kwargs['volume_size'] = volume.size
kwargs['volume_type_id'] = volume.volume_type_id
kwargs['volume'] = volume._ovo
if volume:
backend = volume.backend.id
kwargs['progress'] = backend
super(Snapshot, self).__init__(volume.backend, **kwargs)
super(Snapshot, self).__init__(backend=backend, **kwargs)
@property
def volume(self):
# Lazy loading
if self._volume is None:
self._volume = Volume.get_by_id(self.volume_id)
self._ovo.volume = self._volume._ovo
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
self._ovo.volume = value._ovo
@classmethod
def _load(cls, backend, ovo):
# Turn this around and do a Volume load
volume = ovo.volume
volume.snapshots.objects.append(ovo)
# Remove circular reference
delattr(ovo, base_ovo._get_attrname('volume'))
Volume._load(backend, volume)
@ -608,13 +689,12 @@ class Snapshot(NamedObject):
# just didn't complete.
# TODO: raise with the snap info
raise
# Instead of refreshing from the DB we modify the lists manually
self.volume.snapshots.discard(self)
try:
self.volume._ovo.snapshots.objects.remove(self._ovo)
self.persistence.reset_change_tracker(self.volume, 'snapshots')
except ValueError:
pass
if self._volume is not None and self._volume._snapshots is not None:
try:
self._volume._snapshots.remove(self)
self._volume._ovo.snapshots.objects.remove(self._ovo)
except ValueError:
pass
def create_volume(self, **new_vol_params):
new_vol_params.setdefault('size', self.volume_size)

@ -12,7 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import inspect
import six
@ -21,8 +21,6 @@ from stevedore import driver
from cinderlib import exception
from cinderlib.persistence import base
__all__ = ('setup',)
DEFAULT_STORAGE = 'memory'

@ -18,6 +18,7 @@
from cinder.cmd import volume as volume_cmd
from cinder.objects import base as cinder_base_ovo
from oslo_utils import timeutils
from oslo_versionedobjects import fields
import six
@ -75,7 +76,13 @@ class PersistenceDriverBase(object):
resource._ovo.obj_reset_changes(fields)
def get_changed_fields(self, resource):
return resource._ovo.cinder_obj_get_changes()
# NOTE(geguileo): We don't use cinder_obj_get_changes to prevent
# recursion to children OVO which we are not interested and may result
# in circular references.
result = {key: getattr(resource._ovo, key)
for key in resource._changed_fields
if not isinstance(resource.fields[key], fields.ObjectField)}
return result
class DB(object):
@ -108,10 +115,10 @@ class DB(object):
ovo_cls.save = lambda *args, **kwargs: None
def volume_get(self, context, volume_id, *args, **kwargs):
return self.persistence.get_volumes(volume_id)._ovo
return self.persistence.get_volumes(volume_id)[0]._ovo
def snapshot_get(self, context, snapshot_id, *args, **kwargs):
return self.persistence.get_snapshots(snapshot_id)._ovo
return self.persistence.get_snapshots(snapshot_id)[0]._ovo
@classmethod
def image_volume_cache_get_by_volume_id(cls, context, volume_id):

@ -0,0 +1,172 @@
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from cinder.cmd import volume as volume_cmd
from cinder.db import api as db_api
from cinder.db import migration
from cinder.db.sqlalchemy import api as sqla_api
from cinder import objects as cinder_objs
# from oslo_log import log as logging
from cinderlib import objects
from cinderlib.persistence import base as persistence_base
LOG = logging.getLogger(__name__)
class DBPersistence(persistence_base.PersistenceDriverBase):
def __init__(self, connection, sqlite_synchronous=True,
soft_deletes=False):
self.soft_deletes = soft_deletes
volume_cmd.CONF.database.connection = connection
volume_cmd.CONF.database.sqlite_synchronous = sqlite_synchronous
# Suppress logging for migration
migrate_logger = logging.getLogger('migrate')
migrate_logger.setLevel(logging.WARNING)
self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
conf=volume_cmd.CONF, backend_mapping=db_api._BACKEND_MAPPING,
lazy=True)
migration.db_sync()
super(DBPersistence, self).__init__()
@property
def db(self):
return self.db_instance
@staticmethod
def _build_filter(**kwargs):
return {key: value for key, value in kwargs.items() if value}
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
filters = self._build_filter(id=volume_id, display_name=volume_name,
availability_zone=backend_name)
LOG.debug('get_volumes for %s', filters)
ovos = cinder_objs.VolumeList.get_all(objects.CONTEXT, filters=filters)
result = [objects.Volume(ovo.availability_zone, __ovo=ovo)
for ovo in ovos.objects]
return result
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
filters = self._build_filter(id=snapshot_id, volume_id=volume_id,
display_name=snapshot_name)
LOG.debug('get_snapshots for %s', filters)
ovos = cinder_objs.SnapshotList.get_all(objects.CONTEXT,
filters=filters)
result = [objects.Snapshot(None, __ovo=ovo) for ovo in ovos.objects]
return result
def get_connections(self, connection_id=None, volume_id=None):
filters = self._build_filter(id=connection_id, volume_id=volume_id)
LOG.debug('get_connections for %s', filters)
ovos = cinder_objs.VolumeAttachmentList.get_all(objects.CONTEXT,
filters)
# Leverage lazy loading of the volume and backend in Connection
result = [objects.Connection(None, volume=None, __ovo=ovo)
for ovo in ovos.objects]
return result
def set_volume(self, volume):
changed = self.get_changed_fields(volume)
if not changed:
return
# Create
if 'id' in changed:
LOG.debug('set_volume creating %s', changed)
self.db.volume_create(objects.CONTEXT, changed)
else:
LOG.debug('set_volume updating %s', changed)
self.db.volume_update(objects.CONTEXT, volume.id, changed)
super(DBPersistence, self).set_volume(volume)
def set_snapshot(self, snapshot):
changed = self.get_changed_fields(snapshot)
if not changed:
return
# Create
if 'id' in changed:
LOG.debug('set_snapshot creating %s', changed)
self.db.snapshot_create(objects.CONTEXT, changed)
else:
LOG.debug('set_snapshot updating %s', changed)
self.db.snapshot_update(objects.CONTEXT, snapshot.id, changed)
super(DBPersistence, self).set_snapshot(snapshot)
def set_connection(self, connection):
changed = self.get_changed_fields(connection)
if not changed:
return
if 'connection_info' in changed:
connection._convert_connection_info_to_db_format(changed)
if 'connector' in changed:
connection._convert_connector_to_db_format(changed)
# Create
if 'id' in changed:
LOG.debug('set_connection creating %s', changed)
sqla_api.volume_attach(objects.CONTEXT, changed)
else:
LOG.debug('set_connection updating %s', changed)
self.db.volume_attachment_update(objects.CONTEXT, connection.id,
changed)
super(DBPersistence, self).set_connection(connection)
def delete_volume(self, volume):
if self.soft_deletes:
LOG.debug('soft deleting volume %s', volume.id)
self.db.volume_destroy(objects.CONTEXT, volume.id)
else:
LOG.debug('hard deleting volume %s', volume.id)
sqla_api.model_query(objects.CONTEXT,
sqla_api.models.Volume
).filter_by(id=volume.id).delete()
super(DBPersistence, self).delete_volume(volume)
def delete_snapshot(self, snapshot):
if self.soft_deletes:
LOG.debug('soft deleting snapshot %s', snapshot.id)
self.db.snapshot_destroy(objects.CONTEXT, snapshot.id)
else:
LOG.debug('hard deleting snapshot %s', snapshot.id)
sqla_api.model_query(objects.CONTEXT,
sqla_api.models.Snapshot
).filter_by(id=snapshot.id).delete()
super(DBPersistence, self).delete_snapshot(snapshot)
def delete_connection(self, connection):
if self.soft_deletes:
LOG.debug('soft deleting connection %s', connection.id)
self.db.attachment_destroy(objects.CONTEXT, connection.id)
else:
LOG.debug('hard deleting connection %s', connection.id)
sqla_api.model_query(objects.CONTEXT,
sqla_api.models.VolumeAttachment
).filter_by(id=connection.id).delete()
super(DBPersistence, self).delete_connection(connection)
class MemoryDBPersistence(DBPersistence):
def __init__(self):
super(MemoryDBPersistence, self).__init__(connection='sqlite://')

@ -22,9 +22,9 @@ class MemoryPersistence(persistence_base.PersistenceDriverBase):
connections = {}
def __init__(self):
super(MemoryPersistence, self).__init__()
# Create fake DB for drivers
self.fake_db = persistence_base.DB(self)
super(MemoryPersistence, self).__init__()
@property
def db(self):

@ -80,6 +80,8 @@ setuptools.setup(
entry_points={
'cinderlib.persistence.storage': [
'memory = cinderlib.persistence.memory:MemoryPersistence',
'db = cinderlib.persistence.dbms:DBPersistence',
'memory_db = cinderlib.persistence.dbms:MemoryDBPersistence',
],
},
)