Stop using AZ to store backend name
We were using the availability_zone field from the volume table to store the backend name, which makes it harder to migrate or transfer volumes from Cinder. This patch changes that behavior and stores the backend name in the host field instead. THIS IS A BREAKING CHANGE.
This commit is contained in:
parent 696ad6da40
commit 09abcf4df4
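In practical terms, the patch swaps the availability_zone hack for Cinder's own host@backend convention in the volume's host field. A minimal before/after sketch (the node1 and lvm names are invented illustration values, not part of this commit):

    # Invented example values.
    configured_host = 'node1'   # host where the cinderlib process runs
    backend_name = 'lvm'        # backend name from the configuration

    # Before this patch: backend name kept in the volume's AZ field.
    old_volume = {'availability_zone': backend_name}

    # After this patch: backend name encoded in the host field the same
    # way Cinder does it, which eases migrating volumes to/from Cinder.
    new_volume = {'host': '%s@%s' % (configured_host, backend_name)}
    assert new_volume['host'] == 'node1@lvm'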
@@ -12,6 +12,7 @@ History
 - Features:
 
   - Provide better message when device is not available.
+  - Backend name stored in host instead of in the AZ (backward incompatible).
 
 0.2.2 (2018-07-24)
 ------------------
@@ -66,7 +66,7 @@ class Backend(object):
             conf.volume_driver,
             configuration=conf,
             db=self.persistence.db,
-            host=volume_cmd.CONF.host,
+            host='%s@%s' % (objects.CONFIGURED_HOST, volume_backend_name),
             cluster_name=None,  # No clusters for now: volume_cmd.CONF.cluster,
             active_backend_id=None)  # No failover for now
         self.driver.do_setup(objects.CONTEXT)
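The driver is now initialized under the same host@backend identifier Cinder itself would use. A short sketch of the composed value, assuming CONFIGURED_HOST is 'node1' and the backend is named 'lvm' (both placeholders):

    CONFIGURED_HOST = 'node1'        # set during setup(), see next hunks
    volume_backend_name = 'lvm'
    host = '%s@%s' % (CONFIGURED_HOST, volume_backend_name)
    assert host == 'node1@lvm'
    # Pool-aware drivers may extend this further to 'node1@lvm#pool1'.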
@@ -141,7 +141,7 @@ class Backend(object):
                      suppress_requests_ssl_warnings=True, disable_logs=True,
                      non_uuid_ids=False, output_all_backend_info=False,
                      project_id=None, user_id=None, persistence_config=None,
-                     fail_on_missing_backend=True, **log_params):
+                     fail_on_missing_backend=True, host=None, **log_params):
         # Global setup can only be set once
         if cls.global_initialization:
             raise Exception('Already setup')
@@ -151,6 +151,7 @@ class Backend(object):
         cls.project_id = project_id
         cls.user_id = user_id
         cls.non_uuid_ids = non_uuid_ids
+        objects.CONFIGURED_HOST = host or volume_cmd.CONF.host
 
         cls.set_persistence(persistence_config)
 
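A hedged usage sketch of the new host argument, assuming the usual cinderlib.setup() entry point and an LVM backend; the driver options are placeholders from a typical LVM configuration:

    import cinderlib

    # host pins the first half of the host@backend identifier; omitting
    # it falls back to volume_cmd.CONF.host, i.e. the machine's hostname.
    cinderlib.setup(host='my_app')

    lvm = cinderlib.Backend(
        volume_backend_name='lvm',
        volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
        volume_group='cinder-volumes')
    # Volumes created through this backend now get host == 'my_app@lvm'.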
@@ -37,10 +37,10 @@ from cinderlib import exception
 LOG = logging.getLogger(__name__)
 DEFAULT_PROJECT_ID = 'cinderlib'
 DEFAULT_USER_ID = 'cinderlib'
-BACKEND_NAME_VOLUME_FIELD = 'availability_zone'
 BACKEND_NAME_SNAPSHOT_FIELD = 'progress'
 CONNECTIONS_OVO_FIELD = 'volume_attachment'
 
+CONFIGURED_HOST = 'cinderlib'
+
 # This cannot go in the setup method because cinderlib objects need them to
 # be setup to set OVO_CLASS
@@ -265,7 +265,6 @@ class Volume(NamedObject):
         'size': 1,
         'user_id': Object.CONTEXT.user_id,
         'project_id': Object.CONTEXT.project_id,
-        'host': volume_cmd.CONF.host,
         'status': 'creating',
         'attach_status': 'detached',
         'metadata': {},
@@ -279,14 +278,14 @@ class Volume(NamedObject):
     def __init__(self, backend_or_vol, **kwargs):
         # Accept backend name for convenience
         if isinstance(backend_or_vol, six.string_types):
-            kwargs.setdefault(BACKEND_NAME_VOLUME_FIELD, backend_or_vol)
+            kwargs.setdefault('host',
+                              '%s@%s' % (CONFIGURED_HOST, backend_or_vol))
             backend_or_vol = self._get_backend(backend_or_vol)
         elif isinstance(backend_or_vol, self.backend_class):
-            kwargs.setdefault(BACKEND_NAME_VOLUME_FIELD, backend_or_vol.id)
+            kwargs.setdefault('host',
+                              '%s@%s' % (CONFIGURED_HOST, backend_or_vol.id))
         # Accept a volume as additional source data
         elif isinstance(backend_or_vol, Volume):
-            # Availability zone (backend) will be the same as the source
-            kwargs.pop(BACKEND_NAME_VOLUME_FIELD, None)
             for key in backend_or_vol._ovo.fields:
                 if (backend_or_vol._ovo.obj_attr_is_set(key) and
                         key not in self._ignore_keys):
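With the constructor change, building a volume from a backend name (or a Backend instance) seeds the host field instead of the availability zone. Continuing the placeholder setup from the sketch above:

    vol = lvm.create_volume(size=1)
    # The backend now lives in host, not in availability_zone.
    assert vol.host == 'my_app@lvm'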
@@ -82,16 +82,26 @@ class DBPersistence(persistence_base.PersistenceDriverBase):
         return {key: value for key, value in kwargs.items() if value}
 
     def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
+        if backend_name:
+            host = '%s@%s' % (objects.CONFIGURED_HOST, backend_name)
+        else:
+            host = None
         filters = self._build_filter(id=volume_id, display_name=volume_name,
-                                     availability_zone=backend_name)
+                                     host=host)
         LOG.debug('get_volumes for %s', filters)
         ovos = cinder_objs.VolumeList.get_all(objects.CONTEXT, filters=filters)
-        result = [objects.Volume(ovo.availability_zone, __ovo=ovo)
-                  for ovo in ovos.objects]
-        for r in result:
-            if r.volume_type_id:
-                r.volume_type.extra_specs  # Trigger extra specs load
-                r.volume_type.qos_specs  # Trigger qos specs load
+        result = []
+        for ovo in ovos:
+            # We have stored the backend reversed with the host, switch it back
+            backend = ovo.host.split('@')[1].split('#')[0]
+
+            # Trigger lazy loading of specs
+            if ovo.volume_type_id:
+                ovo.volume_type.extra_specs
+                ovo.volume_type.qos_specs
+
+            result.append(objects.Volume(backend, __ovo=ovo))
+
         return result
 
     def get_snapshots(self, snapshot_id=None, snapshot_name=None,
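The reverse mapping recovers the backend name from the stored string, discarding any Cinder-style #pool suffix along the way. A standalone demonstration of the same parsing expression, wrapped in a hypothetical helper:

    def backend_from_host(host):
        # Mirrors the split used above: 'node1@lvm#pool1' -> 'lvm'.
        return host.split('@')[1].split('#')[0]

    assert backend_from_host('node1@lvm') == 'lvm'
    assert backend_from_host('node1@lvm#pool1') == 'lvm'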
@@ -31,10 +31,17 @@ class MemoryPersistence(persistence_base.PersistenceDriverBase):
     def db(self):
         return self.fake_db
 
+    @staticmethod
+    def _get_field(res, field):
+        res = getattr(res, field)
+        if field == 'host':
+            res = res.split('@')[1].split('#')[0]
+        return res
+
     def _filter_by(self, values, field, value):
         if not value:
             return values
-        return [res for res in values if getattr(res, field) == value]
+        return [res for res in values if self._get_field(res, field) == value]
 
     def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
         try:
@@ -43,7 +50,7 @@ class MemoryPersistence(persistence_base.PersistenceDriverBase):
         except KeyError:
             return []
         res = self._filter_by(res, 'display_name', volume_name)
-        res = self._filter_by(res, 'availability_zone', backend_name)
+        res = self._filter_by(res, 'host', backend_name)
         return res
 
     def get_snapshots(self, snapshot_id=None, snapshot_name=None,
@@ -24,7 +24,7 @@ The method definition is as follows:
                      suppress_requests_ssl_warnings=True, disable_logs=True,
                      non_uuid_ids=False, output_all_backend_info=False,
                      project_id=None, user_id=None, persistence_config=None,
-                     fail_on_missing_backend=True, **log_params):
+                     fail_on_missing_backend=True, host=None, **log_params):
 
 The meaning of the library's configuration options are:
@@ -166,6 +166,22 @@ initialization.
 This is useful if we are sharing the metadata persistence storage and we want
 to load a volume that is already connected to do just the attachment.
 
+host
+----
+
+Host configuration option used for all volumes created by this cinderlib
+execution.
+
+In cinderlib, volumes are selected based on the backend name, not on the
+host@backend combination like Cinder does, so backend names must be unique
+across all cinderlib applications that are using the same persistence
+storage backend.
+
+A second application running cinderlib with a different host value will have
+access to the same resources if it uses the same backend name.
+
+Defaults to the host's hostname.
+
 other keyword arguments
 -----------------------
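A sketch of the scenario described above, with two invented application names sharing one persistence database; each setup() call runs in its own process, since global setup may only happen once per process:

    # Process A (shared_config is a placeholder persistence config).
    cinderlib.setup(host='app1', persistence_config=shared_config)

    # Process B, pointing at the same persistence storage.
    cinderlib.setup(host='app2', persistence_config=shared_config)

    # Both define a backend named 'lvm'.  Their volumes get hosts
    # 'app1@lvm' and 'app2@lvm' respectively but, as the paragraph above
    # states, selection keys on the backend name alone, so each
    # application has access to the other's resources.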
@@ -107,8 +107,11 @@ Some of the fields we could be interested in are:
 - `snapshot_id`: ID of the source snapshot used to create the volume. This
   will be filled by *cinderlib*.
 
-- `host`: In *Cinder* used to store the *host@backend#pool* information, here
-  we can just keep some identification of the process that wrote this.
+- `host`: Used to store the backend name together with the name of the host
+  where cinderlib is running, as a string of the form *host@backend*.  This
+  is an optional parameter; passing it to `create_volume` overrides the
+  default value, and issues will arise if it doesn't contain correct
+  information.
 
 - `size`: Volume size in GBi.
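A hedged example of the override described above; the lvm backend is the placeholder from the earlier sketches, and a value missing the @ separator is exactly the "incorrect information" the warning refers to, since later lookups split on it:

    # Default: host composed from setup()'s host and the backend name.
    vol = lvm.create_volume(size=1)                  # host: 'my_app@lvm'

    # Override: the value must keep the host@backend form.
    vol = lvm.create_volume(size=1, host='other_app@lvm')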